// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Atmel AT91 Serial ports
 * Copyright (C) 2003 Rick Bronson
 *
 * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * DMA support added by Chip Coldwell.
 */
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/mm.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/ioctls.h>

#define PDC_BUFFER_SIZE		512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT		(3 * 10)	/* 3 bytes */

/* The minimum number of data the FIFOs should be able to contain */
#define ATMEL_MIN_FIFO_SIZE	8
/*
 * These two offsets are subtracted from the RX FIFO size to define the RTS
 * high and low thresholds
 */
#define ATMEL_RTS_HIGH_OFFSET	16
#define ATMEL_RTS_LOW_OFFSET	20

#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"
#include "atmel_serial.h"

static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);

#ifdef CONFIG_SERIAL_ATMEL_TTYAT

/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
 * should coexist with the 8250 driver, such as if we have an external 16C550
 * UART. */
#define SERIAL_ATMEL_MAJOR	204
#define MINOR_START		154
#define ATMEL_DEVICENAME	"ttyAT"

#else

/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
 * name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR	TTY_MAJOR
#define MINOR_START		64
#define ATMEL_DEVICENAME	"ttyS"

#endif

#define ATMEL_ISR_PASS_LIMIT	256

struct atmel_dma_buffer {
	unsigned char	*buf;
	dma_addr_t	dma_addr;
	unsigned int	dma_size;
	unsigned int	ofs;
};

struct atmel_uart_char {
	u16	status;
	u16	ch;
};

/*
 * Be careful, the real size of the ring buffer is
 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
 * DMA mode.
 */
#define ATMEL_SERIAL_RINGSIZE 1024

/*
 * at91: 6 USARTs and one DBGU port (SAM9260)
 * samx7: 3 USARTs and 5 UARTs
 */
#define ATMEL_MAX_UART		8

/*
 * We wrap our port structure around the generic uart_port.
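 *
 * A minimal usage sketch: given the struct uart_port passed in by the
 * serial core, the driver-private state is recovered with container_of()
 * on the embedded "uart" member (see to_atmel_uart_port() below).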
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	bool			use_dma_rx;	/* enable DMA receiver */
	bool			use_pdc_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	bool			use_dma_tx;	/* enable DMA transmitter */
	bool			use_pdc_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	spinlock_t		lock_tx;	/* port lock */
	spinlock_t		lock_rx;	/* port lock */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx;
	struct scatterlist	sg_tx;
	struct scatterlist	sg_rx;
	struct tasklet_struct	tasklet_rx;
	struct tasklet_struct	tasklet_tx;
	atomic_t		tasklet_shutdown;
	unsigned int		irq_status_prev;
	unsigned int		tx_len;

	struct circ_buf		rx_ring;

	struct mctrl_gpios	*gpios;
	u32			backup_mode;	/* MR saved during iso7816 operations */
	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
	unsigned int		tx_done_mask;
	u32			fifo_size;
	u32			rts_high;
	u32			rts_low;
	bool			ms_irq_enabled;
	u32			rtor;		/* address of receiver timeout register if it exists */
	bool			has_frac_baudrate;
	bool			has_hw_timer;
	struct timer_list	uart_timer;

	bool			tx_stopped;
	bool			suspended;
	unsigned int		pending;
	unsigned int		pending_status;
	spinlock_t		lock_suspended;

	bool			hd_start_rx;	/* can start RX during half-duplex operation */

	/* ISO7816 */
	unsigned int		fidi_min;
	unsigned int		fidi_max;

#ifdef CONFIG_PM
	struct {
		u32		cr;
		u32		mr;
		u32		imr;
		u32		brgr;
		u32		rtor;
		u32		ttgr;
		u32		fmr;
		u32		fimr;
	} cache;
#endif

	int (*prepare_rx)(struct uart_port *port);
	int (*prepare_tx)(struct uart_port *port);
	void (*schedule_rx)(struct uart_port *port);
	void (*schedule_tx)(struct uart_port *port);
	void (*release_rx)(struct uart_port *port);
	void (*release_tx)(struct uart_port *port);
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);

#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart-serial" },
	{ /* sentinel */ }
};
#endif

static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

static inline int atmel_uart_is_half_duplex(struct uart_port *port)
{
	return
((port->rs485.flags & SER_RS485_ENABLED) && 228 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 229 (port->iso7816.flags & SER_ISO7816_ENABLED); 230 } 231 232 #ifdef CONFIG_SERIAL_ATMEL_PDC 233 static bool atmel_use_pdc_rx(struct uart_port *port) 234 { 235 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 236 237 return atmel_port->use_pdc_rx; 238 } 239 240 static bool atmel_use_pdc_tx(struct uart_port *port) 241 { 242 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 243 244 return atmel_port->use_pdc_tx; 245 } 246 #else 247 static bool atmel_use_pdc_rx(struct uart_port *port) 248 { 249 return false; 250 } 251 252 static bool atmel_use_pdc_tx(struct uart_port *port) 253 { 254 return false; 255 } 256 #endif 257 258 static bool atmel_use_dma_tx(struct uart_port *port) 259 { 260 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 261 262 return atmel_port->use_dma_tx; 263 } 264 265 static bool atmel_use_dma_rx(struct uart_port *port) 266 { 267 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 268 269 return atmel_port->use_dma_rx; 270 } 271 272 static bool atmel_use_fifo(struct uart_port *port) 273 { 274 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 275 276 return atmel_port->fifo_size; 277 } 278 279 static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port, 280 struct tasklet_struct *t) 281 { 282 if (!atomic_read(&atmel_port->tasklet_shutdown)) 283 tasklet_schedule(t); 284 } 285 286 /* Enable or disable the rs485 support */ 287 static int atmel_config_rs485(struct uart_port *port, 288 struct serial_rs485 *rs485conf) 289 { 290 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 291 unsigned int mode; 292 293 /* Disable interrupts */ 294 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 295 296 mode = atmel_uart_readl(port, ATMEL_US_MR); 297 298 /* Resetting serial mode to RS232 (0x0) */ 299 mode &= ~ATMEL_US_USMODE; 300 301 port->rs485 = *rs485conf; 302 303 if (rs485conf->flags & SER_RS485_ENABLED) { 304 dev_dbg(port->dev, "Setting UART to RS485\n"); 305 if (port->rs485.flags & SER_RS485_RX_DURING_TX) 306 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 307 else 308 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 309 310 atmel_uart_writel(port, ATMEL_US_TTGR, 311 rs485conf->delay_rts_after_send); 312 mode |= ATMEL_US_USMODE_RS485; 313 } else { 314 dev_dbg(port->dev, "Setting UART to RS232\n"); 315 if (atmel_use_pdc_tx(port)) 316 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 317 ATMEL_US_TXBUFE; 318 else 319 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 320 } 321 atmel_uart_writel(port, ATMEL_US_MR, mode); 322 323 /* Enable interrupts */ 324 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 325 326 return 0; 327 } 328 329 static unsigned int atmel_calc_cd(struct uart_port *port, 330 struct serial_iso7816 *iso7816conf) 331 { 332 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 333 unsigned int cd; 334 u64 mck_rate; 335 336 mck_rate = (u64)clk_get_rate(atmel_port->clk); 337 do_div(mck_rate, iso7816conf->clk); 338 cd = mck_rate; 339 return cd; 340 } 341 342 static unsigned int atmel_calc_fidi(struct uart_port *port, 343 struct serial_iso7816 *iso7816conf) 344 { 345 u64 fidi = 0; 346 347 if (iso7816conf->sc_fi && iso7816conf->sc_di) { 348 fidi = (u64)iso7816conf->sc_fi; 349 do_div(fidi, iso7816conf->sc_di); 350 } 351 return (u32)fidi; 352 } 353 354 /* Enable or disable the iso7816 support */ 355 /* Called with interrupts disabled */ 356 static int atmel_config_iso7816(struct 
uart_port *port, 357 struct serial_iso7816 *iso7816conf) 358 { 359 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 360 unsigned int mode; 361 unsigned int cd, fidi; 362 int ret = 0; 363 364 /* Disable interrupts */ 365 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 366 367 mode = atmel_uart_readl(port, ATMEL_US_MR); 368 369 if (iso7816conf->flags & SER_ISO7816_ENABLED) { 370 mode &= ~ATMEL_US_USMODE; 371 372 if (iso7816conf->tg > 255) { 373 dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n"); 374 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 375 ret = -EINVAL; 376 goto err_out; 377 } 378 379 if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 380 == SER_ISO7816_T(0)) { 381 mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK; 382 } else if ((iso7816conf->flags & SER_ISO7816_T_PARAM) 383 == SER_ISO7816_T(1)) { 384 mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK; 385 } else { 386 dev_err(port->dev, "ISO7816: Type not supported\n"); 387 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 388 ret = -EINVAL; 389 goto err_out; 390 } 391 392 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR); 393 394 /* select mck clock, and output */ 395 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 396 /* set parity for normal/inverse mode + max iterations */ 397 mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3); 398 399 cd = atmel_calc_cd(port, iso7816conf); 400 fidi = atmel_calc_fidi(port, iso7816conf); 401 if (fidi == 0) { 402 dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n"); 403 } else if (fidi < atmel_port->fidi_min 404 || fidi > atmel_port->fidi_max) { 405 dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi); 406 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 407 ret = -EINVAL; 408 goto err_out; 409 } 410 411 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) { 412 /* port not yet in iso7816 mode: store configuration */ 413 atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR); 414 atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 415 } 416 417 atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg); 418 atmel_uart_writel(port, ATMEL_US_BRGR, cd); 419 atmel_uart_writel(port, ATMEL_US_FIDI, fidi); 420 421 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN); 422 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION; 423 } else { 424 dev_dbg(port->dev, "Setting UART back to RS232\n"); 425 /* back to last RS232 settings */ 426 mode = atmel_port->backup_mode; 427 memset(iso7816conf, 0, sizeof(struct serial_iso7816)); 428 atmel_uart_writel(port, ATMEL_US_TTGR, 0); 429 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr); 430 atmel_uart_writel(port, ATMEL_US_FIDI, 0x174); 431 432 if (atmel_use_pdc_tx(port)) 433 atmel_port->tx_done_mask = ATMEL_US_ENDTX | 434 ATMEL_US_TXBUFE; 435 else 436 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 437 } 438 439 port->iso7816 = *iso7816conf; 440 441 atmel_uart_writel(port, ATMEL_US_MR, mode); 442 443 err_out: 444 /* Enable interrupts */ 445 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 446 447 return ret; 448 } 449 450 /* 451 * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. 452 */ 453 static u_int atmel_tx_empty(struct uart_port *port) 454 { 455 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 456 457 if (atmel_port->tx_stopped) 458 return TIOCSER_TEMT; 459 return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ? 
460 TIOCSER_TEMT : 461 0; 462 } 463 464 /* 465 * Set state of the modem control output lines 466 */ 467 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) 468 { 469 unsigned int control = 0; 470 unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR); 471 unsigned int rts_paused, rts_ready; 472 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 473 474 /* override mode to RS485 if needed, otherwise keep the current mode */ 475 if (port->rs485.flags & SER_RS485_ENABLED) { 476 atmel_uart_writel(port, ATMEL_US_TTGR, 477 port->rs485.delay_rts_after_send); 478 mode &= ~ATMEL_US_USMODE; 479 mode |= ATMEL_US_USMODE_RS485; 480 } 481 482 /* set the RTS line state according to the mode */ 483 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 484 /* force RTS line to high level */ 485 rts_paused = ATMEL_US_RTSEN; 486 487 /* give the control of the RTS line back to the hardware */ 488 rts_ready = ATMEL_US_RTSDIS; 489 } else { 490 /* force RTS line to high level */ 491 rts_paused = ATMEL_US_RTSDIS; 492 493 /* force RTS line to low level */ 494 rts_ready = ATMEL_US_RTSEN; 495 } 496 497 if (mctrl & TIOCM_RTS) 498 control |= rts_ready; 499 else 500 control |= rts_paused; 501 502 if (mctrl & TIOCM_DTR) 503 control |= ATMEL_US_DTREN; 504 else 505 control |= ATMEL_US_DTRDIS; 506 507 atmel_uart_writel(port, ATMEL_US_CR, control); 508 509 mctrl_gpio_set(atmel_port->gpios, mctrl); 510 511 /* Local loopback mode? */ 512 mode &= ~ATMEL_US_CHMODE; 513 if (mctrl & TIOCM_LOOP) 514 mode |= ATMEL_US_CHMODE_LOC_LOOP; 515 else 516 mode |= ATMEL_US_CHMODE_NORMAL; 517 518 atmel_uart_writel(port, ATMEL_US_MR, mode); 519 } 520 521 /* 522 * Get state of the modem control input lines 523 */ 524 static u_int atmel_get_mctrl(struct uart_port *port) 525 { 526 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 527 unsigned int ret = 0, status; 528 529 status = atmel_uart_readl(port, ATMEL_US_CSR); 530 531 /* 532 * The control signals are active low. 533 */ 534 if (!(status & ATMEL_US_DCD)) 535 ret |= TIOCM_CD; 536 if (!(status & ATMEL_US_CTS)) 537 ret |= TIOCM_CTS; 538 if (!(status & ATMEL_US_DSR)) 539 ret |= TIOCM_DSR; 540 if (!(status & ATMEL_US_RI)) 541 ret |= TIOCM_RI; 542 543 return mctrl_gpio_get(atmel_port->gpios, &ret); 544 } 545 546 /* 547 * Stop transmitting. 548 */ 549 static void atmel_stop_tx(struct uart_port *port) 550 { 551 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 552 553 if (atmel_use_pdc_tx(port)) { 554 /* disable PDC transmit */ 555 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 556 } 557 558 /* 559 * Disable the transmitter. 560 * This is mandatory when DMA is used, otherwise the DMA buffer 561 * is fully transmitted. 562 */ 563 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS); 564 atmel_port->tx_stopped = true; 565 566 /* Disable interrupts */ 567 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 568 569 if (atmel_uart_is_half_duplex(port)) 570 if (!atomic_read(&atmel_port->tasklet_shutdown)) 571 atmel_start_rx(port); 572 573 } 574 575 /* 576 * Start transmitting. 577 */ 578 static void atmel_start_tx(struct uart_port *port) 579 { 580 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 581 582 if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) 583 & ATMEL_PDC_TXTEN)) 584 /* The transmitter is already running. 
Yes, we 585 really need this.*/ 586 return; 587 588 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 589 if (atmel_uart_is_half_duplex(port)) 590 atmel_stop_rx(port); 591 592 if (atmel_use_pdc_tx(port)) 593 /* re-enable PDC transmit */ 594 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 595 596 /* Enable interrupts */ 597 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 598 599 /* re-enable the transmitter */ 600 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 601 atmel_port->tx_stopped = false; 602 } 603 604 /* 605 * start receiving - port is in process of being opened. 606 */ 607 static void atmel_start_rx(struct uart_port *port) 608 { 609 /* reset status and receiver */ 610 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 611 612 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN); 613 614 if (atmel_use_pdc_rx(port)) { 615 /* enable PDC controller */ 616 atmel_uart_writel(port, ATMEL_US_IER, 617 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 618 port->read_status_mask); 619 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 620 } else { 621 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 622 } 623 } 624 625 /* 626 * Stop receiving - port is in process of being closed. 627 */ 628 static void atmel_stop_rx(struct uart_port *port) 629 { 630 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS); 631 632 if (atmel_use_pdc_rx(port)) { 633 /* disable PDC receive */ 634 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS); 635 atmel_uart_writel(port, ATMEL_US_IDR, 636 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT | 637 port->read_status_mask); 638 } else { 639 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY); 640 } 641 } 642 643 /* 644 * Enable modem status interrupts 645 */ 646 static void atmel_enable_ms(struct uart_port *port) 647 { 648 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 649 uint32_t ier = 0; 650 651 /* 652 * Interrupt should not be enabled twice 653 */ 654 if (atmel_port->ms_irq_enabled) 655 return; 656 657 atmel_port->ms_irq_enabled = true; 658 659 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 660 ier |= ATMEL_US_CTSIC; 661 662 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 663 ier |= ATMEL_US_DSRIC; 664 665 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 666 ier |= ATMEL_US_RIIC; 667 668 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 669 ier |= ATMEL_US_DCDIC; 670 671 atmel_uart_writel(port, ATMEL_US_IER, ier); 672 673 mctrl_gpio_enable_ms(atmel_port->gpios); 674 } 675 676 /* 677 * Disable modem status interrupts 678 */ 679 static void atmel_disable_ms(struct uart_port *port) 680 { 681 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 682 uint32_t idr = 0; 683 684 /* 685 * Interrupt should not be disabled twice 686 */ 687 if (!atmel_port->ms_irq_enabled) 688 return; 689 690 atmel_port->ms_irq_enabled = false; 691 692 mctrl_gpio_disable_ms(atmel_port->gpios); 693 694 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) 695 idr |= ATMEL_US_CTSIC; 696 697 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR)) 698 idr |= ATMEL_US_DSRIC; 699 700 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI)) 701 idr |= ATMEL_US_RIIC; 702 703 if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD)) 704 idr |= ATMEL_US_DCDIC; 705 706 atmel_uart_writel(port, ATMEL_US_IDR, idr); 707 } 708 709 /* 710 * Control the transmission of a break signal 711 */ 712 static void atmel_break_ctl(struct uart_port *port, int break_state) 713 { 714 if (break_state != 0) 
715 /* start break */ 716 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK); 717 else 718 /* stop break */ 719 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK); 720 } 721 722 /* 723 * Stores the incoming character in the ring buffer 724 */ 725 static void 726 atmel_buffer_rx_char(struct uart_port *port, unsigned int status, 727 unsigned int ch) 728 { 729 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 730 struct circ_buf *ring = &atmel_port->rx_ring; 731 struct atmel_uart_char *c; 732 733 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) 734 /* Buffer overflow, ignore char */ 735 return; 736 737 c = &((struct atmel_uart_char *)ring->buf)[ring->head]; 738 c->status = status; 739 c->ch = ch; 740 741 /* Make sure the character is stored before we update head. */ 742 smp_wmb(); 743 744 ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 745 } 746 747 /* 748 * Deal with parity, framing and overrun errors. 749 */ 750 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) 751 { 752 /* clear error */ 753 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 754 755 if (status & ATMEL_US_RXBRK) { 756 /* ignore side-effect */ 757 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 758 port->icount.brk++; 759 } 760 if (status & ATMEL_US_PARE) 761 port->icount.parity++; 762 if (status & ATMEL_US_FRAME) 763 port->icount.frame++; 764 if (status & ATMEL_US_OVRE) 765 port->icount.overrun++; 766 } 767 768 /* 769 * Characters received (called from interrupt handler) 770 */ 771 static void atmel_rx_chars(struct uart_port *port) 772 { 773 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 774 unsigned int status, ch; 775 776 status = atmel_uart_readl(port, ATMEL_US_CSR); 777 while (status & ATMEL_US_RXRDY) { 778 ch = atmel_uart_read_char(port); 779 780 /* 781 * note that the error handling code is 782 * out of the main execution path 783 */ 784 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 785 | ATMEL_US_OVRE | ATMEL_US_RXBRK) 786 || atmel_port->break_active)) { 787 788 /* clear error */ 789 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 790 791 if (status & ATMEL_US_RXBRK 792 && !atmel_port->break_active) { 793 atmel_port->break_active = 1; 794 atmel_uart_writel(port, ATMEL_US_IER, 795 ATMEL_US_RXBRK); 796 } else { 797 /* 798 * This is either the end-of-break 799 * condition or we've received at 800 * least one character without RXBRK 801 * being set. In both cases, the next 802 * RXBRK will indicate start-of-break. 
				 */
				atmel_uart_writel(port, ATMEL_US_IDR,
						  ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
		}

		atmel_buffer_rx_char(port, status, ch);
		status = atmel_uart_readl(port, ATMEL_US_CSR);
	}

	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}

/*
 * Transmit characters (called from tasklet with TXRDY interrupt
 * disabled)
 */
static void atmel_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (port->x_char &&
	    (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) {
		atmel_uart_write_char(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return;

	while (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY) {
		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit)) {
		/* we still have characters to transmit, so we should continue
		 * transmitting them when TX is ready, regardless of
		 * mode or duplexity
		 */
		atmel_port->tx_done_mask |= ATMEL_US_TXRDY;

		/* Enable interrupts */
		atmel_uart_writel(port, ATMEL_US_IER,
				  atmel_port->tx_done_mask);
	} else {
		if (atmel_uart_is_half_duplex(port))
			atmel_port->tx_done_mask &= ~ATMEL_US_TXRDY;
	}
}

static void atmel_complete_tx_dma(void *arg)
{
	struct atmel_uart_port *atmel_port = arg;
	struct uart_port *port = &atmel_port->uart;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	if (chan)
		dmaengine_terminate_all(chan);
	xmit->tail += atmel_port->tx_len;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += atmel_port->tx_len;

	spin_lock_irq(&atmel_port->lock_tx);
	async_tx_ack(atmel_port->desc_tx);
	atmel_port->cookie_tx = -EINVAL;
	atmel_port->desc_tx = NULL;
	spin_unlock_irq(&atmel_port->lock_tx);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	/*
	 * xmit is a circular buffer so, if we have just sent data from
	 * xmit->tail to the end of xmit->buf, now we have to transmit the
	 * remaining data from the beginning of xmit->buf to xmit->head.
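	 *
	 * A worked example with hypothetical numbers, assuming the usual
	 * 4096 byte UART_XMIT_SIZE: with tail == 4000 and head == 100, the
	 * completed transfer covered the 96 bytes 4000..4095, and the
	 * rescheduled TX tasklet then queues the remaining bytes 0..99.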
893 */ 894 if (!uart_circ_empty(xmit)) 895 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 896 else if (atmel_uart_is_half_duplex(port)) { 897 /* 898 * DMA done, re-enable TXEMPTY and signal that we can stop 899 * TX and start RX for RS485 900 */ 901 atmel_port->hd_start_rx = true; 902 atmel_uart_writel(port, ATMEL_US_IER, 903 atmel_port->tx_done_mask); 904 } 905 906 spin_unlock_irqrestore(&port->lock, flags); 907 } 908 909 static void atmel_release_tx_dma(struct uart_port *port) 910 { 911 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 912 struct dma_chan *chan = atmel_port->chan_tx; 913 914 if (chan) { 915 dmaengine_terminate_all(chan); 916 dma_release_channel(chan); 917 dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1, 918 DMA_TO_DEVICE); 919 } 920 921 atmel_port->desc_tx = NULL; 922 atmel_port->chan_tx = NULL; 923 atmel_port->cookie_tx = -EINVAL; 924 } 925 926 /* 927 * Called from tasklet with TXRDY interrupt is disabled. 928 */ 929 static void atmel_tx_dma(struct uart_port *port) 930 { 931 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 932 struct circ_buf *xmit = &port->state->xmit; 933 struct dma_chan *chan = atmel_port->chan_tx; 934 struct dma_async_tx_descriptor *desc; 935 struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx; 936 unsigned int tx_len, part1_len, part2_len, sg_len; 937 dma_addr_t phys_addr; 938 939 /* Make sure we have an idle channel */ 940 if (atmel_port->desc_tx != NULL) 941 return; 942 943 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 944 /* 945 * DMA is idle now. 946 * Port xmit buffer is already mapped, 947 * and it is one page... Just adjust 948 * offsets and lengths. Since it is a circular buffer, 949 * we have to transmit till the end, and then the rest. 950 * Take the port lock to get a 951 * consistent xmit buffer state. 
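		 *
		 * For example (hypothetical numbers), a 13 byte chunk in
		 * multi data (FIFO) mode is split below into part1_len == 12
		 * transferred with DWORD accesses and part2_len == 1
		 * transferred with a final BYTE access.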
952 */ 953 tx_len = CIRC_CNT_TO_END(xmit->head, 954 xmit->tail, 955 UART_XMIT_SIZE); 956 957 if (atmel_port->fifo_size) { 958 /* multi data mode */ 959 part1_len = (tx_len & ~0x3); /* DWORD access */ 960 part2_len = (tx_len & 0x3); /* BYTE access */ 961 } else { 962 /* single data (legacy) mode */ 963 part1_len = 0; 964 part2_len = tx_len; /* BYTE access only */ 965 } 966 967 sg_init_table(sgl, 2); 968 sg_len = 0; 969 phys_addr = sg_dma_address(sg_tx) + xmit->tail; 970 if (part1_len) { 971 sg = &sgl[sg_len++]; 972 sg_dma_address(sg) = phys_addr; 973 sg_dma_len(sg) = part1_len; 974 975 phys_addr += part1_len; 976 } 977 978 if (part2_len) { 979 sg = &sgl[sg_len++]; 980 sg_dma_address(sg) = phys_addr; 981 sg_dma_len(sg) = part2_len; 982 } 983 984 /* 985 * save tx_len so atmel_complete_tx_dma() will increase 986 * xmit->tail correctly 987 */ 988 atmel_port->tx_len = tx_len; 989 990 desc = dmaengine_prep_slave_sg(chan, 991 sgl, 992 sg_len, 993 DMA_MEM_TO_DEV, 994 DMA_PREP_INTERRUPT | 995 DMA_CTRL_ACK); 996 if (!desc) { 997 dev_err(port->dev, "Failed to send via dma!\n"); 998 return; 999 } 1000 1001 dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE); 1002 1003 atmel_port->desc_tx = desc; 1004 desc->callback = atmel_complete_tx_dma; 1005 desc->callback_param = atmel_port; 1006 atmel_port->cookie_tx = dmaengine_submit(desc); 1007 } 1008 1009 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1010 uart_write_wakeup(port); 1011 } 1012 1013 static int atmel_prepare_tx_dma(struct uart_port *port) 1014 { 1015 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1016 struct device *mfd_dev = port->dev->parent; 1017 dma_cap_mask_t mask; 1018 struct dma_slave_config config; 1019 int ret, nent; 1020 1021 dma_cap_zero(mask); 1022 dma_cap_set(DMA_SLAVE, mask); 1023 1024 atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx"); 1025 if (atmel_port->chan_tx == NULL) 1026 goto chan_err; 1027 dev_info(port->dev, "using %s for tx DMA transfers\n", 1028 dma_chan_name(atmel_port->chan_tx)); 1029 1030 spin_lock_init(&atmel_port->lock_tx); 1031 sg_init_table(&atmel_port->sg_tx, 1); 1032 /* UART circular tx buffer is an aligned page. */ 1033 BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); 1034 sg_set_page(&atmel_port->sg_tx, 1035 virt_to_page(port->state->xmit.buf), 1036 UART_XMIT_SIZE, 1037 offset_in_page(port->state->xmit.buf)); 1038 nent = dma_map_sg(port->dev, 1039 &atmel_port->sg_tx, 1040 1, 1041 DMA_TO_DEVICE); 1042 1043 if (!nent) { 1044 dev_dbg(port->dev, "need to release resource of dma\n"); 1045 goto chan_err; 1046 } else { 1047 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1048 sg_dma_len(&atmel_port->sg_tx), 1049 port->state->xmit.buf, 1050 &sg_dma_address(&atmel_port->sg_tx)); 1051 } 1052 1053 /* Configure the slave DMA */ 1054 memset(&config, 0, sizeof(config)); 1055 config.direction = DMA_MEM_TO_DEV; 1056 config.dst_addr_width = (atmel_port->fifo_size) ? 
1057 DMA_SLAVE_BUSWIDTH_4_BYTES : 1058 DMA_SLAVE_BUSWIDTH_1_BYTE; 1059 config.dst_addr = port->mapbase + ATMEL_US_THR; 1060 config.dst_maxburst = 1; 1061 1062 ret = dmaengine_slave_config(atmel_port->chan_tx, 1063 &config); 1064 if (ret) { 1065 dev_err(port->dev, "DMA tx slave configuration failed\n"); 1066 goto chan_err; 1067 } 1068 1069 return 0; 1070 1071 chan_err: 1072 dev_err(port->dev, "TX channel not available, switch to pio\n"); 1073 atmel_port->use_dma_tx = false; 1074 if (atmel_port->chan_tx) 1075 atmel_release_tx_dma(port); 1076 return -EINVAL; 1077 } 1078 1079 static void atmel_complete_rx_dma(void *arg) 1080 { 1081 struct uart_port *port = arg; 1082 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1083 1084 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1085 } 1086 1087 static void atmel_release_rx_dma(struct uart_port *port) 1088 { 1089 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1090 struct dma_chan *chan = atmel_port->chan_rx; 1091 1092 if (chan) { 1093 dmaengine_terminate_all(chan); 1094 dma_release_channel(chan); 1095 dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1, 1096 DMA_FROM_DEVICE); 1097 } 1098 1099 atmel_port->desc_rx = NULL; 1100 atmel_port->chan_rx = NULL; 1101 atmel_port->cookie_rx = -EINVAL; 1102 } 1103 1104 static void atmel_rx_from_dma(struct uart_port *port) 1105 { 1106 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1107 struct tty_port *tport = &port->state->port; 1108 struct circ_buf *ring = &atmel_port->rx_ring; 1109 struct dma_chan *chan = atmel_port->chan_rx; 1110 struct dma_tx_state state; 1111 enum dma_status dmastat; 1112 size_t count; 1113 1114 1115 /* Reset the UART timeout early so that we don't miss one */ 1116 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1117 dmastat = dmaengine_tx_status(chan, 1118 atmel_port->cookie_rx, 1119 &state); 1120 /* Restart a new tasklet if DMA status is error */ 1121 if (dmastat == DMA_ERROR) { 1122 dev_dbg(port->dev, "Get residue error, restart tasklet\n"); 1123 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT); 1124 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx); 1125 return; 1126 } 1127 1128 /* CPU claims ownership of RX DMA buffer */ 1129 dma_sync_sg_for_cpu(port->dev, 1130 &atmel_port->sg_rx, 1131 1, 1132 DMA_FROM_DEVICE); 1133 1134 /* 1135 * ring->head points to the end of data already written by the DMA. 1136 * ring->tail points to the beginning of data to be read by the 1137 * framework. 1138 * The current transfer size should not be larger than the dma buffer 1139 * length. 1140 */ 1141 ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue; 1142 BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx)); 1143 /* 1144 * At this point ring->head may point to the first byte right after the 1145 * last byte of the dma buffer: 1146 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx) 1147 * 1148 * However ring->tail must always points inside the dma buffer: 1149 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 1150 * 1151 * Since we use a ring buffer, we have to handle the case 1152 * where head is lower than tail. In such a case, we first read from 1153 * tail to the end of the buffer then reset tail. 
	 */
	if (ring->head < ring->tail) {
		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		ring->tail = 0;
		port->icount.rx += count;
	}

	/* Finally we read data from tail to head */
	if (ring->tail < ring->head) {
		count = ring->head - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		/* Wrap ring->head if needed */
		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
			ring->head = 0;
		ring->tail = ring->head;
		port->icount.rx += count;
	}

	/* USART retrieves ownership of RX DMA buffer */
	dma_sync_sg_for_device(port->dev,
			       &atmel_port->sg_rx,
			       1,
			       DMA_FROM_DEVICE);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}

static int atmel_prepare_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct device *mfd_dev = port->dev->parent;
	struct dma_async_tx_descriptor *desc;
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct circ_buf *ring;
	int ret, nent;

	ring = &atmel_port->rx_ring;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
	if (atmel_port->chan_rx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for rx DMA transfers\n",
		dma_chan_name(atmel_port->chan_rx));

	spin_lock_init(&atmel_port->lock_rx);
	sg_init_table(&atmel_port->sg_rx, 1);
	/* UART circular rx buffer is an aligned page.
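	 * The whole ring, sizeof(struct atmel_uart_char) *
	 * ATMEL_SERIAL_RINGSIZE bytes, is mapped below as a single
	 * scatterlist entry and used for the cyclic transfer.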
*/ 1216 BUG_ON(!PAGE_ALIGNED(ring->buf)); 1217 sg_set_page(&atmel_port->sg_rx, 1218 virt_to_page(ring->buf), 1219 sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE, 1220 offset_in_page(ring->buf)); 1221 nent = dma_map_sg(port->dev, 1222 &atmel_port->sg_rx, 1223 1, 1224 DMA_FROM_DEVICE); 1225 1226 if (!nent) { 1227 dev_dbg(port->dev, "need to release resource of dma\n"); 1228 goto chan_err; 1229 } else { 1230 dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, 1231 sg_dma_len(&atmel_port->sg_rx), 1232 ring->buf, 1233 &sg_dma_address(&atmel_port->sg_rx)); 1234 } 1235 1236 /* Configure the slave DMA */ 1237 memset(&config, 0, sizeof(config)); 1238 config.direction = DMA_DEV_TO_MEM; 1239 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1240 config.src_addr = port->mapbase + ATMEL_US_RHR; 1241 config.src_maxburst = 1; 1242 1243 ret = dmaengine_slave_config(atmel_port->chan_rx, 1244 &config); 1245 if (ret) { 1246 dev_err(port->dev, "DMA rx slave configuration failed\n"); 1247 goto chan_err; 1248 } 1249 /* 1250 * Prepare a cyclic dma transfer, assign 2 descriptors, 1251 * each one is half ring buffer size 1252 */ 1253 desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx, 1254 sg_dma_address(&atmel_port->sg_rx), 1255 sg_dma_len(&atmel_port->sg_rx), 1256 sg_dma_len(&atmel_port->sg_rx)/2, 1257 DMA_DEV_TO_MEM, 1258 DMA_PREP_INTERRUPT); 1259 if (!desc) { 1260 dev_err(port->dev, "Preparing DMA cyclic failed\n"); 1261 goto chan_err; 1262 } 1263 desc->callback = atmel_complete_rx_dma; 1264 desc->callback_param = port; 1265 atmel_port->desc_rx = desc; 1266 atmel_port->cookie_rx = dmaengine_submit(desc); 1267 1268 return 0; 1269 1270 chan_err: 1271 dev_err(port->dev, "RX channel not available, switch to pio\n"); 1272 atmel_port->use_dma_rx = false; 1273 if (atmel_port->chan_rx) 1274 atmel_release_rx_dma(port); 1275 return -EINVAL; 1276 } 1277 1278 static void atmel_uart_timer_callback(struct timer_list *t) 1279 { 1280 struct atmel_uart_port *atmel_port = from_timer(atmel_port, t, 1281 uart_timer); 1282 struct uart_port *port = &atmel_port->uart; 1283 1284 if (!atomic_read(&atmel_port->tasklet_shutdown)) { 1285 tasklet_schedule(&atmel_port->tasklet_rx); 1286 mod_timer(&atmel_port->uart_timer, 1287 jiffies + uart_poll_timeout(port)); 1288 } 1289 } 1290 1291 /* 1292 * receive interrupt handler. 1293 */ 1294 static void 1295 atmel_handle_receive(struct uart_port *port, unsigned int pending) 1296 { 1297 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1298 1299 if (atmel_use_pdc_rx(port)) { 1300 /* 1301 * PDC receive. Just schedule the tasklet and let it 1302 * figure out the details. 1303 * 1304 * TODO: We're not handling error flags correctly at 1305 * the moment. 1306 */ 1307 if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { 1308 atmel_uart_writel(port, ATMEL_US_IDR, 1309 (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)); 1310 atmel_tasklet_schedule(atmel_port, 1311 &atmel_port->tasklet_rx); 1312 } 1313 1314 if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | 1315 ATMEL_US_FRAME | ATMEL_US_PARE)) 1316 atmel_pdc_rxerr(port, pending); 1317 } 1318 1319 if (atmel_use_dma_rx(port)) { 1320 if (pending & ATMEL_US_TIMEOUT) { 1321 atmel_uart_writel(port, ATMEL_US_IDR, 1322 ATMEL_US_TIMEOUT); 1323 atmel_tasklet_schedule(atmel_port, 1324 &atmel_port->tasklet_rx); 1325 } 1326 } 1327 1328 /* Interrupt receive */ 1329 if (pending & ATMEL_US_RXRDY) 1330 atmel_rx_chars(port); 1331 else if (pending & ATMEL_US_RXBRK) { 1332 /* 1333 * End of break detected. 
If it came along with a 1334 * character, atmel_rx_chars will handle it. 1335 */ 1336 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 1337 atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK); 1338 atmel_port->break_active = 0; 1339 } 1340 } 1341 1342 /* 1343 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) 1344 */ 1345 static void 1346 atmel_handle_transmit(struct uart_port *port, unsigned int pending) 1347 { 1348 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1349 1350 if (pending & atmel_port->tx_done_mask) { 1351 atmel_uart_writel(port, ATMEL_US_IDR, 1352 atmel_port->tx_done_mask); 1353 1354 /* Start RX if flag was set and FIFO is empty */ 1355 if (atmel_port->hd_start_rx) { 1356 if (!(atmel_uart_readl(port, ATMEL_US_CSR) 1357 & ATMEL_US_TXEMPTY)) 1358 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n"); 1359 1360 atmel_port->hd_start_rx = false; 1361 atmel_start_rx(port); 1362 } 1363 1364 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1365 } 1366 } 1367 1368 /* 1369 * status flags interrupt handler. 1370 */ 1371 static void 1372 atmel_handle_status(struct uart_port *port, unsigned int pending, 1373 unsigned int status) 1374 { 1375 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1376 unsigned int status_change; 1377 1378 if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC 1379 | ATMEL_US_CTSIC)) { 1380 status_change = status ^ atmel_port->irq_status_prev; 1381 atmel_port->irq_status_prev = status; 1382 1383 if (status_change & (ATMEL_US_RI | ATMEL_US_DSR 1384 | ATMEL_US_DCD | ATMEL_US_CTS)) { 1385 /* TODO: All reads to CSR will clear these interrupts! */ 1386 if (status_change & ATMEL_US_RI) 1387 port->icount.rng++; 1388 if (status_change & ATMEL_US_DSR) 1389 port->icount.dsr++; 1390 if (status_change & ATMEL_US_DCD) 1391 uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); 1392 if (status_change & ATMEL_US_CTS) 1393 uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); 1394 1395 wake_up_interruptible(&port->state->port.delta_msr_wait); 1396 } 1397 } 1398 1399 if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION)) 1400 dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending); 1401 } 1402 1403 /* 1404 * Interrupt handler 1405 */ 1406 static irqreturn_t atmel_interrupt(int irq, void *dev_id) 1407 { 1408 struct uart_port *port = dev_id; 1409 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1410 unsigned int status, pending, mask, pass_counter = 0; 1411 1412 spin_lock(&atmel_port->lock_suspended); 1413 1414 do { 1415 status = atmel_uart_readl(port, ATMEL_US_CSR); 1416 mask = atmel_uart_readl(port, ATMEL_US_IMR); 1417 pending = status & mask; 1418 if (!pending) 1419 break; 1420 1421 if (atmel_port->suspended) { 1422 atmel_port->pending |= pending; 1423 atmel_port->pending_status = status; 1424 atmel_uart_writel(port, ATMEL_US_IDR, mask); 1425 pm_system_wakeup(); 1426 break; 1427 } 1428 1429 atmel_handle_receive(port, pending); 1430 atmel_handle_status(port, pending, status); 1431 atmel_handle_transmit(port, pending); 1432 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 1433 1434 spin_unlock(&atmel_port->lock_suspended); 1435 1436 return pass_counter ? 
IRQ_HANDLED : IRQ_NONE; 1437 } 1438 1439 static void atmel_release_tx_pdc(struct uart_port *port) 1440 { 1441 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1442 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1443 1444 dma_unmap_single(port->dev, 1445 pdc->dma_addr, 1446 pdc->dma_size, 1447 DMA_TO_DEVICE); 1448 } 1449 1450 /* 1451 * Called from tasklet with ENDTX and TXBUFE interrupts disabled. 1452 */ 1453 static void atmel_tx_pdc(struct uart_port *port) 1454 { 1455 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1456 struct circ_buf *xmit = &port->state->xmit; 1457 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1458 int count; 1459 1460 /* nothing left to transmit? */ 1461 if (atmel_uart_readl(port, ATMEL_PDC_TCR)) 1462 return; 1463 1464 xmit->tail += pdc->ofs; 1465 xmit->tail &= UART_XMIT_SIZE - 1; 1466 1467 port->icount.tx += pdc->ofs; 1468 pdc->ofs = 0; 1469 1470 /* more to transmit - setup next transfer */ 1471 1472 /* disable PDC transmit */ 1473 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 1474 1475 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { 1476 dma_sync_single_for_device(port->dev, 1477 pdc->dma_addr, 1478 pdc->dma_size, 1479 DMA_TO_DEVICE); 1480 1481 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); 1482 pdc->ofs = count; 1483 1484 atmel_uart_writel(port, ATMEL_PDC_TPR, 1485 pdc->dma_addr + xmit->tail); 1486 atmel_uart_writel(port, ATMEL_PDC_TCR, count); 1487 /* re-enable PDC transmit */ 1488 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 1489 /* Enable interrupts */ 1490 atmel_uart_writel(port, ATMEL_US_IER, 1491 atmel_port->tx_done_mask); 1492 } else { 1493 if (atmel_uart_is_half_duplex(port)) { 1494 /* DMA done, stop TX, start RX for RS485 */ 1495 atmel_start_rx(port); 1496 } 1497 } 1498 1499 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1500 uart_write_wakeup(port); 1501 } 1502 1503 static int atmel_prepare_tx_pdc(struct uart_port *port) 1504 { 1505 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1506 struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; 1507 struct circ_buf *xmit = &port->state->xmit; 1508 1509 pdc->buf = xmit->buf; 1510 pdc->dma_addr = dma_map_single(port->dev, 1511 pdc->buf, 1512 UART_XMIT_SIZE, 1513 DMA_TO_DEVICE); 1514 pdc->dma_size = UART_XMIT_SIZE; 1515 pdc->ofs = 0; 1516 1517 return 0; 1518 } 1519 1520 static void atmel_rx_from_ring(struct uart_port *port) 1521 { 1522 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1523 struct circ_buf *ring = &atmel_port->rx_ring; 1524 unsigned int flg; 1525 unsigned int status; 1526 1527 while (ring->head != ring->tail) { 1528 struct atmel_uart_char c; 1529 1530 /* Make sure c is loaded after head. 
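		 * This read barrier pairs with the smp_wmb() in
		 * atmel_buffer_rx_char(), which orders the character store
		 * before the head update.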
*/ 1531 smp_rmb(); 1532 1533 c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; 1534 1535 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); 1536 1537 port->icount.rx++; 1538 status = c.status; 1539 flg = TTY_NORMAL; 1540 1541 /* 1542 * note that the error handling code is 1543 * out of the main execution path 1544 */ 1545 if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME 1546 | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { 1547 if (status & ATMEL_US_RXBRK) { 1548 /* ignore side-effect */ 1549 status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); 1550 1551 port->icount.brk++; 1552 if (uart_handle_break(port)) 1553 continue; 1554 } 1555 if (status & ATMEL_US_PARE) 1556 port->icount.parity++; 1557 if (status & ATMEL_US_FRAME) 1558 port->icount.frame++; 1559 if (status & ATMEL_US_OVRE) 1560 port->icount.overrun++; 1561 1562 status &= port->read_status_mask; 1563 1564 if (status & ATMEL_US_RXBRK) 1565 flg = TTY_BREAK; 1566 else if (status & ATMEL_US_PARE) 1567 flg = TTY_PARITY; 1568 else if (status & ATMEL_US_FRAME) 1569 flg = TTY_FRAME; 1570 } 1571 1572 1573 if (uart_handle_sysrq_char(port, c.ch)) 1574 continue; 1575 1576 uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); 1577 } 1578 1579 /* 1580 * Drop the lock here since it might end up calling 1581 * uart_start(), which takes the lock. 1582 */ 1583 spin_unlock(&port->lock); 1584 tty_flip_buffer_push(&port->state->port); 1585 spin_lock(&port->lock); 1586 } 1587 1588 static void atmel_release_rx_pdc(struct uart_port *port) 1589 { 1590 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1591 int i; 1592 1593 for (i = 0; i < 2; i++) { 1594 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1595 1596 dma_unmap_single(port->dev, 1597 pdc->dma_addr, 1598 pdc->dma_size, 1599 DMA_FROM_DEVICE); 1600 kfree(pdc->buf); 1601 } 1602 } 1603 1604 static void atmel_rx_from_pdc(struct uart_port *port) 1605 { 1606 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1607 struct tty_port *tport = &port->state->port; 1608 struct atmel_dma_buffer *pdc; 1609 int rx_idx = atmel_port->pdc_rx_idx; 1610 unsigned int head; 1611 unsigned int tail; 1612 unsigned int count; 1613 1614 do { 1615 /* Reset the UART timeout early so that we don't miss one */ 1616 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1617 1618 pdc = &atmel_port->pdc_rx[rx_idx]; 1619 head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr; 1620 tail = pdc->ofs; 1621 1622 /* If the PDC has switched buffers, RPR won't contain 1623 * any address within the current buffer. Since head 1624 * is unsigned, we just need a one-way comparison to 1625 * find out. 1626 * 1627 * In this case, we just need to consume the entire 1628 * buffer and resubmit it for DMA. This will clear the 1629 * ENDRX bit as well, so that we can safely re-enable 1630 * all interrupts below. 1631 */ 1632 head = min(head, pdc->dma_size); 1633 1634 if (likely(head != tail)) { 1635 dma_sync_single_for_cpu(port->dev, pdc->dma_addr, 1636 pdc->dma_size, DMA_FROM_DEVICE); 1637 1638 /* 1639 * head will only wrap around when we recycle 1640 * the DMA buffer, and when that happens, we 1641 * explicitly set tail to 0. So head will 1642 * always be greater than tail. 
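			 *
			 * For example (hypothetical numbers), with
			 * PDC_BUFFER_SIZE == 512, ofs == 200 and RPR
			 * pointing 300 bytes into the buffer, head == 300
			 * and the 100 new bytes are pushed to the tty
			 * layer below.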
1643 */ 1644 count = head - tail; 1645 1646 tty_insert_flip_string(tport, pdc->buf + pdc->ofs, 1647 count); 1648 1649 dma_sync_single_for_device(port->dev, pdc->dma_addr, 1650 pdc->dma_size, DMA_FROM_DEVICE); 1651 1652 port->icount.rx += count; 1653 pdc->ofs = head; 1654 } 1655 1656 /* 1657 * If the current buffer is full, we need to check if 1658 * the next one contains any additional data. 1659 */ 1660 if (head >= pdc->dma_size) { 1661 pdc->ofs = 0; 1662 atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr); 1663 atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size); 1664 1665 rx_idx = !rx_idx; 1666 atmel_port->pdc_rx_idx = rx_idx; 1667 } 1668 } while (head >= pdc->dma_size); 1669 1670 /* 1671 * Drop the lock here since it might end up calling 1672 * uart_start(), which takes the lock. 1673 */ 1674 spin_unlock(&port->lock); 1675 tty_flip_buffer_push(tport); 1676 spin_lock(&port->lock); 1677 1678 atmel_uart_writel(port, ATMEL_US_IER, 1679 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1680 } 1681 1682 static int atmel_prepare_rx_pdc(struct uart_port *port) 1683 { 1684 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1685 int i; 1686 1687 for (i = 0; i < 2; i++) { 1688 struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; 1689 1690 pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); 1691 if (pdc->buf == NULL) { 1692 if (i != 0) { 1693 dma_unmap_single(port->dev, 1694 atmel_port->pdc_rx[0].dma_addr, 1695 PDC_BUFFER_SIZE, 1696 DMA_FROM_DEVICE); 1697 kfree(atmel_port->pdc_rx[0].buf); 1698 } 1699 atmel_port->use_pdc_rx = false; 1700 return -ENOMEM; 1701 } 1702 pdc->dma_addr = dma_map_single(port->dev, 1703 pdc->buf, 1704 PDC_BUFFER_SIZE, 1705 DMA_FROM_DEVICE); 1706 pdc->dma_size = PDC_BUFFER_SIZE; 1707 pdc->ofs = 0; 1708 } 1709 1710 atmel_port->pdc_rx_idx = 0; 1711 1712 atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr); 1713 atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE); 1714 1715 atmel_uart_writel(port, ATMEL_PDC_RNPR, 1716 atmel_port->pdc_rx[1].dma_addr); 1717 atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE); 1718 1719 return 0; 1720 } 1721 1722 /* 1723 * tasklet handling tty stuff outside the interrupt handler. 
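 * The RX and TX tasklets only dispatch to the schedule_rx()/schedule_tx()
 * hooks selected in atmel_set_ops(), taking the port lock that the
 * interrupt handler deliberately does not take.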
1724 */ 1725 static void atmel_tasklet_rx_func(unsigned long data) 1726 { 1727 struct uart_port *port = (struct uart_port *)data; 1728 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1729 1730 /* The interrupt handler does not take the lock */ 1731 spin_lock(&port->lock); 1732 atmel_port->schedule_rx(port); 1733 spin_unlock(&port->lock); 1734 } 1735 1736 static void atmel_tasklet_tx_func(unsigned long data) 1737 { 1738 struct uart_port *port = (struct uart_port *)data; 1739 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1740 1741 /* The interrupt handler does not take the lock */ 1742 spin_lock(&port->lock); 1743 atmel_port->schedule_tx(port); 1744 spin_unlock(&port->lock); 1745 } 1746 1747 static void atmel_init_property(struct atmel_uart_port *atmel_port, 1748 struct platform_device *pdev) 1749 { 1750 struct device_node *np = pdev->dev.of_node; 1751 1752 /* DMA/PDC usage specification */ 1753 if (of_property_read_bool(np, "atmel,use-dma-rx")) { 1754 if (of_property_read_bool(np, "dmas")) { 1755 atmel_port->use_dma_rx = true; 1756 atmel_port->use_pdc_rx = false; 1757 } else { 1758 atmel_port->use_dma_rx = false; 1759 atmel_port->use_pdc_rx = true; 1760 } 1761 } else { 1762 atmel_port->use_dma_rx = false; 1763 atmel_port->use_pdc_rx = false; 1764 } 1765 1766 if (of_property_read_bool(np, "atmel,use-dma-tx")) { 1767 if (of_property_read_bool(np, "dmas")) { 1768 atmel_port->use_dma_tx = true; 1769 atmel_port->use_pdc_tx = false; 1770 } else { 1771 atmel_port->use_dma_tx = false; 1772 atmel_port->use_pdc_tx = true; 1773 } 1774 } else { 1775 atmel_port->use_dma_tx = false; 1776 atmel_port->use_pdc_tx = false; 1777 } 1778 } 1779 1780 static void atmel_set_ops(struct uart_port *port) 1781 { 1782 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1783 1784 if (atmel_use_dma_rx(port)) { 1785 atmel_port->prepare_rx = &atmel_prepare_rx_dma; 1786 atmel_port->schedule_rx = &atmel_rx_from_dma; 1787 atmel_port->release_rx = &atmel_release_rx_dma; 1788 } else if (atmel_use_pdc_rx(port)) { 1789 atmel_port->prepare_rx = &atmel_prepare_rx_pdc; 1790 atmel_port->schedule_rx = &atmel_rx_from_pdc; 1791 atmel_port->release_rx = &atmel_release_rx_pdc; 1792 } else { 1793 atmel_port->prepare_rx = NULL; 1794 atmel_port->schedule_rx = &atmel_rx_from_ring; 1795 atmel_port->release_rx = NULL; 1796 } 1797 1798 if (atmel_use_dma_tx(port)) { 1799 atmel_port->prepare_tx = &atmel_prepare_tx_dma; 1800 atmel_port->schedule_tx = &atmel_tx_dma; 1801 atmel_port->release_tx = &atmel_release_tx_dma; 1802 } else if (atmel_use_pdc_tx(port)) { 1803 atmel_port->prepare_tx = &atmel_prepare_tx_pdc; 1804 atmel_port->schedule_tx = &atmel_tx_pdc; 1805 atmel_port->release_tx = &atmel_release_tx_pdc; 1806 } else { 1807 atmel_port->prepare_tx = NULL; 1808 atmel_port->schedule_tx = &atmel_tx_chars; 1809 atmel_port->release_tx = NULL; 1810 } 1811 } 1812 1813 /* 1814 * Get ip name usart or uart 1815 */ 1816 static void atmel_get_ip_name(struct uart_port *port) 1817 { 1818 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1819 int name = atmel_uart_readl(port, ATMEL_US_NAME); 1820 u32 version; 1821 u32 usart, dbgu_uart, new_uart; 1822 /* ASCII decoding for IP version */ 1823 usart = 0x55534152; /* USAR(T) */ 1824 dbgu_uart = 0x44424755; /* DBGU */ 1825 new_uart = 0x55415254; /* UART */ 1826 1827 /* 1828 * Only USART devices from at91sam9260 SOC implement fractional 1829 * baudrate. 
It is available for all asynchronous modes, with the 1830 * following restriction: the sampling clock's duty cycle is not 1831 * constant. 1832 */ 1833 atmel_port->has_frac_baudrate = false; 1834 atmel_port->has_hw_timer = false; 1835 1836 if (name == new_uart) { 1837 dev_dbg(port->dev, "Uart with hw timer"); 1838 atmel_port->has_hw_timer = true; 1839 atmel_port->rtor = ATMEL_UA_RTOR; 1840 } else if (name == usart) { 1841 dev_dbg(port->dev, "Usart\n"); 1842 atmel_port->has_frac_baudrate = true; 1843 atmel_port->has_hw_timer = true; 1844 atmel_port->rtor = ATMEL_US_RTOR; 1845 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1846 switch (version) { 1847 case 0x814: /* sama5d2 */ 1848 /* fall through */ 1849 case 0x701: /* sama5d4 */ 1850 atmel_port->fidi_min = 3; 1851 atmel_port->fidi_max = 65535; 1852 break; 1853 case 0x502: /* sam9x5, sama5d3 */ 1854 atmel_port->fidi_min = 3; 1855 atmel_port->fidi_max = 2047; 1856 break; 1857 default: 1858 atmel_port->fidi_min = 1; 1859 atmel_port->fidi_max = 2047; 1860 } 1861 } else if (name == dbgu_uart) { 1862 dev_dbg(port->dev, "Dbgu or uart without hw timer\n"); 1863 } else { 1864 /* fallback for older SoCs: use version field */ 1865 version = atmel_uart_readl(port, ATMEL_US_VERSION); 1866 switch (version) { 1867 case 0x302: 1868 case 0x10213: 1869 case 0x10302: 1870 dev_dbg(port->dev, "This version is usart\n"); 1871 atmel_port->has_frac_baudrate = true; 1872 atmel_port->has_hw_timer = true; 1873 atmel_port->rtor = ATMEL_US_RTOR; 1874 break; 1875 case 0x203: 1876 case 0x10202: 1877 dev_dbg(port->dev, "This version is uart\n"); 1878 break; 1879 default: 1880 dev_err(port->dev, "Not supported ip name nor version, set to uart\n"); 1881 } 1882 } 1883 } 1884 1885 /* 1886 * Perform initialization and enable port for reception 1887 */ 1888 static int atmel_startup(struct uart_port *port) 1889 { 1890 struct platform_device *pdev = to_platform_device(port->dev); 1891 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1892 int retval; 1893 1894 /* 1895 * Ensure that no interrupts are enabled otherwise when 1896 * request_irq() is called we could get stuck trying to 1897 * handle an unexpected interrupt 1898 */ 1899 atmel_uart_writel(port, ATMEL_US_IDR, -1); 1900 atmel_port->ms_irq_enabled = false; 1901 1902 /* 1903 * Allocate the IRQ 1904 */ 1905 retval = request_irq(port->irq, atmel_interrupt, 1906 IRQF_SHARED | IRQF_COND_SUSPEND, 1907 dev_name(&pdev->dev), port); 1908 if (retval) { 1909 dev_err(port->dev, "atmel_startup - Can't get irq\n"); 1910 return retval; 1911 } 1912 1913 atomic_set(&atmel_port->tasklet_shutdown, 0); 1914 tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func, 1915 (unsigned long)port); 1916 tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func, 1917 (unsigned long)port); 1918 1919 /* 1920 * Initialize DMA (if necessary) 1921 */ 1922 atmel_init_property(atmel_port, pdev); 1923 atmel_set_ops(port); 1924 1925 if (atmel_port->prepare_rx) { 1926 retval = atmel_port->prepare_rx(port); 1927 if (retval < 0) 1928 atmel_set_ops(port); 1929 } 1930 1931 if (atmel_port->prepare_tx) { 1932 retval = atmel_port->prepare_tx(port); 1933 if (retval < 0) 1934 atmel_set_ops(port); 1935 } 1936 1937 /* 1938 * Enable FIFO when available 1939 */ 1940 if (atmel_port->fifo_size) { 1941 unsigned int txrdym = ATMEL_US_ONE_DATA; 1942 unsigned int rxrdym = ATMEL_US_ONE_DATA; 1943 unsigned int fmr; 1944 1945 atmel_uart_writel(port, ATMEL_US_CR, 1946 ATMEL_US_FIFOEN | 1947 ATMEL_US_RXFCLR | 1948 ATMEL_US_TXFLCLR); 1949 1950 if 
(atmel_use_dma_tx(port)) 1951 txrdym = ATMEL_US_FOUR_DATA; 1952 1953 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); 1954 if (atmel_port->rts_high && 1955 atmel_port->rts_low) 1956 fmr |= ATMEL_US_FRTSC | 1957 ATMEL_US_RXFTHRES(atmel_port->rts_high) | 1958 ATMEL_US_RXFTHRES2(atmel_port->rts_low); 1959 1960 atmel_uart_writel(port, ATMEL_US_FMR, fmr); 1961 } 1962 1963 /* Save current CSR for comparison in atmel_tasklet_func() */ 1964 atmel_port->irq_status_prev = atmel_uart_readl(port, ATMEL_US_CSR); 1965 1966 /* 1967 * Finally, enable the serial port 1968 */ 1969 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1970 /* enable xmit & rcvr */ 1971 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 1972 atmel_port->tx_stopped = false; 1973 1974 timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0); 1975 1976 if (atmel_use_pdc_rx(port)) { 1977 /* set UART timeout */ 1978 if (!atmel_port->has_hw_timer) { 1979 mod_timer(&atmel_port->uart_timer, 1980 jiffies + uart_poll_timeout(port)); 1981 /* set USART timeout */ 1982 } else { 1983 atmel_uart_writel(port, atmel_port->rtor, 1984 PDC_RX_TIMEOUT); 1985 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 1986 1987 atmel_uart_writel(port, ATMEL_US_IER, 1988 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 1989 } 1990 /* enable PDC controller */ 1991 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 1992 } else if (atmel_use_dma_rx(port)) { 1993 /* set UART timeout */ 1994 if (!atmel_port->has_hw_timer) { 1995 mod_timer(&atmel_port->uart_timer, 1996 jiffies + uart_poll_timeout(port)); 1997 /* set USART timeout */ 1998 } else { 1999 atmel_uart_writel(port, atmel_port->rtor, 2000 PDC_RX_TIMEOUT); 2001 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 2002 2003 atmel_uart_writel(port, ATMEL_US_IER, 2004 ATMEL_US_TIMEOUT); 2005 } 2006 } else { 2007 /* enable receive only */ 2008 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 2009 } 2010 2011 return 0; 2012 } 2013 2014 /* 2015 * Flush any TX data submitted for DMA. Called when the TX circular 2016 * buffer is reset. 2017 */ 2018 static void atmel_flush_buffer(struct uart_port *port) 2019 { 2020 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2021 2022 if (atmel_use_pdc_tx(port)) { 2023 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 2024 atmel_port->pdc_tx.ofs = 0; 2025 } 2026 /* 2027 * in uart_flush_buffer(), the xmit circular buffer has just 2028 * been cleared, so we have to reset tx_len accordingly. 2029 */ 2030 atmel_port->tx_len = 0; 2031 } 2032 2033 /* 2034 * Disable the port 2035 */ 2036 static void atmel_shutdown(struct uart_port *port) 2037 { 2038 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2039 2040 /* Disable modem control lines interrupts */ 2041 atmel_disable_ms(port); 2042 2043 /* Disable interrupts at device level */ 2044 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2045 2046 /* Prevent spurious interrupts from scheduling the tasklet */ 2047 atomic_inc(&atmel_port->tasklet_shutdown); 2048 2049 /* 2050 * Prevent any tasklets being scheduled during 2051 * cleanup 2052 */ 2053 del_timer_sync(&atmel_port->uart_timer); 2054 2055 /* Make sure that no interrupt is on the fly */ 2056 synchronize_irq(port->irq); 2057 2058 /* 2059 * Clear out any scheduled tasklets before 2060 * we destroy the buffers 2061 */ 2062 tasklet_kill(&atmel_port->tasklet_rx); 2063 tasklet_kill(&atmel_port->tasklet_tx); 2064 2065 /* 2066 * Ensure everything is stopped and 2067 * disable port and break condition. 
2068 */ 2069 atmel_stop_rx(port); 2070 atmel_stop_tx(port); 2071 2072 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 2073 2074 /* 2075 * Shut-down the DMA. 2076 */ 2077 if (atmel_port->release_rx) 2078 atmel_port->release_rx(port); 2079 if (atmel_port->release_tx) 2080 atmel_port->release_tx(port); 2081 2082 /* 2083 * Reset ring buffer pointers 2084 */ 2085 atmel_port->rx_ring.head = 0; 2086 atmel_port->rx_ring.tail = 0; 2087 2088 /* 2089 * Free the interrupts 2090 */ 2091 free_irq(port->irq, port); 2092 2093 atmel_flush_buffer(port); 2094 } 2095 2096 /* 2097 * Power / Clock management. 2098 */ 2099 static void atmel_serial_pm(struct uart_port *port, unsigned int state, 2100 unsigned int oldstate) 2101 { 2102 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2103 2104 switch (state) { 2105 case 0: 2106 /* 2107 * Enable the peripheral clock for this serial port. 2108 * This is called on uart_open() or a resume event. 2109 */ 2110 clk_prepare_enable(atmel_port->clk); 2111 2112 /* re-enable interrupts if we disabled some on suspend */ 2113 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); 2114 break; 2115 case 3: 2116 /* Back up the interrupt mask and disable all interrupts */ 2117 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); 2118 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2119 2120 /* 2121 * Disable the peripheral clock for this serial port. 2122 * This is called on uart_close() or a suspend event. 2123 */ 2124 clk_disable_unprepare(atmel_port->clk); 2125 break; 2126 default: 2127 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); 2128 } 2129 } 2130 2131 /* 2132 * Change the port parameters 2133 */ 2134 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, 2135 struct ktermios *old) 2136 { 2137 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2138 unsigned long flags; 2139 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0; 2140 2141 /* save the current mode register */ 2142 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); 2143 2144 /* reset the mode, clock divisor, parity, stop bits and data size */ 2145 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | 2146 ATMEL_US_PAR | ATMEL_US_USMODE); 2147 2148 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); 2149 2150 /* byte size */ 2151 switch (termios->c_cflag & CSIZE) { 2152 case CS5: 2153 mode |= ATMEL_US_CHRL_5; 2154 break; 2155 case CS6: 2156 mode |= ATMEL_US_CHRL_6; 2157 break; 2158 case CS7: 2159 mode |= ATMEL_US_CHRL_7; 2160 break; 2161 default: 2162 mode |= ATMEL_US_CHRL_8; 2163 break; 2164 } 2165 2166 /* stop bits */ 2167 if (termios->c_cflag & CSTOPB) 2168 mode |= ATMEL_US_NBSTOP_2; 2169 2170 /* parity */ 2171 if (termios->c_cflag & PARENB) { 2172 /* Mark or Space parity */ 2173 if (termios->c_cflag & CMSPAR) { 2174 if (termios->c_cflag & PARODD) 2175 mode |= ATMEL_US_PAR_MARK; 2176 else 2177 mode |= ATMEL_US_PAR_SPACE; 2178 } else if (termios->c_cflag & PARODD) 2179 mode |= ATMEL_US_PAR_ODD; 2180 else 2181 mode |= ATMEL_US_PAR_EVEN; 2182 } else 2183 mode |= ATMEL_US_PAR_NONE; 2184 2185 spin_lock_irqsave(&port->lock, flags); 2186 2187 port->read_status_mask = ATMEL_US_OVRE; 2188 if (termios->c_iflag & INPCK) 2189 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2190 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2191 port->read_status_mask |= ATMEL_US_RXBRK; 2192 2193 if (atmel_use_pdc_rx(port)) 2194 /* need to enable error interrupts */ 2195 atmel_uart_writel(port, 
ATMEL_US_IER, port->read_status_mask); 2196 2197 /* 2198 * Characters to ignore 2199 */ 2200 port->ignore_status_mask = 0; 2201 if (termios->c_iflag & IGNPAR) 2202 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2203 if (termios->c_iflag & IGNBRK) { 2204 port->ignore_status_mask |= ATMEL_US_RXBRK; 2205 /* 2206 * If we're ignoring parity and break indicators, 2207 * ignore overruns too (for real raw support). 2208 */ 2209 if (termios->c_iflag & IGNPAR) 2210 port->ignore_status_mask |= ATMEL_US_OVRE; 2211 } 2212 /* TODO: Ignore all characters if CREAD is not set. */ 2213 2214 /* update the per-port timeout */ 2215 uart_update_timeout(port, termios->c_cflag, baud); 2216 2217 /* 2218 * save/disable interrupts. The tty layer will ensure that the 2219 * transmitter is empty if requested by the caller, so there's 2220 * no need to wait for it here. 2221 */ 2222 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2223 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2224 2225 /* disable receiver and transmitter */ 2226 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS); 2227 atmel_port->tx_stopped = true; 2228 2229 /* mode */ 2230 if (port->rs485.flags & SER_RS485_ENABLED) { 2231 atmel_uart_writel(port, ATMEL_US_TTGR, 2232 port->rs485.delay_rts_after_send); 2233 mode |= ATMEL_US_USMODE_RS485; 2234 } else if (port->iso7816.flags & SER_ISO7816_ENABLED) { 2235 atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg); 2236 /* select mck clock, and output */ 2237 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 2238 /* set max iterations */ 2239 mode |= ATMEL_US_MAX_ITER(3); 2240 if ((port->iso7816.flags & SER_ISO7816_T_PARAM) 2241 == SER_ISO7816_T(0)) 2242 mode |= ATMEL_US_USMODE_ISO7816_T0; 2243 else 2244 mode |= ATMEL_US_USMODE_ISO7816_T1; 2245 } else if (termios->c_cflag & CRTSCTS) { 2246 /* RS232 with hardware handshake (RTS/CTS) */ 2247 if (atmel_use_fifo(port) && 2248 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { 2249 /* 2250 * with ATMEL_US_USMODE_HWHS set, the controller will 2251 * be able to drive the RTS pin high/low when the RX 2252 * FIFO is above RXFTHRES/below RXFTHRES2. 2253 * It will also disable the transmitter when the CTS 2254 * pin is high. 2255 * This mode is not activated if the CTS pin is a GPIO 2256 * because in this case, the transmitter is always 2257 * disabled (there must be an internal pull-up 2258 * responsible for this behaviour). 2259 * If the RTS pin is a GPIO, the controller won't be 2260 * able to drive it according to the FIFO thresholds, 2261 * but it will be handled by the driver. 2262 */ 2263 mode |= ATMEL_US_USMODE_HWHS; 2264 } else { 2265 /* 2266 * For platforms without FIFO, the flow control is 2267 * handled by the driver. 2268 */ 2269 mode |= ATMEL_US_USMODE_NORMAL; 2270 } 2271 } else { 2272 /* RS232 without hardware handshake */ 2273 mode |= ATMEL_US_USMODE_NORMAL; 2274 } 2275 2276 /* 2277 * Set the baud rate: 2278 * Fractional baudrate allows the output frequency to be set up more 2279 * accurately. This feature is enabled only when using normal mode.
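 * For example (illustrative figures, not taken from any datasheet): with uartclk = 132 MHz and a requested 115200 baud, the code below computes div = DIV_ROUND_CLOSEST(132000000, 2 * 115200) = 573, hence CD = 573 >> 3 = 71 and FP = 5, giving an actual rate of roughly 115183 baud.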
2280 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8)) 2281 * Currently, OVER is always set to 0 so we get 2282 * baudrate = selected clock / (16 * (CD + FP / 8)) 2283 * then 2284 * 8 CD + FP = selected clock / (2 * baudrate) 2285 */ 2286 if (atmel_port->has_frac_baudrate) { 2287 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2); 2288 cd = div >> 3; 2289 fp = div & ATMEL_US_FP_MASK; 2290 } else { 2291 cd = uart_get_divisor(port, baud); 2292 } 2293 2294 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */ 2295 cd /= 8; 2296 mode |= ATMEL_US_USCLKS_MCK_DIV8; 2297 } 2298 quot = cd | fp << ATMEL_US_FP_OFFSET; 2299 2300 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) 2301 atmel_uart_writel(port, ATMEL_US_BRGR, quot); 2302 2303 /* set the mode, clock divisor, parity, stop bits and data size */ 2304 atmel_uart_writel(port, ATMEL_US_MR, mode); 2305 2306 /* 2307 * when switching the mode, set the RTS line state according to the 2308 * new mode, otherwise keep the former state 2309 */ 2310 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { 2311 unsigned int rts_state; 2312 2313 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 2314 /* let the hardware control the RTS line */ 2315 rts_state = ATMEL_US_RTSDIS; 2316 } else { 2317 /* force RTS line to low level */ 2318 rts_state = ATMEL_US_RTSEN; 2319 } 2320 2321 atmel_uart_writel(port, ATMEL_US_CR, rts_state); 2322 } 2323 2324 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2325 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2326 atmel_port->tx_stopped = false; 2327 2328 /* restore interrupts */ 2329 atmel_uart_writel(port, ATMEL_US_IER, imr); 2330 2331 /* CTS flow-control and modem-status interrupts */ 2332 if (UART_ENABLE_MS(port, termios->c_cflag)) 2333 atmel_enable_ms(port); 2334 else 2335 atmel_disable_ms(port); 2336 2337 spin_unlock_irqrestore(&port->lock, flags); 2338 } 2339 2340 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios) 2341 { 2342 if (termios->c_line == N_PPS) { 2343 port->flags |= UPF_HARDPPS_CD; 2344 spin_lock_irq(&port->lock); 2345 atmel_enable_ms(port); 2346 spin_unlock_irq(&port->lock); 2347 } else { 2348 port->flags &= ~UPF_HARDPPS_CD; 2349 if (!UART_ENABLE_MS(port, termios->c_cflag)) { 2350 spin_lock_irq(&port->lock); 2351 atmel_disable_ms(port); 2352 spin_unlock_irq(&port->lock); 2353 } 2354 } 2355 } 2356 2357 /* 2358 * Return string describing the specified port 2359 */ 2360 static const char *atmel_type(struct uart_port *port) 2361 { 2362 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; 2363 } 2364 2365 /* 2366 * Release the memory region(s) being used by 'port'. 2367 */ 2368 static void atmel_release_port(struct uart_port *port) 2369 { 2370 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2371 int size = resource_size(mpdev->resource); 2372 2373 release_mem_region(port->mapbase, size); 2374 2375 if (port->flags & UPF_IOREMAP) { 2376 iounmap(port->membase); 2377 port->membase = NULL; 2378 } 2379 } 2380 2381 /* 2382 * Request the memory region(s) being used by 'port'. 
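 * (The size comes from the parent MFD device's first memory resource, and the registers are ioremapped only when UPF_IOREMAP is set.)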
2383 */ 2384 static int atmel_request_port(struct uart_port *port) 2385 { 2386 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2387 int size = resource_size(mpdev->resource); 2388 2389 if (!request_mem_region(port->mapbase, size, "atmel_serial")) 2390 return -EBUSY; 2391 2392 if (port->flags & UPF_IOREMAP) { 2393 port->membase = ioremap(port->mapbase, size); 2394 if (port->membase == NULL) { 2395 release_mem_region(port->mapbase, size); 2396 return -ENOMEM; 2397 } 2398 } 2399 2400 return 0; 2401 } 2402 2403 /* 2404 * Configure/autoconfigure the port. 2405 */ 2406 static void atmel_config_port(struct uart_port *port, int flags) 2407 { 2408 if (flags & UART_CONFIG_TYPE) { 2409 port->type = PORT_ATMEL; 2410 atmel_request_port(port); 2411 } 2412 } 2413 2414 /* 2415 * Verify the new serial_struct (for TIOCSSERIAL). 2416 */ 2417 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) 2418 { 2419 int ret = 0; 2420 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) 2421 ret = -EINVAL; 2422 if (port->irq != ser->irq) 2423 ret = -EINVAL; 2424 if (ser->io_type != SERIAL_IO_MEM) 2425 ret = -EINVAL; 2426 if (port->uartclk / 16 != ser->baud_base) 2427 ret = -EINVAL; 2428 if (port->mapbase != (unsigned long)ser->iomem_base) 2429 ret = -EINVAL; 2430 if (port->iobase != ser->port) 2431 ret = -EINVAL; 2432 if (ser->hub6 != 0) 2433 ret = -EINVAL; 2434 return ret; 2435 } 2436 2437 #ifdef CONFIG_CONSOLE_POLL 2438 static int atmel_poll_get_char(struct uart_port *port) 2439 { 2440 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) 2441 cpu_relax(); 2442 2443 return atmel_uart_read_char(port); 2444 } 2445 2446 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) 2447 { 2448 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2449 cpu_relax(); 2450 2451 atmel_uart_write_char(port, ch); 2452 } 2453 #endif 2454 2455 static const struct uart_ops atmel_pops = { 2456 .tx_empty = atmel_tx_empty, 2457 .set_mctrl = atmel_set_mctrl, 2458 .get_mctrl = atmel_get_mctrl, 2459 .stop_tx = atmel_stop_tx, 2460 .start_tx = atmel_start_tx, 2461 .stop_rx = atmel_stop_rx, 2462 .enable_ms = atmel_enable_ms, 2463 .break_ctl = atmel_break_ctl, 2464 .startup = atmel_startup, 2465 .shutdown = atmel_shutdown, 2466 .flush_buffer = atmel_flush_buffer, 2467 .set_termios = atmel_set_termios, 2468 .set_ldisc = atmel_set_ldisc, 2469 .type = atmel_type, 2470 .release_port = atmel_release_port, 2471 .request_port = atmel_request_port, 2472 .config_port = atmel_config_port, 2473 .verify_port = atmel_verify_port, 2474 .pm = atmel_serial_pm, 2475 #ifdef CONFIG_CONSOLE_POLL 2476 .poll_get_char = atmel_poll_get_char, 2477 .poll_put_char = atmel_poll_put_char, 2478 #endif 2479 }; 2480 2481 /* 2482 * Configure the port from the platform device resource info. 
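 * (Both resources belong to the parent at91-usart MFD device: resource[0] provides the register window, resource[1] the interrupt. The "usart" peripheral clock is acquired here unless the console code configured it earlier.)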
2483 */ 2484 static int atmel_init_port(struct atmel_uart_port *atmel_port, 2485 struct platform_device *pdev) 2486 { 2487 int ret; 2488 struct uart_port *port = &atmel_port->uart; 2489 struct platform_device *mpdev = to_platform_device(pdev->dev.parent); 2490 2491 atmel_init_property(atmel_port, pdev); 2492 atmel_set_ops(port); 2493 2494 port->iotype = UPIO_MEM; 2495 port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; 2496 port->ops = &atmel_pops; 2497 port->fifosize = 1; 2498 port->dev = &pdev->dev; 2499 port->mapbase = mpdev->resource[0].start; 2500 port->irq = mpdev->resource[1].start; 2501 port->rs485_config = atmel_config_rs485; 2502 port->iso7816_config = atmel_config_iso7816; 2503 port->membase = NULL; 2504 2505 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2506 2507 ret = uart_get_rs485_mode(port); 2508 if (ret) 2509 return ret; 2510 2511 /* for console, the clock could already be configured */ 2512 if (!atmel_port->clk) { 2513 atmel_port->clk = clk_get(&mpdev->dev, "usart"); 2514 if (IS_ERR(atmel_port->clk)) { 2515 ret = PTR_ERR(atmel_port->clk); 2516 atmel_port->clk = NULL; 2517 return ret; 2518 } 2519 ret = clk_prepare_enable(atmel_port->clk); 2520 if (ret) { 2521 clk_put(atmel_port->clk); 2522 atmel_port->clk = NULL; 2523 return ret; 2524 } 2525 port->uartclk = clk_get_rate(atmel_port->clk); 2526 clk_disable_unprepare(atmel_port->clk); 2527 /* only enable clock when USART is in use */ 2528 } 2529 2530 /* 2531 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or 2532 * ENDTX|TXBUFE 2533 */ 2534 if (atmel_uart_is_half_duplex(port)) 2535 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 2536 else if (atmel_use_pdc_tx(port)) { 2537 port->fifosize = PDC_BUFFER_SIZE; 2538 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; 2539 } else { 2540 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 2541 } 2542 2543 return 0; 2544 } 2545 2546 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2547 static void atmel_console_putchar(struct uart_port *port, int ch) 2548 { 2549 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2550 cpu_relax(); 2551 atmel_uart_write_char(port, ch); 2552 } 2553 2554 /* 2555 * Interrupts are disabled on entering 2556 */ 2557 static void atmel_console_write(struct console *co, const char *s, u_int count) 2558 { 2559 struct uart_port *port = &atmel_ports[co->index].uart; 2560 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2561 unsigned int status, imr; 2562 unsigned int pdc_tx; 2563 2564 /* 2565 * First, save IMR and then disable interrupts 2566 */ 2567 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2568 atmel_uart_writel(port, ATMEL_US_IDR, 2569 ATMEL_US_RXRDY | atmel_port->tx_done_mask); 2570 2571 /* Store PDC transmit status and disable it */ 2572 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2573 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2574 2575 /* Make sure that tx path is actually able to send characters */ 2576 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 2577 atmel_port->tx_stopped = false; 2578 2579 uart_console_write(port, s, count, atmel_console_putchar); 2580 2581 /* 2582 * Finally, wait for transmitter to become empty 2583 * and restore IMR 2584 */ 2585 do { 2586 status = atmel_uart_readl(port, ATMEL_US_CSR); 2587 } while (!(status & ATMEL_US_TXRDY)); 2588 2589 /* Restore PDC transmit status */ 2590 if (pdc_tx) 2591 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 2592 2593 /* set interrupts back the way they were */ 2594 atmel_uart_writel(port, ATMEL_US_IER, imr); 
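/* Note: the busy-wait above polls ATMEL_US_TXRDY (room in the transmit holding register) rather than ATMEL_US_TXEMPTY, so the last character may still be shifting out when the saved interrupt mask is written back. */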
2595 } 2596 2597 /* 2598 * If the port was already initialised (eg, by a boot loader), 2599 * try to determine the current setup. 2600 */ 2601 static void __init atmel_console_get_options(struct uart_port *port, int *baud, 2602 int *parity, int *bits) 2603 { 2604 unsigned int mr, quot; 2605 2606 /* 2607 * If the baud rate generator isn't running, the port wasn't 2608 * initialized by the boot loader. 2609 */ 2610 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; 2611 if (!quot) 2612 return; 2613 2614 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; 2615 if (mr == ATMEL_US_CHRL_8) 2616 *bits = 8; 2617 else 2618 *bits = 7; 2619 2620 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; 2621 if (mr == ATMEL_US_PAR_EVEN) 2622 *parity = 'e'; 2623 else if (mr == ATMEL_US_PAR_ODD) 2624 *parity = 'o'; 2625 2626 /* 2627 * The serial core only rounds down when matching this to a 2628 * supported baud rate. Make sure we don't end up slightly 2629 * lower than one of those, as it would make us fall through 2630 * to a much lower baud rate than we really want. 2631 */ 2632 *baud = port->uartclk / (16 * (quot - 1)); 2633 } 2634 2635 static int __init atmel_console_setup(struct console *co, char *options) 2636 { 2637 int ret; 2638 struct uart_port *port = &atmel_ports[co->index].uart; 2639 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2640 int baud = 115200; 2641 int bits = 8; 2642 int parity = 'n'; 2643 int flow = 'n'; 2644 2645 if (port->membase == NULL) { 2646 /* Port not initialized yet - delay setup */ 2647 return -ENODEV; 2648 } 2649 2650 ret = clk_prepare_enable(atmel_ports[co->index].clk); 2651 if (ret) 2652 return ret; 2653 2654 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2655 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2656 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2657 atmel_port->tx_stopped = false; 2658 2659 if (options) 2660 uart_parse_options(options, &baud, &parity, &bits, &flow); 2661 else 2662 atmel_console_get_options(port, &baud, &parity, &bits); 2663 2664 return uart_set_options(port, co, baud, parity, bits, flow); 2665 } 2666 2667 static struct uart_driver atmel_uart; 2668 2669 static struct console atmel_console = { 2670 .name = ATMEL_DEVICENAME, 2671 .write = atmel_console_write, 2672 .device = uart_console_device, 2673 .setup = atmel_console_setup, 2674 .flags = CON_PRINTBUFFER, 2675 .index = -1, 2676 .data = &atmel_uart, 2677 }; 2678 2679 #define ATMEL_CONSOLE_DEVICE (&atmel_console) 2680 2681 #else 2682 #define ATMEL_CONSOLE_DEVICE NULL 2683 #endif 2684 2685 static struct uart_driver atmel_uart = { 2686 .owner = THIS_MODULE, 2687 .driver_name = "atmel_serial", 2688 .dev_name = ATMEL_DEVICENAME, 2689 .major = SERIAL_ATMEL_MAJOR, 2690 .minor = MINOR_START, 2691 .nr = ATMEL_MAX_UART, 2692 .cons = ATMEL_CONSOLE_DEVICE, 2693 }; 2694 2695 #ifdef CONFIG_PM 2696 static bool atmel_serial_clk_will_stop(void) 2697 { 2698 #ifdef CONFIG_ARCH_AT91 2699 return at91_suspend_entering_slow_clock(); 2700 #else 2701 return false; 2702 #endif 2703 } 2704 2705 static int atmel_serial_suspend(struct platform_device *pdev, 2706 pm_message_t state) 2707 { 2708 struct uart_port *port = platform_get_drvdata(pdev); 2709 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2710 2711 if (uart_console(port) && console_suspend_enabled) { 2712 /* Drain the TX shifter */ 2713 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & 2714 ATMEL_US_TXEMPTY)) 2715 cpu_relax(); 2716 } 2717 2718 if (uart_console(port) && 
!console_suspend_enabled) { 2719 /* Cache register values as we won't get a full shutdown/startup 2720 * cycle 2721 */ 2722 atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR); 2723 atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR); 2724 atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 2725 atmel_port->cache.rtor = atmel_uart_readl(port, 2726 atmel_port->rtor); 2727 atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR); 2728 atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR); 2729 atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR); 2730 } 2731 2732 /* we cannot wake up if we're running on slow clock */ 2733 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2734 if (atmel_serial_clk_will_stop()) { 2735 unsigned long flags; 2736 2737 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2738 atmel_port->suspended = true; 2739 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2740 device_set_wakeup_enable(&pdev->dev, 0); 2741 } 2742 2743 uart_suspend_port(&atmel_uart, port); 2744 2745 return 0; 2746 } 2747 2748 static int atmel_serial_resume(struct platform_device *pdev) 2749 { 2750 struct uart_port *port = platform_get_drvdata(pdev); 2751 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2752 unsigned long flags; 2753 2754 if (uart_console(port) && !console_suspend_enabled) { 2755 atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr); 2756 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr); 2757 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr); 2758 atmel_uart_writel(port, atmel_port->rtor, 2759 atmel_port->cache.rtor); 2760 atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr); 2761 2762 if (atmel_port->fifo_size) { 2763 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN | 2764 ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR); 2765 atmel_uart_writel(port, ATMEL_US_FMR, 2766 atmel_port->cache.fmr); 2767 atmel_uart_writel(port, ATMEL_US_FIER, 2768 atmel_port->cache.fimr); 2769 } 2770 atmel_start_rx(port); 2771 } 2772 2773 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2774 if (atmel_port->pending) { 2775 atmel_handle_receive(port, atmel_port->pending); 2776 atmel_handle_status(port, atmel_port->pending, 2777 atmel_port->pending_status); 2778 atmel_handle_transmit(port, atmel_port->pending); 2779 atmel_port->pending = 0; 2780 } 2781 atmel_port->suspended = false; 2782 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2783 2784 uart_resume_port(&atmel_uart, port); 2785 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2786 2787 return 0; 2788 } 2789 #else 2790 #define atmel_serial_suspend NULL 2791 #define atmel_serial_resume NULL 2792 #endif 2793 2794 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port, 2795 struct platform_device *pdev) 2796 { 2797 atmel_port->fifo_size = 0; 2798 atmel_port->rts_low = 0; 2799 atmel_port->rts_high = 0; 2800 2801 if (of_property_read_u32(pdev->dev.of_node, 2802 "atmel,fifo-size", 2803 &atmel_port->fifo_size)) 2804 return; 2805 2806 if (!atmel_port->fifo_size) 2807 return; 2808 2809 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) { 2810 atmel_port->fifo_size = 0; 2811 dev_err(&pdev->dev, "Invalid FIFO size\n"); 2812 return; 2813 } 2814 2815 /* 2816 * 0 <= rts_low <= rts_high <= fifo_size 2817 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend 2818 * to flush their internal TX FIFO, commonly up to 16 data, before 2819 * actually stopping to send new data.
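 * (That is, the RX FIFO must still have room for those in-flight data after RTS has been deasserted.)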
So we try to set the RTS High 2820 * Threshold to a reasonably high value respecting this 16 data 2821 * empirical rule when possible. 2822 */ 2823 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1, 2824 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET); 2825 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2, 2826 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET); 2827 2828 dev_info(&pdev->dev, "Using FIFO (%u data)\n", 2829 atmel_port->fifo_size); 2830 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n", 2831 atmel_port->rts_high); 2832 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n", 2833 atmel_port->rts_low); 2834 } 2835 2836 static int atmel_serial_probe(struct platform_device *pdev) 2837 { 2838 struct atmel_uart_port *atmel_port; 2839 struct device_node *np = pdev->dev.parent->of_node; 2840 void *data; 2841 int ret; 2842 bool rs485_enabled; 2843 2844 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); 2845 2846 /* 2847 * In device tree there is no node with "atmel,at91rm9200-usart-serial" 2848 * as compatible string. This driver is probed by at91-usart mfd driver 2849 * which is just a wrapper over the atmel_serial driver and 2850 * spi-at91-usart driver. All attributes needed by this driver are 2851 * found in of_node of parent. 2852 */ 2853 pdev->dev.of_node = np; 2854 2855 ret = of_alias_get_id(np, "serial"); 2856 if (ret < 0) 2857 /* port id not found in platform data nor device-tree aliases: 2858 * auto-enumerate it */ 2859 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); 2860 2861 if (ret >= ATMEL_MAX_UART) { 2862 ret = -ENODEV; 2863 goto err; 2864 } 2865 2866 if (test_and_set_bit(ret, atmel_ports_in_use)) { 2867 /* port already in use */ 2868 ret = -EBUSY; 2869 goto err; 2870 } 2871 2872 atmel_port = &atmel_ports[ret]; 2873 atmel_port->backup_imr = 0; 2874 atmel_port->uart.line = ret; 2875 atmel_port->uart.has_sysrq = IS_ENABLED(CONFIG_SERIAL_ATMEL_CONSOLE); 2876 atmel_serial_probe_fifos(atmel_port, pdev); 2877 2878 atomic_set(&atmel_port->tasklet_shutdown, 0); 2879 spin_lock_init(&atmel_port->lock_suspended); 2880 2881 ret = atmel_init_port(atmel_port, pdev); 2882 if (ret) 2883 goto err_clear_bit; 2884 2885 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0); 2886 if (IS_ERR(atmel_port->gpios)) { 2887 ret = PTR_ERR(atmel_port->gpios); 2888 goto err_clear_bit; 2889 } 2890 2891 if (!atmel_use_pdc_rx(&atmel_port->uart)) { 2892 ret = -ENOMEM; 2893 data = kmalloc_array(ATMEL_SERIAL_RINGSIZE, 2894 sizeof(struct atmel_uart_char), 2895 GFP_KERNEL); 2896 if (!data) 2897 goto err_alloc_ring; 2898 atmel_port->rx_ring.buf = data; 2899 } 2900 2901 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED; 2902 2903 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart); 2904 if (ret) 2905 goto err_add_port; 2906 2907 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2908 if (uart_console(&atmel_port->uart) 2909 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { 2910 /* 2911 * The serial core enabled the clock for us, so undo 2912 * the clk_prepare_enable() in atmel_console_setup() 2913 */ 2914 clk_disable_unprepare(atmel_port->clk); 2915 } 2916 #endif 2917 2918 device_init_wakeup(&pdev->dev, 1); 2919 platform_set_drvdata(pdev, atmel_port); 2920 2921 /* 2922 * The peripheral clock has been disabled by atmel_init_port(): 2923 * enable it before accessing I/O registers 2924 */ 2925 clk_prepare_enable(atmel_port->clk); 2926 2927 if (rs485_enabled) { 2928 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR, 2929 ATMEL_US_USMODE_NORMAL); 2930 
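/* drive the RTS line low; ATMEL_US_USMODE_RS485 itself is programmed later by atmel_set_termios() */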
atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR, 2931 ATMEL_US_RTSEN); 2932 } 2933 2934 /* 2935 * Get port name of usart or uart 2936 */ 2937 atmel_get_ip_name(&atmel_port->uart); 2938 2939 /* 2940 * The peripheral clock can now safely be disabled till the port 2941 * is used 2942 */ 2943 clk_disable_unprepare(atmel_port->clk); 2944 2945 return 0; 2946 2947 err_add_port: 2948 kfree(atmel_port->rx_ring.buf); 2949 atmel_port->rx_ring.buf = NULL; 2950 err_alloc_ring: 2951 if (!uart_console(&atmel_port->uart)) { 2952 clk_put(atmel_port->clk); 2953 atmel_port->clk = NULL; 2954 } 2955 err_clear_bit: 2956 clear_bit(atmel_port->uart.line, atmel_ports_in_use); 2957 err: 2958 return ret; 2959 } 2960 2961 /* 2962 * Even if the driver is not modular, it makes sense to be able to 2963 * unbind a device: there can be many bound devices, and there are 2964 * situations where dynamic binding and unbinding can be useful. 2965 * 2966 * For example, a connected device can require a specific firmware update 2967 * protocol that needs bitbanging on IO lines, but use the regular serial 2968 * port in the normal case. 2969 */ 2970 static int atmel_serial_remove(struct platform_device *pdev) 2971 { 2972 struct uart_port *port = platform_get_drvdata(pdev); 2973 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2974 int ret = 0; 2975 2976 tasklet_kill(&atmel_port->tasklet_rx); 2977 tasklet_kill(&atmel_port->tasklet_tx); 2978 2979 device_init_wakeup(&pdev->dev, 0); 2980 2981 ret = uart_remove_one_port(&atmel_uart, port); 2982 2983 kfree(atmel_port->rx_ring.buf); 2984 2985 /* "port" is allocated statically, so we shouldn't free it */ 2986 2987 clear_bit(port->line, atmel_ports_in_use); 2988 2989 clk_put(atmel_port->clk); 2990 atmel_port->clk = NULL; 2991 pdev->dev.of_node = NULL; 2992 2993 return ret; 2994 } 2995 2996 static struct platform_driver atmel_serial_driver = { 2997 .probe = atmel_serial_probe, 2998 .remove = atmel_serial_remove, 2999 .suspend = atmel_serial_suspend, 3000 .resume = atmel_serial_resume, 3001 .driver = { 3002 .name = "atmel_usart_serial", 3003 .of_match_table = of_match_ptr(atmel_serial_dt_ids), 3004 }, 3005 }; 3006 3007 static int __init atmel_serial_init(void) 3008 { 3009 int ret; 3010 3011 ret = uart_register_driver(&atmel_uart); 3012 if (ret) 3013 return ret; 3014 3015 ret = platform_driver_register(&atmel_serial_driver); 3016 if (ret) 3017 uart_unregister_driver(&atmel_uart); 3018 3019 return ret; 3020 } 3021 device_initcall(atmel_serial_init); 3022
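/*
 * Illustrative device tree fragment (a sketch only, not taken from any real
 * board file; node names, the unit address and the FIFO size are placeholder
 * assumptions) showing the properties this driver parses from its parent
 * at91-usart node:
 *
 *	aliases {
 *		serial1 = &usart1;
 *	};
 *
 *	usart1: serial@f8020000 {
 *		compatible = "atmel,at91sam9260-usart";
 *		atmel,use-dma-rx;
 *		atmel,use-dma-tx;
 *		atmel,fifo-size = <32>;
 *		(plus "dmas"/"dma-names" entries when DMA channels are wired up)
 *	};
 */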