// SPDX-License-Identifier: GPL-2.0+
/*
 *  Driver for Atmel AT91 Serial ports
 *  Copyright (C) 2003 Rick Bronson
 *
 *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  DMA support added by Chip Coldwell.
 */
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
#include <linux/uaccess.h>
#include <linux/platform_data/atmel.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/suspend.h>
#include <linux/mm.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/ioctls.h>

#define PDC_BUFFER_SIZE		512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */

/* The minimum number of data the FIFOs should be able to contain */
#define ATMEL_MIN_FIFO_SIZE	8
/*
 * These two offsets are subtracted from the RX FIFO size to define the RTS
 * high and low thresholds
 */
#define ATMEL_RTS_HIGH_OFFSET	16
#define ATMEL_RTS_LOW_OFFSET	20

#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/serial_core.h>

#include "serial_mctrl_gpio.h"
#include "atmel_serial.h"

static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);

#ifdef CONFIG_SERIAL_ATMEL_TTYAT

/* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
 * should coexist with the 8250 driver, such as if we have an external 16C550
 * UART. */
#define SERIAL_ATMEL_MAJOR	204
#define MINOR_START		154
#define ATMEL_DEVICENAME	"ttyAT"

#else

/* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
 * name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR	TTY_MAJOR
#define MINOR_START		64
#define ATMEL_DEVICENAME	"ttyS"

#endif

#define ATMEL_ISR_PASS_LIMIT	256

struct atmel_dma_buffer {
	unsigned char	*buf;
	dma_addr_t	dma_addr;
	unsigned int	dma_size;
	unsigned int	ofs;
};

struct atmel_uart_char {
	u16		status;
	u16		ch;
};

/*
 * Be careful, the real size of the ring buffer is
 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
 * can contain up to 1024 characters in PIO mode and up to 4096 characters in
 * DMA mode.
 */
#define ATMEL_SERIAL_RINGSIZE 1024

/*
 * at91: 6 USARTs and one DBGU port (SAM9260)
 * samx7: 3 USARTs and 5 UARTs
 */
#define ATMEL_MAX_UART		8

/*
 * We wrap our port structure around the generic uart_port.
 */
struct atmel_uart_port {
	struct uart_port	uart;		/* uart */
	struct clk		*clk;		/* uart clock */
	int			may_wakeup;	/* cached value of device_may_wakeup for times we need to disable it */
	u32			backup_imr;	/* IMR saved during suspend */
	int			break_active;	/* break being received */

	bool			use_dma_rx;	/* enable DMA receiver */
	bool			use_pdc_rx;	/* enable PDC receiver */
	short			pdc_rx_idx;	/* current PDC RX buffer */
	struct atmel_dma_buffer	pdc_rx[2];	/* PDC receiver */

	bool			use_dma_tx;	/* enable DMA transmitter */
	bool			use_pdc_tx;	/* enable PDC transmitter */
	struct atmel_dma_buffer	pdc_tx;		/* PDC transmitter */

	spinlock_t		lock_tx;	/* port lock */
	spinlock_t		lock_rx;	/* port lock */
	struct dma_chan		*chan_tx;
	struct dma_chan		*chan_rx;
	struct dma_async_tx_descriptor	*desc_tx;
	struct dma_async_tx_descriptor	*desc_rx;
	dma_cookie_t		cookie_tx;
	dma_cookie_t		cookie_rx;
	struct scatterlist	sg_tx;
	struct scatterlist	sg_rx;
	struct tasklet_struct	tasklet_rx;
	struct tasklet_struct	tasklet_tx;
	atomic_t		tasklet_shutdown;
	unsigned int		irq_status_prev;
	unsigned int		tx_len;

	struct circ_buf		rx_ring;

	struct mctrl_gpios	*gpios;
	u32			backup_mode;	/* MR saved during iso7816 operations */
	u32			backup_brgr;	/* BRGR saved during iso7816 operations */
	unsigned int		tx_done_mask;
	u32			fifo_size;
	u32			rts_high;
	u32			rts_low;
	bool			ms_irq_enabled;
	u32			rtor;		/* address of receiver timeout register if it exists */
	bool			has_frac_baudrate;
	bool			has_hw_timer;
	struct timer_list	uart_timer;

	bool			tx_stopped;
	bool			suspended;
	unsigned int		pending;
	unsigned int		pending_status;
	spinlock_t		lock_suspended;

	/* ISO7816 */
	unsigned int		fidi_min;
	unsigned int		fidi_max;

#ifdef CONFIG_PM
	struct {
		u32		cr;
		u32		mr;
		u32		imr;
		u32		brgr;
		u32		rtor;
		u32		ttgr;
		u32		fmr;
		u32		fimr;
	} cache;
#endif

	int (*prepare_rx)(struct uart_port *port);
	int (*prepare_tx)(struct uart_port *port);
	void (*schedule_rx)(struct uart_port *port);
	void (*schedule_tx)(struct uart_port *port);
	void (*release_rx)(struct uart_port *port);
	void (*release_tx)(struct uart_port *port);
};

static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);

#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif

#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-usart-serial" },
	{ /* sentinel */ }
};
#endif

static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	return container_of(uart, struct atmel_uart_port, uart);
}

static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
{
	return __raw_readl(port->membase + reg);
}

static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
{
	__raw_writel(value, port->membase + reg);
}

static inline u8 atmel_uart_read_char(struct uart_port *port)
{
	return __raw_readb(port->membase + ATMEL_US_RHR);
}

static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
{
	__raw_writeb(value, port->membase + ATMEL_US_THR);
}

#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_rx;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_pdc_tx;
}
#else
static bool atmel_use_pdc_rx(struct uart_port *port)
{
	return false;
}

static bool atmel_use_pdc_tx(struct uart_port *port)
{
	return false;
}
#endif

static bool atmel_use_dma_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_tx;
}

static bool atmel_use_dma_rx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->use_dma_rx;
}

static bool atmel_use_fifo(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	return atmel_port->fifo_size;
}

static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
				   struct tasklet_struct *t)
{
	if (!atomic_read(&atmel_port->tasklet_shutdown))
		tasklet_schedule(t);
}

static unsigned int atmel_get_lines_status(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ret = 0;

	status = atmel_uart_readl(port, ATMEL_US_CSR);

	mctrl_gpio_get(atmel_port->gpios, &ret);

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_CTS))) {
		if (ret & TIOCM_CTS)
			status &= ~ATMEL_US_CTS;
		else
			status |= ATMEL_US_CTS;
	}

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_DSR))) {
		if (ret & TIOCM_DSR)
			status &= ~ATMEL_US_DSR;
		else
			status |= ATMEL_US_DSR;
	}

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_RI))) {
		if (ret & TIOCM_RI)
			status &= ~ATMEL_US_RI;
		else
			status |= ATMEL_US_RI;
	}

	if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(atmel_port->gpios,
						UART_GPIO_DCD))) {
		if (ret & TIOCM_CD)
			status &= ~ATMEL_US_DCD;
		else
			status |= ATMEL_US_DCD;
	}

	return status;
}

/* Enable or disable the rs485 support */
static int atmel_config_rs485(struct uart_port *port,
			      struct serial_rs485 *rs485conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	mode = atmel_uart_readl(port, ATMEL_US_MR);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	port->rs485 = *rs485conf;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  rs485conf->delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		if (atmel_use_pdc_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
				ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
	atmel_uart_writel(port, ATMEL_US_MR, mode);

	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	return 0;
}

static unsigned int atmel_calc_cd(struct uart_port *port,
				  struct serial_iso7816 *iso7816conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int cd;
	u64 mck_rate;

	mck_rate = (u64)clk_get_rate(atmel_port->clk);
	do_div(mck_rate, iso7816conf->clk);
	cd = mck_rate;
	return cd;
}

static unsigned int atmel_calc_fidi(struct uart_port *port,
				    struct serial_iso7816 *iso7816conf)
{
	u64 fidi = 0;

	if (iso7816conf->sc_fi && iso7816conf->sc_di) {
		fidi = (u64)iso7816conf->sc_fi;
		do_div(fidi, iso7816conf->sc_di);
	}
	return (u32)fidi;
}

/* Enable or disable the iso7816 support */
/* Called with interrupts disabled */
static int atmel_config_iso7816(struct uart_port *port,
				struct serial_iso7816 *iso7816conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;
	unsigned int cd, fidi;
	int ret = 0;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	mode = atmel_uart_readl(port, ATMEL_US_MR);

	if (iso7816conf->flags & SER_ISO7816_ENABLED) {
		mode &= ~ATMEL_US_USMODE;

		if (iso7816conf->tg > 255) {
			dev_err(port->dev, "ISO7816: Timeguard exceeding 255\n");
			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
			ret = -EINVAL;
			goto err_out;
		}

		if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
		    == SER_ISO7816_T(0)) {
			mode |= ATMEL_US_USMODE_ISO7816_T0 | ATMEL_US_DSNACK;
		} else if ((iso7816conf->flags & SER_ISO7816_T_PARAM)
			   == SER_ISO7816_T(1)) {
			mode |= ATMEL_US_USMODE_ISO7816_T1 | ATMEL_US_INACK;
		} else {
			dev_err(port->dev, "ISO7816: Type not supported\n");
			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
			ret = -EINVAL;
			goto err_out;
		}

		mode &= ~(ATMEL_US_USCLKS | ATMEL_US_NBSTOP | ATMEL_US_PAR);

		/* select mck clock, and output */
		mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO;
		/* set parity for normal/inverse mode + max iterations */
		mode |= ATMEL_US_PAR_EVEN | ATMEL_US_NBSTOP_1 | ATMEL_US_MAX_ITER(3);

		cd = atmel_calc_cd(port, iso7816conf);
		fidi = atmel_calc_fidi(port, iso7816conf);
		if (fidi == 0) {
			dev_warn(port->dev, "ISO7816 fidi = 0, Generator generates no signal\n");
		} else if (fidi < atmel_port->fidi_min
			   || fidi > atmel_port->fidi_max) {
			dev_err(port->dev, "ISO7816 fidi = %u, value not supported\n", fidi);
			memset(iso7816conf, 0, sizeof(struct serial_iso7816));
			ret = -EINVAL;
			goto err_out;
		}

		if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) {
			/* port not yet in iso7816 mode: store configuration */
			atmel_port->backup_mode = atmel_uart_readl(port, ATMEL_US_MR);
			atmel_port->backup_brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
		}

		atmel_uart_writel(port, ATMEL_US_TTGR, iso7816conf->tg);
		atmel_uart_writel(port, ATMEL_US_BRGR, cd);
		atmel_uart_writel(port, ATMEL_US_FIDI, fidi);

		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXEN);
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY | ATMEL_US_NACK | ATMEL_US_ITERATION;
	} else {
		dev_dbg(port->dev, "Setting UART back to RS232\n");
		/* back to last RS232 settings */
		mode = atmel_port->backup_mode;
		memset(iso7816conf, 0, sizeof(struct serial_iso7816));
		atmel_uart_writel(port, ATMEL_US_TTGR, 0);
		atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->backup_brgr);
		atmel_uart_writel(port, ATMEL_US_FIDI, 0x174);

		if (atmel_use_pdc_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
						   ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}

	port->iso7816 = *iso7816conf;

	atmel_uart_writel(port, ATMEL_US_MR, mode);

err_out:
	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	return ret;
}

/*
 * Return TIOCSER_TEMT when transmitter FIFO and Shift register are empty.
 */
static u_int atmel_tx_empty(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_port->tx_stopped)
		return TIOCSER_TEMT;
	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
		TIOCSER_TEMT :
		0;
}

/*
 * Set state of the modem control output lines
 */
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
	unsigned int control = 0;
	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
	unsigned int rts_paused, rts_ready;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* override mode to RS485 if needed, otherwise keep the current mode */
	if (port->rs485.flags & SER_RS485_ENABLED) {
		atmel_uart_writel(port, ATMEL_US_TTGR,
				  port->rs485.delay_rts_after_send);
		mode &= ~ATMEL_US_USMODE;
		mode |= ATMEL_US_USMODE_RS485;
	}

	/* set the RTS line state according to the mode */
	if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
		/* force RTS line to high level */
		rts_paused = ATMEL_US_RTSEN;

		/* give the control of the RTS line back to the hardware */
		rts_ready = ATMEL_US_RTSDIS;
	} else {
		/* force RTS line to high level */
		rts_paused = ATMEL_US_RTSDIS;

		/* force RTS line to low level */
		rts_ready = ATMEL_US_RTSEN;
	}

	if (mctrl & TIOCM_RTS)
		control |= rts_ready;
	else
		control |= rts_paused;

	if (mctrl & TIOCM_DTR)
		control |= ATMEL_US_DTREN;
	else
		control |= ATMEL_US_DTRDIS;

	atmel_uart_writel(port, ATMEL_US_CR, control);

	mctrl_gpio_set(atmel_port->gpios, mctrl);

	/* Local loopback mode? */
	mode &= ~ATMEL_US_CHMODE;
	if (mctrl & TIOCM_LOOP)
		mode |= ATMEL_US_CHMODE_LOC_LOOP;
	else
		mode |= ATMEL_US_CHMODE_NORMAL;

	atmel_uart_writel(port, ATMEL_US_MR, mode);
}

/*
 * Get state of the modem control input lines
 */
static u_int atmel_get_mctrl(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int ret = 0, status;

	status = atmel_uart_readl(port, ATMEL_US_CSR);

	/*
	 * The control signals are active low.
	 */
	if (!(status & ATMEL_US_DCD))
		ret |= TIOCM_CD;
	if (!(status & ATMEL_US_CTS))
		ret |= TIOCM_CTS;
	if (!(status & ATMEL_US_DSR))
		ret |= TIOCM_DSR;
	if (!(status & ATMEL_US_RI))
		ret |= TIOCM_RI;

	return mctrl_gpio_get(atmel_port->gpios, &ret);
}

/*
 * Stop transmitting.
 */
static void atmel_stop_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port)) {
		/* disable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
	}

	/*
	 * Disable the transmitter.
	 * This is mandatory when DMA is used, otherwise the DMA buffer
	 * is fully transmitted.
	 */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
	atmel_port->tx_stopped = true;

	/* Disable interrupts */
	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);

	if (((port->rs485.flags & SER_RS485_ENABLED) &&
	     !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
	    port->iso7816.flags & SER_ISO7816_ENABLED)
		atmel_start_rx(port);
}

/*
 * Start transmitting.
 */
static void atmel_start_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
				       & ATMEL_PDC_TXTEN))
		/* The transmitter is already running.  Yes, we
		   really need this.*/
		return;

	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
		if (((port->rs485.flags & SER_RS485_ENABLED) &&
		     !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
		    port->iso7816.flags & SER_ISO7816_ENABLED)
			atmel_stop_rx(port);

	if (atmel_use_pdc_tx(port))
		/* re-enable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);

	/* Enable interrupts */
	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

	/* re-enable the transmitter */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
	atmel_port->tx_stopped = false;
}

/*
 * start receiving - port is in process of being opened.
 */
static void atmel_start_rx(struct uart_port *port)
{
	/* reset status and receiver */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);

	if (atmel_use_pdc_rx(port)) {
		/* enable PDC controller */
		atmel_uart_writel(port, ATMEL_US_IER,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	} else {
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
	}
}
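
/*
 * Note: in half-duplex modes (RS485 without SER_RS485_RX_DURING_TX, and
 * ISO7816) the receiver is gated off while the transmitter owns the line:
 * atmel_start_tx() calls atmel_stop_rx() before a transfer, and the
 * TX-completion paths call atmel_start_rx() again.
 */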

/*
 * Stop receiving - port is in process of being closed.
 */
static void atmel_stop_rx(struct uart_port *port)
{
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);

	if (atmel_use_pdc_rx(port)) {
		/* disable PDC receive */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
		atmel_uart_writel(port, ATMEL_US_IDR,
				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
				  port->read_status_mask);
	} else {
		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
	}
}

/*
 * Enable modem status interrupts
 */
static void atmel_enable_ms(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uint32_t ier = 0;

	/*
	 * Interrupt should not be enabled twice
	 */
	if (atmel_port->ms_irq_enabled)
		return;

	atmel_port->ms_irq_enabled = true;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
		ier |= ATMEL_US_CTSIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
		ier |= ATMEL_US_DSRIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
		ier |= ATMEL_US_RIIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
		ier |= ATMEL_US_DCDIC;

	atmel_uart_writel(port, ATMEL_US_IER, ier);

	mctrl_gpio_enable_ms(atmel_port->gpios);
}

/*
 * Disable modem status interrupts
 */
static void atmel_disable_ms(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uint32_t idr = 0;

	/*
	 * Interrupt should not be disabled twice
	 */
	if (!atmel_port->ms_irq_enabled)
		return;

	atmel_port->ms_irq_enabled = false;

	mctrl_gpio_disable_ms(atmel_port->gpios);

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
		idr |= ATMEL_US_CTSIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DSR))
		idr |= ATMEL_US_DSRIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_RI))
		idr |= ATMEL_US_RIIC;

	if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_DCD))
		idr |= ATMEL_US_DCDIC;

	atmel_uart_writel(port, ATMEL_US_IDR, idr);
}

/*
 * Control the transmission of a break signal
 */
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
	if (break_state != 0)
		/* start break */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
	else
		/* stop break */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
}

/*
 * Stores the incoming character in the ring buffer
 */
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
		     unsigned int ch)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct atmel_uart_char *c;

	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
		/* Buffer overflow, ignore char */
		return;

	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
	c->status = status;
	c->ch = ch;

	/* Make sure the character is stored before we update head. */
	smp_wmb();

	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}
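
/*
 * Note: the smp_wmb() in atmel_buffer_rx_char() pairs with the smp_rmb() in
 * atmel_rx_from_ring(). The interrupt handler stores the character before
 * advancing ring->head, so a reader that observes the new head is guaranteed
 * to also see the stored character.
 */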

/*
 * Deal with parity, framing and overrun errors.
 */
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
	/* clear error */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

	if (status & ATMEL_US_RXBRK) {
		/* ignore side-effect */
		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
		port->icount.brk++;
	}
	if (status & ATMEL_US_PARE)
		port->icount.parity++;
	if (status & ATMEL_US_FRAME)
		port->icount.frame++;
	if (status & ATMEL_US_OVRE)
		port->icount.overrun++;
}

/*
 * Characters received (called from interrupt handler)
 */
static void atmel_rx_chars(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ch;

	status = atmel_uart_readl(port, ATMEL_US_CSR);
	while (status & ATMEL_US_RXRDY) {
		ch = atmel_uart_read_char(port);

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
			     || atmel_port->break_active)) {

			/* clear error */
			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);

			if (status & ATMEL_US_RXBRK
			    && !atmel_port->break_active) {
				atmel_port->break_active = 1;
				atmel_uart_writel(port, ATMEL_US_IER,
						  ATMEL_US_RXBRK);
			} else {
				/*
				 * This is either the end-of-break
				 * condition or we've received at
				 * least one character without RXBRK
				 * being set. In both cases, the next
				 * RXBRK will indicate start-of-break.
				 */
				atmel_uart_writel(port, ATMEL_US_IDR,
						  ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
		}

		atmel_buffer_rx_char(port, status, ch);
		status = atmel_uart_readl(port, ATMEL_US_CSR);
	}

	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}

/*
 * Transmit characters (called from tasklet with TXRDY interrupt
 * disabled)
 */
static void atmel_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (port->x_char &&
	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
		atmel_uart_write_char(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return;

	while (atmel_uart_readl(port, ATMEL_US_CSR) &
	       atmel_port->tx_done_mask) {
		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (!uart_circ_empty(xmit))
		/* Enable interrupts */
		atmel_uart_writel(port, ATMEL_US_IER,
				  atmel_port->tx_done_mask);
}

static void atmel_complete_tx_dma(void *arg)
{
	struct atmel_uart_port *atmel_port = arg;
	struct uart_port *port = &atmel_port->uart;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	if (chan)
		dmaengine_terminate_all(chan);
	xmit->tail += atmel_port->tx_len;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += atmel_port->tx_len;

	spin_lock_irq(&atmel_port->lock_tx);
	async_tx_ack(atmel_port->desc_tx);
	atmel_port->cookie_tx = -EINVAL;
	atmel_port->desc_tx = NULL;
	spin_unlock_irq(&atmel_port->lock_tx);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	/*
	 * xmit is a circular buffer so, if we have just sent data from
	 * xmit->tail to the end of xmit->buf, now we have to transmit the
	 * remaining data from the beginning of xmit->buf to xmit->head.
	 */
	if (!uart_circ_empty(xmit))
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
	else if (((port->rs485.flags & SER_RS485_ENABLED) &&
		  !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
		 port->iso7816.flags & SER_ISO7816_ENABLED) {
		/* DMA done, stop TX, start RX for RS485 */
		atmel_start_rx(port);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}

static void atmel_release_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_tx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
			     DMA_TO_DEVICE);
	}

	atmel_port->desc_tx = NULL;
	atmel_port->chan_tx = NULL;
	atmel_port->cookie_tx = -EINVAL;
}

/*
 * Called from tasklet with TXRDY interrupt disabled.
 */
static void atmel_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_chan *chan = atmel_port->chan_tx;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
	unsigned int tx_len, part1_len, part2_len, sg_len;
	dma_addr_t phys_addr;

	/* Make sure we have an idle channel */
	if (atmel_port->desc_tx != NULL)
		return;

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		/*
		 * DMA is idle now.
		 * Port xmit buffer is already mapped,
		 * and it is one page... Just adjust
		 * offsets and lengths. Since it is a circular buffer,
		 * we have to transmit till the end, and then the rest.
		 * Take the port lock to get a
		 * consistent xmit buffer state.
		 */
		tx_len = CIRC_CNT_TO_END(xmit->head,
					 xmit->tail,
					 UART_XMIT_SIZE);

		if (atmel_port->fifo_size) {
			/* multi data mode */
			part1_len = (tx_len & ~0x3); /* DWORD access */
			part2_len = (tx_len & 0x3); /* BYTE access */
		} else {
			/* single data (legacy) mode */
			part1_len = 0;
			part2_len = tx_len; /* BYTE access only */
		}

		sg_init_table(sgl, 2);
		sg_len = 0;
		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
		if (part1_len) {
			sg = &sgl[sg_len++];
			sg_dma_address(sg) = phys_addr;
			sg_dma_len(sg) = part1_len;

			phys_addr += part1_len;
		}

		if (part2_len) {
			sg = &sgl[sg_len++];
			sg_dma_address(sg) = phys_addr;
			sg_dma_len(sg) = part2_len;
		}

		/*
		 * save tx_len so atmel_complete_tx_dma() will increase
		 * xmit->tail correctly
		 */
		atmel_port->tx_len = tx_len;

		desc = dmaengine_prep_slave_sg(chan,
					       sgl,
					       sg_len,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT |
					       DMA_CTRL_ACK);
		if (!desc) {
			dev_err(port->dev, "Failed to send via dma!\n");
			return;
		}

		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);

		atmel_port->desc_tx = desc;
		desc->callback = atmel_complete_tx_dma;
		desc->callback_param = atmel_port;
		atmel_port->cookie_tx = dmaengine_submit(desc);
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}

static int atmel_prepare_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct device *mfd_dev = port->dev->parent;
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	int ret, nent;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
	if (atmel_port->chan_tx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for tx DMA transfers\n",
		 dma_chan_name(atmel_port->chan_tx));

	spin_lock_init(&atmel_port->lock_tx);
	sg_init_table(&atmel_port->sg_tx, 1);
	/* UART circular tx buffer is an aligned page. */
	BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf));
	sg_set_page(&atmel_port->sg_tx,
		    virt_to_page(port->state->xmit.buf),
		    UART_XMIT_SIZE,
		    offset_in_page(port->state->xmit.buf));
	nent = dma_map_sg(port->dev,
			  &atmel_port->sg_tx,
			  1,
			  DMA_TO_DEVICE);

	if (!nent) {
		dev_dbg(port->dev, "need to release resource of dma\n");
		goto chan_err;
	} else {
		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
			sg_dma_len(&atmel_port->sg_tx),
			port->state->xmit.buf,
			&sg_dma_address(&atmel_port->sg_tx));
	}

	/* Configure the slave DMA */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.dst_addr_width = (atmel_port->fifo_size) ?
				DMA_SLAVE_BUSWIDTH_4_BYTES :
				DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr = port->mapbase + ATMEL_US_THR;
	config.dst_maxburst = 1;

	ret = dmaengine_slave_config(atmel_port->chan_tx,
				     &config);
	if (ret) {
		dev_err(port->dev, "DMA tx slave configuration failed\n");
		goto chan_err;
	}

	return 0;

chan_err:
	dev_err(port->dev, "TX channel not available, switch to pio\n");
	atmel_port->use_dma_tx = 0;
	if (atmel_port->chan_tx)
		atmel_release_tx_dma(port);
	return -EINVAL;
}

static void atmel_complete_rx_dma(void *arg)
{
	struct uart_port *port = arg;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}

static void atmel_release_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct dma_chan *chan = atmel_port->chan_rx;

	if (chan) {
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
		dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
			     DMA_FROM_DEVICE);
	}

	atmel_port->desc_rx = NULL;
	atmel_port->chan_rx = NULL;
	atmel_port->cookie_rx = -EINVAL;
}

static void atmel_rx_from_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct dma_chan *chan = atmel_port->chan_rx;
	struct dma_tx_state state;
	enum dma_status dmastat;
	size_t count;

	/* Reset the UART timeout early so that we don't miss one */
	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
	dmastat = dmaengine_tx_status(chan,
				      atmel_port->cookie_rx,
				      &state);
	/* Restart a new tasklet if DMA status is error */
	if (dmastat == DMA_ERROR) {
		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
		return;
	}

	/* CPU claims ownership of RX DMA buffer */
	dma_sync_sg_for_cpu(port->dev,
			    &atmel_port->sg_rx,
			    1,
			    DMA_FROM_DEVICE);

	/*
	 * ring->head points to the end of data already written by the DMA.
	 * ring->tail points to the beginning of data to be read by the
	 * framework.
	 * The current transfer size should not be larger than the dma buffer
	 * length.
	 */
	ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
	BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
	/*
	 * At this point ring->head may point to the first byte right after the
	 * last byte of the dma buffer:
	 * 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
	 *
	 * However ring->tail must always point inside the dma buffer:
	 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
	 *
	 * Since we use a ring buffer, we have to handle the case
	 * where head is lower than tail. In such a case, we first read from
	 * tail to the end of the buffer then reset tail.
	 */
	if (ring->head < ring->tail) {
		count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		ring->tail = 0;
		port->icount.rx += count;
	}

	/* Finally we read data from tail to head */
	if (ring->tail < ring->head) {
		count = ring->head - ring->tail;

		tty_insert_flip_string(tport, ring->buf + ring->tail, count);
		/* Wrap ring->head if needed */
		if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
			ring->head = 0;
		ring->tail = ring->head;
		port->icount.rx += count;
	}

	/* USART retrieves ownership of RX DMA buffer */
	dma_sync_sg_for_device(port->dev,
			       &atmel_port->sg_rx,
			       1,
			       DMA_FROM_DEVICE);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}

static int atmel_prepare_rx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct device *mfd_dev = port->dev->parent;
	struct dma_async_tx_descriptor *desc;
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct circ_buf *ring;
	int ret, nent;

	ring = &atmel_port->rx_ring;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
	if (atmel_port->chan_rx == NULL)
		goto chan_err;
	dev_info(port->dev, "using %s for rx DMA transfers\n",
		 dma_chan_name(atmel_port->chan_rx));

	spin_lock_init(&atmel_port->lock_rx);
	sg_init_table(&atmel_port->sg_rx, 1);
	/* UART circular rx buffer is an aligned page. */
	BUG_ON(!PAGE_ALIGNED(ring->buf));
	sg_set_page(&atmel_port->sg_rx,
		    virt_to_page(ring->buf),
		    sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
		    offset_in_page(ring->buf));
	nent = dma_map_sg(port->dev,
			  &atmel_port->sg_rx,
			  1,
			  DMA_FROM_DEVICE);

	if (!nent) {
		dev_dbg(port->dev, "need to release resource of dma\n");
		goto chan_err;
	} else {
		dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
			sg_dma_len(&atmel_port->sg_rx),
			ring->buf,
			&sg_dma_address(&atmel_port->sg_rx));
	}

	/* Configure the slave DMA */
	memset(&config, 0, sizeof(config));
	config.direction = DMA_DEV_TO_MEM;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = port->mapbase + ATMEL_US_RHR;
	config.src_maxburst = 1;

	ret = dmaengine_slave_config(atmel_port->chan_rx,
				     &config);
	if (ret) {
		dev_err(port->dev, "DMA rx slave configuration failed\n");
		goto chan_err;
	}
	/*
	 * Prepare a cyclic dma transfer, assign 2 descriptors,
	 * each one is half ring buffer size
	 */
	desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
					 sg_dma_address(&atmel_port->sg_rx),
					 sg_dma_len(&atmel_port->sg_rx),
					 sg_dma_len(&atmel_port->sg_rx)/2,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	desc->callback = atmel_complete_rx_dma;
	desc->callback_param = port;
	atmel_port->desc_rx = desc;
	atmel_port->cookie_rx = dmaengine_submit(desc);

	return 0;

chan_err:
	dev_err(port->dev, "RX channel not available, switch to pio\n");
	atmel_port->use_dma_rx = 0;
	if (atmel_port->chan_rx)
		atmel_release_rx_dma(port);
	return -EINVAL;
}

static void atmel_uart_timer_callback(struct timer_list *t)
{
	struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
							uart_timer);
	struct uart_port *port = &atmel_port->uart;

	if (!atomic_read(&atmel_port->tasklet_shutdown)) {
		tasklet_schedule(&atmel_port->tasklet_rx);
		mod_timer(&atmel_port->uart_timer,
			  jiffies + uart_poll_timeout(port));
	}
}

/*
 * receive interrupt handler.
 */
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_pdc_rx(port)) {
		/*
		 * PDC receive. Just schedule the tasklet and let it
		 * figure out the details.
		 *
		 * TODO: We're not handling error flags correctly at
		 * the moment.
		 */
		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
			atmel_uart_writel(port, ATMEL_US_IDR,
					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
			atmel_tasklet_schedule(atmel_port,
					       &atmel_port->tasklet_rx);
		}

		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
			       ATMEL_US_FRAME | ATMEL_US_PARE))
			atmel_pdc_rxerr(port, pending);
	}

	if (atmel_use_dma_rx(port)) {
		if (pending & ATMEL_US_TIMEOUT) {
			atmel_uart_writel(port, ATMEL_US_IDR,
					  ATMEL_US_TIMEOUT);
			atmel_tasklet_schedule(atmel_port,
					       &atmel_port->tasklet_rx);
		}
	}

	/* Interrupt receive */
	if (pending & ATMEL_US_RXRDY)
		atmel_rx_chars(port);
	else if (pending & ATMEL_US_RXBRK) {
		/*
		 * End of break detected. If it came along with a
		 * character, atmel_rx_chars will handle it.
		 */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
		atmel_port->break_active = 0;
	}
}

/*
 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
 */
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (pending & atmel_port->tx_done_mask) {
		/* Either PDC or interrupt transmission */
		atmel_uart_writel(port, ATMEL_US_IDR,
				  atmel_port->tx_done_mask);
		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
	}
}

/*
 * status flags interrupt handler.
 */
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
		    unsigned int status)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status_change;

	if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
		       | ATMEL_US_CTSIC)) {
		status_change = status ^ atmel_port->irq_status_prev;
		atmel_port->irq_status_prev = status;

		if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
				     | ATMEL_US_DCD | ATMEL_US_CTS)) {
			/* TODO: All reads to CSR will clear these interrupts! */
			if (status_change & ATMEL_US_RI)
				port->icount.rng++;
			if (status_change & ATMEL_US_DSR)
				port->icount.dsr++;
			if (status_change & ATMEL_US_DCD)
				uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
			if (status_change & ATMEL_US_CTS)
				uart_handle_cts_change(port, !(status & ATMEL_US_CTS));

			wake_up_interruptible(&port->state->port.delta_msr_wait);
		}
	}

	if (pending & (ATMEL_US_NACK | ATMEL_US_ITERATION))
		dev_dbg(port->dev, "ISO7816 ERROR (0x%08x)\n", pending);
}

/*
 * Interrupt handler
 */
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, pending, mask, pass_counter = 0;

	spin_lock(&atmel_port->lock_suspended);

	do {
		status = atmel_get_lines_status(port);
		mask = atmel_uart_readl(port, ATMEL_US_IMR);
		pending = status & mask;
		if (!pending)
			break;

		if (atmel_port->suspended) {
			atmel_port->pending |= pending;
			atmel_port->pending_status = status;
			atmel_uart_writel(port, ATMEL_US_IDR, mask);
			pm_system_wakeup();
			break;
		}

		atmel_handle_receive(port, pending);
		atmel_handle_status(port, pending, status);
		atmel_handle_transmit(port, pending);
	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);

	spin_unlock(&atmel_port->lock_suspended);

	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}

static void atmel_release_tx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;

	dma_unmap_single(port->dev,
			 pdc->dma_addr,
			 pdc->dma_size,
			 DMA_TO_DEVICE);
}
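
/*
 * Note: atmel_prepare_tx_pdc() maps the whole UART_XMIT_SIZE circular buffer
 * once; atmel_tx_pdc() below only points TPR/TCR at the region pending
 * transmission. pdc->ofs records how many bytes were handed to the PDC so
 * the next pass can advance xmit->tail and the TX counters.
 */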

/*
 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
 */
static void atmel_tx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
	int count;

	/* nothing left to transmit? */
	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
		return;

	xmit->tail += pdc->ofs;
	xmit->tail &= UART_XMIT_SIZE - 1;

	port->icount.tx += pdc->ofs;
	pdc->ofs = 0;

	/* more to transmit - setup next transfer */

	/* disable PDC transmit */
	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		dma_sync_single_for_device(port->dev,
					   pdc->dma_addr,
					   pdc->dma_size,
					   DMA_TO_DEVICE);

		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		pdc->ofs = count;

		atmel_uart_writel(port, ATMEL_PDC_TPR,
				  pdc->dma_addr + xmit->tail);
		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
		/* re-enable PDC transmit */
		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
		/* Enable interrupts */
		atmel_uart_writel(port, ATMEL_US_IER,
				  atmel_port->tx_done_mask);
	} else {
		if (((port->rs485.flags & SER_RS485_ENABLED) &&
		     !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
		    port->iso7816.flags & SER_ISO7816_ENABLED) {
			/* DMA done, stop TX, start RX for RS485 */
			atmel_start_rx(port);
		}
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}

static int atmel_prepare_tx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
	struct circ_buf *xmit = &port->state->xmit;

	pdc->buf = xmit->buf;
	pdc->dma_addr = dma_map_single(port->dev,
				       pdc->buf,
				       UART_XMIT_SIZE,
				       DMA_TO_DEVICE);
	pdc->dma_size = UART_XMIT_SIZE;
	pdc->ofs = 0;

	return 0;
}

static void atmel_rx_from_ring(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	unsigned int flg;
	unsigned int status;

	while (ring->head != ring->tail) {
		struct atmel_uart_char c;

		/* Make sure c is loaded after head. */
		smp_rmb();

		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];

		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);

		port->icount.rx++;
		status = c.status;
		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
			if (status & ATMEL_US_RXBRK) {
				/* ignore side-effect */
				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);

				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}
			if (status & ATMEL_US_PARE)
				port->icount.parity++;
			if (status & ATMEL_US_FRAME)
				port->icount.frame++;
			if (status & ATMEL_US_OVRE)
				port->icount.overrun++;

			status &= port->read_status_mask;

			if (status & ATMEL_US_RXBRK)
				flg = TTY_BREAK;
			else if (status & ATMEL_US_PARE)
				flg = TTY_PARITY;
			else if (status & ATMEL_US_FRAME)
				flg = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, c.ch))
			continue;

		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
	}

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&port->state->port);
	spin_lock(&port->lock);
}

static void atmel_release_rx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int i;

	for (i = 0; i < 2; i++) {
		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];

		dma_unmap_single(port->dev,
				 pdc->dma_addr,
				 pdc->dma_size,
				 DMA_FROM_DEVICE);
		kfree(pdc->buf);
	}
}

static void atmel_rx_from_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct atmel_dma_buffer *pdc;
	int rx_idx = atmel_port->pdc_rx_idx;
	unsigned int head;
	unsigned int tail;
	unsigned int count;

	do {
		/* Reset the UART timeout early so that we don't miss one */
		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);

		pdc = &atmel_port->pdc_rx[rx_idx];
		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
		tail = pdc->ofs;

		/* If the PDC has switched buffers, RPR won't contain
		 * any address within the current buffer. Since head
		 * is unsigned, we just need a one-way comparison to
		 * find out.
		 *
		 * In this case, we just need to consume the entire
		 * buffer and resubmit it for DMA. This will clear the
		 * ENDRX bit as well, so that we can safely re-enable
		 * all interrupts below.
		 */
		head = min(head, pdc->dma_size);

		if (likely(head != tail)) {
			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
						pdc->dma_size, DMA_FROM_DEVICE);

			/*
			 * head will only wrap around when we recycle
			 * the DMA buffer, and when that happens, we
			 * explicitly set tail to 0. So head will
			 * always be greater than tail.
			 */
			count = head - tail;

			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
					       count);

			dma_sync_single_for_device(port->dev, pdc->dma_addr,
						   pdc->dma_size, DMA_FROM_DEVICE);

			port->icount.rx += count;
			pdc->ofs = head;
		}

		/*
		 * If the current buffer is full, we need to check if
		 * the next one contains any additional data.
		 */
		if (head >= pdc->dma_size) {
			pdc->ofs = 0;
			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);

			rx_idx = !rx_idx;
			atmel_port->pdc_rx_idx = rx_idx;
		}
	} while (head >= pdc->dma_size);

	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);

	atmel_uart_writel(port, ATMEL_US_IER,
			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}

static int atmel_prepare_rx_pdc(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int i;

	for (i = 0; i < 2; i++) {
		struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];

		pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
		if (pdc->buf == NULL) {
			if (i != 0) {
				dma_unmap_single(port->dev,
						 atmel_port->pdc_rx[0].dma_addr,
						 PDC_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				kfree(atmel_port->pdc_rx[0].buf);
			}
			atmel_port->use_pdc_rx = 0;
			return -ENOMEM;
		}
		pdc->dma_addr = dma_map_single(port->dev,
					       pdc->buf,
					       PDC_BUFFER_SIZE,
					       DMA_FROM_DEVICE);
		pdc->dma_size = PDC_BUFFER_SIZE;
		pdc->ofs = 0;
	}

	atmel_port->pdc_rx_idx = 0;

	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);

	atmel_uart_writel(port, ATMEL_PDC_RNPR,
			  atmel_port->pdc_rx[1].dma_addr);
	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);

	return 0;
}
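
/*
 * Note: PDC RX uses the two PDC_BUFFER_SIZE buffers set up above. RPR/RCR
 * describe the buffer currently being filled and RNPR/RNCR the next one, so
 * the controller switches buffers without software intervention;
 * atmel_rx_from_pdc() drains the current buffer and re-queues it as the
 * "next" one once it has been consumed in full.
 */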

/*
 * tasklet handling tty stuff outside the interrupt handler.
 */
static void atmel_tasklet_rx_func(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* The interrupt handler does not take the lock */
	spin_lock(&port->lock);
	atmel_port->schedule_rx(port);
	spin_unlock(&port->lock);
}

static void atmel_tasklet_tx_func(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	/* The interrupt handler does not take the lock */
	spin_lock(&port->lock);
	atmel_port->schedule_tx(port);
	spin_unlock(&port->lock);
}

static void atmel_init_property(struct atmel_uart_port *atmel_port,
				struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	/* DMA/PDC usage specification */
	if (of_property_read_bool(np, "atmel,use-dma-rx")) {
		if (of_property_read_bool(np, "dmas")) {
			atmel_port->use_dma_rx = true;
			atmel_port->use_pdc_rx = false;
		} else {
			atmel_port->use_dma_rx = false;
			atmel_port->use_pdc_rx = true;
		}
	} else {
		atmel_port->use_dma_rx = false;
		atmel_port->use_pdc_rx = false;
	}

	if (of_property_read_bool(np, "atmel,use-dma-tx")) {
		if (of_property_read_bool(np, "dmas")) {
			atmel_port->use_dma_tx = true;
			atmel_port->use_pdc_tx = false;
		} else {
			atmel_port->use_dma_tx = false;
			atmel_port->use_pdc_tx = true;
		}
	} else {
		atmel_port->use_dma_tx = false;
		atmel_port->use_pdc_tx = false;
	}
}

static void atmel_set_ops(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_rx(port)) {
		atmel_port->prepare_rx = &atmel_prepare_rx_dma;
		atmel_port->schedule_rx = &atmel_rx_from_dma;
		atmel_port->release_rx = &atmel_release_rx_dma;
	} else if (atmel_use_pdc_rx(port)) {
		atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
		atmel_port->schedule_rx = &atmel_rx_from_pdc;
		atmel_port->release_rx = &atmel_release_rx_pdc;
	} else {
		atmel_port->prepare_rx = NULL;
		atmel_port->schedule_rx = &atmel_rx_from_ring;
		atmel_port->release_rx = NULL;
	}

	if (atmel_use_dma_tx(port)) {
		atmel_port->prepare_tx = &atmel_prepare_tx_dma;
		atmel_port->schedule_tx = &atmel_tx_dma;
		atmel_port->release_tx = &atmel_release_tx_dma;
	} else if (atmel_use_pdc_tx(port)) {
		atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
		atmel_port->schedule_tx = &atmel_tx_pdc;
		atmel_port->release_tx = &atmel_release_tx_pdc;
	} else {
		atmel_port->prepare_tx = NULL;
		atmel_port->schedule_tx = &atmel_tx_chars;
		atmel_port->release_tx = NULL;
	}
}

/*
 * Get ip name usart or uart
 */
static void atmel_get_ip_name(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int name = atmel_uart_readl(port, ATMEL_US_NAME);
	u32 version;
	u32 usart, dbgu_uart, new_uart;
	/* ASCII decoding for IP version */
	usart = 0x55534152;	/* USAR(T) */
	dbgu_uart = 0x44424755;	/* DBGU */
	new_uart = 0x55415254;	/* UART */

	/*
	 * Only USART devices from at91sam9260 SOC implement fractional
	 * baudrate. It is available for all asynchronous modes, with the
	 * following restriction: the sampling clock's duty cycle is not
	 * constant.
	 */
	atmel_port->has_frac_baudrate = false;
	atmel_port->has_hw_timer = false;

	if (name == new_uart) {
		dev_dbg(port->dev, "Uart with hw timer");
		atmel_port->has_hw_timer = true;
		atmel_port->rtor = ATMEL_UA_RTOR;
	} else if (name == usart) {
		dev_dbg(port->dev, "Usart\n");
		atmel_port->has_frac_baudrate = true;
		atmel_port->has_hw_timer = true;
		atmel_port->rtor = ATMEL_US_RTOR;
		version = atmel_uart_readl(port, ATMEL_US_VERSION);
		switch (version) {
		case 0x814:	/* sama5d2 */
			/* fall through */
		case 0x701:	/* sama5d4 */
			atmel_port->fidi_min = 3;
			atmel_port->fidi_max = 65535;
			break;
		case 0x502:	/* sam9x5, sama5d3 */
			atmel_port->fidi_min = 3;
			atmel_port->fidi_max = 2047;
			break;
		default:
			atmel_port->fidi_min = 1;
			atmel_port->fidi_max = 2047;
		}
	} else if (name == dbgu_uart) {
		dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
	} else {
		/* fallback for older SoCs: use version field */
		version = atmel_uart_readl(port, ATMEL_US_VERSION);
		switch (version) {
		case 0x302:
		case 0x10213:
		case 0x10302:
			dev_dbg(port->dev, "This version is usart\n");
			atmel_port->has_frac_baudrate = true;
			atmel_port->has_hw_timer = true;
			atmel_port->rtor = ATMEL_US_RTOR;
			break;
		case 0x203:
		case 0x10202:
			dev_dbg(port->dev, "This version is uart\n");
			break;
		default:
			dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
		}
	}
}

/*
 * Perform initialization and enable port for reception
 */
static int atmel_startup(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int retval;

	/*
	 * Ensure that no interrupts are enabled otherwise when
	 * request_irq() is called we could get stuck trying to
	 * handle an unexpected interrupt
	 */
	atmel_uart_writel(port, ATMEL_US_IDR, -1);
	atmel_port->ms_irq_enabled = false;

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(port->irq, atmel_interrupt,
			     IRQF_SHARED | IRQF_COND_SUSPEND,
			     dev_name(&pdev->dev), port);
	if (retval) {
		dev_err(port->dev, "atmel_startup - Can't get irq\n");
		return retval;
	}

	atomic_set(&atmel_port->tasklet_shutdown, 0);
	tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
		     (unsigned long)port);
	tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
		     (unsigned long)port);

	/*
	 * Initialize DMA (if necessary)
	 */
	atmel_init_property(atmel_port, pdev);
	atmel_set_ops(port);

	if (atmel_port->prepare_rx) {
		retval = atmel_port->prepare_rx(port);
		if (retval < 0)
			atmel_set_ops(port);
	}

	if (atmel_port->prepare_tx) {
		retval = atmel_port->prepare_tx(port);
		if (retval < 0)
			atmel_set_ops(port);
	}

	/*
	 * Enable FIFO when available
	 */
	if (atmel_port->fifo_size) {
		unsigned int txrdym = ATMEL_US_ONE_DATA;
		unsigned int rxrdym = ATMEL_US_ONE_DATA;
		unsigned int fmr;

		atmel_uart_writel(port, ATMEL_US_CR,
				  ATMEL_US_FIFOEN |
				  ATMEL_US_RXFCLR |
				  ATMEL_US_TXFLCLR);

(atmel_use_dma_tx(port)) 1971 txrdym = ATMEL_US_FOUR_DATA; 1972 1973 fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym); 1974 if (atmel_port->rts_high && 1975 atmel_port->rts_low) 1976 fmr |= ATMEL_US_FRTSC | 1977 ATMEL_US_RXFTHRES(atmel_port->rts_high) | 1978 ATMEL_US_RXFTHRES2(atmel_port->rts_low); 1979 1980 atmel_uart_writel(port, ATMEL_US_FMR, fmr); 1981 } 1982 1983 /* Save current CSR for comparison in atmel_tasklet_func() */ 1984 atmel_port->irq_status_prev = atmel_get_lines_status(port); 1985 1986 /* 1987 * Finally, enable the serial port 1988 */ 1989 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 1990 /* enable xmit & rcvr */ 1991 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 1992 atmel_port->tx_stopped = false; 1993 1994 timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0); 1995 1996 if (atmel_use_pdc_rx(port)) { 1997 /* set UART timeout */ 1998 if (!atmel_port->has_hw_timer) { 1999 mod_timer(&atmel_port->uart_timer, 2000 jiffies + uart_poll_timeout(port)); 2001 /* set USART timeout */ 2002 } else { 2003 atmel_uart_writel(port, atmel_port->rtor, 2004 PDC_RX_TIMEOUT); 2005 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 2006 2007 atmel_uart_writel(port, ATMEL_US_IER, 2008 ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); 2009 } 2010 /* enable PDC controller */ 2011 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); 2012 } else if (atmel_use_dma_rx(port)) { 2013 /* set UART timeout */ 2014 if (!atmel_port->has_hw_timer) { 2015 mod_timer(&atmel_port->uart_timer, 2016 jiffies + uart_poll_timeout(port)); 2017 /* set USART timeout */ 2018 } else { 2019 atmel_uart_writel(port, atmel_port->rtor, 2020 PDC_RX_TIMEOUT); 2021 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO); 2022 2023 atmel_uart_writel(port, ATMEL_US_IER, 2024 ATMEL_US_TIMEOUT); 2025 } 2026 } else { 2027 /* enable receive only */ 2028 atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY); 2029 } 2030 2031 return 0; 2032 } 2033 2034 /* 2035 * Flush any TX data submitted for DMA. Called when the TX circular 2036 * buffer is reset. 2037 */ 2038 static void atmel_flush_buffer(struct uart_port *port) 2039 { 2040 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2041 2042 if (atmel_use_pdc_tx(port)) { 2043 atmel_uart_writel(port, ATMEL_PDC_TCR, 0); 2044 atmel_port->pdc_tx.ofs = 0; 2045 } 2046 /* 2047 * in uart_flush_buffer(), the xmit circular buffer has just 2048 * been cleared, so we have to reset tx_len accordingly. 2049 */ 2050 atmel_port->tx_len = 0; 2051 } 2052 2053 /* 2054 * Disable the port 2055 */ 2056 static void atmel_shutdown(struct uart_port *port) 2057 { 2058 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2059 2060 /* Disable modem control lines interrupts */ 2061 atmel_disable_ms(port); 2062 2063 /* Disable interrupts at device level */ 2064 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2065 2066 /* Prevent spurious interrupts from scheduling the tasklet */ 2067 atomic_inc(&atmel_port->tasklet_shutdown); 2068 2069 /* 2070 * Prevent any tasklets being scheduled during 2071 * cleanup 2072 */ 2073 del_timer_sync(&atmel_port->uart_timer); 2074 2075 /* Make sure that no interrupt is on the fly */ 2076 synchronize_irq(port->irq); 2077 2078 /* 2079 * Clear out any scheduled tasklets before 2080 * we destroy the buffers 2081 */ 2082 tasklet_kill(&atmel_port->tasklet_rx); 2083 tasklet_kill(&atmel_port->tasklet_tx); 2084 2085 /* 2086 * Ensure everything is stopped and 2087 * disable port and break condition. 
2088 */ 2089 atmel_stop_rx(port); 2090 atmel_stop_tx(port); 2091 2092 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA); 2093 2094 /* 2095 * Shut-down the DMA. 2096 */ 2097 if (atmel_port->release_rx) 2098 atmel_port->release_rx(port); 2099 if (atmel_port->release_tx) 2100 atmel_port->release_tx(port); 2101 2102 /* 2103 * Reset ring buffer pointers 2104 */ 2105 atmel_port->rx_ring.head = 0; 2106 atmel_port->rx_ring.tail = 0; 2107 2108 /* 2109 * Free the interrupts 2110 */ 2111 free_irq(port->irq, port); 2112 2113 atmel_flush_buffer(port); 2114 } 2115 2116 /* 2117 * Power / Clock management. 2118 */ 2119 static void atmel_serial_pm(struct uart_port *port, unsigned int state, 2120 unsigned int oldstate) 2121 { 2122 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2123 2124 switch (state) { 2125 case 0: 2126 /* 2127 * Enable the peripheral clock for this serial port. 2128 * This is called on uart_open() or a resume event. 2129 */ 2130 clk_prepare_enable(atmel_port->clk); 2131 2132 /* re-enable interrupts if we disabled some on suspend */ 2133 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr); 2134 break; 2135 case 3: 2136 /* Back up the interrupt mask and disable all interrupts */ 2137 atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR); 2138 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2139 2140 /* 2141 * Disable the peripheral clock for this serial port. 2142 * This is called on uart_close() or a suspend event. 2143 */ 2144 clk_disable_unprepare(atmel_port->clk); 2145 break; 2146 default: 2147 dev_err(port->dev, "atmel_serial: unknown pm %d\n", state); 2148 } 2149 } 2150 2151 /* 2152 * Change the port parameters 2153 */ 2154 static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, 2155 struct ktermios *old) 2156 { 2157 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2158 unsigned long flags; 2159 unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0; 2160 2161 /* save the current mode register */ 2162 mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR); 2163 2164 /* reset the mode, clock divisor, parity, stop bits and data size */ 2165 mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | 2166 ATMEL_US_PAR | ATMEL_US_USMODE); 2167 2168 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); 2169 2170 /* byte size */ 2171 switch (termios->c_cflag & CSIZE) { 2172 case CS5: 2173 mode |= ATMEL_US_CHRL_5; 2174 break; 2175 case CS6: 2176 mode |= ATMEL_US_CHRL_6; 2177 break; 2178 case CS7: 2179 mode |= ATMEL_US_CHRL_7; 2180 break; 2181 default: 2182 mode |= ATMEL_US_CHRL_8; 2183 break; 2184 } 2185 2186 /* stop bits */ 2187 if (termios->c_cflag & CSTOPB) 2188 mode |= ATMEL_US_NBSTOP_2; 2189 2190 /* parity */ 2191 if (termios->c_cflag & PARENB) { 2192 /* Mark or Space parity */ 2193 if (termios->c_cflag & CMSPAR) { 2194 if (termios->c_cflag & PARODD) 2195 mode |= ATMEL_US_PAR_MARK; 2196 else 2197 mode |= ATMEL_US_PAR_SPACE; 2198 } else if (termios->c_cflag & PARODD) 2199 mode |= ATMEL_US_PAR_ODD; 2200 else 2201 mode |= ATMEL_US_PAR_EVEN; 2202 } else 2203 mode |= ATMEL_US_PAR_NONE; 2204 2205 spin_lock_irqsave(&port->lock, flags); 2206 2207 port->read_status_mask = ATMEL_US_OVRE; 2208 if (termios->c_iflag & INPCK) 2209 port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2210 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 2211 port->read_status_mask |= ATMEL_US_RXBRK; 2212 2213 if (atmel_use_pdc_rx(port)) 2214 /* need to enable error interrupts */ 2215 atmel_uart_writel(port, 
ATMEL_US_IER, port->read_status_mask); 2216 2217 /* 2218 * Characters to ignore 2219 */ 2220 port->ignore_status_mask = 0; 2221 if (termios->c_iflag & IGNPAR) 2222 port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE); 2223 if (termios->c_iflag & IGNBRK) { 2224 port->ignore_status_mask |= ATMEL_US_RXBRK; 2225 /* 2226 * If we're ignoring parity and break indicators, 2227 * ignore overruns too (for real raw support). 2228 */ 2229 if (termios->c_iflag & IGNPAR) 2230 port->ignore_status_mask |= ATMEL_US_OVRE; 2231 } 2232 /* TODO: Ignore all characters if CREAD is set.*/ 2233 2234 /* update the per-port timeout */ 2235 uart_update_timeout(port, termios->c_cflag, baud); 2236 2237 /* 2238 * save/disable interrupts. The tty layer will ensure that the 2239 * transmitter is empty if requested by the caller, so there's 2240 * no need to wait for it here. 2241 */ 2242 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2243 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2244 2245 /* disable receiver and transmitter */ 2246 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS); 2247 atmel_port->tx_stopped = true; 2248 2249 /* mode */ 2250 if (port->rs485.flags & SER_RS485_ENABLED) { 2251 atmel_uart_writel(port, ATMEL_US_TTGR, 2252 port->rs485.delay_rts_after_send); 2253 mode |= ATMEL_US_USMODE_RS485; 2254 } else if (port->iso7816.flags & SER_ISO7816_ENABLED) { 2255 atmel_uart_writel(port, ATMEL_US_TTGR, port->iso7816.tg); 2256 /* select mck clock, and output */ 2257 mode |= ATMEL_US_USCLKS_MCK | ATMEL_US_CLKO; 2258 /* set max iterations */ 2259 mode |= ATMEL_US_MAX_ITER(3); 2260 if ((port->iso7816.flags & SER_ISO7816_T_PARAM) 2261 == SER_ISO7816_T(0)) 2262 mode |= ATMEL_US_USMODE_ISO7816_T0; 2263 else 2264 mode |= ATMEL_US_USMODE_ISO7816_T1; 2265 } else if (termios->c_cflag & CRTSCTS) { 2266 /* RS232 with hardware handshake (RTS/CTS) */ 2267 if (atmel_use_fifo(port) && 2268 !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) { 2269 /* 2270 * with ATMEL_US_USMODE_HWHS set, the controller will 2271 * be able to drive the RTS pin high/low when the RX 2272 * FIFO is above RXFTHRES/below RXFTHRES2. 2273 * It will also disable the transmitter when the CTS 2274 * pin is high. 2275 * This mode is not activated if CTS pin is a GPIO 2276 * because in this case, the transmitter is always 2277 * disabled (there must be an internal pull-up 2278 * responsible for this behaviour). 2279 * If the RTS pin is a GPIO, the controller won't be 2280 * able to drive it according to the FIFO thresholds, 2281 * but it will be handled by the driver. 2282 */ 2283 mode |= ATMEL_US_USMODE_HWHS; 2284 } else { 2285 /* 2286 * For platforms without FIFO, the flow control is 2287 * handled by the driver. 
2288 */ 2289 mode |= ATMEL_US_USMODE_NORMAL; 2290 } 2291 } else { 2292 /* RS232 without hardware handshake */ 2293 mode |= ATMEL_US_USMODE_NORMAL; 2294 } 2295 2296 /* set the mode, clock divisor, parity, stop bits and data size */ 2297 atmel_uart_writel(port, ATMEL_US_MR, mode); 2298 2299 /* 2300 * when switching the mode, set the RTS line state according to the 2301 * new mode, otherwise keep the former state 2302 */ 2303 if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) { 2304 unsigned int rts_state; 2305 2306 if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) { 2307 /* let the hardware control the RTS line */ 2308 rts_state = ATMEL_US_RTSDIS; 2309 } else { 2310 /* force RTS line to low level */ 2311 rts_state = ATMEL_US_RTSEN; 2312 } 2313 2314 atmel_uart_writel(port, ATMEL_US_CR, rts_state); 2315 } 2316 2317 /* 2318 * Set the baud rate: 2319 * Fractional baudrate allows the output frequency to be set more 2320 * accurately. This feature is enabled only when using normal mode. 2321 * baudrate = selected clock / (8 * (2 - OVER) * (CD + FP / 8)) 2322 * Currently, OVER is always set to 0 so we get 2323 * baudrate = selected clock / (16 * (CD + FP / 8)) 2324 * then 2325 * 8 CD + FP = selected clock / (2 * baudrate) 2326 */ 2327 if (atmel_port->has_frac_baudrate) { 2328 div = DIV_ROUND_CLOSEST(port->uartclk, baud * 2); 2329 cd = div >> 3; 2330 fp = div & ATMEL_US_FP_MASK; 2331 } else { 2332 cd = uart_get_divisor(port, baud); 2333 } 2334 2335 if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */ 2336 cd /= 8; 2337 mode |= ATMEL_US_USCLKS_MCK_DIV8; 2338 } 2339 quot = cd | fp << ATMEL_US_FP_OFFSET; 2340 2341 if (!(port->iso7816.flags & SER_ISO7816_ENABLED)) 2342 atmel_uart_writel(port, ATMEL_US_BRGR, quot); 2343 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2344 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2345 atmel_port->tx_stopped = false; 2346 2347 /* restore interrupts */ 2348 atmel_uart_writel(port, ATMEL_US_IER, imr); 2349 2350 /* CTS flow-control and modem-status interrupts */ 2351 if (UART_ENABLE_MS(port, termios->c_cflag)) 2352 atmel_enable_ms(port); 2353 else 2354 atmel_disable_ms(port); 2355 2356 spin_unlock_irqrestore(&port->lock, flags); 2357 } 2358 2359 static void atmel_set_ldisc(struct uart_port *port, struct ktermios *termios) 2360 { 2361 if (termios->c_line == N_PPS) { 2362 port->flags |= UPF_HARDPPS_CD; 2363 spin_lock_irq(&port->lock); 2364 atmel_enable_ms(port); 2365 spin_unlock_irq(&port->lock); 2366 } else { 2367 port->flags &= ~UPF_HARDPPS_CD; 2368 if (!UART_ENABLE_MS(port, termios->c_cflag)) { 2369 spin_lock_irq(&port->lock); 2370 atmel_disable_ms(port); 2371 spin_unlock_irq(&port->lock); 2372 } 2373 } 2374 } 2375 2376 /* 2377 * Return string describing the specified port 2378 */ 2379 static const char *atmel_type(struct uart_port *port) 2380 { 2381 return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL; 2382 } 2383 2384 /* 2385 * Release the memory region(s) being used by 'port'. 2386 */ 2387 static void atmel_release_port(struct uart_port *port) 2388 { 2389 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2390 int size = resource_size(mpdev->resource); 2391 2392 release_mem_region(port->mapbase, size); 2393 2394 if (port->flags & UPF_IOREMAP) { 2395 iounmap(port->membase); 2396 port->membase = NULL; 2397 } 2398 } 2399 2400 /* 2401 * Request the memory region(s) being used by 'port'.
2402 */ 2403 static int atmel_request_port(struct uart_port *port) 2404 { 2405 struct platform_device *mpdev = to_platform_device(port->dev->parent); 2406 int size = resource_size(mpdev->resource); 2407 2408 if (!request_mem_region(port->mapbase, size, "atmel_serial")) 2409 return -EBUSY; 2410 2411 if (port->flags & UPF_IOREMAP) { 2412 port->membase = ioremap(port->mapbase, size); 2413 if (port->membase == NULL) { 2414 release_mem_region(port->mapbase, size); 2415 return -ENOMEM; 2416 } 2417 } 2418 2419 return 0; 2420 } 2421 2422 /* 2423 * Configure/autoconfigure the port. 2424 */ 2425 static void atmel_config_port(struct uart_port *port, int flags) 2426 { 2427 if (flags & UART_CONFIG_TYPE) { 2428 port->type = PORT_ATMEL; 2429 atmel_request_port(port); 2430 } 2431 } 2432 2433 /* 2434 * Verify the new serial_struct (for TIOCSSERIAL). 2435 */ 2436 static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) 2437 { 2438 int ret = 0; 2439 if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL) 2440 ret = -EINVAL; 2441 if (port->irq != ser->irq) 2442 ret = -EINVAL; 2443 if (ser->io_type != SERIAL_IO_MEM) 2444 ret = -EINVAL; 2445 if (port->uartclk / 16 != ser->baud_base) 2446 ret = -EINVAL; 2447 if (port->mapbase != (unsigned long)ser->iomem_base) 2448 ret = -EINVAL; 2449 if (port->iobase != ser->port) 2450 ret = -EINVAL; 2451 if (ser->hub6 != 0) 2452 ret = -EINVAL; 2453 return ret; 2454 } 2455 2456 #ifdef CONFIG_CONSOLE_POLL 2457 static int atmel_poll_get_char(struct uart_port *port) 2458 { 2459 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY)) 2460 cpu_relax(); 2461 2462 return atmel_uart_read_char(port); 2463 } 2464 2465 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) 2466 { 2467 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2468 cpu_relax(); 2469 2470 atmel_uart_write_char(port, ch); 2471 } 2472 #endif 2473 2474 static const struct uart_ops atmel_pops = { 2475 .tx_empty = atmel_tx_empty, 2476 .set_mctrl = atmel_set_mctrl, 2477 .get_mctrl = atmel_get_mctrl, 2478 .stop_tx = atmel_stop_tx, 2479 .start_tx = atmel_start_tx, 2480 .stop_rx = atmel_stop_rx, 2481 .enable_ms = atmel_enable_ms, 2482 .break_ctl = atmel_break_ctl, 2483 .startup = atmel_startup, 2484 .shutdown = atmel_shutdown, 2485 .flush_buffer = atmel_flush_buffer, 2486 .set_termios = atmel_set_termios, 2487 .set_ldisc = atmel_set_ldisc, 2488 .type = atmel_type, 2489 .release_port = atmel_release_port, 2490 .request_port = atmel_request_port, 2491 .config_port = atmel_config_port, 2492 .verify_port = atmel_verify_port, 2493 .pm = atmel_serial_pm, 2494 #ifdef CONFIG_CONSOLE_POLL 2495 .poll_get_char = atmel_poll_get_char, 2496 .poll_put_char = atmel_poll_put_char, 2497 #endif 2498 }; 2499 2500 /* 2501 * Configure the port from the platform device resource info. 
2502 */ 2503 static int atmel_init_port(struct atmel_uart_port *atmel_port, 2504 struct platform_device *pdev) 2505 { 2506 int ret; 2507 struct uart_port *port = &atmel_port->uart; 2508 struct platform_device *mpdev = to_platform_device(pdev->dev.parent); 2509 2510 atmel_init_property(atmel_port, pdev); 2511 atmel_set_ops(port); 2512 2513 uart_get_rs485_mode(&mpdev->dev, &port->rs485); 2514 2515 port->iotype = UPIO_MEM; 2516 port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP; 2517 port->ops = &atmel_pops; 2518 port->fifosize = 1; 2519 port->dev = &pdev->dev; 2520 port->mapbase = mpdev->resource[0].start; 2521 port->irq = mpdev->resource[1].start; 2522 port->rs485_config = atmel_config_rs485; 2523 port->iso7816_config = atmel_config_iso7816; 2524 port->membase = NULL; 2525 2526 memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); 2527 2528 /* for console, the clock could already be configured */ 2529 if (!atmel_port->clk) { 2530 atmel_port->clk = clk_get(&mpdev->dev, "usart"); 2531 if (IS_ERR(atmel_port->clk)) { 2532 ret = PTR_ERR(atmel_port->clk); 2533 atmel_port->clk = NULL; 2534 return ret; 2535 } 2536 ret = clk_prepare_enable(atmel_port->clk); 2537 if (ret) { 2538 clk_put(atmel_port->clk); 2539 atmel_port->clk = NULL; 2540 return ret; 2541 } 2542 port->uartclk = clk_get_rate(atmel_port->clk); 2543 clk_disable_unprepare(atmel_port->clk); 2544 /* only enable clock when USART is in use */ 2545 } 2546 2547 /* 2548 * Use TXEMPTY for interrupt when rs485 or ISO7816 else TXRDY or 2549 * ENDTX|TXBUFE 2550 */ 2551 if (port->rs485.flags & SER_RS485_ENABLED || 2552 port->iso7816.flags & SER_ISO7816_ENABLED) 2553 atmel_port->tx_done_mask = ATMEL_US_TXEMPTY; 2554 else if (atmel_use_pdc_tx(port)) { 2555 port->fifosize = PDC_BUFFER_SIZE; 2556 atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE; 2557 } else { 2558 atmel_port->tx_done_mask = ATMEL_US_TXRDY; 2559 } 2560 2561 return 0; 2562 } 2563 2564 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2565 static void atmel_console_putchar(struct uart_port *port, int ch) 2566 { 2567 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY)) 2568 cpu_relax(); 2569 atmel_uart_write_char(port, ch); 2570 } 2571 2572 /* 2573 * Interrupts are disabled on entering 2574 */ 2575 static void atmel_console_write(struct console *co, const char *s, u_int count) 2576 { 2577 struct uart_port *port = &atmel_ports[co->index].uart; 2578 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2579 unsigned int status, imr; 2580 unsigned int pdc_tx; 2581 2582 /* 2583 * First, save IMR and then disable interrupts 2584 */ 2585 imr = atmel_uart_readl(port, ATMEL_US_IMR); 2586 atmel_uart_writel(port, ATMEL_US_IDR, 2587 ATMEL_US_RXRDY | atmel_port->tx_done_mask); 2588 2589 /* Store PDC transmit status and disable it */ 2590 pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN; 2591 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 2592 2593 /* Make sure that tx path is actually able to send characters */ 2594 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN); 2595 atmel_port->tx_stopped = false; 2596 2597 uart_console_write(port, s, count, atmel_console_putchar); 2598 2599 /* 2600 * Finally, wait for transmitter to become empty 2601 * and restore IMR 2602 */ 2603 do { 2604 status = atmel_uart_readl(port, ATMEL_US_CSR); 2605 } while (!(status & ATMEL_US_TXRDY)); 2606 2607 /* Restore PDC transmit status */ 2608 if (pdc_tx) 2609 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); 2610 2611 /* set interrupts back the way they were */ 2612 
atmel_uart_writel(port, ATMEL_US_IER, imr); 2613 } 2614 2615 /* 2616 * If the port was already initialised (eg, by a boot loader), 2617 * try to determine the current setup. 2618 */ 2619 static void __init atmel_console_get_options(struct uart_port *port, int *baud, 2620 int *parity, int *bits) 2621 { 2622 unsigned int mr, quot; 2623 2624 /* 2625 * If the baud rate generator isn't running, the port wasn't 2626 * initialized by the boot loader. 2627 */ 2628 quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD; 2629 if (!quot) 2630 return; 2631 2632 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL; 2633 if (mr == ATMEL_US_CHRL_8) 2634 *bits = 8; 2635 else 2636 *bits = 7; 2637 2638 mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR; 2639 if (mr == ATMEL_US_PAR_EVEN) 2640 *parity = 'e'; 2641 else if (mr == ATMEL_US_PAR_ODD) 2642 *parity = 'o'; 2643 2644 /* 2645 * The serial core only rounds down when matching this to a 2646 * supported baud rate. Make sure we don't end up slightly 2647 * lower than one of those, as it would make us fall through 2648 * to a much lower baud rate than we really want. 2649 */ 2650 *baud = port->uartclk / (16 * (quot - 1)); 2651 } 2652 2653 static int __init atmel_console_setup(struct console *co, char *options) 2654 { 2655 int ret; 2656 struct uart_port *port = &atmel_ports[co->index].uart; 2657 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2658 int baud = 115200; 2659 int bits = 8; 2660 int parity = 'n'; 2661 int flow = 'n'; 2662 2663 if (port->membase == NULL) { 2664 /* Port not initialized yet - delay setup */ 2665 return -ENODEV; 2666 } 2667 2668 ret = clk_prepare_enable(atmel_ports[co->index].clk); 2669 if (ret) 2670 return ret; 2671 2672 atmel_uart_writel(port, ATMEL_US_IDR, -1); 2673 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); 2674 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN); 2675 atmel_port->tx_stopped = false; 2676 2677 if (options) 2678 uart_parse_options(options, &baud, &parity, &bits, &flow); 2679 else 2680 atmel_console_get_options(port, &baud, &parity, &bits); 2681 2682 return uart_set_options(port, co, baud, parity, bits, flow); 2683 } 2684 2685 static struct uart_driver atmel_uart; 2686 2687 static struct console atmel_console = { 2688 .name = ATMEL_DEVICENAME, 2689 .write = atmel_console_write, 2690 .device = uart_console_device, 2691 .setup = atmel_console_setup, 2692 .flags = CON_PRINTBUFFER, 2693 .index = -1, 2694 .data = &atmel_uart, 2695 }; 2696 2697 #define ATMEL_CONSOLE_DEVICE (&atmel_console) 2698 2699 static inline bool atmel_is_console_port(struct uart_port *port) 2700 { 2701 return port->cons && port->cons->index == port->line; 2702 } 2703 2704 #else 2705 #define ATMEL_CONSOLE_DEVICE NULL 2706 2707 static inline bool atmel_is_console_port(struct uart_port *port) 2708 { 2709 return false; 2710 } 2711 #endif 2712 2713 static struct uart_driver atmel_uart = { 2714 .owner = THIS_MODULE, 2715 .driver_name = "atmel_serial", 2716 .dev_name = ATMEL_DEVICENAME, 2717 .major = SERIAL_ATMEL_MAJOR, 2718 .minor = MINOR_START, 2719 .nr = ATMEL_MAX_UART, 2720 .cons = ATMEL_CONSOLE_DEVICE, 2721 }; 2722 2723 #ifdef CONFIG_PM 2724 static bool atmel_serial_clk_will_stop(void) 2725 { 2726 #ifdef CONFIG_ARCH_AT91 2727 return at91_suspend_entering_slow_clock(); 2728 #else 2729 return false; 2730 #endif 2731 } 2732 2733 static int atmel_serial_suspend(struct platform_device *pdev, 2734 pm_message_t state) 2735 { 2736 struct uart_port *port = platform_get_drvdata(pdev); 2737 
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2738 2739 if (atmel_is_console_port(port) && console_suspend_enabled) { 2740 /* Drain the TX shifter */ 2741 while (!(atmel_uart_readl(port, ATMEL_US_CSR) & 2742 ATMEL_US_TXEMPTY)) 2743 cpu_relax(); 2744 } 2745 2746 if (atmel_is_console_port(port) && !console_suspend_enabled) { 2747 /* Cache register values as we won't get a full shutdown/startup 2748 * cycle 2749 */ 2750 atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR); 2751 atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR); 2752 atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR); 2753 atmel_port->cache.rtor = atmel_uart_readl(port, 2754 atmel_port->rtor); 2755 atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR); 2756 atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR); 2757 atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR); 2758 } 2759 2760 /* we can not wake up if we're running on slow clock */ 2761 atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); 2762 if (atmel_serial_clk_will_stop()) { 2763 unsigned long flags; 2764 2765 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2766 atmel_port->suspended = true; 2767 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2768 device_set_wakeup_enable(&pdev->dev, 0); 2769 } 2770 2771 uart_suspend_port(&atmel_uart, port); 2772 2773 return 0; 2774 } 2775 2776 static int atmel_serial_resume(struct platform_device *pdev) 2777 { 2778 struct uart_port *port = platform_get_drvdata(pdev); 2779 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 2780 unsigned long flags; 2781 2782 if (atmel_is_console_port(port) && !console_suspend_enabled) { 2783 atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr); 2784 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr); 2785 atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr); 2786 atmel_uart_writel(port, atmel_port->rtor, 2787 atmel_port->cache.rtor); 2788 atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr); 2789 2790 if (atmel_port->fifo_size) { 2791 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_FIFOEN | 2792 ATMEL_US_RXFCLR | ATMEL_US_TXFLCLR); 2793 atmel_uart_writel(port, ATMEL_US_FMR, 2794 atmel_port->cache.fmr); 2795 atmel_uart_writel(port, ATMEL_US_FIER, 2796 atmel_port->cache.fimr); 2797 } 2798 atmel_start_rx(port); 2799 } 2800 2801 spin_lock_irqsave(&atmel_port->lock_suspended, flags); 2802 if (atmel_port->pending) { 2803 atmel_handle_receive(port, atmel_port->pending); 2804 atmel_handle_status(port, atmel_port->pending, 2805 atmel_port->pending_status); 2806 atmel_handle_transmit(port, atmel_port->pending); 2807 atmel_port->pending = 0; 2808 } 2809 atmel_port->suspended = false; 2810 spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); 2811 2812 uart_resume_port(&atmel_uart, port); 2813 device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); 2814 2815 return 0; 2816 } 2817 #else 2818 #define atmel_serial_suspend NULL 2819 #define atmel_serial_resume NULL 2820 #endif 2821 2822 static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port, 2823 struct platform_device *pdev) 2824 { 2825 atmel_port->fifo_size = 0; 2826 atmel_port->rts_low = 0; 2827 atmel_port->rts_high = 0; 2828 2829 if (of_property_read_u32(pdev->dev.of_node, 2830 "atmel,fifo-size", 2831 &atmel_port->fifo_size)) 2832 return; 2833 2834 if (!atmel_port->fifo_size) 2835 return; 2836 2837 if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) { 2838 atmel_port->fifo_size = 0; 2839 
dev_err(&pdev->dev, "Invalid FIFO size\n"); 2840 return; 2841 } 2842 2843 /* 2844 * 0 <= rts_low <= rts_high <= fifo_size 2845 * Once their CTS line is asserted by the remote peer, some x86 UARTs tend 2846 * to flush their internal TX FIFO, commonly up to 16 data, before 2847 * they actually stop sending new data. So we try to set the RTS High 2848 * Threshold to a reasonably high value, respecting this empirical 2849 * 16-data rule when possible. 2850 */ 2851 atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1, 2852 atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET); 2853 atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2, 2854 atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET); 2855 2856 dev_info(&pdev->dev, "Using FIFO (%u data)\n", 2857 atmel_port->fifo_size); 2858 dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n", 2859 atmel_port->rts_high); 2860 dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n", 2861 atmel_port->rts_low); 2862 } 2863 2864 static int atmel_serial_probe(struct platform_device *pdev) 2865 { 2866 struct atmel_uart_port *atmel_port; 2867 struct device_node *np = pdev->dev.parent->of_node; 2868 void *data; 2869 int ret = -ENODEV; 2870 bool rs485_enabled; 2871 2872 BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); 2873 2874 /* 2875 * In the device tree there is no node with "atmel,at91rm9200-usart-serial" 2876 * as its compatible string. This driver is probed by the at91-usart mfd 2877 * driver, which is just a wrapper over the atmel_serial and 2878 * spi-at91-usart drivers. All attributes needed by this driver are 2879 * found in the of_node of the parent. 2880 */ 2881 pdev->dev.of_node = np; 2882 2883 ret = of_alias_get_id(np, "serial"); 2884 if (ret < 0) 2885 /* port id not found in platform data or device-tree aliases: 2886 * auto-enumerate it */ 2887 ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); 2888 2889 if (ret >= ATMEL_MAX_UART) { 2890 ret = -ENODEV; 2891 goto err; 2892 } 2893 2894 if (test_and_set_bit(ret, atmel_ports_in_use)) { 2895 /* port already in use */ 2896 ret = -EBUSY; 2897 goto err; 2898 } 2899 2900 atmel_port = &atmel_ports[ret]; 2901 atmel_port->backup_imr = 0; 2902 atmel_port->uart.line = ret; 2903 atmel_serial_probe_fifos(atmel_port, pdev); 2904 2905 atomic_set(&atmel_port->tasklet_shutdown, 0); 2906 spin_lock_init(&atmel_port->lock_suspended); 2907 2908 ret = atmel_init_port(atmel_port, pdev); 2909 if (ret) 2910 goto err_clear_bit; 2911 2912 atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0); 2913 if (IS_ERR(atmel_port->gpios)) { 2914 ret = PTR_ERR(atmel_port->gpios); 2915 goto err_clear_bit; 2916 } 2917 2918 if (!atmel_use_pdc_rx(&atmel_port->uart)) { 2919 ret = -ENOMEM; 2920 data = kmalloc_array(ATMEL_SERIAL_RINGSIZE, 2921 sizeof(struct atmel_uart_char), 2922 GFP_KERNEL); 2923 if (!data) 2924 goto err_alloc_ring; 2925 atmel_port->rx_ring.buf = data; 2926 } 2927 2928 rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED; 2929 2930 ret = uart_add_one_port(&atmel_uart, &atmel_port->uart); 2931 if (ret) 2932 goto err_add_port; 2933 2934 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE 2935 if (atmel_is_console_port(&atmel_port->uart) 2936 && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) { 2937 /* 2938 * The serial core enabled the clock for us, so undo 2939 * the clk_prepare_enable() in atmel_console_setup() 2940 */ 2941 clk_disable_unprepare(atmel_port->clk); 2942 } 2943 #endif 2944 2945 device_init_wakeup(&pdev->dev, 1); 2946 platform_set_drvdata(pdev, atmel_port); 2947 2948 /* 2949 * The peripheral clock has been
disabled by atmel_init_port(): 2950 * enable it before accessing I/O registers 2951 */ 2952 clk_prepare_enable(atmel_port->clk); 2953 2954 if (rs485_enabled) { 2955 atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR, 2956 ATMEL_US_USMODE_NORMAL); 2957 atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR, 2958 ATMEL_US_RTSEN); 2959 } 2960 2961 /* 2962 * Get port name of usart or uart 2963 */ 2964 atmel_get_ip_name(&atmel_port->uart); 2965 2966 /* 2967 * The peripheral clock can now safely be disabled till the port 2968 * is used 2969 */ 2970 clk_disable_unprepare(atmel_port->clk); 2971 2972 return 0; 2973 2974 err_add_port: 2975 kfree(atmel_port->rx_ring.buf); 2976 atmel_port->rx_ring.buf = NULL; 2977 err_alloc_ring: 2978 if (!atmel_is_console_port(&atmel_port->uart)) { 2979 clk_put(atmel_port->clk); 2980 atmel_port->clk = NULL; 2981 } 2982 err_clear_bit: 2983 clear_bit(atmel_port->uart.line, atmel_ports_in_use); 2984 err: 2985 return ret; 2986 } 2987 2988 /* 2989 * Even if the driver is not modular, it makes sense to be able to 2990 * unbind a device: there can be many bound devices, and there are 2991 * situations where dynamic binding and unbinding can be useful. 2992 * 2993 * For example, a connected device can require a specific firmware update 2994 * protocol that needs bitbanging on IO lines, but use the regular serial 2995 * port in the normal case. 2996 */ 2997 static int atmel_serial_remove(struct platform_device *pdev) 2998 { 2999 struct uart_port *port = platform_get_drvdata(pdev); 3000 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 3001 int ret = 0; 3002 3003 tasklet_kill(&atmel_port->tasklet_rx); 3004 tasklet_kill(&atmel_port->tasklet_tx); 3005 3006 device_init_wakeup(&pdev->dev, 0); 3007 3008 ret = uart_remove_one_port(&atmel_uart, port); 3009 3010 kfree(atmel_port->rx_ring.buf); 3011 3012 /* "port" is allocated statically, so we shouldn't free it */ 3013 3014 clear_bit(port->line, atmel_ports_in_use); 3015 3016 clk_put(atmel_port->clk); 3017 atmel_port->clk = NULL; 3018 pdev->dev.of_node = NULL; 3019 3020 return ret; 3021 } 3022 3023 static struct platform_driver atmel_serial_driver = { 3024 .probe = atmel_serial_probe, 3025 .remove = atmel_serial_remove, 3026 .suspend = atmel_serial_suspend, 3027 .resume = atmel_serial_resume, 3028 .driver = { 3029 .name = "atmel_usart_serial", 3030 .of_match_table = of_match_ptr(atmel_serial_dt_ids), 3031 }, 3032 }; 3033 3034 static int __init atmel_serial_init(void) 3035 { 3036 int ret; 3037 3038 ret = uart_register_driver(&atmel_uart); 3039 if (ret) 3040 return ret; 3041 3042 ret = platform_driver_register(&atmel_serial_driver); 3043 if (ret) 3044 uart_unregister_driver(&atmel_uart); 3045 3046 return ret; 3047 } 3048 device_initcall(atmel_serial_init); 3049
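/*
 * Editorial note -- illustrative sketch only, not part of the upstream driver.
 * The atmel_example_* helper below is an editorial addition: it mirrors the
 * fractional baud-rate computation performed in atmel_set_termios()
 * (quot = cd | fp << ATMEL_US_FP_OFFSET) so the arithmetic can be checked in
 * isolation. The 66 MHz clock in the worked numbers is an assumed example
 * value, not a figure taken from this driver.
 */
static unsigned int __maybe_unused
atmel_example_frac_brgr(unsigned int uartclk, unsigned int baud)
{
	/* 8 * CD + FP = uartclk / (2 * baud), rounded to the closest integer */
	unsigned int div = DIV_ROUND_CLOSEST(uartclk, baud * 2);
	unsigned int cd = div >> 3;			/* integer divisor */
	unsigned int fp = div & ATMEL_US_FP_MASK;	/* eighths of a divisor */

	/*
	 * Worked example (assumed values): uartclk = 66000000, baud = 115200
	 *   div = 286, cd = 35, fp = 6
	 *   actual rate = 66000000 / (16 * (35 + 6/8)) ~= 115385, i.e. about
	 *   0.16% above the requested rate, versus roughly 0.5% of error with
	 *   the integer-only uart_get_divisor() path.
	 */
	return cd | fp << ATMEL_US_FP_OFFSET;
}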
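/*
 * Editorial note -- second illustrative sketch, also not part of the driver.
 * It reproduces the RTS threshold computation from atmel_serial_probe_fifos()
 * for one assumed FIFO size, so the 0 <= rts_low <= rts_high <= fifo_size
 * invariant is easy to verify by hand.
 */
static void __maybe_unused atmel_example_rts_thresholds(void)
{
	unsigned int fifo_size = 32;	/* assumed example value */
	unsigned int rts_high, rts_low;

	rts_high = max_t(int, fifo_size >> 1,
			 fifo_size - ATMEL_RTS_HIGH_OFFSET);
	rts_low  = max_t(int, fifo_size >> 2,
			 fifo_size - ATMEL_RTS_LOW_OFFSET);

	/*
	 * With the RTS offsets defined at the top of this file, a 32-data
	 * FIFO gives rts_high = 16 and rts_low = 12: RTS is deasserted once
	 * 16 data are pending and reasserted when the FIFO drains back
	 * below 12.
	 */
	pr_debug("example thresholds: high=%u low=%u\n", rts_high, rts_low);
}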