/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This is a generic driver for ARM AMBA-type serial ports. They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs. If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/workqueue.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	unsigned int		ifls;
	unsigned int		lcrh_tx;
	unsigned int		lcrh_rx;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};

static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.get_fifosize		= get_fifosize_arm,
};

static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.get_fifosize		= get_fifosize_st,
};

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		lcrh_tx;	/* vendor-specific */
	unsigned int		lcrh_rx;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	struct delayed_work	tx_softirq_work;
	bool			autorts;
	unsigned int		tx_irq_seen;	/* 0=none, 1=1, 2=2 or more */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
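
/*
 * A note on reads from UART01x_DR as used below (layout per the PL011
 * TRM): the received character sits in bits 7:0 and the per-character
 * error flags FE/PE/BE/OE in bits 11:8.  We additionally OR in
 * UART_DUMMY_DR_RX (bit 16) to mark the value as a really-read
 * character, so that setting UART_DUMMY_DR_RX in ignore_status_mask
 * (see pl011_set_termios() when CREAD is clear) makes
 * uart_insert_char() discard every received character.
 */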

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status, ch;
	unsigned int flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = readw(uap->port.membase + UART01x_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = readw(uap->port.membase + UART01x_DR) |
			UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface;
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase + UART01x_DR,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
					   plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase + UART01x_DR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their
		 * capabilities. If the controller does, check for suitable
		 * residue processing, otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * The poll rate defaults to 100 ms if not
				 * specified. This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
						"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
						"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);
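
/*
 * A rough sketch of the TX DMA flow implemented below:
 *
 *   pl011_start_tx()
 *     -> pl011_dma_tx_start() / pl011_dma_tx_irq()
 *          -> pl011_dma_tx_refill(): copy the circ buffer into the
 *             DMA bounce buffer, map it and submit a descriptor
 *   ... the transfer completes ...
 *   pl011_dma_tx_callback(): unmap the buffer and either refill for
 *   the next chunk or fall back to PIO via pl011_start_tx_pio().
 */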

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the TX queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent. Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling. This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				writew(uap->im, uap->port.membase +
				       UART011_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
			       uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send. Disable DMA to prevent it loading
	 * the TX FIFO, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space. Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO; these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
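
/*
 * In outline, the RX side below is double-buffered: two DMA bounce
 * buffers (sgbuf_a/sgbuf_b) are used alternately.  On a receive
 * timeout interrupt, pl011_dma_rx_irq() pauses the channel, reads the
 * residue to learn how much arrived, terminates the transfer, pushes
 * the characters (plus any FIFO stragglers) to the TTY and retriggers
 * DMA on the other buffer.  When a buffer fills before any timeout,
 * pl011_dma_rx_callback() does the same from the DMA completion path,
 * without reading the FIFO.
 */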

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME. Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
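
/*
 * Worked example of the residue arithmetic in pl011_dma_rx_poll()
 * below (illustrative numbers only): with a 4096-byte DMA buffer,
 * last_residue starts at 4096.  If a poll finds state.residue == 4000,
 * then dmataken = 4096 - 4096 = 0 and size = 4096 - 4000 = 96, so 96
 * bytes at offset 0 are pushed and last_residue becomes 4000.  If the
 * next poll finds state.residue == 3900, dmataken = 96 and size = 100,
 * so the next 100 bytes are pushed from offset 96.
 */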

/*
 * Timer handler for Rx DMA polling.
 * On every poll, it checks the residue in the DMA buffer and transfers
 * data to the TTY. last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			  jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * The ST Micro variants have a specific DMA burst threshold
	 * compensation. Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
		       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				  jiffies +
				  msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif
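
/*
 * Note on a pattern used throughout the interrupt handling below:
 * uap->im is a shadow of the UART011_IMSC interrupt mask register.
 * Callers update the shadow and then write it back with
 * writew(uap->im, uap->port.membase + UART011_IMSC), always under the
 * port lock, so the register and the shadow stay in step.
 */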

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}

static bool pl011_tx_chars(struct uart_amba_port *uap);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	uap->im |= UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	if (!uap->tx_irq_seen)
		pl011_tx_chars(uap);
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					  jiffies +
					  msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}

/*
 * Transmit a character
 * There must be at least one free entry in the TX FIFO to accept the char.
 *
 * Returns true if the FIFO might have space in it afterwards;
 * returns false if the FIFO definitely became full.
 */
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
{
	writew(c, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;

	if (likely(uap->tx_irq_seen > 1))
		return true;

	return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
}
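
/*
 * In brief, the tx_irq_seen heuristic used below: while it is 0 we
 * have never seen a TX interrupt, so after each character we must poll
 * TXFF, and a delayed work (pl011_tx_softirq) is scheduled as a
 * fallback in case the interrupt never arrives.  Once it reaches 2,
 * TX interrupts are known to be delivered, and since the IFLS
 * watermark guarantees the FIFO is at least half empty at IRQ time,
 * we can blind-write fifosize/2 characters without polling.
 */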

static bool pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	if (unlikely(uap->tx_irq_seen < 2))
		/*
		 * Initial FIFO fill level unknown: we must check TXFF
		 * after each write, so just try to fill up the FIFO.
		 */
		count = uap->fifosize;
	else /* tx_irq_seen >= 2 */
		/*
		 * FIFO initially at least half-empty, so we can simply
		 * write half the FIFO without polling TXFF.
		 *
		 * Note: the *first* TX IRQ can still race with
		 * pl011_start_tx_pio(), which can result in the FIFO
		 * being fuller than expected in that case.
		 */
		count = uap->fifosize >> 1;

	/*
	 * If the FIFO is full we're guaranteed a TX IRQ at some later point,
	 * and can't transmit immediately in any case:
	 */
	if (unlikely(uap->tx_irq_seen < 2 &&
		     readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF))
		return false;

	if (uap->port.x_char) {
		pl011_tx_char(uap, uap->port.x_char);
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		goto done;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		goto done;

	while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) {
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		goto done;
	}

	if (unlikely(!uap->tx_irq_seen))
		schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout);

done:
	return false;
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static void pl011_tx_softirq(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct uart_amba_port *uap =
		container_of(dwork, struct uart_amba_port, tx_softirq_work);

	spin_lock(&uap->port.lock);
	while (pl011_tx_chars(uap))
		;
	spin_unlock(&uap->port.lock);
}

static void pl011_tx_irq_seen(struct uart_amba_port *uap)
{
	if (likely(uap->tx_irq_seen > 1))
		return;

	uap->tx_irq_seen++;
	if (uap->tx_irq_seen < 2)
		/* first TX IRQ */
		cancel_delayed_work(&uap->tx_softirq_work);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce a 26 ns (1 UART clock) delay
				 * before W1C; a single APB access will incur
				 * 2 pclk (133.12 MHz) delays, so add 2 dummy
				 * reads.
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS) {
				pl011_tx_irq_seen(uap);
				pl011_tx_chars(uap);
			}

			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status = readw(uap->port.membase + UART01x_FR);
	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = readw(uap->port.membase + UART01x_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = readw(uap->port.membase + UART011_CR);

#define TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	writew(cr, uap->port.membase + UART011_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned char __iomem *regs = uap->port.membase;

	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = readw(uap->port.membase + UART011_IMSC);
	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}

static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	writew(lcr_h, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write a read-only register 10 times.
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	}
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	writew(uap->im, uap->port.membase + UART011_IMSC);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
	if (retval)
		goto clk_dis;

	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) &
			  UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	/*
	 * Finally, enable interrupts; when using DMA we only want the
	 * receive timeout interrupt, and if the initial RX DMA job failed,
	 * start in interrupt mode as well.
	 */
	spin_lock_irq(&uap->port.lock);
	/* Clear out any spuriously appearing RX interrupts */
	writew(UART011_RTIS | UART011_RXIS,
	       uap->port.membase + UART011_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	spin_unlock_irq(&uap->port.lock);

	return 0;

clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = readw(uap->port.membase + lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	writew(val, uap->port.membase + lcrh);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cancel_delayed_work_sync(&uap->tx_softirq_work);

	/*
	 * disable all interrupts
	 */
	spin_lock_irq(&uap->port.lock);
	uap->im = 0;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	writew(0xffff & ~UART011_TXIS, uap->port.membase + UART011_ICR);
	spin_unlock_irq(&uap->port.lock);

	pl011_dma_shutdown(uap);

	/*
	 * Free the interrupt
	 */
	free_irq(uap->port.irq, uap);

	/*
	 * disable the port. It should not disable RTS and DTR.
	 * Also RTS and DTR state should be preserved to restore
	 * it during startup().
	 */
	uap->autorts = false;
	spin_lock_irq(&uap->port.lock);
	cr = readw(uap->port.membase + UART011_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx)
		pl011_shutdown_channel(uap, uap->lcrh_tx);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
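
	/*
	 * For illustration (assuming a 24 MHz uartclk, not a value taken
	 * from this driver): at 115200 baud without oversampling,
	 * quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833, which is
	 * split below into IBRD = 833 >> 6 = 13 and FBRD = 833 & 0x3f = 1,
	 * i.e. a divisor of 13 + 1/64 against the ideal
	 * 24000000 / (16 * 115200) = 13.02.
	 */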

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
	 * UART011_FBRD & UART011_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	writew(old_cr, port->membase + UART011_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
/*
 * Release the memory region(s) being used by 'port'.
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'.
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}

static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR, then disable CTS flow control and make
	 * sure the UART and transmitter are enabled.
	 */
	old_cr = readw(uap->port.membase + UART011_CR);
	new_cr = old_cr & ~UART011_CR_CTSEN;
	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(new_cr, uap->port.membase + UART011_CR);

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty and
	 * restore the CR.
	 */
	do {
		status = readw(uap->port.membase + UART01x_FR);
	} while (status & UART01x_FR_BUSY);
	writew(old_cr, uap->port.membase + UART011_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
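/*
 * The console declared below registers as "ttyAMA", so a typical boot
 * command line enabling it looks like this (values are illustrative):
 *
 *	console=ttyAMA0,115200n8
 *
 * For output before this driver probes, the earlycon declared further
 * down can be selected instead; the MMIO base address here is an
 * example only:
 *
 *	earlycon=pl011,0x9000000
 */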
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = readw(uap->port.membase + uap->lcrh_tx);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		/* 0x60 masks the WLEN field of LCR_H */
		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = readw(uap->port.membase + UART011_IBRD);
		fbrd = readw(uap->port.membase + UART011_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (readw(uap->port.membase + UART011_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}

static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, fall back to the first port.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl011_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)

static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		;
	writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		;
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}

static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;
	return 0;
}
EARLYCON_DECLARE(pl011, pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner		= THIS_MODULE,
	.driver_name	= "ttyAMA",
	.dev_name	= "ttyAMA",
	.major		= SERIAL_AMBA_MAJOR,
	.minor		= SERIAL_AMBA_MINOR,
	.nr		= UART_NR,
	.cons		= AMBA_CONSOLE,
};
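/*
 * pl011_probe_dt_alias() below keeps ttyAMA numbering stable by
 * honouring "serialN" entries in the device-tree /aliases node.  A
 * minimal sketch of the fragment it looks up (the uart labels are
 * hypothetical):
 *
 *	aliases {
 *		serial0 = &uart0;
 *		serial1 = &uart1;
 *	};
 */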
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias;
	static bool seen_dev_without_alias;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}

static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports))
		return -EBUSY;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (uap == NULL)
		return -ENOMEM;

	i = pl011_probe_dt_alias(i, &dev->dev);

	base = devm_ioremap(&dev->dev, dev->res.start,
			    resource_size(&dev->res));
	if (!base)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->vendor = vendor;
	uap->lcrh_rx = vendor->lcrh_rx;
	uap->lcrh_tx = vendor->lcrh_tx;
	uap->old_cr = 0;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = uap->fifosize;
	uap->port.ops = &amba_pl011_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq);

	/* Ensure interrupts from this UART are masked and cleared */
	writew(0, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(&dev->dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		amba_ports[i] = NULL;
		uart_unregister_driver(&amba_reg);
	}

	return ret;
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	bool busy = false;
	int i;

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;

	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);

static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};
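/*
 * The AMBA bus core binds this driver by checking each entry of the
 * pl011_ids table above against the peripheral ID read from the
 * device, roughly:
 *
 *	if ((dev->periphid & entry->mask) == entry->id)
 *		// bind this driver
 *
 * With mask 0x000fffff the ARM entry accepts any revision of part
 * 0x011 from designer 0x41 (ARM), while the ST entry compares the low
 * 24 bits of the ID exactly.
 */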
MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};

static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");