/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	unsigned int		ifls;
	unsigned int		lcrh_tx;
	unsigned int		lcrh_rx;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;

	unsigned int (*get_fifosize)(unsigned int periphid);
};
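/*
 * Bits [23:20] of the AMBA peripheral ID hold the block revision:
 * ARM PL011 revisions below 3 have a 16-byte FIFO, later revisions
 * have 32 bytes.
 */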
static unsigned int get_fifosize_arm(unsigned int periphid)
{
	unsigned int rev = (periphid >> 20) & 0xf;
	return rev < 3 ? 16 : 32;
}

static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.get_fifosize		= get_fifosize_arm,
};

static unsigned int get_fifosize_st(unsigned int periphid)
{
	return 64;
}

static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.get_fifosize		= get_fifosize_st,
};

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};
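/*
 * RX DMA uses two buffers in a ping-pong scheme: while one buffer is
 * being filled by the DMA engine, the other is drained into the TTY
 * layer.  The timer and residue fields support optional polling of
 * the buffer that is currently in progress.
 */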
struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int		last_residue;
	unsigned long		last_jiffies;
	bool			auto_poll_rate;
	unsigned int		poll_rate;
	unsigned int		poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	struct clk		*clk;
	/* Two optional pin states - default & sleep */
	struct pinctrl		*pinctrl;
	struct pinctrl_state	*pins_default;
	struct pinctrl_state	*pins_sleep;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		lcrh_tx;	/* vendor-specific */
	unsigned int		lcrh_rx;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data	dmarx;
	struct pl011_dmatx_data	dmatx;
#endif
};

/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer.  Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = readw(uap->port.membase + UART01x_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = readw(uap->port.membase + UART01x_DR) |
			UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;

	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
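/*
 * Channel setup tries a DMA-slave lookup (e.g. via the device tree)
 * first and only falls back to the platform-data filter function if
 * that fails.  TX and RX bursts are set to half the FIFO depth.
 */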
static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = uap->port.dev->platform_data;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase + UART01x_DR,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	chan = dma_request_slave_channel(dev, "tx");

	if (!chan) {
		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase + UART01x_DR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 1,
			.device_fc = false,
		};

		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * Default the poll rate to 100 ms if not
				 * specified.  This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* Default poll_timeout to 3 secs if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else
			uap->dmarx.auto_poll_rate = false;

		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}

#ifndef MODULE
/*
 * Stack up the UARTs and let the above initcall be done at device
 * initcall time, because the serial driver is called as an arch
 * initcall, and at this time the DMA subsystem is not yet registered.
 * At this point the driver will switch over to using DMA where desired.
 */
struct dma_uap {
	struct list_head node;
	struct uart_amba_port *uap;
	struct device *dev;
};

static LIST_HEAD(pl011_dma_uarts);

static int __init pl011_dma_initcall(void)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
		pl011_dma_probe_initcall(dmau->dev, dmau->uap);
		list_del(node);
		kfree(dmau);
	}
	return 0;
}

device_initcall(pl011_dma_initcall);

static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
	if (dmau) {
		dmau->uap = uap;
		dmau->dev = dev;
		list_add_tail(&dmau->node, &pl011_dma_uarts);
	}
}
#else
static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
	pl011_dma_probe_initcall(dev, uap);
}
#endif

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	/* TODO: remove the initcall if it has not yet executed */
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;
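	/*
	 * The circular buffer may wrap: if the data is not contiguous
	 * (head has wrapped around below tail), copy the tail..end part
	 * first and then the start..head part into the bounce buffer.
	 */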
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second = xmit->head;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}

/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				ret = true;
			} else {
				uap->im |= UART011_TXIM;
				ret = false;
			}
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
				       uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);
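/*
 * Kick off an RX DMA transfer into the current ping-pong buffer.  On
 * success the RX interrupt is masked, since buffer completion or the
 * poll timer takes over delivering characters.
 */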
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}

/*
 * This is called either when the DMA job is complete, or when the
 * FIFO timeout interrupt occurred.  This must be called with the
 * port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick up the remaining data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out.  Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}

static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;
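	/*
	 * The residue reported by the DMA engine is the number of bytes
	 * not yet transferred, so length - residue is the amount received.
	 */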
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred.  When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler.  So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}

/*
 * Timer handler for Rx DMA polling.
 * On every poll it checks the residue in the DMA buffer and transfers
 * data to the TTY.  last_residue is also updated for the next poll.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode.  We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
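/*
 * Allocate and set up the DMA buffers for this port: a single bounce
 * buffer for TX and the two ping-pong buffers for RX, then enable the
 * DMA-on-error bit and, where possible, start the initial RX job.
 */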
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation.  Set this to 16 bytes, so bursts will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
		       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
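/*
 * Undo pl011_dma_startup(): wait for the transmitter to drain, clear
 * the DMA bits in DMACR, then terminate any in-flight transfers and
 * free the TX and RX buffers.
 */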
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}

#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!pl011_dma_tx_start(uap)) {
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
		} else {
			uap->im &= ~UART011_RXIM;
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue	= PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}

		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_lock(&uap->port.lock);
}

static void pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	if (uap->port.x_char) {
		writew(uap->port.x_char, uap->port.membase + UART01x_DR);
		uap->port.icount.tx++;
		uap->port.x_char = 0;
		return;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	count = uap->fifosize >> 1;
	do {
		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uap->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
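/*
 * The main interrupt handler.  Loops while masked interrupt status is
 * non-zero, but gives up after AMBA_ISR_PASS_LIMIT passes so a stuck
 * interrupt cannot hang the machine.
 */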
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);
	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked.. */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce 26ns(1 uart clk) delay before W1C;
				 * single apb access will incur 2 pclk(133.12Mhz) delay,
				 * so add 2 dummy reads
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap);

			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}

static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status = readw(uap->port.membase + UART01x_FR);
	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}

static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int result = 0;
	unsigned int status = readw(uap->port.membase + UART01x_FR);

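/* Map PL011 flag register bits onto the corresponding TIOCM bits */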
#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;

	cr = readw(uap->port.membase + UART011_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	writew(cr, uap->port.membase + UART011_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL
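/*
 * Acknowledge everything currently pending and mask the TX interrupt,
 * so that polled console I/O is not disturbed by the interrupt handler.
 */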
static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned char __iomem *regs = uap->port.membase;

	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it.  start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g. the
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
}

static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}

static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */

static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	if (!IS_ERR(uap->pins_default)) {
		retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
		if (retval)
			dev_err(port->dev,
				"could not set default pins\n");
	}

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		goto out;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = readw(uap->port.membase + UART011_IMSC);
	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}
	return 0;
 out:
	return retval;
}

static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	writew(uap->im, uap->port.membase + UART011_IMSC);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
	if (retval)
		goto clk_dis;

	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

	/*
	 * Provoke TX FIFO interrupt into asserting.
	 */
	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
	writew(cr, uap->port.membase + UART011_CR);
	writew(0, uap->port.membase + UART011_FBRD);
	writew(1, uap->port.membase + UART011_IBRD);
	writew(0, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_tx != uap->lcrh_rx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(0, uap->port.membase + uap->lcrh_tx);
	}
	writew(0, uap->port.membase + UART01x_DR);
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	/*
	 * Finally, enable interrupts.  Only the timeout interrupt is used
	 * when DMA is running; if the initial RX DMA job failed, start in
	 * interrupt mode as well.
	 */
	spin_lock_irq(&uap->port.lock);
	/* Clear out any spuriously appearing RX interrupts */
	writew(UART011_RTIS | UART011_RXIS,
	       uap->port.membase + UART011_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	spin_unlock_irq(&uap->port.lock);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}
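/* Disable break and the FIFO in the given LCRH register */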
static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = readw(uap->port.membase + lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	writew(val, uap->port.membase + lcrh);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;
	int retval;

	/*
	 * disable all interrupts
	 */
	spin_lock_irq(&uap->port.lock);
	uap->im = 0;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);
	spin_unlock_irq(&uap->port.lock);

	pl011_dma_shutdown(uap);

	/*
	 * Free the interrupt
	 */
	free_irq(uap->port.irq, uap);

	/*
	 * disable the port.  It should not disable RTS and DTR.
	 * Also the RTS and DTR state should be preserved so it can
	 * be restored during startup().
	 */
	uap->autorts = false;
	cr = readw(uap->port.membase + UART011_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx)
		pl011_shutdown_channel(uap, uap->lcrh_tx);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	if (!IS_ERR(uap->pins_sleep)) {
		retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
		if (retval)
			dev_err(port->dev,
				"could not set pins to sleep state\n");
	}

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->exit)
			plat->exit();
	}
}
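/*
 * The PL011 baud divisor is IBRD + FBRD/64, with baud =
 * uartclk / (16 * divisor) (or uartclk / (8 * divisor) when the ST
 * oversampling factor is in use).  set_termios() therefore computes
 * quot = round(64 * divisor) and splits it: the low 6 bits go to
 * FBRD, the rest to IBRD.
 */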
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

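	/*
	 * UART_DUMMY_DR_RX is ORed into every character taken from the
	 * FIFO in pl011_fifo_to_tty() and is never set by the hardware,
	 * so adding it to the ignore mask discards all RX characters.
	 */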
	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
	 * UART011_FBRD & UART011_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	writew(lcr_h, port->membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, port->membase + uap->lcrh_tx);
	}
	writew(old_cr, port->membase + UART011_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}

/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};

static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}
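/*
 * Write a console message out over the UART, polling the FIFO.  The
 * port lock is taken unless we are in sysrq or oops context, where it
 * may already be held.
 */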
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	old_cr = readw(uap->port.membase + UART011_CR);
	new_cr = old_cr & ~UART011_CR_CTSEN;
	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(new_cr, uap->port.membase + UART011_CR);

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty
	 * and restore the CR
	 */
	do {
		status = readw(uap->port.membase + UART01x_FR);
	} while (status & UART01x_FR_BUSY);
	writew(old_cr, uap->port.membase + UART011_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}

static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			     int *parity, int *bits)
{
	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = readw(uap->port.membase + uap->lcrh_tx);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = readw(uap->port.membase + UART011_IBRD);
		fbrd = readw(uap->port.membase + UART011_FBRD);

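		/*
		 * Invert the divisor:
		 * baud = clk / (16 * (IBRD + FBRD/64))
		 *      = 4 * clk / (64 * IBRD + FBRD)
		 */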
static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, fall back to the first port.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	if (!IS_ERR(uap->pins_default)) {
		ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
		if (ret)
			dev_err(uap->port.dev,
				"could not set default pins\n");
	}

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl011_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};

static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (IS_ERR_VALUE(ret)) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
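/*
 * A hypothetical device-tree fragment exercising the alias lookup
 * above (node names and addresses are illustrative only); the
 * "serial0" alias pins this UART to ttyAMA0 regardless of probe order:
 *
 *	aliases {
 *		serial0 = &uart0;
 *	};
 *
 *	uart0: uart@101f1000 {
 *		compatible = "arm,pl011", "arm,primecell";
 *		reg = <0x101f1000 0x1000>;
 *	};
 */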
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports)) {
		ret = -EBUSY;
		goto out;
	}

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (uap == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = pl011_probe_dt_alias(i, &dev->dev);

	base = devm_ioremap(&dev->dev, dev->res.start,
			    resource_size(&dev->res));
	if (!base) {
		ret = -ENOMEM;
		goto out;
	}

	uap->pinctrl = devm_pinctrl_get(&dev->dev);
	if (IS_ERR(uap->pinctrl)) {
		ret = PTR_ERR(uap->pinctrl);
		goto out;
	}
	uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
						 PINCTRL_STATE_DEFAULT);
	if (IS_ERR(uap->pins_default))
		dev_err(&dev->dev, "could not get default pinstate\n");

	uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
					       PINCTRL_STATE_SLEEP);
	if (IS_ERR(uap->pins_sleep))
		dev_dbg(&dev->dev, "could not get sleep pinstate\n");

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk)) {
		ret = PTR_ERR(uap->clk);
		goto out;
	}

	uap->vendor = vendor;
	uap->lcrh_rx = vendor->lcrh_rx;
	uap->lcrh_tx = vendor->lcrh_tx;
	uap->old_cr = 0;
	uap->fifosize = vendor->get_fifosize(dev->periphid);
	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = uap->fifosize;
	uap->port.ops = &amba_pl011_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	pl011_dma_probe(&dev->dev, uap);

	/* Ensure interrupts from this UART are masked and cleared */
	writew(0, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);
	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		amba_set_drvdata(dev, NULL);
		amba_ports[i] = NULL;
		pl011_dma_remove(uap);
	}
 out:
	return ret;
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	int i;

	amba_set_drvdata(dev, NULL);

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;

	pl011_dma_remove(uap);
	return 0;
}

#ifdef CONFIG_PM
static int pl011_suspend(struct amba_device *dev, pm_message_t state)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif
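/*
 * The AMBA bus binds a driver when (periphid & mask) == id.  As an
 * illustration, an ARM PL011 rev 2 reports periphid 0x00241011:
 * masking with 0x000fffff strips the revision field in bits 23:20 and
 * leaves 0x00041011, matching the first entry below.  The same
 * revision field is what the vendor get_fifosize() hook inspects to
 * tell the 16-byte FIFO (rev < 3) from the 32-byte one.
 */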
static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
#ifdef CONFIG_PM
	.suspend	= pl011_suspend,
	.resume		= pl011_resume,
#endif
};

static int __init pl011_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	ret = uart_register_driver(&amba_reg);
	if (ret == 0) {
		ret = amba_driver_register(&pl011_driver);
		if (ret)
			uart_unregister_driver(&amba_reg);
	}
	return ret;
}

static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
	uart_unregister_driver(&amba_reg);
}

/*
 * While this can be a module, if built-in it's most likely the console,
 * so let's leave module_exit but move module_init to an earlier place.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");
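/*
 * Usage note: with the driver built in, a boot console on the first
 * PL011 is typically requested on the kernel command line, e.g.
 *
 *	console=ttyAMA0,115200n8
 *
 * which reaches pl011_console_setup() with options "115200n8".
 */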