/*
 *  Driver for AMBA serial ports
 *
 *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 *  Copyright 1999 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */

#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/sizes.h>

#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)


#define UART_WA_SAVE_NR 14

static void pl011_lockup_wa(unsigned long data);
static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
	ST_UART011_DMAWM,
	ST_UART011_TIMEOUT,
	ST_UART011_LCRH_RX,
	UART011_IBRD,
	UART011_FBRD,
	ST_UART011_LCRH_TX,
	UART011_IFLS,
	ST_UART011_XFCR,
	ST_UART011_XON1,
	ST_UART011_XON2,
	ST_UART011_XOFF1,
	ST_UART011_XOFF2,
	UART011_CR,
	UART011_IMSC
};

static u32 uart_wa_regdata[UART_WA_SAVE_NR];
static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);

/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	unsigned int		ifls;
	unsigned int		fifosize;
	unsigned int		lcrh_tx;
	unsigned int		lcrh_rx;
	bool			oversampling;
	bool			interrupt_may_hang;	/* vendor-specific */
	bool			dma_threshold;
};

static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fifosize		= 16,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
};
static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fifosize		= 64,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.interrupt_may_hang	= true,
	.dma_threshold		= true,
};

static struct uart_amba_port *amba_ports[UART_NR];

/* Deals with DMA transactions */

struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};

/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		lcrh_tx;	/* vendor-specific */
	unsigned int		lcrh_rx;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	char			type[12];
	bool			interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
#endif
};
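/*
 * Note on the data-register layout assumed below: a read of UART01x_DR
 * returns the received character in bits 0-7 with the per-character
 * error flags (FE/PE/BE/OE) in bits 8-11.  The driver ORs in
 * UART_DUMMY_DR_RX (bit 16) so that a character can be discarded via
 * port->ignore_status_mask even when no error bit is set - that is how
 * "ignore everything" is implemented when CREAD is off in set_termios().
 * For example, a '!' (0x21) received with a framing error reads back as
 * 0x10121 once the dummy bit is ORed in.
 */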
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = readw(uap->port.membase + UART01x_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = readw(uap->port.membase + UART01x_DR) |
			UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}


/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE

static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);

	if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
		kfree(sg->buf);
		return -EINVAL;
	}
	return 0;
}

static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
		kfree(sg->buf);
	}
}

static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = uap->port.dev->platform_data;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase + UART01x_DR,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	/* We need platform data */
	if (!plat || !plat->dma_filter) {
		dev_info(uap->port.dev, "no DMA platform data\n");
		return;
	}

	/* Try to acquire a generic DMA engine slave TX channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
	if (!chan) {
		dev_err(uap->port.dev, "no TX DMA channel!\n");
		return;
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	if (plat->dma_rx_param) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase + UART01x_DR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 1,
			.device_fc = false,
		};

		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}

		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
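/*
 * Illustration only (not part of this driver): a board file would hand
 * the channels to us through amba_pl011_data, roughly like the sketch
 * below, where board_dma_filter() and the two channel request structs
 * are hypothetical names owned by the platform's dmaengine driver:
 *
 *	static struct amba_pl011_data uart0_plat_data = {
 *		.dma_filter	= board_dma_filter,
 *		.dma_tx_param	= &uart0_dma_tx_request,
 *		.dma_rx_param	= &uart0_dma_rx_request,
 *	};
 *
 * Leaving dma_rx_param NULL is valid and yields a TX-only DMA setup.
 */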
#ifndef MODULE
/*
 * Stack up the UARTs and let the above initcall be done at device
 * initcall time, because the serial driver is called as an arch
 * initcall, and at this time the DMA subsystem is not yet registered.
 * At this point the driver will switch over to using DMA where desired.
 */
struct dma_uap {
	struct list_head node;
	struct uart_amba_port *uap;
};

static LIST_HEAD(pl011_dma_uarts);

static int __init pl011_dma_initcall(void)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
		pl011_dma_probe_initcall(dmau->uap);
		list_del(node);
		kfree(dmau);
	}
	return 0;
}

device_initcall(pl011_dma_initcall);

static void pl011_dma_probe(struct uart_amba_port *uap)
{
	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
	if (dmau) {
		dmau->uap = uap;
		list_add_tail(&dmau->node, &pl011_dma_uarts);
	}
}
#else
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	pl011_dma_probe_initcall(uap);
}
#endif

static void pl011_dma_remove(struct uart_amba_port *uap)
{
	/* TODO: remove the initcall if it has not yet executed */
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}

/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);

/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
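/*
 * A quick reminder of the circular-buffer arithmetic used below:
 * uart_circ_chars_pending(xmit) is CIRC_CNT(head, tail, UART_XMIT_SIZE),
 * i.e. (head - tail) & (UART_XMIT_SIZE - 1).  For example, with
 * UART_XMIT_SIZE = 4096, head = 10 and tail = 4090, 16 characters are
 * pending and they wrap: 6 at the end of the buffer, 10 at the start -
 * hence the two-memcpy copy-out in pl011_dma_tx_refill() below.
 */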
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second = xmit->head;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}

/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}
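/*
 * TX is steered between two mutually exclusive mechanisms: either the
 * UART011_TXDMAE bit in DMACR is set and the DMA engine feeds the FIFO,
 * or UART011_TXIM is unmasked and pl011_tx_chars() feeds it from
 * interrupt context.  The helpers below flip between the two, and the
 * X-char handling in pl011_dma_tx_start() briefly parks DMA so that a
 * flow-control character can jump the queue.
 */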
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				ret = true;
			} else {
				uap->im |= UART011_TXIM;
				ret = false;
			}
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
			       uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}

/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}

static void pl011_dma_rx_callback(void *data);
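/*
 * The RX side runs a classic double-buffer ("ping-pong") scheme:
 * sgbuf_a and sgbuf_b take turns as the DMA target, selected by
 * dmarx.use_buf_b.  While one buffer is being drained into the TTY
 * layer, the other is armed for the next transfer, so the switch-over
 * in the IRQ and callback paths below only costs a retrigger.
 */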
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}

/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_struct *tty = uap->port.state->port.tty;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	struct device *dev = uap->dmarx.chan->device->dev;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	/* Pick everything from the DMA first */
	if (pending) {
		/* Sync in buffer */
		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(uap->port.state->port.tty,
						   sgbuf->buf, pending);

		/* Return buffer to device */
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(tty);
	spin_lock(&uap->port.lock);
}
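/*
 * Worked example of the residue arithmetic used below: if the armed
 * scatterlist covers PL011_DMA_BUFFER_SIZE (one page, say 4096 bytes)
 * and device_tx_status() reports a residue of 4000 when the FIFO
 * timeout fires, then 4096 - 4000 = 96 bytes have actually landed in
 * the buffer and are handed to pl011_dma_rx_chars().
 */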
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
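/*
 * Two paths deliver RX data: pl011_dma_rx_irq() above runs when the
 * UART's receive-timeout interrupt fires with a partially filled
 * buffer, while pl011_dma_rx_callback() below runs from the DMA
 * engine when the buffer filled up completely before any timeout.
 * The callback therefore retriggers the next job on the other buffer
 * first and only then drains the one that just completed.
 */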
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}

static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
		       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
	}
}

static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		uap->using_rx_dma = false;
	}
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}


#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL
#endif


/*
 * pl011_lockup_wa
 * This workaround aims to break the deadlock that occurs when, after a
 * long transfer over the UART with hardware flow control, the UART
 * interrupt registers can no longer be cleared and the transfer blocks.
 *
 * In this deadlocked state the ICR does not clear even after multiple
 * writes, so the ISR's pass_counter decrements until it reaches zero;
 * that is taken as the trigger point for running this UART_BT_WA.
 */
static void pl011_lockup_wa(unsigned long data)
{
	struct uart_amba_port *uap = amba_ports[0];
	void __iomem *base = uap->port.membase;
	struct circ_buf *xmit = &uap->port.state->xmit;
	struct tty_struct *tty = uap->port.state->port.tty;
	int buf_empty_retries = 200;
	int loop;

	/* Stop HCI layer from submitting data for tx */
	tty->hw_stopped = 1;
	while (!uart_circ_empty(xmit)) {
		if (buf_empty_retries-- == 0)
			break;
		udelay(100);
	}

	/* Backup registers */
	for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
		uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);

	/* Disable UART so that FIFO data is flushed out */
	writew(0x00, uap->port.membase + UART011_CR);

	/* Soft reset UART module */
	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->reset)
			plat->reset();
	}

	/* Restore registers */
	for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
		writew(uart_wa_regdata[loop],
		       uap->port.membase + uart_wa_reg[loop]);

	/* Initialise the old status of the modem signals */
	uap->old_status = readw(uap->port.membase + UART01x_FR) &
		UART01x_FR_MODEM_ANY;

	if (readl(base + UART011_MIS) & 0x2)
		printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");

	/* Start Tx/Rx */
	tty->hw_stopped = 0;
}

static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!pl011_dma_tx_start(uap)) {
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}

static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}

static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
}

static void pl011_rx_chars(struct uart_amba_port *uap)
{
	struct tty_struct *tty = uap->port.state->port.tty;

	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(tty);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
		} else
			uap->im &= ~UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_lock(&uap->port.lock);
}
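/*
 * The vendor IFLS setting arms the TX interrupt at the half-full
 * watermark, so each TX interrupt below refills at most fifosize/2
 * characters (8 on the ARM variant's 16-byte FIFO, 32 on ST's 64-byte
 * FIFO) and is guaranteed room for them in the FIFO.
 */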
static void pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	if (uap->port.x_char) {
		writew(uap->port.x_char, uap->port.membase + UART01x_DR);
		uap->port.icount.tx++;
		uap->port.x_char = 0;
		return;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	count = uap->fifosize >> 1;
	do {
		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uap->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}

static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}

static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);

	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap);

			if (pass_counter-- == 0) {
				if (uap->interrupt_may_hang)
					tasklet_schedule(&pl011_lockup_tlet);
				break;
			}

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
static unsigned int pl01x_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status = readw(uap->port.membase + UART01x_FR);
	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}

static unsigned int pl01x_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int result = 0;
	unsigned int status = readw(uap->port.membase + UART01x_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit

	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}

static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;

	cr = readw(uap->port.membase + UART011_CR);

#define TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	writew(cr, uap->port.membase + UART011_CR);
}

static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}

#ifdef CONFIG_CONSOLE_POLL
static int pl010_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status;

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}

static void pl010_put_poll_char(struct uart_port *port,
				unsigned char ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}

#endif /* CONFIG_CONSOLE_POLL */
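/*
 * Port startup below proceeds in this order: enable the clock, clear
 * stale interrupts, request the IRQ, then briefly run the UART in
 * loopback (LBE) at the fastest possible divisor to provoke the TX
 * FIFO interrupt into a known state before the real control-register
 * value (with RTS/DTR preserved from the last shutdown) is installed.
 */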
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;
	int retval;

	retval = clk_prepare(uap->clk);
	if (retval)
		goto out;

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_enable(uap->clk);
	if (retval)
		goto clk_unprep;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
	if (retval)
		goto clk_dis;

	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

	/*
	 * Provoke TX FIFO interrupt into asserting.
	 */
	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
	writew(cr, uap->port.membase + UART011_CR);
	writew(0, uap->port.membase + UART011_FBRD);
	writew(1, uap->port.membase + UART011_IBRD);
	writew(0, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_tx != uap->lcrh_rx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(0, uap->port.membase + uap->lcrh_tx);
	}
	writew(0, uap->port.membase + UART01x_DR);
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	/*
	 * Finally, enable interrupts, only timeouts when using DMA
	 * if initial RX DMA job failed, start in interrupt mode
	 * as well.
	 */
	spin_lock_irq(&uap->port.lock);
	/* Clear out any spuriously appearing RX interrupts */
	writew(UART011_RTIS | UART011_RXIS,
	       uap->port.membase + UART011_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	spin_unlock_irq(&uap->port.lock);

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}

	return 0;

 clk_dis:
	clk_disable(uap->clk);
 clk_unprep:
	clk_unprepare(uap->clk);
 out:
	return retval;
}

static void pl011_shutdown_channel(struct uart_amba_port *uap,
				   unsigned int lcrh)
{
	unsigned long val;

	val = readw(uap->port.membase + lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	writew(val, uap->port.membase + lcrh);
}

static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;

	/*
	 * disable all interrupts
	 */
	spin_lock_irq(&uap->port.lock);
	uap->im = 0;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);
	spin_unlock_irq(&uap->port.lock);

	pl011_dma_shutdown(uap);

	/*
	 * Free the interrupt
	 */
	free_irq(uap->port.irq, uap);

	/*
	 * disable the port. It should not disable RTS and DTR.
	 * Also RTS and DTR state should be preserved to restore
	 * it during startup().
	 */
	uap->autorts = false;
	cr = readw(uap->port.membase + UART011_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx)
		pl011_shutdown_channel(uap, uap->lcrh_tx);

	/*
	 * Shut down the clock producer
	 */
	clk_disable(uap->clk);
	clk_unprepare(uap->clk);

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->exit)
			plat->exit();
	}
}

static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);

	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
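	/*
	 * Worked example, assuming a 24 MHz uartclk and 115200 baud:
	 * quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833, which
	 * splits below into IBRD = 833 >> 6 = 13 integer divisor steps
	 * and FBRD = 833 & 0x3f = 1 sixty-fourth, i.e. a divisor of
	 * 13 1/64 - the closest fit to 24000000 / (16 * 115200) = 13.02.
	 */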
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/* Set baud rate */
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
	 * ----------^----------^----------^----------^-----
	 */
	writew(lcr_h, port->membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, port->membase + uap->lcrh_tx);
	}
	writew(old_cr, port->membase + UART011_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}

static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}

/*
 * Release the memory region(s) being used by 'port'
 */
static void pl010_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl010_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}

/*
 * Configure/autoconfigure the port.
 */
static void pl010_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl010_request_port(port);
	}
}
/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}

static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl01x_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl01x_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl010_release_port,
	.request_port	= pl010_request_port,
	.config_port	= pl010_config_port,
	.verify_port	= pl010_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= pl010_get_poll_char,
	.poll_put_char	= pl010_put_poll_char,
#endif
};

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE

static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}

static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	old_cr = readw(uap->port.membase + UART011_CR);
	new_cr = old_cr & ~UART011_CR_CTSEN;
	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(new_cr, uap->port.membase + UART011_CR);

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the CR
	 */
	do {
		status = readw(uap->port.membase + UART01x_FR);
	} while (status & UART01x_FR_BUSY);
	writew(old_cr, uap->port.membase + UART011_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
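/*
 * pl011_console_get_options() below reverses the divisor maths from
 * set_termios(): with IBRD/FBRD read back from the hardware, the
 * original baud rate is recovered as uartclk * 4 / (64 * ibrd + fbrd).
 * E.g. ibrd = 13, fbrd = 1 on a 24 MHz clock gives 96000000 / 833,
 * about 115246, which is close enough to be used as 115200.
 */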
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = readw(uap->port.membase + uap->lcrh_tx);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = readw(uap->port.membase + UART011_IBRD);
		fbrd = readw(uap->port.membase + UART011_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (readw(uap->port.membase + UART011_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}

static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		pl011_console_get_options(uap, &baud, &parity, &bits);

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}

static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
#else
#define AMBA_CONSOLE	NULL
#endif

static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
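/*
 * With the console compiled in, the kernel command line selects this
 * driver the usual way, e.g.:
 *
 *	console=ttyAMA0,115200n8
 *
 * where the options string ("115200n8") is parsed by
 * uart_parse_options() in pl011_console_setup() above; if it is
 * omitted, the current hardware settings are read back instead.
 */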
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	void __iomem *base;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			break;

	if (i == ARRAY_SIZE(amba_ports)) {
		ret = -EBUSY;
		goto out;
	}

	uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
	if (uap == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!base) {
		ret = -ENOMEM;
		goto free;
	}

	uap->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk)) {
		ret = PTR_ERR(uap->clk);
		goto unmap;
	}

	uap->vendor = vendor;
	uap->lcrh_rx = vendor->lcrh_rx;
	uap->lcrh_tx = vendor->lcrh_tx;
	uap->old_cr = 0;
	uap->fifosize = vendor->fifosize;
	uap->interrupt_may_hang = vendor->interrupt_may_hang;
	uap->port.dev = &dev->dev;
	uap->port.mapbase = dev->res.start;
	uap->port.membase = base;
	uap->port.iotype = UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.fifosize = uap->fifosize;
	uap->port.ops = &amba_pl011_pops;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = i;
	pl011_dma_probe(uap);

	/* Ensure interrupts from this UART are masked and cleared */
	writew(0, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	amba_ports[i] = uap;

	amba_set_drvdata(dev, uap);
	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret) {
		amba_set_drvdata(dev, NULL);
		amba_ports[i] = NULL;
		pl011_dma_remove(uap);
		clk_put(uap->clk);
 unmap:
		iounmap(base);
 free:
		kfree(uap);
	}
 out:
	return ret;
}

static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	int i;

	amba_set_drvdata(dev, NULL);

	uart_remove_one_port(&amba_reg, &uap->port);

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;

	pl011_dma_remove(uap);
	iounmap(uap->port.membase);
	clk_put(uap->clk);
	kfree(uap);
	return 0;
}

#ifdef CONFIG_PM
static int pl011_suspend(struct amba_device *dev, pm_message_t state)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);

static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
#ifdef CONFIG_PM
	.suspend	= pl011_suspend,
	.resume		= pl011_resume,
#endif
};

static int __init pl011_init(void)
{
	int ret;
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	ret = uart_register_driver(&amba_reg);
	if (ret == 0) {
		ret = amba_driver_register(&pl011_driver);
		if (ret)
			uart_unregister_driver(&amba_reg);
	}
	return ret;
}

static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
	uart_unregister_driver(&amba_reg);
}

/*
 * While this can be a module, if builtin it's most likely the console
 * So let's leave module_exit but move module_init to an earlier place
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");