// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-sff.c - helper library for PCI IDE BMDMA
 *
 * Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2006 Jeff Garzik
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
#include <trace/events/libata.h>
#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);

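/*
 * Usage sketch (illustrative, not part of the original file): a simple SFF
 * driver typically inherits these operations and overrides only what its
 * hardware needs.  The "foo" names below are hypothetical.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *		.set_piomode	= foo_set_piomode,
 *	};
 */
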
/**
 * ata_sff_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and return its value. This also clears pending interrupts
 *      from this device
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read device alternate status reg
 * @ap: port where the device is
 * @status: pointer to a status value
 *
 * Reads ATA alternate status register for currently-selected device
 * and return its value.
 *
 * RETURN:
 * true if the register exists, false if not.
 *
 * LOCKING:
 * Inherited from caller.
 */
static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
	u8 tmp;

	if (ap->ops->sff_check_altstatus) {
		tmp = ap->ops->sff_check_altstatus(ap);
		goto read;
	}
	if (ap->ioaddr.altstatus_addr) {
		tmp = ioread8(ap->ioaddr.altstatus_addr);
		goto read;
	}
	return false;

read:
	if (status)
		*status = tmp;
	return true;
}

/**
 * ata_sff_irq_status - Check if the device is busy
 * @ap: port where the device is
 *
 * Determine if the port is currently busy. Uses altstatus
 * if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source. Non ctl capable devices don't
 * share interrupt lines fortunately for us.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	/* Not us: We are busy */
	if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
		return status;
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	ata_sff_altstatus(ap, NULL);
}

/**
 * ata_sff_pause - Flush writes and wait 400nS
 * @ap: Port to pause for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	/*
	 * An altstatus read will cause the needed delay without
	 * messing up the IRQ status
	 */
	if (ata_sff_altstatus(ap, NULL))
		return;
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout in msecs
 * @tmout: overall timeout in msecs
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_warn(ap,
			      "port is slow to respond, please be patient (Status 0x%x)\n",
			      status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_err(ap,
			     "port failed to respond (%lu secs, Status 0x%x)\n",
			     DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);

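/*
 * Illustrative use of the two-stage timeout above (a sketch, not from this
 * file): a caller would pass a short "impatience" timeout, after which the
 * warning is printed, and a longer hard timeout, e.g.
 *
 *	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *
 * with both values in milliseconds, matching @tmout_pat and @tmout.
 */
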
static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 * ata_sff_set_devctl - Write device control reg
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes ATA device control register.
 *
 * RETURN:
 * true if the register exists, false if not.
 *
 * LOCKING:
 * Inherited from caller.
 */
static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl) {
		ap->ops->sff_set_devctl(ap, ctl);
		return true;
	}
	if (ap->ioaddr.ctl_addr) {
		iowrite8(ctl, ap->ioaddr.ctl_addr);
		return true;
	}

	return false;
}

/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_sff_dev_select(), which
 * additionally provides the services of inserting the proper
 * pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		iowrite8(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);

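/*
 * Worked example for the LBA48 ordering above (illustrative only): a 48-bit
 * sector number carries its low 24 bits in lbal/lbam/lbah and the high bits
 * in hob_lbal/hob_lbam/hob_lbah, and ata_sff_tf_load() writes the HOB bytes
 * first so each shadow register ends up holding the "previous" (HOB) and
 * "current" values.  For block 0x123456789A:
 *
 *	tf->lbal = 0x9A; tf->lbam = 0x78; tf->lbah = 0x56;
 *	tf->hob_lbal = 0x34; tf->hob_lbam = 0x12; tf->hob_lbah = 0x00;
 */
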
/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf. Assumes the device has a fully SFF compliant task file
 * layout and behaviour. If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->status = ata_sff_check_status(ap);
	tf->error = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 * @tag: tag of the associated command
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf,
				  unsigned int tag)
{
	trace_ata_tf_load(ap, tf);
	ap->ops->sff_tf_load(ap, tf);
	trace_ata_exec_command(ap, tf, tag);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 * ata_sff_data_xfer32 - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_device *dev = qc->dev;
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(qc, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);

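/*
 * Note (not from the original file): drivers whose data port is 32-bit
 * capable typically point .sff_data_xfer at ata_sff_data_xfer32 and arrange
 * for ATA_PFLAG_PIO32 to be set on the port; as the check above shows, the
 * helper silently falls back to the 16-bit ata_sff_data_xfer() otherwise.
 */
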
static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
		unsigned int offset, size_t xfer_size)
{
	bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	unsigned char *buf;

	buf = kmap_atomic(page);
	qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
	kunmap_atomic(buf);

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);
}

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;

	if (!qc->cursg) {
		qc->curbytes = qc->nbytes;
		return;
	}
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);

	/*
	 * Split the transfer when it splits a page boundary.  Note that the
	 * split still has to be dword aligned like all ATA data transfers.
	 */
	WARN_ON_ONCE(offset % 4);
	if (offset + qc->sect_size > PAGE_SIZE) {
		unsigned int split_len = PAGE_SIZE - offset;

		ata_pio_xfer(qc, page, offset, split_len);
		ata_pio_xfer(qc, nth_page(page, 1), 0,
			     qc->sect_size - split_len);
	} else {
		ata_pio_xfer(qc, page, offset, qc->sect_size);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		if (!qc->cursg)
			ap->hsm_task_state = HSM_ST_LAST;
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called.  Send the CDB.
 *
 * LOCKING:
 * caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	trace_atapi_pio_transfer_data(qc, offset, count);

	/* do the actual data transfer */
	buf = kmap_atomic(page);
	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
	kunmap_atomic(buf);

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & ATAPI_COD))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc on going
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on standard HSM.
 *
 * LOCKING:
 * If @in_wq is zero, spin_lock_irqsave(host lock).
 * Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (ap->ops->error_handler) {
		if (in_wq) {
			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
		} else
			ata_qc_complete(qc);
	}
}

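/*
 * Summary of the host state machine driven by ata_sff_hsm_move() below
 * (descriptive only): HSM_ST_FIRST sends the first data block or ATAPI CDB,
 * HSM_ST moves data while the device asserts DRQ, HSM_ST_LAST checks final
 * status and completes the command, HSM_ST_ERR completes it with an error,
 * and HSM_ST_IDLE means no command is being processed.
 */
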
/**
 * ata_sff_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc on going
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	int poll_next;

	lockdep_assert_held(ap->lock);

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	trace_ata_sff_hsm_state(qc, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device.  Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have. Set NODEV_HINT
				 * for it. Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		trace_ata_sff_hsm_command_complete(qc, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		WARN(true, "ata%d: SFF host state machine in invalid state %d",
		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);

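/*
 * Note (descriptive only): ata_sff_hsm_move() is driven from two paths in
 * this file - the polling workqueue path (ata_sff_pio_task(), in_wq=1),
 * which keeps calling it as long as it returns poll_next, and the interrupt
 * path (__ata_sff_port_intr(), in_wq=0), which calls it once per taken IRQ.
 */
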
void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	trace_ata_sff_flush_pio_task(ap);

	cancel_delayed_work_sync(&ap->sff_pio_task);

	/*
	 * We wanna reset the HSM state to IDLE.  If we do so without
	 * grabbing the port lock, critical sections protected by it which
	 * expect the HSM state to stay stable may get surprised.  For
	 * example, we may set IDLE in between the time
	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
	 */
	spin_lock_irq(ap->lock);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irq(ap->lock);

	ap->sff_pio_task_link = NULL;
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	spin_lock_irq(ap->lock);

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		goto out_unlock;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
		spin_lock_irq(ap->lock);

		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			goto out_unlock;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
out_unlock:
	spin_unlock_irq(ap->lock);
}

/**
 * ata_sff_qc_issue - issue taskfile to a SFF controller
 * @qc: command to issue to device
 *
 * This function issues a PIO or NODATA command to a SFF
 * controller.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest.  otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

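/*
 * Initial HSM state chosen by ata_sff_qc_issue() above, per protocol
 * (descriptive only):
 *
 *	ATA_PROT_NODATA			-> HSM_ST_LAST
 *	ATA_PROT_PIO, write		-> HSM_ST_FIRST (first block sent from task)
 *	ATA_PROT_PIO, read		-> HSM_ST
 *	ATAPI_PROT_PIO/NODATA		-> HSM_ST_FIRST (CDB still to be sent)
 */
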
/**
 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 * @qc: qc to fill result TF for
 *
 * @qc is finished and result TF needs to be filled.  Fill it
 * using ->sff_tf_read.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	trace_ata_sff_port_intr(qc, hsmv_on_idle);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 * ata_sff_port_intr - Handle SFF port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting IRQ line, nobody cared will ensue.  Check IRQ
	 * pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 * ata_sff_interrupt - Default SFF ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_sff_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);

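/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * normally wires this handler up when activating its host, e.g.
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 *
 * where "foo_sht" is the driver's scsi_host_template (hypothetical name).
 */
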
/**
 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
 * @ap: port that appears to have timed out
 *
 * Called from the libata error handlers when the core code suspects
 * an interrupt has been lost. If it has, complete anything we can and
 * then return. Interface must support altstatus for this faster
 * recovery to occur.
 *
 * Locking:
 * Caller holds host lock
 */

void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
		return;
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 * ata_sff_freeze - Freeze SFF controller port
 * @ap: port to freeze
 *
 * Freeze SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);

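/*
 * Sketch (hypothetical, not from this file): a controller with its own
 * interrupt mask register would typically wrap the stock freeze, e.g.
 *
 *	static void foo_freeze(struct ata_port *ap)
 *	{
 *		foo_mask_chip_irqs(ap);		/+ hypothetical chip-level mask +/
 *		ata_sff_freeze(ap);
 *	}
 */
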
/**
 * ata_sff_thaw - Thaw SFF controller port
 * @ap: port to thaw
 *
 * Thaw SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 * ata_sff_prereset - prepare SFF link for reset
 * @link: SFF link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * SFF link @link is about to be reset.  Initialize it.  It first
 * calls ata_std_prereset() and waits for !BSY if the port is
 * being softreset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * Always 0.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	/* The standard prereset is best-effort and always returns 0 */
	ata_std_prereset(link, deadline);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_warn(link,
				      "device not ready (errno=%d), forcing hardreset\n",
				      rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * RETURN:
 * true if device is present, false if not.
 *
 * LOCKING:
 * caller.
 */
static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return true;	/* we found a device */

	return false;		/* nothing found */
}

/**
 * ata_sff_dev_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);
	err = tf.error;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_port_classify(ap, &tf);
	switch (class) {
	case ATA_DEV_UNKNOWN:
		/*
		 * If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
		break;
	case ATA_DEV_ATA:
		if (ap->ops->sff_check_status(ap) == 0)
			class = ATA_DEV_NONE;
		break;
	}
	return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);

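/*
 * Note (descriptive, not from the original file): classification relies on
 * the signature the device leaves in the shadow registers after reset; the
 * classic values are lbam/lbah of 0x00/0x00 for an ATA device and 0x14/0xEB
 * for an ATAPI device, which is what ata_port_classify() decodes from @tf.
 */
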
/**
 * ata_sff_wait_after_reset - wait for devices to become ready after reset
 * @link: SFF link which is just reset
 * @devmask: mask of present devices
 * @deadline: deadline jiffies for the operation
 *
 * Wait for devices attached to SFF @link to become ready after
 * reset.  It contains preceding 150ms wait to avoid accessing TF
 * status register too early.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -ENODEV if some or all of devices in @devmask
 * don't seem to exist.  -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			ata_msleep(ap, 50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);

static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ioaddr.ctl_addr) {
		/* software reset.  causes dev0 to be selected */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
		ap->last_ctl = ap->ctl;
	}

	/* wait the port to become ready */
	return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
}

/**
 * ata_sff_softreset - reset host port via ATA SRST
 * @link: ATA link to reset
 * @classes: resulting classes of attached devices
 * @deadline: deadline jiffies for the operation
 *
 * Reset host port using ATA SRST.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(ap, 0);

	/* issue bus reset */
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(&link->device[1],
						  devmask & (1 << 1), &err);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);

/**
 * sata_sff_hardreset - reset host port via SATA phy reset
 * @link: link to reset
 * @class: resulting class of attached device
 * @deadline: deadline jiffies for the operation
 *
 * SATA phy-reset host port using DET bits of SControl register,
 * wait for !BSY and classify the attached device.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
2029 */ 2030 int sata_sff_hardreset(struct ata_link *link, unsigned int *class, 2031 unsigned long deadline) 2032 { 2033 struct ata_eh_context *ehc = &link->eh_context; 2034 const unsigned long *timing = sata_ehc_deb_timing(ehc); 2035 bool online; 2036 int rc; 2037 2038 rc = sata_link_hardreset(link, timing, deadline, &online, 2039 ata_sff_check_ready); 2040 if (online) 2041 *class = ata_sff_dev_classify(link->device, 1, NULL); 2042 2043 return rc; 2044 } 2045 EXPORT_SYMBOL_GPL(sata_sff_hardreset); 2046 2047 /** 2048 * ata_sff_postreset - SFF postreset callback 2049 * @link: the target SFF ata_link 2050 * @classes: classes of attached devices 2051 * 2052 * This function is invoked after a successful reset. It first 2053 * calls ata_std_postreset() and performs SFF specific postreset 2054 * processing. 2055 * 2056 * LOCKING: 2057 * Kernel thread context (may sleep) 2058 */ 2059 void ata_sff_postreset(struct ata_link *link, unsigned int *classes) 2060 { 2061 struct ata_port *ap = link->ap; 2062 2063 ata_std_postreset(link, classes); 2064 2065 /* is double-select really necessary? */ 2066 if (classes[0] != ATA_DEV_NONE) 2067 ap->ops->sff_dev_select(ap, 1); 2068 if (classes[1] != ATA_DEV_NONE) 2069 ap->ops->sff_dev_select(ap, 0); 2070 2071 /* bail out if no device is present */ 2072 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) 2073 return; 2074 2075 /* set up device control */ 2076 if (ata_sff_set_devctl(ap, ap->ctl)) 2077 ap->last_ctl = ap->ctl; 2078 } 2079 EXPORT_SYMBOL_GPL(ata_sff_postreset); 2080 2081 /** 2082 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers 2083 * @qc: command 2084 * 2085 * Drain the FIFO and device of any stuck data following a command 2086 * failing to complete. In some cases this is necessary before a 2087 * reset will recover the device. 2088 * 2089 */ 2090 2091 void ata_sff_drain_fifo(struct ata_queued_cmd *qc) 2092 { 2093 int count; 2094 struct ata_port *ap; 2095 2096 /* We only need to flush incoming data when a command was running */ 2097 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) 2098 return; 2099 2100 ap = qc->ap; 2101 /* Drain up to 64K of data before we give up this recovery method */ 2102 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) 2103 && count < 65536; count += 2) 2104 ioread16(ap->ioaddr.data_addr); 2105 2106 if (count) 2107 ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count); 2108 2109 } 2110 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); 2111 2112 /** 2113 * ata_sff_error_handler - Stock error handler for SFF controller 2114 * @ap: port to handle error for 2115 * 2116 * Stock error handler for SFF controller. It can handle both 2117 * PATA and SATA controllers. Many controllers should be able to 2118 * use this EH as-is or with some added handling before and 2119 * after. 2120 * 2121 * LOCKING: 2122 * Kernel thread context (may sleep) 2123 */ 2124 void ata_sff_error_handler(struct ata_port *ap) 2125 { 2126 ata_reset_fn_t softreset = ap->ops->softreset; 2127 ata_reset_fn_t hardreset = ap->ops->hardreset; 2128 struct ata_queued_cmd *qc; 2129 unsigned long flags; 2130 2131 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2132 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2133 qc = NULL; 2134 2135 spin_lock_irqsave(ap->lock, flags); 2136 2137 /* 2138 * We *MUST* do FIFO draining before we issue a reset as 2139 * several devices helpfully clear their internal state and 2140 * will lock solid if we touch the data port post reset. 
Pass 2141 * qc in case anyone wants to do different PIO/DMA recovery or 2142 * has per command fixups 2143 */ 2144 if (ap->ops->sff_drain_fifo) 2145 ap->ops->sff_drain_fifo(qc); 2146 2147 spin_unlock_irqrestore(ap->lock, flags); 2148 2149 /* ignore built-in hardresets if SCR access is not available */ 2150 if ((hardreset == sata_std_hardreset || 2151 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) 2152 hardreset = NULL; 2153 2154 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, 2155 ap->ops->postreset); 2156 } 2157 EXPORT_SYMBOL_GPL(ata_sff_error_handler); 2158 2159 /** 2160 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2161 * @ioaddr: IO address structure to be initialized 2162 * 2163 * Utility function which initializes data_addr, error_addr, 2164 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 2165 * device_addr, status_addr, and command_addr to standard offsets 2166 * relative to cmd_addr. 2167 * 2168 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 2169 */ 2170 void ata_sff_std_ports(struct ata_ioports *ioaddr) 2171 { 2172 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 2173 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 2174 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 2175 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 2176 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 2177 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 2178 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 2179 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 2180 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 2181 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 2182 } 2183 EXPORT_SYMBOL_GPL(ata_sff_std_ports); 2184 2185 #ifdef CONFIG_PCI 2186 2187 static bool ata_resources_present(struct pci_dev *pdev, int port) 2188 { 2189 int i; 2190 2191 /* Check the PCI resources for this channel are enabled */ 2192 port *= 2; 2193 for (i = 0; i < 2; i++) { 2194 if (pci_resource_start(pdev, port + i) == 0 || 2195 pci_resource_len(pdev, port + i) == 0) 2196 return false; 2197 } 2198 return true; 2199 } 2200 2201 /** 2202 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host 2203 * @host: target ATA host 2204 * 2205 * Acquire native PCI ATA resources for @host and initialize the 2206 * first two ports of @host accordingly. Ports marked dummy are 2207 * skipped and allocation failure makes the port dummy. 2208 * 2209 * Note that native PCI resources are valid even for legacy hosts 2210 * as we fix up pdev resources array early in boot, so this 2211 * function can be used for both native and legacy SFF hosts. 2212 * 2213 * LOCKING: 2214 * Inherited from calling layer (may sleep). 2215 * 2216 * RETURNS: 2217 * 0 if at least one port is initialized, -ENODEV if no port is 2218 * available. 2219 */ 2220 int ata_pci_sff_init_host(struct ata_host *host) 2221 { 2222 struct device *gdev = host->dev; 2223 struct pci_dev *pdev = to_pci_dev(gdev); 2224 unsigned int mask = 0; 2225 int i, rc; 2226 2227 /* request, iomap BARs and init port addresses accordingly */ 2228 for (i = 0; i < 2; i++) { 2229 struct ata_port *ap = host->ports[i]; 2230 int base = i * 2; 2231 void __iomem * const *iomap; 2232 2233 if (ata_port_is_dummy(ap)) 2234 continue; 2235 2236 /* Discard disabled ports. Some controllers show 2237 * their unused channels this way. Disabled ports are 2238 * made dummy. 
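		 * A channel counts as present only when both of its PCI
		 * BARs (the command and control blocks checked by
		 * ata_resources_present() above) are enabled.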
2239 */ 2240 if (!ata_resources_present(pdev, i)) { 2241 ap->ops = &ata_dummy_port_ops; 2242 continue; 2243 } 2244 2245 rc = pcim_iomap_regions(pdev, 0x3 << base, 2246 dev_driver_string(gdev)); 2247 if (rc) { 2248 dev_warn(gdev, 2249 "failed to request/iomap BARs for port %d (errno=%d)\n", 2250 i, rc); 2251 if (rc == -EBUSY) 2252 pcim_pin_device(pdev); 2253 ap->ops = &ata_dummy_port_ops; 2254 continue; 2255 } 2256 host->iomap = iomap = pcim_iomap_table(pdev); 2257 2258 ap->ioaddr.cmd_addr = iomap[base]; 2259 ap->ioaddr.altstatus_addr = 2260 ap->ioaddr.ctl_addr = (void __iomem *) 2261 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); 2262 ata_sff_std_ports(&ap->ioaddr); 2263 2264 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", 2265 (unsigned long long)pci_resource_start(pdev, base), 2266 (unsigned long long)pci_resource_start(pdev, base + 1)); 2267 2268 mask |= 1 << i; 2269 } 2270 2271 if (!mask) { 2272 dev_err(gdev, "no available native port\n"); 2273 return -ENODEV; 2274 } 2275 2276 return 0; 2277 } 2278 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); 2279 2280 /** 2281 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host 2282 * @pdev: target PCI device 2283 * @ppi: array of port_info, must be enough for two ports 2284 * @r_host: out argument for the initialized ATA host 2285 * 2286 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire 2287 * all PCI resources and initialize it accordingly in one go. 2288 * 2289 * LOCKING: 2290 * Inherited from calling layer (may sleep). 2291 * 2292 * RETURNS: 2293 * 0 on success, -errno otherwise. 2294 */ 2295 int ata_pci_sff_prepare_host(struct pci_dev *pdev, 2296 const struct ata_port_info * const *ppi, 2297 struct ata_host **r_host) 2298 { 2299 struct ata_host *host; 2300 int rc; 2301 2302 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) 2303 return -ENOMEM; 2304 2305 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); 2306 if (!host) { 2307 dev_err(&pdev->dev, "failed to allocate ATA host\n"); 2308 rc = -ENOMEM; 2309 goto err_out; 2310 } 2311 2312 rc = ata_pci_sff_init_host(host); 2313 if (rc) 2314 goto err_out; 2315 2316 devres_remove_group(&pdev->dev, NULL); 2317 *r_host = host; 2318 return 0; 2319 2320 err_out: 2321 devres_release_group(&pdev->dev, NULL); 2322 return rc; 2323 } 2324 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); 2325 2326 /** 2327 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it 2328 * @host: target SFF ATA host 2329 * @irq_handler: irq_handler used when requesting IRQ(s) 2330 * @sht: scsi_host_template to use when registering the host 2331 * 2332 * This is the counterpart of ata_host_activate() for SFF ATA 2333 * hosts. This separate helper is necessary because SFF hosts 2334 * use two separate interrupts in legacy mode. 2335 * 2336 * LOCKING: 2337 * Inherited from calling layer (may sleep). 2338 * 2339 * RETURNS: 2340 * 0 on success, -errno otherwise. 
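 *
 * A typical PIO-only probe path pairs this with
 * ata_pci_sff_prepare_host(), e.g. (illustrative sketch; @pdev, @ppi
 * and the "xyz_sht" template are assumed to come from the caller):
 *
 *	struct ata_host *host;
 *	int rc;
 *
 *	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	return ata_pci_sff_activate_host(host, ata_sff_interrupt, &xyz_sht);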
2341 */ 2342 int ata_pci_sff_activate_host(struct ata_host *host, 2343 irq_handler_t irq_handler, 2344 struct scsi_host_template *sht) 2345 { 2346 struct device *dev = host->dev; 2347 struct pci_dev *pdev = to_pci_dev(dev); 2348 const char *drv_name = dev_driver_string(host->dev); 2349 int legacy_mode = 0, rc; 2350 2351 rc = ata_host_start(host); 2352 if (rc) 2353 return rc; 2354 2355 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 2356 u8 tmp8, mask = 0; 2357 2358 /* 2359 * ATA spec says we should use legacy mode when one 2360 * port is in legacy mode, but disabled ports on some 2361 * PCI hosts appear as fixed legacy ports, e.g SB600/700 2362 * on which the secondary port is not wired, so 2363 * ignore ports that are marked as 'dummy' during 2364 * this check 2365 */ 2366 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 2367 if (!ata_port_is_dummy(host->ports[0])) 2368 mask |= (1 << 0); 2369 if (!ata_port_is_dummy(host->ports[1])) 2370 mask |= (1 << 2); 2371 if ((tmp8 & mask) != mask) 2372 legacy_mode = 1; 2373 } 2374 2375 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 2376 return -ENOMEM; 2377 2378 if (!legacy_mode && pdev->irq) { 2379 int i; 2380 2381 rc = devm_request_irq(dev, pdev->irq, irq_handler, 2382 IRQF_SHARED, drv_name, host); 2383 if (rc) 2384 goto out; 2385 2386 for (i = 0; i < 2; i++) { 2387 if (ata_port_is_dummy(host->ports[i])) 2388 continue; 2389 ata_port_desc(host->ports[i], "irq %d", pdev->irq); 2390 } 2391 } else if (legacy_mode) { 2392 if (!ata_port_is_dummy(host->ports[0])) { 2393 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), 2394 irq_handler, IRQF_SHARED, 2395 drv_name, host); 2396 if (rc) 2397 goto out; 2398 2399 ata_port_desc(host->ports[0], "irq %d", 2400 ATA_PRIMARY_IRQ(pdev)); 2401 } 2402 2403 if (!ata_port_is_dummy(host->ports[1])) { 2404 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), 2405 irq_handler, IRQF_SHARED, 2406 drv_name, host); 2407 if (rc) 2408 goto out; 2409 2410 ata_port_desc(host->ports[1], "irq %d", 2411 ATA_SECONDARY_IRQ(pdev)); 2412 } 2413 } 2414 2415 rc = ata_host_register(host, sht); 2416 out: 2417 if (rc == 0) 2418 devres_remove_group(dev, NULL); 2419 else 2420 devres_release_group(dev, NULL); 2421 2422 return rc; 2423 } 2424 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); 2425 2426 static const struct ata_port_info *ata_sff_find_valid_pi( 2427 const struct ata_port_info * const *ppi) 2428 { 2429 int i; 2430 2431 /* look up the first valid port_info */ 2432 for (i = 0; i < 2 && ppi[i]; i++) 2433 if (ppi[i]->port_ops != &ata_dummy_port_ops) 2434 return ppi[i]; 2435 2436 return NULL; 2437 } 2438 2439 static int ata_pci_init_one(struct pci_dev *pdev, 2440 const struct ata_port_info * const *ppi, 2441 struct scsi_host_template *sht, void *host_priv, 2442 int hflags, bool bmdma) 2443 { 2444 struct device *dev = &pdev->dev; 2445 const struct ata_port_info *pi; 2446 struct ata_host *host = NULL; 2447 int rc; 2448 2449 pi = ata_sff_find_valid_pi(ppi); 2450 if (!pi) { 2451 dev_err(&pdev->dev, "no valid port_info specified\n"); 2452 return -EINVAL; 2453 } 2454 2455 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 2456 return -ENOMEM; 2457 2458 rc = pcim_enable_device(pdev); 2459 if (rc) 2460 goto out; 2461 2462 #ifdef CONFIG_ATA_BMDMA 2463 if (bmdma) 2464 /* prepare and activate BMDMA host */ 2465 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); 2466 else 2467 #endif 2468 /* prepare and activate SFF host */ 2469 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 2470 if (rc) 2471 goto out; 2472 host->private_data = host_priv; 2473 
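	/* apply any extra host flags requested by the caller before activation */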
host->flags |= hflags; 2474 2475 #ifdef CONFIG_ATA_BMDMA 2476 if (bmdma) { 2477 pci_set_master(pdev); 2478 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); 2479 } else 2480 #endif 2481 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2482 out: 2483 if (rc == 0) 2484 devres_remove_group(&pdev->dev, NULL); 2485 else 2486 devres_release_group(&pdev->dev, NULL); 2487 2488 return rc; 2489 } 2490 2491 /** 2492 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller 2493 * @pdev: Controller to be initialized 2494 * @ppi: array of port_info, must be enough for two ports 2495 * @sht: scsi_host_template to use when registering the host 2496 * @host_priv: host private_data 2497 * @hflag: host flags 2498 * 2499 * This is a helper function which can be called from a driver's 2500 * xxx_init_one() probe function if the hardware uses traditional 2501 * IDE taskfile registers and is PIO only. 2502 * 2503 * ASSUMPTION: 2504 * Nobody makes a single channel controller that appears solely as 2505 * the secondary legacy port on PCI. 2506 * 2507 * LOCKING: 2508 * Inherited from PCI layer (may sleep). 2509 * 2510 * RETURNS: 2511 * Zero on success, negative on errno-based value on error. 2512 */ 2513 int ata_pci_sff_init_one(struct pci_dev *pdev, 2514 const struct ata_port_info * const *ppi, 2515 struct scsi_host_template *sht, void *host_priv, int hflag) 2516 { 2517 return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0); 2518 } 2519 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); 2520 2521 #endif /* CONFIG_PCI */ 2522 2523 /* 2524 * BMDMA support 2525 */ 2526 2527 #ifdef CONFIG_ATA_BMDMA 2528 2529 const struct ata_port_operations ata_bmdma_port_ops = { 2530 .inherits = &ata_sff_port_ops, 2531 2532 .error_handler = ata_bmdma_error_handler, 2533 .post_internal_cmd = ata_bmdma_post_internal_cmd, 2534 2535 .qc_prep = ata_bmdma_qc_prep, 2536 .qc_issue = ata_bmdma_qc_issue, 2537 2538 .sff_irq_clear = ata_bmdma_irq_clear, 2539 .bmdma_setup = ata_bmdma_setup, 2540 .bmdma_start = ata_bmdma_start, 2541 .bmdma_stop = ata_bmdma_stop, 2542 .bmdma_status = ata_bmdma_status, 2543 2544 .port_start = ata_bmdma_port_start, 2545 }; 2546 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); 2547 2548 const struct ata_port_operations ata_bmdma32_port_ops = { 2549 .inherits = &ata_bmdma_port_ops, 2550 2551 .sff_data_xfer = ata_sff_data_xfer32, 2552 .port_start = ata_bmdma_port_start32, 2553 }; 2554 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); 2555 2556 /** 2557 * ata_bmdma_fill_sg - Fill PCI IDE PRD table 2558 * @qc: Metadata associated with taskfile to be transferred 2559 * 2560 * Fill PCI IDE PRD (scatter-gather) table with segments 2561 * associated with the current disk command. 2562 * 2563 * LOCKING: 2564 * spin_lock_irqsave(host lock) 2565 * 2566 */ 2567 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) 2568 { 2569 struct ata_port *ap = qc->ap; 2570 struct ata_bmdma_prd *prd = ap->bmdma_prd; 2571 struct scatterlist *sg; 2572 unsigned int si, pi; 2573 2574 pi = 0; 2575 for_each_sg(qc->sg, sg, qc->n_elem, si) { 2576 u32 addr, offset; 2577 u32 sg_len, len; 2578 2579 /* determine if physical DMA addr spans 64K boundary. 2580 * Note h/w doesn't support 64-bit, so we unconditionally 2581 * truncate dma_addr_t to u32. 
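		 *
		 * For example (numbers purely illustrative): a 0x3000 byte
		 * segment at bus address 0xf000 crosses the 64K boundary
		 * and is emitted as two PRD entries, 0x1000 bytes at
		 * 0xf000 followed by 0x2000 bytes at 0x10000.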
2582 */ 2583 addr = (u32) sg_dma_address(sg); 2584 sg_len = sg_dma_len(sg); 2585 2586 while (sg_len) { 2587 offset = addr & 0xffff; 2588 len = sg_len; 2589 if ((offset + sg_len) > 0x10000) 2590 len = 0x10000 - offset; 2591 2592 prd[pi].addr = cpu_to_le32(addr); 2593 prd[pi].flags_len = cpu_to_le32(len & 0xffff); 2594 2595 pi++; 2596 sg_len -= len; 2597 addr += len; 2598 } 2599 } 2600 2601 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 2602 } 2603 2604 /** 2605 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table 2606 * @qc: Metadata associated with taskfile to be transferred 2607 * 2608 * Fill PCI IDE PRD (scatter-gather) table with segments 2609 * associated with the current disk command. Perform the fill 2610 * so that we avoid writing any length 64K records for 2611 * controllers that don't follow the spec. 2612 * 2613 * LOCKING: 2614 * spin_lock_irqsave(host lock) 2615 * 2616 */ 2617 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) 2618 { 2619 struct ata_port *ap = qc->ap; 2620 struct ata_bmdma_prd *prd = ap->bmdma_prd; 2621 struct scatterlist *sg; 2622 unsigned int si, pi; 2623 2624 pi = 0; 2625 for_each_sg(qc->sg, sg, qc->n_elem, si) { 2626 u32 addr, offset; 2627 u32 sg_len, len, blen; 2628 2629 /* determine if physical DMA addr spans 64K boundary. 2630 * Note h/w doesn't support 64-bit, so we unconditionally 2631 * truncate dma_addr_t to u32. 2632 */ 2633 addr = (u32) sg_dma_address(sg); 2634 sg_len = sg_dma_len(sg); 2635 2636 while (sg_len) { 2637 offset = addr & 0xffff; 2638 len = sg_len; 2639 if ((offset + sg_len) > 0x10000) 2640 len = 0x10000 - offset; 2641 2642 blen = len & 0xffff; 2643 prd[pi].addr = cpu_to_le32(addr); 2644 if (blen == 0) { 2645 /* Some PATA chipsets like the CS5530 can't 2646 cope with 0x0000 meaning 64K as the spec 2647 says */ 2648 prd[pi].flags_len = cpu_to_le32(0x8000); 2649 blen = 0x8000; 2650 prd[++pi].addr = cpu_to_le32(addr + 0x8000); 2651 } 2652 prd[pi].flags_len = cpu_to_le32(blen); 2653 2654 pi++; 2655 sg_len -= len; 2656 addr += len; 2657 } 2658 } 2659 2660 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 2661 } 2662 2663 /** 2664 * ata_bmdma_qc_prep - Prepare taskfile for submission 2665 * @qc: Metadata associated with taskfile to be prepared 2666 * 2667 * Prepare ATA taskfile for submission. 2668 * 2669 * LOCKING: 2670 * spin_lock_irqsave(host lock) 2671 */ 2672 enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) 2673 { 2674 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2675 return AC_ERR_OK; 2676 2677 ata_bmdma_fill_sg(qc); 2678 2679 return AC_ERR_OK; 2680 } 2681 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); 2682 2683 /** 2684 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission 2685 * @qc: Metadata associated with taskfile to be prepared 2686 * 2687 * Prepare ATA taskfile for submission. 2688 * 2689 * LOCKING: 2690 * spin_lock_irqsave(host lock) 2691 */ 2692 enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) 2693 { 2694 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2695 return AC_ERR_OK; 2696 2697 ata_bmdma_fill_sg_dumb(qc); 2698 2699 return AC_ERR_OK; 2700 } 2701 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); 2702 2703 /** 2704 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller 2705 * @qc: command to issue to device 2706 * 2707 * This function issues a PIO, NODATA or DMA command to a 2708 * SFF/BMDMA controller. PIO and NODATA are handled by 2709 * ata_sff_qc_issue(). 
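 *
 * For DMA protocols the issue sequence is: load the taskfile with
 * ->sff_tf_load(), program the engine with ->bmdma_setup(), and (for
 * ATA_PROT_DMA) start it with ->bmdma_start(); ATAPI DMA defers
 * ->bmdma_start() until after the CDB has been sent.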
2710 * 2711 * LOCKING: 2712 * spin_lock_irqsave(host lock) 2713 * 2714 * RETURNS: 2715 * Zero on success, AC_ERR_* mask on failure 2716 */ 2717 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) 2718 { 2719 struct ata_port *ap = qc->ap; 2720 struct ata_link *link = qc->dev->link; 2721 2722 /* defer PIO handling to sff_qc_issue */ 2723 if (!ata_is_dma(qc->tf.protocol)) 2724 return ata_sff_qc_issue(qc); 2725 2726 /* select the device */ 2727 ata_dev_select(ap, qc->dev->devno, 1, 0); 2728 2729 /* start the command */ 2730 switch (qc->tf.protocol) { 2731 case ATA_PROT_DMA: 2732 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); 2733 2734 trace_ata_tf_load(ap, &qc->tf); 2735 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 2736 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); 2737 ap->ops->bmdma_setup(qc); /* set up bmdma */ 2738 trace_ata_bmdma_start(ap, &qc->tf, qc->tag); 2739 ap->ops->bmdma_start(qc); /* initiate bmdma */ 2740 ap->hsm_task_state = HSM_ST_LAST; 2741 break; 2742 2743 case ATAPI_PROT_DMA: 2744 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); 2745 2746 trace_ata_tf_load(ap, &qc->tf); 2747 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 2748 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); 2749 ap->ops->bmdma_setup(qc); /* set up bmdma */ 2750 ap->hsm_task_state = HSM_ST_FIRST; 2751 2752 /* send cdb by polling if no cdb interrupt */ 2753 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 2754 ata_sff_queue_pio_task(link, 0); 2755 break; 2756 2757 default: 2758 WARN_ON(1); 2759 return AC_ERR_SYSTEM; 2760 } 2761 2762 return 0; 2763 } 2764 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); 2765 2766 /** 2767 * ata_bmdma_port_intr - Handle BMDMA port interrupt 2768 * @ap: Port on which interrupt arrived (possibly...) 2769 * @qc: Taskfile currently active in engine 2770 * 2771 * Handle port interrupt for given queued command. 2772 * 2773 * LOCKING: 2774 * spin_lock_irqsave(host lock) 2775 * 2776 * RETURNS: 2777 * One if interrupt was handled, zero if not (shared irq). 2778 */ 2779 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 2780 { 2781 struct ata_eh_info *ehi = &ap->link.eh_info; 2782 u8 host_stat = 0; 2783 bool bmdma_stopped = false; 2784 unsigned int handled; 2785 2786 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { 2787 /* check status of DMA engine */ 2788 host_stat = ap->ops->bmdma_status(ap); 2789 trace_ata_bmdma_status(ap, host_stat); 2790 2791 /* if it's not our irq... */ 2792 if (!(host_stat & ATA_DMA_INTR)) 2793 return ata_sff_idle_irq(ap); 2794 2795 /* before we do anything else, clear DMA-Start bit */ 2796 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); 2797 ap->ops->bmdma_stop(qc); 2798 bmdma_stopped = true; 2799 2800 if (unlikely(host_stat & ATA_DMA_ERR)) { 2801 /* error when transferring data to/from memory */ 2802 qc->err_mask |= AC_ERR_HOST_BUS; 2803 ap->hsm_task_state = HSM_ST_ERR; 2804 } 2805 } 2806 2807 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); 2808 2809 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) 2810 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 2811 2812 return handled; 2813 } 2814 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); 2815 2816 /** 2817 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler 2818 * @irq: irq line (unused) 2819 * @dev_instance: pointer to our ata_host information structure 2820 * 2821 * Default interrupt handler for PCI IDE devices. Calls 2822 * ata_bmdma_port_intr() for each port that is not disabled. 
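 *
 * Drivers normally do not request the IRQ themselves but pass this
 * handler to ata_pci_sff_activate_host(), e.g. (sketch only, "xyz_sht"
 * is a placeholder template):
 *
 *	rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &xyz_sht);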
2823 * 2824 * LOCKING: 2825 * Obtains host lock during operation. 2826 * 2827 * RETURNS: 2828 * IRQ_NONE or IRQ_HANDLED. 2829 */ 2830 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) 2831 { 2832 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); 2833 } 2834 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); 2835 2836 /** 2837 * ata_bmdma_error_handler - Stock error handler for BMDMA controller 2838 * @ap: port to handle error for 2839 * 2840 * Stock error handler for BMDMA controller. It can handle both 2841 * PATA and SATA controllers. Most BMDMA controllers should be 2842 * able to use this EH as-is or with some added handling before 2843 * and after. 2844 * 2845 * LOCKING: 2846 * Kernel thread context (may sleep) 2847 */ 2848 void ata_bmdma_error_handler(struct ata_port *ap) 2849 { 2850 struct ata_queued_cmd *qc; 2851 unsigned long flags; 2852 bool thaw = false; 2853 2854 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2855 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2856 qc = NULL; 2857 2858 /* reset PIO HSM and stop DMA engine */ 2859 spin_lock_irqsave(ap->lock, flags); 2860 2861 if (qc && ata_is_dma(qc->tf.protocol)) { 2862 u8 host_stat; 2863 2864 host_stat = ap->ops->bmdma_status(ap); 2865 trace_ata_bmdma_status(ap, host_stat); 2866 2867 /* BMDMA controllers indicate host bus error by 2868 * setting DMA_ERR bit and timing out. As it wasn't 2869 * really a timeout event, adjust error mask and 2870 * cancel frozen state. 2871 */ 2872 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { 2873 qc->err_mask = AC_ERR_HOST_BUS; 2874 thaw = true; 2875 } 2876 2877 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); 2878 ap->ops->bmdma_stop(qc); 2879 2880 /* if we're gonna thaw, make sure IRQ is clear */ 2881 if (thaw) { 2882 ap->ops->sff_check_status(ap); 2883 if (ap->ops->sff_irq_clear) 2884 ap->ops->sff_irq_clear(ap); 2885 } 2886 } 2887 2888 spin_unlock_irqrestore(ap->lock, flags); 2889 2890 if (thaw) 2891 ata_eh_thaw_port(ap); 2892 2893 ata_sff_error_handler(ap); 2894 } 2895 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 2896 2897 /** 2898 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA 2899 * @qc: internal command to clean up 2900 * 2901 * LOCKING: 2902 * Kernel thread context (may sleep) 2903 */ 2904 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) 2905 { 2906 struct ata_port *ap = qc->ap; 2907 unsigned long flags; 2908 2909 if (ata_is_dma(qc->tf.protocol)) { 2910 spin_lock_irqsave(ap->lock, flags); 2911 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); 2912 ap->ops->bmdma_stop(qc); 2913 spin_unlock_irqrestore(ap->lock, flags); 2914 } 2915 } 2916 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 2917 2918 /** 2919 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. 2920 * @ap: Port associated with this ATA transaction. 2921 * 2922 * Clear interrupt and error flags in DMA status register. 2923 * 2924 * May be used as the irq_clear() entry in ata_port_operations. 2925 * 2926 * LOCKING: 2927 * spin_lock_irqsave(host lock) 2928 */ 2929 void ata_bmdma_irq_clear(struct ata_port *ap) 2930 { 2931 void __iomem *mmio = ap->ioaddr.bmdma_addr; 2932 2933 if (!mmio) 2934 return; 2935 2936 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); 2937 } 2938 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 2939 2940 /** 2941 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction 2942 * @qc: Info associated with this ATA transaction. 
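 *
 * Writes the PRD table address and the transfer direction to the BMDMA
 * registers and then issues the taskfile command; the DMA engine itself
 * is only started later by ata_bmdma_start().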
2943  *
2944  * LOCKING:
2945  *	spin_lock_irqsave(host lock)
2946  */
2947 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2948 {
2949 	struct ata_port *ap = qc->ap;
2950 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2951 	u8 dmactl;
2952 
2953 	/* load PRD table addr. */
2954 	mb();	/* make sure PRD table writes are visible to controller */
2955 	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2956 
2957 	/* specify data direction, triple-check start bit is clear */
2958 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2959 	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2960 	if (!rw)
2961 		dmactl |= ATA_DMA_WR;
2962 	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2963 
2964 	/* issue r/w command */
2965 	ap->ops->sff_exec_command(ap, &qc->tf);
2966 }
2967 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2968 
2969 /**
2970  * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2971  * @qc: Info associated with this ATA transaction.
2972  *
2973  * LOCKING:
2974  *	spin_lock_irqsave(host lock)
2975  */
2976 void ata_bmdma_start(struct ata_queued_cmd *qc)
2977 {
2978 	struct ata_port *ap = qc->ap;
2979 	u8 dmactl;
2980 
2981 	/* start host DMA transaction */
2982 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2983 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2984 
2985 	/* Strictly, one may wish to issue an ioread8() here, to
2986 	 * flush the mmio write.  However, control also passes
2987 	 * to the hardware at this point, and it will interrupt
2988 	 * us when we are to resume control.  So, in effect,
2989 	 * we don't care when the mmio write flushes.
2990 	 * Further, a read of the DMA status register _immediately_
2991 	 * following the write may not be what certain flaky hardware
2992 	 * is expecting, so I think it is best not to add a readb()
2993 	 * without first auditing all the MMIO ATA cards/mobos.
2994 	 * Or maybe I'm just being paranoid.
2995 	 *
2996 	 * FIXME: The posting of this write means I/O starts are
2997 	 * unnecessarily delayed for MMIO.
2998 	 */
2999 }
3000 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3001 
3002 /**
3003  * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3004  * @qc: Command we are ending DMA for
3005  *
3006  * Clears the ATA_DMA_START flag in the dma control register
3007  *
3008  * May be used as the bmdma_stop() entry in ata_port_operations.
3009  *
3010  * LOCKING:
3011  *	spin_lock_irqsave(host lock)
3012  */
3013 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3014 {
3015 	struct ata_port *ap = qc->ap;
3016 	void __iomem *mmio = ap->ioaddr.bmdma_addr;
3017 
3018 	/* clear start/stop bit */
3019 	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3020 		 mmio + ATA_DMA_CMD);
3021 
3022 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3023 	ata_sff_dma_pause(ap);
3024 }
3025 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3026 
3027 /**
3028  * ata_bmdma_status - Read PCI IDE BMDMA status
3029  * @ap: Port associated with this ATA transaction.
3030  *
3031  * Read and return BMDMA status register.
3032  *
3033  * May be used as the bmdma_status() entry in ata_port_operations.
3034  *
3035  * LOCKING:
3036  *	spin_lock_irqsave(host lock)
3037  */
3038 u8 ata_bmdma_status(struct ata_port *ap)
3039 {
3040 	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3041 }
3042 EXPORT_SYMBOL_GPL(ata_bmdma_status);
3043 
3044 
3045 /**
3046  * ata_bmdma_port_start - Set port up for bmdma.
3047  * @ap: Port to initialize
3048  *
3049  * Called just after data structures for each port are
3050  * initialized. Allocates space for PRD table.
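 *
 * Note that the PRD table is only allocated when the port actually
 * advertises MWDMA or UDMA modes; PIO-only ports skip the allocation.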
3051 * 3052 * May be used as the port_start() entry in ata_port_operations. 3053 * 3054 * LOCKING: 3055 * Inherited from caller. 3056 */ 3057 int ata_bmdma_port_start(struct ata_port *ap) 3058 { 3059 if (ap->mwdma_mask || ap->udma_mask) { 3060 ap->bmdma_prd = 3061 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, 3062 &ap->bmdma_prd_dma, GFP_KERNEL); 3063 if (!ap->bmdma_prd) 3064 return -ENOMEM; 3065 } 3066 3067 return 0; 3068 } 3069 EXPORT_SYMBOL_GPL(ata_bmdma_port_start); 3070 3071 /** 3072 * ata_bmdma_port_start32 - Set port up for dma. 3073 * @ap: Port to initialize 3074 * 3075 * Called just after data structures for each port are 3076 * initialized. Enables 32bit PIO and allocates space for PRD 3077 * table. 3078 * 3079 * May be used as the port_start() entry in ata_port_operations for 3080 * devices that are capable of 32bit PIO. 3081 * 3082 * LOCKING: 3083 * Inherited from caller. 3084 */ 3085 int ata_bmdma_port_start32(struct ata_port *ap) 3086 { 3087 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; 3088 return ata_bmdma_port_start(ap); 3089 } 3090 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); 3091 3092 #ifdef CONFIG_PCI 3093 3094 /** 3095 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex 3096 * @pdev: PCI device 3097 * 3098 * Some PCI ATA devices report simplex mode but in fact can be told to 3099 * enter non simplex mode. This implements the necessary logic to 3100 * perform the task on such devices. Calling it on other devices will 3101 * have -undefined- behaviour. 3102 */ 3103 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) 3104 { 3105 unsigned long bmdma = pci_resource_start(pdev, 4); 3106 u8 simplex; 3107 3108 if (bmdma == 0) 3109 return -ENOENT; 3110 3111 simplex = inb(bmdma + 0x02); 3112 outb(simplex & 0x60, bmdma + 0x02); 3113 simplex = inb(bmdma + 0x02); 3114 if (simplex & 0x80) 3115 return -EOPNOTSUPP; 3116 return 0; 3117 } 3118 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); 3119 3120 static void ata_bmdma_nodma(struct ata_host *host, const char *reason) 3121 { 3122 int i; 3123 3124 dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason); 3125 3126 for (i = 0; i < 2; i++) { 3127 host->ports[i]->mwdma_mask = 0; 3128 host->ports[i]->udma_mask = 0; 3129 } 3130 } 3131 3132 /** 3133 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host 3134 * @host: target ATA host 3135 * 3136 * Acquire PCI BMDMA resources and initialize @host accordingly. 3137 * 3138 * LOCKING: 3139 * Inherited from calling layer (may sleep). 3140 */ 3141 void ata_pci_bmdma_init(struct ata_host *host) 3142 { 3143 struct device *gdev = host->dev; 3144 struct pci_dev *pdev = to_pci_dev(gdev); 3145 int i, rc; 3146 3147 /* No BAR4 allocation: No DMA */ 3148 if (pci_resource_start(pdev, 4) == 0) { 3149 ata_bmdma_nodma(host, "BAR4 is zero"); 3150 return; 3151 } 3152 3153 /* 3154 * Some controllers require BMDMA region to be initialized 3155 * even if DMA is not in use to clear IRQ status via 3156 * ->sff_irq_clear method. Try to initialize bmdma_addr 3157 * regardless of dma masks. 
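	 * A failure to set the DMA mask below therefore only downgrades
	 * the ports to PIO (see ata_bmdma_nodma()); BAR4 is still mapped
	 * so that ->sff_irq_clear keeps working.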
3158 */ 3159 rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK); 3160 if (rc) 3161 ata_bmdma_nodma(host, "failed to set dma mask"); 3162 3163 /* request and iomap DMA region */ 3164 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); 3165 if (rc) { 3166 ata_bmdma_nodma(host, "failed to request/iomap BAR4"); 3167 return; 3168 } 3169 host->iomap = pcim_iomap_table(pdev); 3170 3171 for (i = 0; i < 2; i++) { 3172 struct ata_port *ap = host->ports[i]; 3173 void __iomem *bmdma = host->iomap[4] + 8 * i; 3174 3175 if (ata_port_is_dummy(ap)) 3176 continue; 3177 3178 ap->ioaddr.bmdma_addr = bmdma; 3179 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && 3180 (ioread8(bmdma + 2) & 0x80)) 3181 host->flags |= ATA_HOST_SIMPLEX; 3182 3183 ata_port_desc(ap, "bmdma 0x%llx", 3184 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); 3185 } 3186 } 3187 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); 3188 3189 /** 3190 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host 3191 * @pdev: target PCI device 3192 * @ppi: array of port_info, must be enough for two ports 3193 * @r_host: out argument for the initialized ATA host 3194 * 3195 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI 3196 * resources and initialize it accordingly in one go. 3197 * 3198 * LOCKING: 3199 * Inherited from calling layer (may sleep). 3200 * 3201 * RETURNS: 3202 * 0 on success, -errno otherwise. 3203 */ 3204 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, 3205 const struct ata_port_info * const * ppi, 3206 struct ata_host **r_host) 3207 { 3208 int rc; 3209 3210 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); 3211 if (rc) 3212 return rc; 3213 3214 ata_pci_bmdma_init(*r_host); 3215 return 0; 3216 } 3217 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); 3218 3219 /** 3220 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller 3221 * @pdev: Controller to be initialized 3222 * @ppi: array of port_info, must be enough for two ports 3223 * @sht: scsi_host_template to use when registering the host 3224 * @host_priv: host private_data 3225 * @hflags: host flags 3226 * 3227 * This function is similar to ata_pci_sff_init_one() but also 3228 * takes care of BMDMA initialization. 3229 * 3230 * LOCKING: 3231 * Inherited from PCI layer (may sleep). 3232 * 3233 * RETURNS: 3234 * Zero on success, negative on errno-based value on error. 3235 */ 3236 int ata_pci_bmdma_init_one(struct pci_dev *pdev, 3237 const struct ata_port_info * const * ppi, 3238 struct scsi_host_template *sht, void *host_priv, 3239 int hflags) 3240 { 3241 return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1); 3242 } 3243 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); 3244 3245 #endif /* CONFIG_PCI */ 3246 #endif /* CONFIG_ATA_BMDMA */ 3247 3248 /** 3249 * ata_sff_port_init - Initialize SFF/BMDMA ATA port 3250 * @ap: Port to initialize 3251 * 3252 * Called on port allocation to initialize SFF/BMDMA specific 3253 * fields. 3254 * 3255 * LOCKING: 3256 * None. 3257 */ 3258 void ata_sff_port_init(struct ata_port *ap) 3259 { 3260 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); 3261 ap->ctl = ATA_DEVCTL_OBS; 3262 ap->last_ctl = 0xFF; 3263 } 3264 3265 int __init ata_sff_init(void) 3266 { 3267 ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE); 3268 if (!ata_sff_wq) 3269 return -ENOMEM; 3270 3271 return 0; 3272 } 3273 3274 void ata_sff_exit(void) 3275 { 3276 destroy_workqueue(ata_sff_wq); 3277 } 3278
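
/*
 * Example: a minimal BMDMA driver built on the helpers above.  This is
 * an illustrative sketch only (the "xyz" names and the PCI ID are made
 * up); it is not compiled as part of this file.
 *
 *	#include <linux/module.h>
 *	#include <linux/pci.h>
 *	#include <linux/libata.h>
 *
 *	static struct scsi_host_template xyz_sht = {
 *		ATA_BMDMA_SHT("pata_xyz"),
 *	};
 *
 *	static struct ata_port_operations xyz_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *	};
 *
 *	static const struct ata_port_info xyz_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.mwdma_mask	= ATA_MWDMA2,
 *		.udma_mask	= ATA_UDMA5,
 *		.port_ops	= &xyz_port_ops,
 *	};
 *
 *	static int xyz_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &xyz_port_info, NULL };
 *
 *		return ata_pci_bmdma_init_one(pdev, ppi, &xyz_sht, NULL, 0);
 *	}
 *
 *	static const struct pci_device_id xyz_pci_ids[] = {
 *		{ PCI_VDEVICE(INTEL, 0xffff) },		// hypothetical device ID
 *		{ }
 *	};
 *
 *	static struct pci_driver xyz_pci_driver = {
 *		.name		= "pata_xyz",
 *		.id_table	= xyz_pci_ids,
 *		.probe		= xyz_init_one,
 *		.remove		= ata_pci_remove_one,
 *	};
 *	module_pci_driver(xyz_pci_driver);
 */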