1 /* 2 * libata-sff.c - helper library for PCI IDE BMDMA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2006 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 */ 34 35 #include <linux/kernel.h> 36 #include <linux/pci.h> 37 #include <linux/libata.h> 38 #include <linux/highmem.h> 39 40 #include "libata.h" 41 42 const struct ata_port_operations ata_sff_port_ops = { 43 .inherits = &ata_base_port_ops, 44 45 .qc_prep = ata_sff_qc_prep, 46 .qc_issue = ata_sff_qc_issue, 47 .qc_fill_rtf = ata_sff_qc_fill_rtf, 48 49 .freeze = ata_sff_freeze, 50 .thaw = ata_sff_thaw, 51 .prereset = ata_sff_prereset, 52 .softreset = ata_sff_softreset, 53 .hardreset = sata_sff_hardreset, 54 .postreset = ata_sff_postreset, 55 .drain_fifo = ata_sff_drain_fifo, 56 .error_handler = ata_sff_error_handler, 57 .post_internal_cmd = ata_sff_post_internal_cmd, 58 59 .sff_dev_select = ata_sff_dev_select, 60 .sff_check_status = ata_sff_check_status, 61 .sff_tf_load = ata_sff_tf_load, 62 .sff_tf_read = ata_sff_tf_read, 63 .sff_exec_command = ata_sff_exec_command, 64 .sff_data_xfer = ata_sff_data_xfer, 65 .sff_irq_on = ata_sff_irq_on, 66 .sff_irq_clear = ata_sff_irq_clear, 67 68 .lost_interrupt = ata_sff_lost_interrupt, 69 70 .port_start = ata_sff_port_start, 71 }; 72 EXPORT_SYMBOL_GPL(ata_sff_port_ops); 73 74 const struct ata_port_operations ata_bmdma_port_ops = { 75 .inherits = &ata_sff_port_ops, 76 77 .mode_filter = ata_bmdma_mode_filter, 78 79 .bmdma_setup = ata_bmdma_setup, 80 .bmdma_start = ata_bmdma_start, 81 .bmdma_stop = ata_bmdma_stop, 82 .bmdma_status = ata_bmdma_status, 83 }; 84 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); 85 86 const struct ata_port_operations ata_bmdma32_port_ops = { 87 .inherits = &ata_bmdma_port_ops, 88 89 .sff_data_xfer = ata_sff_data_xfer32, 90 }; 91 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); 92 93 /** 94 * ata_fill_sg - Fill PCI IDE PRD table 95 * @qc: Metadata associated with taskfile to be transferred 96 * 97 * Fill PCI IDE PRD (scatter-gather) table with segments 98 * associated with the current disk command. 99 * 100 * LOCKING: 101 * spin_lock_irqsave(host lock) 102 * 103 */ 104 static void ata_fill_sg(struct ata_queued_cmd *qc) 105 { 106 struct ata_port *ap = qc->ap; 107 struct scatterlist *sg; 108 unsigned int si, pi; 109 110 pi = 0; 111 for_each_sg(qc->sg, sg, qc->n_elem, si) { 112 u32 addr, offset; 113 u32 sg_len, len; 114 115 /* determine if physical DMA addr spans 64K boundary. 116 * Note h/w doesn't support 64-bit, so we unconditionally 117 * truncate dma_addr_t to u32. 118 */ 119 addr = (u32) sg_dma_address(sg); 120 sg_len = sg_dma_len(sg); 121 122 while (sg_len) { 123 offset = addr & 0xffff; 124 len = sg_len; 125 if ((offset + sg_len) > 0x10000) 126 len = 0x10000 - offset; 127 128 ap->prd[pi].addr = cpu_to_le32(addr); 129 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff); 130 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); 131 132 pi++; 133 sg_len -= len; 134 addr += len; 135 } 136 } 137 138 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 139 } 140 141 /** 142 * ata_fill_sg_dumb - Fill PCI IDE PRD table 143 * @qc: Metadata associated with taskfile to be transferred 144 * 145 * Fill PCI IDE PRD (scatter-gather) table with segments 146 * associated with the current disk command. Perform the fill 147 * so that we avoid writing any length 64K records for 148 * controllers that don't follow the spec. 149 * 150 * LOCKING: 151 * spin_lock_irqsave(host lock) 152 * 153 */ 154 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) 155 { 156 struct ata_port *ap = qc->ap; 157 struct scatterlist *sg; 158 unsigned int si, pi; 159 160 pi = 0; 161 for_each_sg(qc->sg, sg, qc->n_elem, si) { 162 u32 addr, offset; 163 u32 sg_len, len, blen; 164 165 /* determine if physical DMA addr spans 64K boundary. 166 * Note h/w doesn't support 64-bit, so we unconditionally 167 * truncate dma_addr_t to u32. 168 */ 169 addr = (u32) sg_dma_address(sg); 170 sg_len = sg_dma_len(sg); 171 172 while (sg_len) { 173 offset = addr & 0xffff; 174 len = sg_len; 175 if ((offset + sg_len) > 0x10000) 176 len = 0x10000 - offset; 177 178 blen = len & 0xffff; 179 ap->prd[pi].addr = cpu_to_le32(addr); 180 if (blen == 0) { 181 /* Some PATA chipsets like the CS5530 can't 182 cope with 0x0000 meaning 64K as the spec 183 says */ 184 ap->prd[pi].flags_len = cpu_to_le32(0x8000); 185 blen = 0x8000; 186 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); 187 } 188 ap->prd[pi].flags_len = cpu_to_le32(blen); 189 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); 190 191 pi++; 192 sg_len -= len; 193 addr += len; 194 } 195 } 196 197 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 198 } 199 200 /** 201 * ata_sff_qc_prep - Prepare taskfile for submission 202 * @qc: Metadata associated with taskfile to be prepared 203 * 204 * Prepare ATA taskfile for submission. 205 * 206 * LOCKING: 207 * spin_lock_irqsave(host lock) 208 */ 209 void ata_sff_qc_prep(struct ata_queued_cmd *qc) 210 { 211 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 212 return; 213 214 ata_fill_sg(qc); 215 } 216 EXPORT_SYMBOL_GPL(ata_sff_qc_prep); 217 218 /** 219 * ata_sff_dumb_qc_prep - Prepare taskfile for submission 220 * @qc: Metadata associated with taskfile to be prepared 221 * 222 * Prepare ATA taskfile for submission. 223 * 224 * LOCKING: 225 * spin_lock_irqsave(host lock) 226 */ 227 void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc) 228 { 229 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 230 return; 231 232 ata_fill_sg_dumb(qc); 233 } 234 EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); 235 236 /** 237 * ata_sff_check_status - Read device status reg & clear interrupt 238 * @ap: port where the device is 239 * 240 * Reads ATA taskfile status register for currently-selected device 241 * and return its value. This also clears pending interrupts 242 * from this device 243 * 244 * LOCKING: 245 * Inherited from caller. 246 */ 247 u8 ata_sff_check_status(struct ata_port *ap) 248 { 249 return ioread8(ap->ioaddr.status_addr); 250 } 251 EXPORT_SYMBOL_GPL(ata_sff_check_status); 252 253 /** 254 * ata_sff_altstatus - Read device alternate status reg 255 * @ap: port where the device is 256 * 257 * Reads ATA taskfile alternate status register for 258 * currently-selected device and return its value. 259 * 260 * Note: may NOT be used as the check_altstatus() entry in 261 * ata_port_operations. 262 * 263 * LOCKING: 264 * Inherited from caller. 265 */ 266 static u8 ata_sff_altstatus(struct ata_port *ap) 267 { 268 if (ap->ops->sff_check_altstatus) 269 return ap->ops->sff_check_altstatus(ap); 270 271 return ioread8(ap->ioaddr.altstatus_addr); 272 } 273 274 /** 275 * ata_sff_irq_status - Check if the device is busy 276 * @ap: port where the device is 277 * 278 * Determine if the port is currently busy. Uses altstatus 279 * if available in order to avoid clearing shared IRQ status 280 * when finding an IRQ source. Non ctl capable devices don't 281 * share interrupt lines fortunately for us. 282 * 283 * LOCKING: 284 * Inherited from caller. 285 */ 286 static u8 ata_sff_irq_status(struct ata_port *ap) 287 { 288 u8 status; 289 290 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { 291 status = ata_sff_altstatus(ap); 292 /* Not us: We are busy */ 293 if (status & ATA_BUSY) 294 return status; 295 } 296 /* Clear INTRQ latch */ 297 status = ap->ops->sff_check_status(ap); 298 return status; 299 } 300 301 /** 302 * ata_sff_sync - Flush writes 303 * @ap: Port to wait for. 304 * 305 * CAUTION: 306 * If we have an mmio device with no ctl and no altstatus 307 * method this will fail. No such devices are known to exist. 308 * 309 * LOCKING: 310 * Inherited from caller. 311 */ 312 313 static void ata_sff_sync(struct ata_port *ap) 314 { 315 if (ap->ops->sff_check_altstatus) 316 ap->ops->sff_check_altstatus(ap); 317 else if (ap->ioaddr.altstatus_addr) 318 ioread8(ap->ioaddr.altstatus_addr); 319 } 320 321 /** 322 * ata_sff_pause - Flush writes and wait 400nS 323 * @ap: Port to pause for. 324 * 325 * CAUTION: 326 * If we have an mmio device with no ctl and no altstatus 327 * method this will fail. No such devices are known to exist. 328 * 329 * LOCKING: 330 * Inherited from caller. 331 */ 332 333 void ata_sff_pause(struct ata_port *ap) 334 { 335 ata_sff_sync(ap); 336 ndelay(400); 337 } 338 EXPORT_SYMBOL_GPL(ata_sff_pause); 339 340 /** 341 * ata_sff_dma_pause - Pause before commencing DMA 342 * @ap: Port to pause for. 343 * 344 * Perform I/O fencing and ensure sufficient cycle delays occur 345 * for the HDMA1:0 transition 346 */ 347 348 void ata_sff_dma_pause(struct ata_port *ap) 349 { 350 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { 351 /* An altstatus read will cause the needed delay without 352 messing up the IRQ status */ 353 ata_sff_altstatus(ap); 354 return; 355 } 356 /* There are no DMA controllers without ctl. BUG here to ensure 357 we never violate the HDMA1:0 transition timing and risk 358 corruption. */ 359 BUG(); 360 } 361 EXPORT_SYMBOL_GPL(ata_sff_dma_pause); 362 363 /** 364 * ata_sff_busy_sleep - sleep until BSY clears, or timeout 365 * @ap: port containing status register to be polled 366 * @tmout_pat: impatience timeout in msecs 367 * @tmout: overall timeout in msecs 368 * 369 * Sleep until ATA Status register bit BSY clears, 370 * or a timeout occurs. 371 * 372 * LOCKING: 373 * Kernel thread context (may sleep). 374 * 375 * RETURNS: 376 * 0 on success, -errno otherwise. 377 */ 378 int ata_sff_busy_sleep(struct ata_port *ap, 379 unsigned long tmout_pat, unsigned long tmout) 380 { 381 unsigned long timer_start, timeout; 382 u8 status; 383 384 status = ata_sff_busy_wait(ap, ATA_BUSY, 300); 385 timer_start = jiffies; 386 timeout = ata_deadline(timer_start, tmout_pat); 387 while (status != 0xff && (status & ATA_BUSY) && 388 time_before(jiffies, timeout)) { 389 msleep(50); 390 status = ata_sff_busy_wait(ap, ATA_BUSY, 3); 391 } 392 393 if (status != 0xff && (status & ATA_BUSY)) 394 ata_port_printk(ap, KERN_WARNING, 395 "port is slow to respond, please be patient " 396 "(Status 0x%x)\n", status); 397 398 timeout = ata_deadline(timer_start, tmout); 399 while (status != 0xff && (status & ATA_BUSY) && 400 time_before(jiffies, timeout)) { 401 msleep(50); 402 status = ap->ops->sff_check_status(ap); 403 } 404 405 if (status == 0xff) 406 return -ENODEV; 407 408 if (status & ATA_BUSY) { 409 ata_port_printk(ap, KERN_ERR, "port failed to respond " 410 "(%lu secs, Status 0x%x)\n", 411 DIV_ROUND_UP(tmout, 1000), status); 412 return -EBUSY; 413 } 414 415 return 0; 416 } 417 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); 418 419 static int ata_sff_check_ready(struct ata_link *link) 420 { 421 u8 status = link->ap->ops->sff_check_status(link->ap); 422 423 return ata_check_ready(status); 424 } 425 426 /** 427 * ata_sff_wait_ready - sleep until BSY clears, or timeout 428 * @link: SFF link to wait ready status for 429 * @deadline: deadline jiffies for the operation 430 * 431 * Sleep until ATA Status register bit BSY clears, or timeout 432 * occurs. 433 * 434 * LOCKING: 435 * Kernel thread context (may sleep). 436 * 437 * RETURNS: 438 * 0 on success, -errno otherwise. 439 */ 440 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) 441 { 442 return ata_wait_ready(link, deadline, ata_sff_check_ready); 443 } 444 EXPORT_SYMBOL_GPL(ata_sff_wait_ready); 445 446 /** 447 * ata_sff_dev_select - Select device 0/1 on ATA bus 448 * @ap: ATA channel to manipulate 449 * @device: ATA device (numbered from zero) to select 450 * 451 * Use the method defined in the ATA specification to 452 * make either device 0, or device 1, active on the 453 * ATA channel. Works with both PIO and MMIO. 454 * 455 * May be used as the dev_select() entry in ata_port_operations. 456 * 457 * LOCKING: 458 * caller. 459 */ 460 void ata_sff_dev_select(struct ata_port *ap, unsigned int device) 461 { 462 u8 tmp; 463 464 if (device == 0) 465 tmp = ATA_DEVICE_OBS; 466 else 467 tmp = ATA_DEVICE_OBS | ATA_DEV1; 468 469 iowrite8(tmp, ap->ioaddr.device_addr); 470 ata_sff_pause(ap); /* needed; also flushes, for mmio */ 471 } 472 EXPORT_SYMBOL_GPL(ata_sff_dev_select); 473 474 /** 475 * ata_dev_select - Select device 0/1 on ATA bus 476 * @ap: ATA channel to manipulate 477 * @device: ATA device (numbered from zero) to select 478 * @wait: non-zero to wait for Status register BSY bit to clear 479 * @can_sleep: non-zero if context allows sleeping 480 * 481 * Use the method defined in the ATA specification to 482 * make either device 0, or device 1, active on the 483 * ATA channel. 484 * 485 * This is a high-level version of ata_sff_dev_select(), which 486 * additionally provides the services of inserting the proper 487 * pauses and status polling, where needed. 488 * 489 * LOCKING: 490 * caller. 491 */ 492 void ata_dev_select(struct ata_port *ap, unsigned int device, 493 unsigned int wait, unsigned int can_sleep) 494 { 495 if (ata_msg_probe(ap)) 496 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " 497 "device %u, wait %u\n", device, wait); 498 499 if (wait) 500 ata_wait_idle(ap); 501 502 ap->ops->sff_dev_select(ap, device); 503 504 if (wait) { 505 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) 506 msleep(150); 507 ata_wait_idle(ap); 508 } 509 } 510 511 /** 512 * ata_sff_irq_on - Enable interrupts on a port. 513 * @ap: Port on which interrupts are enabled. 514 * 515 * Enable interrupts on a legacy IDE device using MMIO or PIO, 516 * wait for idle, clear any pending interrupts. 517 * 518 * LOCKING: 519 * Inherited from caller. 520 */ 521 u8 ata_sff_irq_on(struct ata_port *ap) 522 { 523 struct ata_ioports *ioaddr = &ap->ioaddr; 524 u8 tmp; 525 526 ap->ctl &= ~ATA_NIEN; 527 ap->last_ctl = ap->ctl; 528 529 if (ioaddr->ctl_addr) 530 iowrite8(ap->ctl, ioaddr->ctl_addr); 531 tmp = ata_wait_idle(ap); 532 533 ap->ops->sff_irq_clear(ap); 534 535 return tmp; 536 } 537 EXPORT_SYMBOL_GPL(ata_sff_irq_on); 538 539 /** 540 * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. 541 * @ap: Port associated with this ATA transaction. 542 * 543 * Clear interrupt and error flags in DMA status register. 544 * 545 * May be used as the irq_clear() entry in ata_port_operations. 546 * 547 * LOCKING: 548 * spin_lock_irqsave(host lock) 549 */ 550 void ata_sff_irq_clear(struct ata_port *ap) 551 { 552 void __iomem *mmio = ap->ioaddr.bmdma_addr; 553 554 if (!mmio) 555 return; 556 557 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); 558 } 559 EXPORT_SYMBOL_GPL(ata_sff_irq_clear); 560 561 /** 562 * ata_sff_tf_load - send taskfile registers to host controller 563 * @ap: Port to which output is sent 564 * @tf: ATA taskfile register set 565 * 566 * Outputs ATA taskfile to standard ATA host controller. 567 * 568 * LOCKING: 569 * Inherited from caller. 570 */ 571 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 572 { 573 struct ata_ioports *ioaddr = &ap->ioaddr; 574 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 575 576 if (tf->ctl != ap->last_ctl) { 577 if (ioaddr->ctl_addr) 578 iowrite8(tf->ctl, ioaddr->ctl_addr); 579 ap->last_ctl = tf->ctl; 580 ata_wait_idle(ap); 581 } 582 583 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 584 WARN_ON_ONCE(!ioaddr->ctl_addr); 585 iowrite8(tf->hob_feature, ioaddr->feature_addr); 586 iowrite8(tf->hob_nsect, ioaddr->nsect_addr); 587 iowrite8(tf->hob_lbal, ioaddr->lbal_addr); 588 iowrite8(tf->hob_lbam, ioaddr->lbam_addr); 589 iowrite8(tf->hob_lbah, ioaddr->lbah_addr); 590 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", 591 tf->hob_feature, 592 tf->hob_nsect, 593 tf->hob_lbal, 594 tf->hob_lbam, 595 tf->hob_lbah); 596 } 597 598 if (is_addr) { 599 iowrite8(tf->feature, ioaddr->feature_addr); 600 iowrite8(tf->nsect, ioaddr->nsect_addr); 601 iowrite8(tf->lbal, ioaddr->lbal_addr); 602 iowrite8(tf->lbam, ioaddr->lbam_addr); 603 iowrite8(tf->lbah, ioaddr->lbah_addr); 604 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 605 tf->feature, 606 tf->nsect, 607 tf->lbal, 608 tf->lbam, 609 tf->lbah); 610 } 611 612 if (tf->flags & ATA_TFLAG_DEVICE) { 613 iowrite8(tf->device, ioaddr->device_addr); 614 VPRINTK("device 0x%X\n", tf->device); 615 } 616 617 ata_wait_idle(ap); 618 } 619 EXPORT_SYMBOL_GPL(ata_sff_tf_load); 620 621 /** 622 * ata_sff_tf_read - input device's ATA taskfile shadow registers 623 * @ap: Port from which input is read 624 * @tf: ATA taskfile register set for storing input 625 * 626 * Reads ATA taskfile registers for currently-selected device 627 * into @tf. Assumes the device has a fully SFF compliant task file 628 * layout and behaviour. If you device does not (eg has a different 629 * status method) then you will need to provide a replacement tf_read 630 * 631 * LOCKING: 632 * Inherited from caller. 633 */ 634 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 635 { 636 struct ata_ioports *ioaddr = &ap->ioaddr; 637 638 tf->command = ata_sff_check_status(ap); 639 tf->feature = ioread8(ioaddr->error_addr); 640 tf->nsect = ioread8(ioaddr->nsect_addr); 641 tf->lbal = ioread8(ioaddr->lbal_addr); 642 tf->lbam = ioread8(ioaddr->lbam_addr); 643 tf->lbah = ioread8(ioaddr->lbah_addr); 644 tf->device = ioread8(ioaddr->device_addr); 645 646 if (tf->flags & ATA_TFLAG_LBA48) { 647 if (likely(ioaddr->ctl_addr)) { 648 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); 649 tf->hob_feature = ioread8(ioaddr->error_addr); 650 tf->hob_nsect = ioread8(ioaddr->nsect_addr); 651 tf->hob_lbal = ioread8(ioaddr->lbal_addr); 652 tf->hob_lbam = ioread8(ioaddr->lbam_addr); 653 tf->hob_lbah = ioread8(ioaddr->lbah_addr); 654 iowrite8(tf->ctl, ioaddr->ctl_addr); 655 ap->last_ctl = tf->ctl; 656 } else 657 WARN_ON_ONCE(1); 658 } 659 } 660 EXPORT_SYMBOL_GPL(ata_sff_tf_read); 661 662 /** 663 * ata_sff_exec_command - issue ATA command to host controller 664 * @ap: port to which command is being issued 665 * @tf: ATA taskfile register set 666 * 667 * Issues ATA command, with proper synchronization with interrupt 668 * handler / other threads. 669 * 670 * LOCKING: 671 * spin_lock_irqsave(host lock) 672 */ 673 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) 674 { 675 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); 676 677 iowrite8(tf->command, ap->ioaddr.command_addr); 678 ata_sff_pause(ap); 679 } 680 EXPORT_SYMBOL_GPL(ata_sff_exec_command); 681 682 /** 683 * ata_tf_to_host - issue ATA taskfile to host controller 684 * @ap: port to which command is being issued 685 * @tf: ATA taskfile register set 686 * 687 * Issues ATA taskfile register set to ATA host controller, 688 * with proper synchronization with interrupt handler and 689 * other threads. 690 * 691 * LOCKING: 692 * spin_lock_irqsave(host lock) 693 */ 694 static inline void ata_tf_to_host(struct ata_port *ap, 695 const struct ata_taskfile *tf) 696 { 697 ap->ops->sff_tf_load(ap, tf); 698 ap->ops->sff_exec_command(ap, tf); 699 } 700 701 /** 702 * ata_sff_data_xfer - Transfer data by PIO 703 * @dev: device to target 704 * @buf: data buffer 705 * @buflen: buffer length 706 * @rw: read/write 707 * 708 * Transfer data from/to the device data register by PIO. 709 * 710 * LOCKING: 711 * Inherited from caller. 712 * 713 * RETURNS: 714 * Bytes consumed. 715 */ 716 unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, 717 unsigned int buflen, int rw) 718 { 719 struct ata_port *ap = dev->link->ap; 720 void __iomem *data_addr = ap->ioaddr.data_addr; 721 unsigned int words = buflen >> 1; 722 723 /* Transfer multiple of 2 bytes */ 724 if (rw == READ) 725 ioread16_rep(data_addr, buf, words); 726 else 727 iowrite16_rep(data_addr, buf, words); 728 729 /* Transfer trailing 1 byte, if any. */ 730 if (unlikely(buflen & 0x01)) { 731 __le16 align_buf[1] = { 0 }; 732 unsigned char *trailing_buf = buf + buflen - 1; 733 734 if (rw == READ) { 735 align_buf[0] = cpu_to_le16(ioread16(data_addr)); 736 memcpy(trailing_buf, align_buf, 1); 737 } else { 738 memcpy(align_buf, trailing_buf, 1); 739 iowrite16(le16_to_cpu(align_buf[0]), data_addr); 740 } 741 words++; 742 } 743 744 return words << 1; 745 } 746 EXPORT_SYMBOL_GPL(ata_sff_data_xfer); 747 748 /** 749 * ata_sff_data_xfer32 - Transfer data by PIO 750 * @dev: device to target 751 * @buf: data buffer 752 * @buflen: buffer length 753 * @rw: read/write 754 * 755 * Transfer data from/to the device data register by PIO using 32bit 756 * I/O operations. 757 * 758 * LOCKING: 759 * Inherited from caller. 760 * 761 * RETURNS: 762 * Bytes consumed. 763 */ 764 765 unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, 766 unsigned int buflen, int rw) 767 { 768 struct ata_port *ap = dev->link->ap; 769 void __iomem *data_addr = ap->ioaddr.data_addr; 770 unsigned int words = buflen >> 2; 771 int slop = buflen & 3; 772 773 /* Transfer multiple of 4 bytes */ 774 if (rw == READ) 775 ioread32_rep(data_addr, buf, words); 776 else 777 iowrite32_rep(data_addr, buf, words); 778 779 /* Transfer trailing bytes, if any */ 780 if (unlikely(slop)) { 781 unsigned char pad[4]; 782 783 /* Point buf to the tail of buffer */ 784 buf += buflen - slop; 785 786 /* 787 * Use io*_rep() accessors here as well to avoid pointlessly 788 * swapping bytes to and fro on the big endian machines... 789 */ 790 if (rw == READ) { 791 if (slop < 3) 792 ioread16_rep(data_addr, pad, 1); 793 else 794 ioread32_rep(data_addr, pad, 1); 795 memcpy(buf, pad, slop); 796 } else { 797 memcpy(pad, buf, slop); 798 if (slop < 3) 799 iowrite16_rep(data_addr, pad, 1); 800 else 801 iowrite32_rep(data_addr, pad, 1); 802 } 803 } 804 return (buflen + 1) & ~1; 805 } 806 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); 807 808 /** 809 * ata_sff_data_xfer_noirq - Transfer data by PIO 810 * @dev: device to target 811 * @buf: data buffer 812 * @buflen: buffer length 813 * @rw: read/write 814 * 815 * Transfer data from/to the device data register by PIO. Do the 816 * transfer with interrupts disabled. 817 * 818 * LOCKING: 819 * Inherited from caller. 820 * 821 * RETURNS: 822 * Bytes consumed. 823 */ 824 unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, 825 unsigned int buflen, int rw) 826 { 827 unsigned long flags; 828 unsigned int consumed; 829 830 local_irq_save(flags); 831 consumed = ata_sff_data_xfer(dev, buf, buflen, rw); 832 local_irq_restore(flags); 833 834 return consumed; 835 } 836 EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); 837 838 /** 839 * ata_pio_sector - Transfer a sector of data. 840 * @qc: Command on going 841 * 842 * Transfer qc->sect_size bytes of data from/to the ATA device. 843 * 844 * LOCKING: 845 * Inherited from caller. 846 */ 847 static void ata_pio_sector(struct ata_queued_cmd *qc) 848 { 849 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 850 struct ata_port *ap = qc->ap; 851 struct page *page; 852 unsigned int offset; 853 unsigned char *buf; 854 855 if (qc->curbytes == qc->nbytes - qc->sect_size) 856 ap->hsm_task_state = HSM_ST_LAST; 857 858 page = sg_page(qc->cursg); 859 offset = qc->cursg->offset + qc->cursg_ofs; 860 861 /* get the current page and offset */ 862 page = nth_page(page, (offset >> PAGE_SHIFT)); 863 offset %= PAGE_SIZE; 864 865 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 866 867 if (PageHighMem(page)) { 868 unsigned long flags; 869 870 /* FIXME: use a bounce buffer */ 871 local_irq_save(flags); 872 buf = kmap_atomic(page, KM_IRQ0); 873 874 /* do the actual data transfer */ 875 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, 876 do_write); 877 878 kunmap_atomic(buf, KM_IRQ0); 879 local_irq_restore(flags); 880 } else { 881 buf = page_address(page); 882 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, 883 do_write); 884 } 885 886 qc->curbytes += qc->sect_size; 887 qc->cursg_ofs += qc->sect_size; 888 889 if (qc->cursg_ofs == qc->cursg->length) { 890 qc->cursg = sg_next(qc->cursg); 891 qc->cursg_ofs = 0; 892 } 893 } 894 895 /** 896 * ata_pio_sectors - Transfer one or many sectors. 897 * @qc: Command on going 898 * 899 * Transfer one or many sectors of data from/to the 900 * ATA device for the DRQ request. 901 * 902 * LOCKING: 903 * Inherited from caller. 904 */ 905 static void ata_pio_sectors(struct ata_queued_cmd *qc) 906 { 907 if (is_multi_taskfile(&qc->tf)) { 908 /* READ/WRITE MULTIPLE */ 909 unsigned int nsect; 910 911 WARN_ON_ONCE(qc->dev->multi_count == 0); 912 913 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, 914 qc->dev->multi_count); 915 while (nsect--) 916 ata_pio_sector(qc); 917 } else 918 ata_pio_sector(qc); 919 920 ata_sff_sync(qc->ap); /* flush */ 921 } 922 923 /** 924 * atapi_send_cdb - Write CDB bytes to hardware 925 * @ap: Port to which ATAPI device is attached. 926 * @qc: Taskfile currently active 927 * 928 * When device has indicated its readiness to accept 929 * a CDB, this function is called. Send the CDB. 930 * 931 * LOCKING: 932 * caller. 933 */ 934 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) 935 { 936 /* send SCSI cdb */ 937 DPRINTK("send cdb\n"); 938 WARN_ON_ONCE(qc->dev->cdb_len < 12); 939 940 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); 941 ata_sff_sync(ap); 942 /* FIXME: If the CDB is for DMA do we need to do the transition delay 943 or is bmdma_start guaranteed to do it ? */ 944 switch (qc->tf.protocol) { 945 case ATAPI_PROT_PIO: 946 ap->hsm_task_state = HSM_ST; 947 break; 948 case ATAPI_PROT_NODATA: 949 ap->hsm_task_state = HSM_ST_LAST; 950 break; 951 case ATAPI_PROT_DMA: 952 ap->hsm_task_state = HSM_ST_LAST; 953 /* initiate bmdma */ 954 ap->ops->bmdma_start(qc); 955 break; 956 } 957 } 958 959 /** 960 * __atapi_pio_bytes - Transfer data from/to the ATAPI device. 961 * @qc: Command on going 962 * @bytes: number of bytes 963 * 964 * Transfer Transfer data from/to the ATAPI device. 965 * 966 * LOCKING: 967 * Inherited from caller. 968 * 969 */ 970 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 971 { 972 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; 973 struct ata_port *ap = qc->ap; 974 struct ata_device *dev = qc->dev; 975 struct ata_eh_info *ehi = &dev->link->eh_info; 976 struct scatterlist *sg; 977 struct page *page; 978 unsigned char *buf; 979 unsigned int offset, count, consumed; 980 981 next_sg: 982 sg = qc->cursg; 983 if (unlikely(!sg)) { 984 ata_ehi_push_desc(ehi, "unexpected or too much trailing data " 985 "buf=%u cur=%u bytes=%u", 986 qc->nbytes, qc->curbytes, bytes); 987 return -1; 988 } 989 990 page = sg_page(sg); 991 offset = sg->offset + qc->cursg_ofs; 992 993 /* get the current page and offset */ 994 page = nth_page(page, (offset >> PAGE_SHIFT)); 995 offset %= PAGE_SIZE; 996 997 /* don't overrun current sg */ 998 count = min(sg->length - qc->cursg_ofs, bytes); 999 1000 /* don't cross page boundaries */ 1001 count = min(count, (unsigned int)PAGE_SIZE - offset); 1002 1003 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 1004 1005 if (PageHighMem(page)) { 1006 unsigned long flags; 1007 1008 /* FIXME: use bounce buffer */ 1009 local_irq_save(flags); 1010 buf = kmap_atomic(page, KM_IRQ0); 1011 1012 /* do the actual data transfer */ 1013 consumed = ap->ops->sff_data_xfer(dev, buf + offset, 1014 count, rw); 1015 1016 kunmap_atomic(buf, KM_IRQ0); 1017 local_irq_restore(flags); 1018 } else { 1019 buf = page_address(page); 1020 consumed = ap->ops->sff_data_xfer(dev, buf + offset, 1021 count, rw); 1022 } 1023 1024 bytes -= min(bytes, consumed); 1025 qc->curbytes += count; 1026 qc->cursg_ofs += count; 1027 1028 if (qc->cursg_ofs == sg->length) { 1029 qc->cursg = sg_next(qc->cursg); 1030 qc->cursg_ofs = 0; 1031 } 1032 1033 /* 1034 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed); 1035 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN 1036 * check correctly as it doesn't know if it is the last request being 1037 * made. Somebody should implement a proper sanity check. 1038 */ 1039 if (bytes) 1040 goto next_sg; 1041 return 0; 1042 } 1043 1044 /** 1045 * atapi_pio_bytes - Transfer data from/to the ATAPI device. 1046 * @qc: Command on going 1047 * 1048 * Transfer Transfer data from/to the ATAPI device. 1049 * 1050 * LOCKING: 1051 * Inherited from caller. 1052 */ 1053 static void atapi_pio_bytes(struct ata_queued_cmd *qc) 1054 { 1055 struct ata_port *ap = qc->ap; 1056 struct ata_device *dev = qc->dev; 1057 struct ata_eh_info *ehi = &dev->link->eh_info; 1058 unsigned int ireason, bc_lo, bc_hi, bytes; 1059 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 1060 1061 /* Abuse qc->result_tf for temp storage of intermediate TF 1062 * here to save some kernel stack usage. 1063 * For normal completion, qc->result_tf is not relevant. For 1064 * error, qc->result_tf is later overwritten by ata_qc_complete(). 1065 * So, the correctness of qc->result_tf is not affected. 1066 */ 1067 ap->ops->sff_tf_read(ap, &qc->result_tf); 1068 ireason = qc->result_tf.nsect; 1069 bc_lo = qc->result_tf.lbam; 1070 bc_hi = qc->result_tf.lbah; 1071 bytes = (bc_hi << 8) | bc_lo; 1072 1073 /* shall be cleared to zero, indicating xfer of data */ 1074 if (unlikely(ireason & (1 << 0))) 1075 goto atapi_check; 1076 1077 /* make sure transfer direction matches expected */ 1078 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 1079 if (unlikely(do_write != i_write)) 1080 goto atapi_check; 1081 1082 if (unlikely(!bytes)) 1083 goto atapi_check; 1084 1085 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 1086 1087 if (unlikely(__atapi_pio_bytes(qc, bytes))) 1088 goto err_out; 1089 ata_sff_sync(ap); /* flush */ 1090 1091 return; 1092 1093 atapi_check: 1094 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", 1095 ireason, bytes); 1096 err_out: 1097 qc->err_mask |= AC_ERR_HSM; 1098 ap->hsm_task_state = HSM_ST_ERR; 1099 } 1100 1101 /** 1102 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. 1103 * @ap: the target ata_port 1104 * @qc: qc on going 1105 * 1106 * RETURNS: 1107 * 1 if ok in workqueue, 0 otherwise. 1108 */ 1109 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, 1110 struct ata_queued_cmd *qc) 1111 { 1112 if (qc->tf.flags & ATA_TFLAG_POLLING) 1113 return 1; 1114 1115 if (ap->hsm_task_state == HSM_ST_FIRST) { 1116 if (qc->tf.protocol == ATA_PROT_PIO && 1117 (qc->tf.flags & ATA_TFLAG_WRITE)) 1118 return 1; 1119 1120 if (ata_is_atapi(qc->tf.protocol) && 1121 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1122 return 1; 1123 } 1124 1125 return 0; 1126 } 1127 1128 /** 1129 * ata_hsm_qc_complete - finish a qc running on standard HSM 1130 * @qc: Command to complete 1131 * @in_wq: 1 if called from workqueue, 0 otherwise 1132 * 1133 * Finish @qc which is running on standard HSM. 1134 * 1135 * LOCKING: 1136 * If @in_wq is zero, spin_lock_irqsave(host lock). 1137 * Otherwise, none on entry and grabs host lock. 1138 */ 1139 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 1140 { 1141 struct ata_port *ap = qc->ap; 1142 unsigned long flags; 1143 1144 if (ap->ops->error_handler) { 1145 if (in_wq) { 1146 spin_lock_irqsave(ap->lock, flags); 1147 1148 /* EH might have kicked in while host lock is 1149 * released. 1150 */ 1151 qc = ata_qc_from_tag(ap, qc->tag); 1152 if (qc) { 1153 if (likely(!(qc->err_mask & AC_ERR_HSM))) { 1154 ap->ops->sff_irq_on(ap); 1155 ata_qc_complete(qc); 1156 } else 1157 ata_port_freeze(ap); 1158 } 1159 1160 spin_unlock_irqrestore(ap->lock, flags); 1161 } else { 1162 if (likely(!(qc->err_mask & AC_ERR_HSM))) 1163 ata_qc_complete(qc); 1164 else 1165 ata_port_freeze(ap); 1166 } 1167 } else { 1168 if (in_wq) { 1169 spin_lock_irqsave(ap->lock, flags); 1170 ap->ops->sff_irq_on(ap); 1171 ata_qc_complete(qc); 1172 spin_unlock_irqrestore(ap->lock, flags); 1173 } else 1174 ata_qc_complete(qc); 1175 } 1176 } 1177 1178 /** 1179 * ata_sff_hsm_move - move the HSM to the next state. 1180 * @ap: the target ata_port 1181 * @qc: qc on going 1182 * @status: current device status 1183 * @in_wq: 1 if called from workqueue, 0 otherwise 1184 * 1185 * RETURNS: 1186 * 1 when poll next status needed, 0 otherwise. 1187 */ 1188 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1189 u8 status, int in_wq) 1190 { 1191 struct ata_eh_info *ehi = &ap->link.eh_info; 1192 unsigned long flags = 0; 1193 int poll_next; 1194 1195 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1196 1197 /* Make sure ata_sff_qc_issue() does not throw things 1198 * like DMA polling into the workqueue. Notice that 1199 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). 1200 */ 1201 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); 1202 1203 fsm_start: 1204 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", 1205 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); 1206 1207 switch (ap->hsm_task_state) { 1208 case HSM_ST_FIRST: 1209 /* Send first data block or PACKET CDB */ 1210 1211 /* If polling, we will stay in the work queue after 1212 * sending the data. Otherwise, interrupt handler 1213 * takes over after sending the data. 1214 */ 1215 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); 1216 1217 /* check device status */ 1218 if (unlikely((status & ATA_DRQ) == 0)) { 1219 /* handle BSY=0, DRQ=0 as error */ 1220 if (likely(status & (ATA_ERR | ATA_DF))) 1221 /* device stops HSM for abort/error */ 1222 qc->err_mask |= AC_ERR_DEV; 1223 else { 1224 /* HSM violation. Let EH handle this */ 1225 ata_ehi_push_desc(ehi, 1226 "ST_FIRST: !(DRQ|ERR|DF)"); 1227 qc->err_mask |= AC_ERR_HSM; 1228 } 1229 1230 ap->hsm_task_state = HSM_ST_ERR; 1231 goto fsm_start; 1232 } 1233 1234 /* Device should not ask for data transfer (DRQ=1) 1235 * when it finds something wrong. 1236 * We ignore DRQ here and stop the HSM by 1237 * changing hsm_task_state to HSM_ST_ERR and 1238 * let the EH abort the command or reset the device. 1239 */ 1240 if (unlikely(status & (ATA_ERR | ATA_DF))) { 1241 /* Some ATAPI tape drives forget to clear the ERR bit 1242 * when doing the next command (mostly request sense). 1243 * We ignore ERR here to workaround and proceed sending 1244 * the CDB. 1245 */ 1246 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { 1247 ata_ehi_push_desc(ehi, "ST_FIRST: " 1248 "DRQ=1 with device error, " 1249 "dev_stat 0x%X", status); 1250 qc->err_mask |= AC_ERR_HSM; 1251 ap->hsm_task_state = HSM_ST_ERR; 1252 goto fsm_start; 1253 } 1254 } 1255 1256 /* Send the CDB (atapi) or the first data block (ata pio out). 1257 * During the state transition, interrupt handler shouldn't 1258 * be invoked before the data transfer is complete and 1259 * hsm_task_state is changed. Hence, the following locking. 1260 */ 1261 if (in_wq) 1262 spin_lock_irqsave(ap->lock, flags); 1263 1264 if (qc->tf.protocol == ATA_PROT_PIO) { 1265 /* PIO data out protocol. 1266 * send first data block. 1267 */ 1268 1269 /* ata_pio_sectors() might change the state 1270 * to HSM_ST_LAST. so, the state is changed here 1271 * before ata_pio_sectors(). 1272 */ 1273 ap->hsm_task_state = HSM_ST; 1274 ata_pio_sectors(qc); 1275 } else 1276 /* send CDB */ 1277 atapi_send_cdb(ap, qc); 1278 1279 if (in_wq) 1280 spin_unlock_irqrestore(ap->lock, flags); 1281 1282 /* if polling, ata_pio_task() handles the rest. 1283 * otherwise, interrupt handler takes over from here. 1284 */ 1285 break; 1286 1287 case HSM_ST: 1288 /* complete command or read/write the data register */ 1289 if (qc->tf.protocol == ATAPI_PROT_PIO) { 1290 /* ATAPI PIO protocol */ 1291 if ((status & ATA_DRQ) == 0) { 1292 /* No more data to transfer or device error. 1293 * Device error will be tagged in HSM_ST_LAST. 1294 */ 1295 ap->hsm_task_state = HSM_ST_LAST; 1296 goto fsm_start; 1297 } 1298 1299 /* Device should not ask for data transfer (DRQ=1) 1300 * when it finds something wrong. 1301 * We ignore DRQ here and stop the HSM by 1302 * changing hsm_task_state to HSM_ST_ERR and 1303 * let the EH abort the command or reset the device. 1304 */ 1305 if (unlikely(status & (ATA_ERR | ATA_DF))) { 1306 ata_ehi_push_desc(ehi, "ST-ATAPI: " 1307 "DRQ=1 with device error, " 1308 "dev_stat 0x%X", status); 1309 qc->err_mask |= AC_ERR_HSM; 1310 ap->hsm_task_state = HSM_ST_ERR; 1311 goto fsm_start; 1312 } 1313 1314 atapi_pio_bytes(qc); 1315 1316 if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) 1317 /* bad ireason reported by device */ 1318 goto fsm_start; 1319 1320 } else { 1321 /* ATA PIO protocol */ 1322 if (unlikely((status & ATA_DRQ) == 0)) { 1323 /* handle BSY=0, DRQ=0 as error */ 1324 if (likely(status & (ATA_ERR | ATA_DF))) { 1325 /* device stops HSM for abort/error */ 1326 qc->err_mask |= AC_ERR_DEV; 1327 1328 /* If diagnostic failed and this is 1329 * IDENTIFY, it's likely a phantom 1330 * device. Mark hint. 1331 */ 1332 if (qc->dev->horkage & 1333 ATA_HORKAGE_DIAGNOSTIC) 1334 qc->err_mask |= 1335 AC_ERR_NODEV_HINT; 1336 } else { 1337 /* HSM violation. Let EH handle this. 1338 * Phantom devices also trigger this 1339 * condition. Mark hint. 1340 */ 1341 ata_ehi_push_desc(ehi, "ST-ATA: " 1342 "DRQ=0 without device error, " 1343 "dev_stat 0x%X", status); 1344 qc->err_mask |= AC_ERR_HSM | 1345 AC_ERR_NODEV_HINT; 1346 } 1347 1348 ap->hsm_task_state = HSM_ST_ERR; 1349 goto fsm_start; 1350 } 1351 1352 /* For PIO reads, some devices may ask for 1353 * data transfer (DRQ=1) alone with ERR=1. 1354 * We respect DRQ here and transfer one 1355 * block of junk data before changing the 1356 * hsm_task_state to HSM_ST_ERR. 1357 * 1358 * For PIO writes, ERR=1 DRQ=1 doesn't make 1359 * sense since the data block has been 1360 * transferred to the device. 1361 */ 1362 if (unlikely(status & (ATA_ERR | ATA_DF))) { 1363 /* data might be corrputed */ 1364 qc->err_mask |= AC_ERR_DEV; 1365 1366 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1367 ata_pio_sectors(qc); 1368 status = ata_wait_idle(ap); 1369 } 1370 1371 if (status & (ATA_BUSY | ATA_DRQ)) { 1372 ata_ehi_push_desc(ehi, "ST-ATA: " 1373 "BUSY|DRQ persists on ERR|DF, " 1374 "dev_stat 0x%X", status); 1375 qc->err_mask |= AC_ERR_HSM; 1376 } 1377 1378 /* There are oddball controllers with 1379 * status register stuck at 0x7f and 1380 * lbal/m/h at zero which makes it 1381 * pass all other presence detection 1382 * mechanisms we have. Set NODEV_HINT 1383 * for it. Kernel bz#7241. 1384 */ 1385 if (status == 0x7f) 1386 qc->err_mask |= AC_ERR_NODEV_HINT; 1387 1388 /* ata_pio_sectors() might change the 1389 * state to HSM_ST_LAST. so, the state 1390 * is changed after ata_pio_sectors(). 1391 */ 1392 ap->hsm_task_state = HSM_ST_ERR; 1393 goto fsm_start; 1394 } 1395 1396 ata_pio_sectors(qc); 1397 1398 if (ap->hsm_task_state == HSM_ST_LAST && 1399 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { 1400 /* all data read */ 1401 status = ata_wait_idle(ap); 1402 goto fsm_start; 1403 } 1404 } 1405 1406 poll_next = 1; 1407 break; 1408 1409 case HSM_ST_LAST: 1410 if (unlikely(!ata_ok(status))) { 1411 qc->err_mask |= __ac_err_mask(status); 1412 ap->hsm_task_state = HSM_ST_ERR; 1413 goto fsm_start; 1414 } 1415 1416 /* no more data to transfer */ 1417 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", 1418 ap->print_id, qc->dev->devno, status); 1419 1420 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); 1421 1422 ap->hsm_task_state = HSM_ST_IDLE; 1423 1424 /* complete taskfile transaction */ 1425 ata_hsm_qc_complete(qc, in_wq); 1426 1427 poll_next = 0; 1428 break; 1429 1430 case HSM_ST_ERR: 1431 ap->hsm_task_state = HSM_ST_IDLE; 1432 1433 /* complete taskfile transaction */ 1434 ata_hsm_qc_complete(qc, in_wq); 1435 1436 poll_next = 0; 1437 break; 1438 default: 1439 poll_next = 0; 1440 BUG(); 1441 } 1442 1443 return poll_next; 1444 } 1445 EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1446 1447 void ata_pio_task(struct work_struct *work) 1448 { 1449 struct ata_port *ap = 1450 container_of(work, struct ata_port, port_task.work); 1451 struct ata_queued_cmd *qc = ap->port_task_data; 1452 u8 status; 1453 int poll_next; 1454 1455 fsm_start: 1456 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1457 1458 /* 1459 * This is purely heuristic. This is a fast path. 1460 * Sometimes when we enter, BSY will be cleared in 1461 * a chk-status or two. If not, the drive is probably seeking 1462 * or something. Snooze for a couple msecs, then 1463 * chk-status again. If still busy, queue delayed work. 1464 */ 1465 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1466 if (status & ATA_BUSY) { 1467 msleep(2); 1468 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1469 if (status & ATA_BUSY) { 1470 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE); 1471 return; 1472 } 1473 } 1474 1475 /* move the HSM */ 1476 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1477 1478 /* another command or interrupt handler 1479 * may be running at this point. 1480 */ 1481 if (poll_next) 1482 goto fsm_start; 1483 } 1484 1485 /** 1486 * ata_sff_qc_issue - issue taskfile to device in proto-dependent manner 1487 * @qc: command to issue to device 1488 * 1489 * Using various libata functions and hooks, this function 1490 * starts an ATA command. ATA commands are grouped into 1491 * classes called "protocols", and issuing each type of protocol 1492 * is slightly different. 1493 * 1494 * May be used as the qc_issue() entry in ata_port_operations. 1495 * 1496 * LOCKING: 1497 * spin_lock_irqsave(host lock) 1498 * 1499 * RETURNS: 1500 * Zero on success, AC_ERR_* mask on failure 1501 */ 1502 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1503 { 1504 struct ata_port *ap = qc->ap; 1505 1506 /* Use polling pio if the LLD doesn't handle 1507 * interrupt driven pio and atapi CDB interrupt. 1508 */ 1509 if (ap->flags & ATA_FLAG_PIO_POLLING) { 1510 switch (qc->tf.protocol) { 1511 case ATA_PROT_PIO: 1512 case ATA_PROT_NODATA: 1513 case ATAPI_PROT_PIO: 1514 case ATAPI_PROT_NODATA: 1515 qc->tf.flags |= ATA_TFLAG_POLLING; 1516 break; 1517 case ATAPI_PROT_DMA: 1518 if (qc->dev->flags & ATA_DFLAG_CDB_INTR) 1519 /* see ata_dma_blacklisted() */ 1520 BUG(); 1521 break; 1522 default: 1523 break; 1524 } 1525 } 1526 1527 /* select the device */ 1528 ata_dev_select(ap, qc->dev->devno, 1, 0); 1529 1530 /* start the command */ 1531 switch (qc->tf.protocol) { 1532 case ATA_PROT_NODATA: 1533 if (qc->tf.flags & ATA_TFLAG_POLLING) 1534 ata_qc_set_polling(qc); 1535 1536 ata_tf_to_host(ap, &qc->tf); 1537 ap->hsm_task_state = HSM_ST_LAST; 1538 1539 if (qc->tf.flags & ATA_TFLAG_POLLING) 1540 ata_pio_queue_task(ap, qc, 0); 1541 1542 break; 1543 1544 case ATA_PROT_DMA: 1545 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); 1546 1547 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 1548 ap->ops->bmdma_setup(qc); /* set up bmdma */ 1549 ap->ops->bmdma_start(qc); /* initiate bmdma */ 1550 ap->hsm_task_state = HSM_ST_LAST; 1551 break; 1552 1553 case ATA_PROT_PIO: 1554 if (qc->tf.flags & ATA_TFLAG_POLLING) 1555 ata_qc_set_polling(qc); 1556 1557 ata_tf_to_host(ap, &qc->tf); 1558 1559 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1560 /* PIO data out protocol */ 1561 ap->hsm_task_state = HSM_ST_FIRST; 1562 ata_pio_queue_task(ap, qc, 0); 1563 1564 /* always send first data block using 1565 * the ata_pio_task() codepath. 1566 */ 1567 } else { 1568 /* PIO data in protocol */ 1569 ap->hsm_task_state = HSM_ST; 1570 1571 if (qc->tf.flags & ATA_TFLAG_POLLING) 1572 ata_pio_queue_task(ap, qc, 0); 1573 1574 /* if polling, ata_pio_task() handles the rest. 1575 * otherwise, interrupt handler takes over from here. 1576 */ 1577 } 1578 1579 break; 1580 1581 case ATAPI_PROT_PIO: 1582 case ATAPI_PROT_NODATA: 1583 if (qc->tf.flags & ATA_TFLAG_POLLING) 1584 ata_qc_set_polling(qc); 1585 1586 ata_tf_to_host(ap, &qc->tf); 1587 1588 ap->hsm_task_state = HSM_ST_FIRST; 1589 1590 /* send cdb by polling if no cdb interrupt */ 1591 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1592 (qc->tf.flags & ATA_TFLAG_POLLING)) 1593 ata_pio_queue_task(ap, qc, 0); 1594 break; 1595 1596 case ATAPI_PROT_DMA: 1597 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); 1598 1599 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 1600 ap->ops->bmdma_setup(qc); /* set up bmdma */ 1601 ap->hsm_task_state = HSM_ST_FIRST; 1602 1603 /* send cdb by polling if no cdb interrupt */ 1604 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1605 ata_pio_queue_task(ap, qc, 0); 1606 break; 1607 1608 default: 1609 WARN_ON_ONCE(1); 1610 return AC_ERR_SYSTEM; 1611 } 1612 1613 return 0; 1614 } 1615 EXPORT_SYMBOL_GPL(ata_sff_qc_issue); 1616 1617 /** 1618 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read 1619 * @qc: qc to fill result TF for 1620 * 1621 * @qc is finished and result TF needs to be filled. Fill it 1622 * using ->sff_tf_read. 1623 * 1624 * LOCKING: 1625 * spin_lock_irqsave(host lock) 1626 * 1627 * RETURNS: 1628 * true indicating that result TF is successfully filled. 1629 */ 1630 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) 1631 { 1632 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); 1633 return true; 1634 } 1635 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); 1636 1637 /** 1638 * ata_sff_host_intr - Handle host interrupt for given (port, task) 1639 * @ap: Port on which interrupt arrived (possibly...) 1640 * @qc: Taskfile currently active in engine 1641 * 1642 * Handle host interrupt for given queued command. Currently, 1643 * only DMA interrupts are handled. All other commands are 1644 * handled via polling with interrupts disabled (nIEN bit). 1645 * 1646 * LOCKING: 1647 * spin_lock_irqsave(host lock) 1648 * 1649 * RETURNS: 1650 * One if interrupt was handled, zero if not (shared irq). 1651 */ 1652 unsigned int ata_sff_host_intr(struct ata_port *ap, 1653 struct ata_queued_cmd *qc) 1654 { 1655 struct ata_eh_info *ehi = &ap->link.eh_info; 1656 u8 status, host_stat = 0; 1657 1658 VPRINTK("ata%u: protocol %d task_state %d\n", 1659 ap->print_id, qc->tf.protocol, ap->hsm_task_state); 1660 1661 /* Check whether we are expecting interrupt in this state */ 1662 switch (ap->hsm_task_state) { 1663 case HSM_ST_FIRST: 1664 /* Some pre-ATAPI-4 devices assert INTRQ 1665 * at this state when ready to receive CDB. 1666 */ 1667 1668 /* Check the ATA_DFLAG_CDB_INTR flag is enough here. 1669 * The flag was turned on only for atapi devices. No 1670 * need to check ata_is_atapi(qc->tf.protocol) again. 1671 */ 1672 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1673 goto idle_irq; 1674 break; 1675 case HSM_ST_LAST: 1676 if (qc->tf.protocol == ATA_PROT_DMA || 1677 qc->tf.protocol == ATAPI_PROT_DMA) { 1678 /* check status of DMA engine */ 1679 host_stat = ap->ops->bmdma_status(ap); 1680 VPRINTK("ata%u: host_stat 0x%X\n", 1681 ap->print_id, host_stat); 1682 1683 /* if it's not our irq... */ 1684 if (!(host_stat & ATA_DMA_INTR)) 1685 goto idle_irq; 1686 1687 /* before we do anything else, clear DMA-Start bit */ 1688 ap->ops->bmdma_stop(qc); 1689 1690 if (unlikely(host_stat & ATA_DMA_ERR)) { 1691 /* error when transfering data to/from memory */ 1692 qc->err_mask |= AC_ERR_HOST_BUS; 1693 ap->hsm_task_state = HSM_ST_ERR; 1694 } 1695 } 1696 break; 1697 case HSM_ST: 1698 break; 1699 default: 1700 goto idle_irq; 1701 } 1702 1703 1704 /* check main status, clearing INTRQ if needed */ 1705 status = ata_sff_irq_status(ap); 1706 if (status & ATA_BUSY) 1707 goto idle_irq; 1708 1709 /* ack bmdma irq events */ 1710 ap->ops->sff_irq_clear(ap); 1711 1712 ata_sff_hsm_move(ap, qc, status, 0); 1713 1714 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || 1715 qc->tf.protocol == ATAPI_PROT_DMA)) 1716 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 1717 1718 return 1; /* irq handled */ 1719 1720 idle_irq: 1721 ap->stats.idle_irq++; 1722 1723 #ifdef ATA_IRQ_TRAP 1724 if ((ap->stats.idle_irq % 1000) == 0) { 1725 ap->ops->sff_check_status(ap); 1726 ap->ops->sff_irq_clear(ap); 1727 ata_port_printk(ap, KERN_WARNING, "irq trap\n"); 1728 return 1; 1729 } 1730 #endif 1731 return 0; /* irq not handled */ 1732 } 1733 EXPORT_SYMBOL_GPL(ata_sff_host_intr); 1734 1735 /** 1736 * ata_sff_interrupt - Default ATA host interrupt handler 1737 * @irq: irq line (unused) 1738 * @dev_instance: pointer to our ata_host information structure 1739 * 1740 * Default interrupt handler for PCI IDE devices. Calls 1741 * ata_sff_host_intr() for each port that is not disabled. 1742 * 1743 * LOCKING: 1744 * Obtains host lock during operation. 1745 * 1746 * RETURNS: 1747 * IRQ_NONE or IRQ_HANDLED. 1748 */ 1749 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) 1750 { 1751 struct ata_host *host = dev_instance; 1752 unsigned int i; 1753 unsigned int handled = 0; 1754 unsigned long flags; 1755 1756 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 1757 spin_lock_irqsave(&host->lock, flags); 1758 1759 for (i = 0; i < host->n_ports; i++) { 1760 struct ata_port *ap; 1761 1762 ap = host->ports[i]; 1763 if (ap && 1764 !(ap->flags & ATA_FLAG_DISABLED)) { 1765 struct ata_queued_cmd *qc; 1766 1767 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1768 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 1769 (qc->flags & ATA_QCFLAG_ACTIVE)) 1770 handled |= ata_sff_host_intr(ap, qc); 1771 } 1772 } 1773 1774 spin_unlock_irqrestore(&host->lock, flags); 1775 1776 return IRQ_RETVAL(handled); 1777 } 1778 EXPORT_SYMBOL_GPL(ata_sff_interrupt); 1779 1780 /** 1781 * ata_sff_lost_interrupt - Check for an apparent lost interrupt 1782 * @ap: port that appears to have timed out 1783 * 1784 * Called from the libata error handlers when the core code suspects 1785 * an interrupt has been lost. If it has complete anything we can and 1786 * then return. Interface must support altstatus for this faster 1787 * recovery to occur. 1788 * 1789 * Locking: 1790 * Caller holds host lock 1791 */ 1792 1793 void ata_sff_lost_interrupt(struct ata_port *ap) 1794 { 1795 u8 status; 1796 struct ata_queued_cmd *qc; 1797 1798 /* Only one outstanding command per SFF channel */ 1799 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1800 /* Check we have a live one.. */ 1801 if (qc == NULL || !(qc->flags & ATA_QCFLAG_ACTIVE)) 1802 return; 1803 /* We cannot lose an interrupt on a polled command */ 1804 if (qc->tf.flags & ATA_TFLAG_POLLING) 1805 return; 1806 /* See if the controller thinks it is still busy - if so the command 1807 isn't a lost IRQ but is still in progress */ 1808 status = ata_sff_altstatus(ap); 1809 if (status & ATA_BUSY) 1810 return; 1811 1812 /* There was a command running, we are no longer busy and we have 1813 no interrupt. */ 1814 ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n", 1815 status); 1816 /* Run the host interrupt logic as if the interrupt had not been 1817 lost */ 1818 ata_sff_host_intr(ap, qc); 1819 } 1820 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); 1821 1822 /** 1823 * ata_sff_freeze - Freeze SFF controller port 1824 * @ap: port to freeze 1825 * 1826 * Freeze BMDMA controller port. 1827 * 1828 * LOCKING: 1829 * Inherited from caller. 1830 */ 1831 void ata_sff_freeze(struct ata_port *ap) 1832 { 1833 struct ata_ioports *ioaddr = &ap->ioaddr; 1834 1835 ap->ctl |= ATA_NIEN; 1836 ap->last_ctl = ap->ctl; 1837 1838 if (ioaddr->ctl_addr) 1839 iowrite8(ap->ctl, ioaddr->ctl_addr); 1840 1841 /* Under certain circumstances, some controllers raise IRQ on 1842 * ATA_NIEN manipulation. Also, many controllers fail to mask 1843 * previously pending IRQ on ATA_NIEN assertion. Clear it. 1844 */ 1845 ap->ops->sff_check_status(ap); 1846 1847 ap->ops->sff_irq_clear(ap); 1848 } 1849 EXPORT_SYMBOL_GPL(ata_sff_freeze); 1850 1851 /** 1852 * ata_sff_thaw - Thaw SFF controller port 1853 * @ap: port to thaw 1854 * 1855 * Thaw SFF controller port. 1856 * 1857 * LOCKING: 1858 * Inherited from caller. 1859 */ 1860 void ata_sff_thaw(struct ata_port *ap) 1861 { 1862 /* clear & re-enable interrupts */ 1863 ap->ops->sff_check_status(ap); 1864 ap->ops->sff_irq_clear(ap); 1865 ap->ops->sff_irq_on(ap); 1866 } 1867 EXPORT_SYMBOL_GPL(ata_sff_thaw); 1868 1869 /** 1870 * ata_sff_prereset - prepare SFF link for reset 1871 * @link: SFF link to be reset 1872 * @deadline: deadline jiffies for the operation 1873 * 1874 * SFF link @link is about to be reset. Initialize it. It first 1875 * calls ata_std_prereset() and wait for !BSY if the port is 1876 * being softreset. 1877 * 1878 * LOCKING: 1879 * Kernel thread context (may sleep) 1880 * 1881 * RETURNS: 1882 * 0 on success, -errno otherwise. 1883 */ 1884 int ata_sff_prereset(struct ata_link *link, unsigned long deadline) 1885 { 1886 struct ata_eh_context *ehc = &link->eh_context; 1887 int rc; 1888 1889 rc = ata_std_prereset(link, deadline); 1890 if (rc) 1891 return rc; 1892 1893 /* if we're about to do hardreset, nothing more to do */ 1894 if (ehc->i.action & ATA_EH_HARDRESET) 1895 return 0; 1896 1897 /* wait for !BSY if we don't know that no device is attached */ 1898 if (!ata_link_offline(link)) { 1899 rc = ata_sff_wait_ready(link, deadline); 1900 if (rc && rc != -ENODEV) { 1901 ata_link_printk(link, KERN_WARNING, "device not ready " 1902 "(errno=%d), forcing hardreset\n", rc); 1903 ehc->i.action |= ATA_EH_HARDRESET; 1904 } 1905 } 1906 1907 return 0; 1908 } 1909 EXPORT_SYMBOL_GPL(ata_sff_prereset); 1910 1911 /** 1912 * ata_devchk - PATA device presence detection 1913 * @ap: ATA channel to examine 1914 * @device: Device to examine (starting at zero) 1915 * 1916 * This technique was originally described in 1917 * Hale Landis's ATADRVR (www.ata-atapi.com), and 1918 * later found its way into the ATA/ATAPI spec. 1919 * 1920 * Write a pattern to the ATA shadow registers, 1921 * and if a device is present, it will respond by 1922 * correctly storing and echoing back the 1923 * ATA shadow register contents. 1924 * 1925 * LOCKING: 1926 * caller. 1927 */ 1928 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) 1929 { 1930 struct ata_ioports *ioaddr = &ap->ioaddr; 1931 u8 nsect, lbal; 1932 1933 ap->ops->sff_dev_select(ap, device); 1934 1935 iowrite8(0x55, ioaddr->nsect_addr); 1936 iowrite8(0xaa, ioaddr->lbal_addr); 1937 1938 iowrite8(0xaa, ioaddr->nsect_addr); 1939 iowrite8(0x55, ioaddr->lbal_addr); 1940 1941 iowrite8(0x55, ioaddr->nsect_addr); 1942 iowrite8(0xaa, ioaddr->lbal_addr); 1943 1944 nsect = ioread8(ioaddr->nsect_addr); 1945 lbal = ioread8(ioaddr->lbal_addr); 1946 1947 if ((nsect == 0x55) && (lbal == 0xaa)) 1948 return 1; /* we found a device */ 1949 1950 return 0; /* nothing found */ 1951 } 1952 1953 /** 1954 * ata_sff_dev_classify - Parse returned ATA device signature 1955 * @dev: ATA device to classify (starting at zero) 1956 * @present: device seems present 1957 * @r_err: Value of error register on completion 1958 * 1959 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 1960 * an ATA/ATAPI-defined set of values is placed in the ATA 1961 * shadow registers, indicating the results of device detection 1962 * and diagnostics. 1963 * 1964 * Select the ATA device, and read the values from the ATA shadow 1965 * registers. Then parse according to the Error register value, 1966 * and the spec-defined values examined by ata_dev_classify(). 1967 * 1968 * LOCKING: 1969 * caller. 1970 * 1971 * RETURNS: 1972 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. 1973 */ 1974 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, 1975 u8 *r_err) 1976 { 1977 struct ata_port *ap = dev->link->ap; 1978 struct ata_taskfile tf; 1979 unsigned int class; 1980 u8 err; 1981 1982 ap->ops->sff_dev_select(ap, dev->devno); 1983 1984 memset(&tf, 0, sizeof(tf)); 1985 1986 ap->ops->sff_tf_read(ap, &tf); 1987 err = tf.feature; 1988 if (r_err) 1989 *r_err = err; 1990 1991 /* see if device passed diags: continue and warn later */ 1992 if (err == 0) 1993 /* diagnostic fail : do nothing _YET_ */ 1994 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; 1995 else if (err == 1) 1996 /* do nothing */ ; 1997 else if ((dev->devno == 0) && (err == 0x81)) 1998 /* do nothing */ ; 1999 else 2000 return ATA_DEV_NONE; 2001 2002 /* determine if device is ATA or ATAPI */ 2003 class = ata_dev_classify(&tf); 2004 2005 if (class == ATA_DEV_UNKNOWN) { 2006 /* If the device failed diagnostic, it's likely to 2007 * have reported incorrect device signature too. 2008 * Assume ATA device if the device seems present but 2009 * device signature is invalid with diagnostic 2010 * failure. 2011 */ 2012 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) 2013 class = ATA_DEV_ATA; 2014 else 2015 class = ATA_DEV_NONE; 2016 } else if ((class == ATA_DEV_ATA) && 2017 (ap->ops->sff_check_status(ap) == 0)) 2018 class = ATA_DEV_NONE; 2019 2020 return class; 2021 } 2022 EXPORT_SYMBOL_GPL(ata_sff_dev_classify); 2023 2024 /** 2025 * ata_sff_wait_after_reset - wait for devices to become ready after reset 2026 * @link: SFF link which is just reset 2027 * @devmask: mask of present devices 2028 * @deadline: deadline jiffies for the operation 2029 * 2030 * Wait devices attached to SFF @link to become ready after 2031 * reset. It contains preceding 150ms wait to avoid accessing TF 2032 * status register too early. 2033 * 2034 * LOCKING: 2035 * Kernel thread context (may sleep). 2036 * 2037 * RETURNS: 2038 * 0 on success, -ENODEV if some or all of devices in @devmask 2039 * don't seem to exist. -errno on other errors. 2040 */ 2041 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, 2042 unsigned long deadline) 2043 { 2044 struct ata_port *ap = link->ap; 2045 struct ata_ioports *ioaddr = &ap->ioaddr; 2046 unsigned int dev0 = devmask & (1 << 0); 2047 unsigned int dev1 = devmask & (1 << 1); 2048 int rc, ret = 0; 2049 2050 msleep(ATA_WAIT_AFTER_RESET); 2051 2052 /* always check readiness of the master device */ 2053 rc = ata_sff_wait_ready(link, deadline); 2054 /* -ENODEV means the odd clown forgot the D7 pulldown resistor 2055 * and TF status is 0xff, bail out on it too. 2056 */ 2057 if (rc) 2058 return rc; 2059 2060 /* if device 1 was found in ata_devchk, wait for register 2061 * access briefly, then wait for BSY to clear. 2062 */ 2063 if (dev1) { 2064 int i; 2065 2066 ap->ops->sff_dev_select(ap, 1); 2067 2068 /* Wait for register access. Some ATAPI devices fail 2069 * to set nsect/lbal after reset, so don't waste too 2070 * much time on it. We're gonna wait for !BSY anyway. 2071 */ 2072 for (i = 0; i < 2; i++) { 2073 u8 nsect, lbal; 2074 2075 nsect = ioread8(ioaddr->nsect_addr); 2076 lbal = ioread8(ioaddr->lbal_addr); 2077 if ((nsect == 1) && (lbal == 1)) 2078 break; 2079 msleep(50); /* give drive a breather */ 2080 } 2081 2082 rc = ata_sff_wait_ready(link, deadline); 2083 if (rc) { 2084 if (rc != -ENODEV) 2085 return rc; 2086 ret = rc; 2087 } 2088 } 2089 2090 /* is all this really necessary? */ 2091 ap->ops->sff_dev_select(ap, 0); 2092 if (dev1) 2093 ap->ops->sff_dev_select(ap, 1); 2094 if (dev0) 2095 ap->ops->sff_dev_select(ap, 0); 2096 2097 return ret; 2098 } 2099 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); 2100 2101 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, 2102 unsigned long deadline) 2103 { 2104 struct ata_ioports *ioaddr = &ap->ioaddr; 2105 2106 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); 2107 2108 /* software reset. causes dev0 to be selected */ 2109 iowrite8(ap->ctl, ioaddr->ctl_addr); 2110 udelay(20); /* FIXME: flush */ 2111 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); 2112 udelay(20); /* FIXME: flush */ 2113 iowrite8(ap->ctl, ioaddr->ctl_addr); 2114 ap->last_ctl = ap->ctl; 2115 2116 /* wait the port to become ready */ 2117 return ata_sff_wait_after_reset(&ap->link, devmask, deadline); 2118 } 2119 2120 /** 2121 * ata_sff_softreset - reset host port via ATA SRST 2122 * @link: ATA link to reset 2123 * @classes: resulting classes of attached devices 2124 * @deadline: deadline jiffies for the operation 2125 * 2126 * Reset host port using ATA SRST. 2127 * 2128 * LOCKING: 2129 * Kernel thread context (may sleep) 2130 * 2131 * RETURNS: 2132 * 0 on success, -errno otherwise. 2133 */ 2134 int ata_sff_softreset(struct ata_link *link, unsigned int *classes, 2135 unsigned long deadline) 2136 { 2137 struct ata_port *ap = link->ap; 2138 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 2139 unsigned int devmask = 0; 2140 int rc; 2141 u8 err; 2142 2143 DPRINTK("ENTER\n"); 2144 2145 /* determine if device 0/1 are present */ 2146 if (ata_devchk(ap, 0)) 2147 devmask |= (1 << 0); 2148 if (slave_possible && ata_devchk(ap, 1)) 2149 devmask |= (1 << 1); 2150 2151 /* select device 0 again */ 2152 ap->ops->sff_dev_select(ap, 0); 2153 2154 /* issue bus reset */ 2155 DPRINTK("about to softreset, devmask=%x\n", devmask); 2156 rc = ata_bus_softreset(ap, devmask, deadline); 2157 /* if link is occupied, -ENODEV too is an error */ 2158 if (rc && (rc != -ENODEV || sata_scr_valid(link))) { 2159 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); 2160 return rc; 2161 } 2162 2163 /* determine by signature whether we have ATA or ATAPI devices */ 2164 classes[0] = ata_sff_dev_classify(&link->device[0], 2165 devmask & (1 << 0), &err); 2166 if (slave_possible && err != 0x81) 2167 classes[1] = ata_sff_dev_classify(&link->device[1], 2168 devmask & (1 << 1), &err); 2169 2170 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 2171 return 0; 2172 } 2173 EXPORT_SYMBOL_GPL(ata_sff_softreset); 2174 2175 /** 2176 * sata_sff_hardreset - reset host port via SATA phy reset 2177 * @link: link to reset 2178 * @class: resulting class of attached device 2179 * @deadline: deadline jiffies for the operation 2180 * 2181 * SATA phy-reset host port using DET bits of SControl register, 2182 * wait for !BSY and classify the attached device. 2183 * 2184 * LOCKING: 2185 * Kernel thread context (may sleep) 2186 * 2187 * RETURNS: 2188 * 0 on success, -errno otherwise. 2189 */ 2190 int sata_sff_hardreset(struct ata_link *link, unsigned int *class, 2191 unsigned long deadline) 2192 { 2193 struct ata_eh_context *ehc = &link->eh_context; 2194 const unsigned long *timing = sata_ehc_deb_timing(ehc); 2195 bool online; 2196 int rc; 2197 2198 rc = sata_link_hardreset(link, timing, deadline, &online, 2199 ata_sff_check_ready); 2200 if (online) 2201 *class = ata_sff_dev_classify(link->device, 1, NULL); 2202 2203 DPRINTK("EXIT, class=%u\n", *class); 2204 return rc; 2205 } 2206 EXPORT_SYMBOL_GPL(sata_sff_hardreset); 2207 2208 /** 2209 * ata_sff_postreset - SFF postreset callback 2210 * @link: the target SFF ata_link 2211 * @classes: classes of attached devices 2212 * 2213 * This function is invoked after a successful reset. It first 2214 * calls ata_std_postreset() and performs SFF specific postreset 2215 * processing. 2216 * 2217 * LOCKING: 2218 * Kernel thread context (may sleep) 2219 */ 2220 void ata_sff_postreset(struct ata_link *link, unsigned int *classes) 2221 { 2222 struct ata_port *ap = link->ap; 2223 2224 ata_std_postreset(link, classes); 2225 2226 /* is double-select really necessary? */ 2227 if (classes[0] != ATA_DEV_NONE) 2228 ap->ops->sff_dev_select(ap, 1); 2229 if (classes[1] != ATA_DEV_NONE) 2230 ap->ops->sff_dev_select(ap, 0); 2231 2232 /* bail out if no device is present */ 2233 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { 2234 DPRINTK("EXIT, no device\n"); 2235 return; 2236 } 2237 2238 /* set up device control */ 2239 if (ap->ioaddr.ctl_addr) { 2240 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 2241 ap->last_ctl = ap->ctl; 2242 } 2243 } 2244 EXPORT_SYMBOL_GPL(ata_sff_postreset); 2245 2246 /** 2247 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers 2248 * @qc: command 2249 * 2250 * Drain the FIFO and device of any stuck data following a command 2251 * failing to complete. In some cases this is neccessary before a 2252 * reset will recover the device. 2253 * 2254 */ 2255 2256 void ata_sff_drain_fifo(struct ata_queued_cmd *qc) 2257 { 2258 int count; 2259 struct ata_port *ap; 2260 2261 /* We only need to flush incoming data when a command was running */ 2262 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) 2263 return; 2264 2265 ap = qc->ap; 2266 /* Drain up to 64K of data before we give up this recovery method */ 2267 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) 2268 && count < 32768; count++) 2269 ioread16(ap->ioaddr.data_addr); 2270 2271 /* Can become DEBUG later */ 2272 if (count) 2273 ata_port_printk(ap, KERN_DEBUG, 2274 "drained %d bytes to clear DRQ.\n", count); 2275 2276 } 2277 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); 2278 2279 /** 2280 * ata_sff_error_handler - Stock error handler for BMDMA controller 2281 * @ap: port to handle error for 2282 * 2283 * Stock error handler for SFF controller. It can handle both 2284 * PATA and SATA controllers. Many controllers should be able to 2285 * use this EH as-is or with some added handling before and 2286 * after. 2287 * 2288 * LOCKING: 2289 * Kernel thread context (may sleep) 2290 */ 2291 void ata_sff_error_handler(struct ata_port *ap) 2292 { 2293 ata_reset_fn_t softreset = ap->ops->softreset; 2294 ata_reset_fn_t hardreset = ap->ops->hardreset; 2295 struct ata_queued_cmd *qc; 2296 unsigned long flags; 2297 int thaw = 0; 2298 2299 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2300 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2301 qc = NULL; 2302 2303 /* reset PIO HSM and stop DMA engine */ 2304 spin_lock_irqsave(ap->lock, flags); 2305 2306 ap->hsm_task_state = HSM_ST_IDLE; 2307 2308 if (ap->ioaddr.bmdma_addr && 2309 qc && (qc->tf.protocol == ATA_PROT_DMA || 2310 qc->tf.protocol == ATAPI_PROT_DMA)) { 2311 u8 host_stat; 2312 2313 host_stat = ap->ops->bmdma_status(ap); 2314 2315 /* BMDMA controllers indicate host bus error by 2316 * setting DMA_ERR bit and timing out. As it wasn't 2317 * really a timeout event, adjust error mask and 2318 * cancel frozen state. 2319 */ 2320 if (qc->err_mask == AC_ERR_TIMEOUT 2321 && (host_stat & ATA_DMA_ERR)) { 2322 qc->err_mask = AC_ERR_HOST_BUS; 2323 thaw = 1; 2324 } 2325 2326 ap->ops->bmdma_stop(qc); 2327 } 2328 2329 ata_sff_sync(ap); /* FIXME: We don't need this */ 2330 ap->ops->sff_check_status(ap); 2331 ap->ops->sff_irq_clear(ap); 2332 /* We *MUST* do FIFO draining before we issue a reset as several 2333 * devices helpfully clear their internal state and will lock solid 2334 * if we touch the data port post reset. Pass qc in case anyone wants 2335 * to do different PIO/DMA recovery or has per command fixups 2336 */ 2337 if (ap->ops->drain_fifo) 2338 ap->ops->drain_fifo(qc); 2339 2340 spin_unlock_irqrestore(ap->lock, flags); 2341 2342 if (thaw) 2343 ata_eh_thaw_port(ap); 2344 2345 /* PIO and DMA engines have been stopped, perform recovery */ 2346 2347 /* Ignore ata_sff_softreset if ctl isn't accessible and 2348 * built-in hardresets if SCR access isn't available. 2349 */ 2350 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) 2351 softreset = NULL; 2352 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 2353 hardreset = NULL; 2354 2355 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, 2356 ap->ops->postreset); 2357 } 2358 EXPORT_SYMBOL_GPL(ata_sff_error_handler); 2359 2360 /** 2361 * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller 2362 * @qc: internal command to clean up 2363 * 2364 * LOCKING: 2365 * Kernel thread context (may sleep) 2366 */ 2367 void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) 2368 { 2369 struct ata_port *ap = qc->ap; 2370 unsigned long flags; 2371 2372 spin_lock_irqsave(ap->lock, flags); 2373 2374 ap->hsm_task_state = HSM_ST_IDLE; 2375 2376 if (ap->ioaddr.bmdma_addr) 2377 ata_bmdma_stop(qc); 2378 2379 spin_unlock_irqrestore(ap->lock, flags); 2380 } 2381 EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd); 2382 2383 /** 2384 * ata_sff_port_start - Set port up for dma. 2385 * @ap: Port to initialize 2386 * 2387 * Called just after data structures for each port are 2388 * initialized. Allocates space for PRD table if the device 2389 * is DMA capable SFF. 2390 * 2391 * May be used as the port_start() entry in ata_port_operations. 2392 * 2393 * LOCKING: 2394 * Inherited from caller. 2395 */ 2396 int ata_sff_port_start(struct ata_port *ap) 2397 { 2398 if (ap->ioaddr.bmdma_addr) 2399 return ata_port_start(ap); 2400 return 0; 2401 } 2402 EXPORT_SYMBOL_GPL(ata_sff_port_start); 2403 2404 /** 2405 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2406 * @ioaddr: IO address structure to be initialized 2407 * 2408 * Utility function which initializes data_addr, error_addr, 2409 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 2410 * device_addr, status_addr, and command_addr to standard offsets 2411 * relative to cmd_addr. 2412 * 2413 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 2414 */ 2415 void ata_sff_std_ports(struct ata_ioports *ioaddr) 2416 { 2417 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 2418 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 2419 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 2420 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 2421 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 2422 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 2423 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 2424 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 2425 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 2426 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 2427 } 2428 EXPORT_SYMBOL_GPL(ata_sff_std_ports); 2429 2430 unsigned long ata_bmdma_mode_filter(struct ata_device *adev, 2431 unsigned long xfer_mask) 2432 { 2433 /* Filter out DMA modes if the device has been configured by 2434 the BIOS as PIO only */ 2435 2436 if (adev->link->ap->ioaddr.bmdma_addr == NULL) 2437 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 2438 return xfer_mask; 2439 } 2440 EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter); 2441 2442 /** 2443 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction 2444 * @qc: Info associated with this ATA transaction. 2445 * 2446 * LOCKING: 2447 * spin_lock_irqsave(host lock) 2448 */ 2449 void ata_bmdma_setup(struct ata_queued_cmd *qc) 2450 { 2451 struct ata_port *ap = qc->ap; 2452 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 2453 u8 dmactl; 2454 2455 /* load PRD table addr. */ 2456 mb(); /* make sure PRD table writes are visible to controller */ 2457 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); 2458 2459 /* specify data direction, triple-check start bit is clear */ 2460 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2461 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); 2462 if (!rw) 2463 dmactl |= ATA_DMA_WR; 2464 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2465 2466 /* issue r/w command */ 2467 ap->ops->sff_exec_command(ap, &qc->tf); 2468 } 2469 EXPORT_SYMBOL_GPL(ata_bmdma_setup); 2470 2471 /** 2472 * ata_bmdma_start - Start a PCI IDE BMDMA transaction 2473 * @qc: Info associated with this ATA transaction. 2474 * 2475 * LOCKING: 2476 * spin_lock_irqsave(host lock) 2477 */ 2478 void ata_bmdma_start(struct ata_queued_cmd *qc) 2479 { 2480 struct ata_port *ap = qc->ap; 2481 u8 dmactl; 2482 2483 /* start host DMA transaction */ 2484 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2485 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2486 2487 /* Strictly, one may wish to issue an ioread8() here, to 2488 * flush the mmio write. However, control also passes 2489 * to the hardware at this point, and it will interrupt 2490 * us when we are to resume control. So, in effect, 2491 * we don't care when the mmio write flushes. 2492 * Further, a read of the DMA status register _immediately_ 2493 * following the write may not be what certain flaky hardware 2494 * is expected, so I think it is best to not add a readb() 2495 * without first all the MMIO ATA cards/mobos. 2496 * Or maybe I'm just being paranoid. 2497 * 2498 * FIXME: The posting of this write means I/O starts are 2499 * unneccessarily delayed for MMIO 2500 */ 2501 } 2502 EXPORT_SYMBOL_GPL(ata_bmdma_start); 2503 2504 /** 2505 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer 2506 * @qc: Command we are ending DMA for 2507 * 2508 * Clears the ATA_DMA_START flag in the dma control register 2509 * 2510 * May be used as the bmdma_stop() entry in ata_port_operations. 2511 * 2512 * LOCKING: 2513 * spin_lock_irqsave(host lock) 2514 */ 2515 void ata_bmdma_stop(struct ata_queued_cmd *qc) 2516 { 2517 struct ata_port *ap = qc->ap; 2518 void __iomem *mmio = ap->ioaddr.bmdma_addr; 2519 2520 /* clear start/stop bit */ 2521 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, 2522 mmio + ATA_DMA_CMD); 2523 2524 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 2525 ata_sff_dma_pause(ap); 2526 } 2527 EXPORT_SYMBOL_GPL(ata_bmdma_stop); 2528 2529 /** 2530 * ata_bmdma_status - Read PCI IDE BMDMA status 2531 * @ap: Port associated with this ATA transaction. 2532 * 2533 * Read and return BMDMA status register. 2534 * 2535 * May be used as the bmdma_status() entry in ata_port_operations. 2536 * 2537 * LOCKING: 2538 * spin_lock_irqsave(host lock) 2539 */ 2540 u8 ata_bmdma_status(struct ata_port *ap) 2541 { 2542 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 2543 } 2544 EXPORT_SYMBOL_GPL(ata_bmdma_status); 2545 2546 /** 2547 * ata_bus_reset - reset host port and associated ATA channel 2548 * @ap: port to reset 2549 * 2550 * This is typically the first time we actually start issuing 2551 * commands to the ATA channel. We wait for BSY to clear, then 2552 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its 2553 * result. Determine what devices, if any, are on the channel 2554 * by looking at the device 0/1 error register. Look at the signature 2555 * stored in each device's taskfile registers, to determine if 2556 * the device is ATA or ATAPI. 2557 * 2558 * LOCKING: 2559 * PCI/etc. bus probe sem. 2560 * Obtains host lock. 2561 * 2562 * SIDE EFFECTS: 2563 * Sets ATA_FLAG_DISABLED if bus reset fails. 2564 * 2565 * DEPRECATED: 2566 * This function is only for drivers which still use old EH and 2567 * will be removed soon. 2568 */ 2569 void ata_bus_reset(struct ata_port *ap) 2570 { 2571 struct ata_device *device = ap->link.device; 2572 struct ata_ioports *ioaddr = &ap->ioaddr; 2573 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 2574 u8 err; 2575 unsigned int dev0, dev1 = 0, devmask = 0; 2576 int rc; 2577 2578 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); 2579 2580 /* determine if device 0/1 are present */ 2581 if (ap->flags & ATA_FLAG_SATA_RESET) 2582 dev0 = 1; 2583 else { 2584 dev0 = ata_devchk(ap, 0); 2585 if (slave_possible) 2586 dev1 = ata_devchk(ap, 1); 2587 } 2588 2589 if (dev0) 2590 devmask |= (1 << 0); 2591 if (dev1) 2592 devmask |= (1 << 1); 2593 2594 /* select device 0 again */ 2595 ap->ops->sff_dev_select(ap, 0); 2596 2597 /* issue bus reset */ 2598 if (ap->flags & ATA_FLAG_SRST) { 2599 rc = ata_bus_softreset(ap, devmask, 2600 ata_deadline(jiffies, 40000)); 2601 if (rc && rc != -ENODEV) 2602 goto err_out; 2603 } 2604 2605 /* 2606 * determine by signature whether we have ATA or ATAPI devices 2607 */ 2608 device[0].class = ata_sff_dev_classify(&device[0], dev0, &err); 2609 if ((slave_possible) && (err != 0x81)) 2610 device[1].class = ata_sff_dev_classify(&device[1], dev1, &err); 2611 2612 /* is double-select really necessary? */ 2613 if (device[1].class != ATA_DEV_NONE) 2614 ap->ops->sff_dev_select(ap, 1); 2615 if (device[0].class != ATA_DEV_NONE) 2616 ap->ops->sff_dev_select(ap, 0); 2617 2618 /* if no devices were detected, disable this port */ 2619 if ((device[0].class == ATA_DEV_NONE) && 2620 (device[1].class == ATA_DEV_NONE)) 2621 goto err_out; 2622 2623 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 2624 /* set up device control for ATA_FLAG_SATA_RESET */ 2625 iowrite8(ap->ctl, ioaddr->ctl_addr); 2626 ap->last_ctl = ap->ctl; 2627 } 2628 2629 DPRINTK("EXIT\n"); 2630 return; 2631 2632 err_out: 2633 ata_port_printk(ap, KERN_ERR, "disabling port\n"); 2634 ata_port_disable(ap); 2635 2636 DPRINTK("EXIT\n"); 2637 } 2638 EXPORT_SYMBOL_GPL(ata_bus_reset); 2639 2640 #ifdef CONFIG_PCI 2641 2642 /** 2643 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex 2644 * @pdev: PCI device 2645 * 2646 * Some PCI ATA devices report simplex mode but in fact can be told to 2647 * enter non simplex mode. This implements the necessary logic to 2648 * perform the task on such devices. Calling it on other devices will 2649 * have -undefined- behaviour. 2650 */ 2651 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) 2652 { 2653 unsigned long bmdma = pci_resource_start(pdev, 4); 2654 u8 simplex; 2655 2656 if (bmdma == 0) 2657 return -ENOENT; 2658 2659 simplex = inb(bmdma + 0x02); 2660 outb(simplex & 0x60, bmdma + 0x02); 2661 simplex = inb(bmdma + 0x02); 2662 if (simplex & 0x80) 2663 return -EOPNOTSUPP; 2664 return 0; 2665 } 2666 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); 2667 2668 /** 2669 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host 2670 * @host: target ATA host 2671 * 2672 * Acquire PCI BMDMA resources and initialize @host accordingly. 2673 * 2674 * LOCKING: 2675 * Inherited from calling layer (may sleep). 2676 * 2677 * RETURNS: 2678 * 0 on success, -errno otherwise. 2679 */ 2680 int ata_pci_bmdma_init(struct ata_host *host) 2681 { 2682 struct device *gdev = host->dev; 2683 struct pci_dev *pdev = to_pci_dev(gdev); 2684 int i, rc; 2685 2686 /* No BAR4 allocation: No DMA */ 2687 if (pci_resource_start(pdev, 4) == 0) 2688 return 0; 2689 2690 /* TODO: If we get no DMA mask we should fall back to PIO */ 2691 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 2692 if (rc) 2693 return rc; 2694 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 2695 if (rc) 2696 return rc; 2697 2698 /* request and iomap DMA region */ 2699 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); 2700 if (rc) { 2701 dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n"); 2702 return -ENOMEM; 2703 } 2704 host->iomap = pcim_iomap_table(pdev); 2705 2706 for (i = 0; i < 2; i++) { 2707 struct ata_port *ap = host->ports[i]; 2708 void __iomem *bmdma = host->iomap[4] + 8 * i; 2709 2710 if (ata_port_is_dummy(ap)) 2711 continue; 2712 2713 ap->ioaddr.bmdma_addr = bmdma; 2714 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && 2715 (ioread8(bmdma + 2) & 0x80)) 2716 host->flags |= ATA_HOST_SIMPLEX; 2717 2718 ata_port_desc(ap, "bmdma 0x%llx", 2719 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); 2720 } 2721 2722 return 0; 2723 } 2724 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); 2725 2726 static int ata_resources_present(struct pci_dev *pdev, int port) 2727 { 2728 int i; 2729 2730 /* Check the PCI resources for this channel are enabled */ 2731 port = port * 2; 2732 for (i = 0; i < 2; i++) { 2733 if (pci_resource_start(pdev, port + i) == 0 || 2734 pci_resource_len(pdev, port + i) == 0) 2735 return 0; 2736 } 2737 return 1; 2738 } 2739 2740 /** 2741 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host 2742 * @host: target ATA host 2743 * 2744 * Acquire native PCI ATA resources for @host and initialize the 2745 * first two ports of @host accordingly. Ports marked dummy are 2746 * skipped and allocation failure makes the port dummy. 2747 * 2748 * Note that native PCI resources are valid even for legacy hosts 2749 * as we fix up pdev resources array early in boot, so this 2750 * function can be used for both native and legacy SFF hosts. 2751 * 2752 * LOCKING: 2753 * Inherited from calling layer (may sleep). 2754 * 2755 * RETURNS: 2756 * 0 if at least one port is initialized, -ENODEV if no port is 2757 * available. 2758 */ 2759 int ata_pci_sff_init_host(struct ata_host *host) 2760 { 2761 struct device *gdev = host->dev; 2762 struct pci_dev *pdev = to_pci_dev(gdev); 2763 unsigned int mask = 0; 2764 int i, rc; 2765 2766 /* request, iomap BARs and init port addresses accordingly */ 2767 for (i = 0; i < 2; i++) { 2768 struct ata_port *ap = host->ports[i]; 2769 int base = i * 2; 2770 void __iomem * const *iomap; 2771 2772 if (ata_port_is_dummy(ap)) 2773 continue; 2774 2775 /* Discard disabled ports. Some controllers show 2776 * their unused channels this way. Disabled ports are 2777 * made dummy. 2778 */ 2779 if (!ata_resources_present(pdev, i)) { 2780 ap->ops = &ata_dummy_port_ops; 2781 continue; 2782 } 2783 2784 rc = pcim_iomap_regions(pdev, 0x3 << base, 2785 dev_driver_string(gdev)); 2786 if (rc) { 2787 dev_printk(KERN_WARNING, gdev, 2788 "failed to request/iomap BARs for port %d " 2789 "(errno=%d)\n", i, rc); 2790 if (rc == -EBUSY) 2791 pcim_pin_device(pdev); 2792 ap->ops = &ata_dummy_port_ops; 2793 continue; 2794 } 2795 host->iomap = iomap = pcim_iomap_table(pdev); 2796 2797 ap->ioaddr.cmd_addr = iomap[base]; 2798 ap->ioaddr.altstatus_addr = 2799 ap->ioaddr.ctl_addr = (void __iomem *) 2800 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); 2801 ata_sff_std_ports(&ap->ioaddr); 2802 2803 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", 2804 (unsigned long long)pci_resource_start(pdev, base), 2805 (unsigned long long)pci_resource_start(pdev, base + 1)); 2806 2807 mask |= 1 << i; 2808 } 2809 2810 if (!mask) { 2811 dev_printk(KERN_ERR, gdev, "no available native port\n"); 2812 return -ENODEV; 2813 } 2814 2815 return 0; 2816 } 2817 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); 2818 2819 /** 2820 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host 2821 * @pdev: target PCI device 2822 * @ppi: array of port_info, must be enough for two ports 2823 * @r_host: out argument for the initialized ATA host 2824 * 2825 * Helper to allocate ATA host for @pdev, acquire all native PCI 2826 * resources and initialize it accordingly in one go. 2827 * 2828 * LOCKING: 2829 * Inherited from calling layer (may sleep). 2830 * 2831 * RETURNS: 2832 * 0 on success, -errno otherwise. 2833 */ 2834 int ata_pci_sff_prepare_host(struct pci_dev *pdev, 2835 const struct ata_port_info * const *ppi, 2836 struct ata_host **r_host) 2837 { 2838 struct ata_host *host; 2839 int rc; 2840 2841 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) 2842 return -ENOMEM; 2843 2844 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); 2845 if (!host) { 2846 dev_printk(KERN_ERR, &pdev->dev, 2847 "failed to allocate ATA host\n"); 2848 rc = -ENOMEM; 2849 goto err_out; 2850 } 2851 2852 rc = ata_pci_sff_init_host(host); 2853 if (rc) 2854 goto err_out; 2855 2856 /* init DMA related stuff */ 2857 rc = ata_pci_bmdma_init(host); 2858 if (rc) 2859 goto err_bmdma; 2860 2861 devres_remove_group(&pdev->dev, NULL); 2862 *r_host = host; 2863 return 0; 2864 2865 err_bmdma: 2866 /* This is necessary because PCI and iomap resources are 2867 * merged and releasing the top group won't release the 2868 * acquired resources if some of those have been acquired 2869 * before entering this function. 2870 */ 2871 pcim_iounmap_regions(pdev, 0xf); 2872 err_out: 2873 devres_release_group(&pdev->dev, NULL); 2874 return rc; 2875 } 2876 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); 2877 2878 /** 2879 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it 2880 * @host: target SFF ATA host 2881 * @irq_handler: irq_handler used when requesting IRQ(s) 2882 * @sht: scsi_host_template to use when registering the host 2883 * 2884 * This is the counterpart of ata_host_activate() for SFF ATA 2885 * hosts. This separate helper is necessary because SFF hosts 2886 * use two separate interrupts in legacy mode. 2887 * 2888 * LOCKING: 2889 * Inherited from calling layer (may sleep). 2890 * 2891 * RETURNS: 2892 * 0 on success, -errno otherwise. 2893 */ 2894 int ata_pci_sff_activate_host(struct ata_host *host, 2895 irq_handler_t irq_handler, 2896 struct scsi_host_template *sht) 2897 { 2898 struct device *dev = host->dev; 2899 struct pci_dev *pdev = to_pci_dev(dev); 2900 const char *drv_name = dev_driver_string(host->dev); 2901 int legacy_mode = 0, rc; 2902 2903 rc = ata_host_start(host); 2904 if (rc) 2905 return rc; 2906 2907 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 2908 u8 tmp8, mask; 2909 2910 /* TODO: What if one channel is in native mode ... */ 2911 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 2912 mask = (1 << 2) | (1 << 0); 2913 if ((tmp8 & mask) != mask) 2914 legacy_mode = 1; 2915 #if defined(CONFIG_NO_ATA_LEGACY) 2916 /* Some platforms with PCI limits cannot address compat 2917 port space. In that case we punt if their firmware has 2918 left a device in compatibility mode */ 2919 if (legacy_mode) { 2920 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); 2921 return -EOPNOTSUPP; 2922 } 2923 #endif 2924 } 2925 2926 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 2927 return -ENOMEM; 2928 2929 if (!legacy_mode && pdev->irq) { 2930 rc = devm_request_irq(dev, pdev->irq, irq_handler, 2931 IRQF_SHARED, drv_name, host); 2932 if (rc) 2933 goto out; 2934 2935 ata_port_desc(host->ports[0], "irq %d", pdev->irq); 2936 ata_port_desc(host->ports[1], "irq %d", pdev->irq); 2937 } else if (legacy_mode) { 2938 if (!ata_port_is_dummy(host->ports[0])) { 2939 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), 2940 irq_handler, IRQF_SHARED, 2941 drv_name, host); 2942 if (rc) 2943 goto out; 2944 2945 ata_port_desc(host->ports[0], "irq %d", 2946 ATA_PRIMARY_IRQ(pdev)); 2947 } 2948 2949 if (!ata_port_is_dummy(host->ports[1])) { 2950 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), 2951 irq_handler, IRQF_SHARED, 2952 drv_name, host); 2953 if (rc) 2954 goto out; 2955 2956 ata_port_desc(host->ports[1], "irq %d", 2957 ATA_SECONDARY_IRQ(pdev)); 2958 } 2959 } 2960 2961 rc = ata_host_register(host, sht); 2962 out: 2963 if (rc == 0) 2964 devres_remove_group(dev, NULL); 2965 else 2966 devres_release_group(dev, NULL); 2967 2968 return rc; 2969 } 2970 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); 2971 2972 /** 2973 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller 2974 * @pdev: Controller to be initialized 2975 * @ppi: array of port_info, must be enough for two ports 2976 * @sht: scsi_host_template to use when registering the host 2977 * @host_priv: host private_data 2978 * 2979 * This is a helper function which can be called from a driver's 2980 * xxx_init_one() probe function if the hardware uses traditional 2981 * IDE taskfile registers. 2982 * 2983 * This function calls pci_enable_device(), reserves its register 2984 * regions, sets the dma mask, enables bus master mode, and calls 2985 * ata_device_add() 2986 * 2987 * ASSUMPTION: 2988 * Nobody makes a single channel controller that appears solely as 2989 * the secondary legacy port on PCI. 2990 * 2991 * LOCKING: 2992 * Inherited from PCI layer (may sleep). 2993 * 2994 * RETURNS: 2995 * Zero on success, negative on errno-based value on error. 2996 */ 2997 int ata_pci_sff_init_one(struct pci_dev *pdev, 2998 const struct ata_port_info * const *ppi, 2999 struct scsi_host_template *sht, void *host_priv) 3000 { 3001 struct device *dev = &pdev->dev; 3002 const struct ata_port_info *pi = NULL; 3003 struct ata_host *host = NULL; 3004 int i, rc; 3005 3006 DPRINTK("ENTER\n"); 3007 3008 /* look up the first valid port_info */ 3009 for (i = 0; i < 2 && ppi[i]; i++) { 3010 if (ppi[i]->port_ops != &ata_dummy_port_ops) { 3011 pi = ppi[i]; 3012 break; 3013 } 3014 } 3015 3016 if (!pi) { 3017 dev_printk(KERN_ERR, &pdev->dev, 3018 "no valid port_info specified\n"); 3019 return -EINVAL; 3020 } 3021 3022 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 3023 return -ENOMEM; 3024 3025 rc = pcim_enable_device(pdev); 3026 if (rc) 3027 goto out; 3028 3029 /* prepare and activate SFF host */ 3030 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 3031 if (rc) 3032 goto out; 3033 host->private_data = host_priv; 3034 3035 pci_set_master(pdev); 3036 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 3037 out: 3038 if (rc == 0) 3039 devres_remove_group(&pdev->dev, NULL); 3040 else 3041 devres_release_group(&pdev->dev, NULL); 3042 3043 return rc; 3044 } 3045 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); 3046 3047 #endif /* CONFIG_PCI */ 3048