/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 * ata_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}
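
/*
 * Example (illustrative sketch; the foo_* names are hypothetical):
 * hardware that follows the SFF rules exactly can point the ->irq_on
 * slot of its ata_port_operations straight at ata_irq_on().  A chip
 * that needs extra unmasking work can wrap the helper instead:
 *
 *	static u8 foo_irq_on(struct ata_port *ap)
 *	{
 *		u8 status = ata_irq_on(ap);
 *
 *		foo_unmask_chip_irq(ap);	// chip-specific, hypothetical
 *		return status;
 *	}
 */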

/**
 * ata_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 * ata_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 * ata_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.  Assumes the device has a fully SFF compliant task file
 * layout and behaviour.  If your device does not (e.g. it has a
 * different status method) then you will need to provide a
 * replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = ioread8(ioaddr->error_addr);
		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}
}
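
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): a
 * low-level driver for fully SFF-compliant hardware wires the taskfile
 * helpers above directly into its ata_port_operations:
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		// ... remaining hooks ...
 *	};
 *
 * Only hardware that deviates from the SFF register layout (e.g. a
 * different status method, as noted for ata_tf_read above) needs to
 * supply replacements.
 */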

/**
 * ata_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}

/**
 * ata_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * Reads ATA taskfile alternate status register for
 * currently-selected device and returns its value.
 *
 * Note: may NOT be used as the check_altstatus() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so it is best not to add a readb() without
	 * first auditing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO.
	 */
}

/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in DMA status register.
 *
 * May be used as the irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
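
/*
 * For reference, libata's standard qc_issue path drives the two BMDMA
 * helpers above back to back for a DMA-protocol command, roughly:
 *
 *	case ATA_PROT_DMA:
 *		ap->ops->tf_load(ap, &qc->tf);	// load tf registers
 *		ap->ops->bmdma_setup(qc);	// set up bmdma
 *		ap->ops->bmdma_start(qc);	// initiate bmdma
 *		break;
 *
 * The completion interrupt is then fielded by the driver's
 * irq_handler, which stops the engine via ->bmdma_stop() (below) and
 * acknowledges the interrupt via ->irq_clear().
 */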

/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

/**
 * ata_bmdma_freeze - Freeze BMDMA controller port
 * @ap: port to freeze
 *
 * Freeze BMDMA controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 * ata_bmdma_thaw - Thaw BMDMA controller port
 * @ap: port to thaw
 *
 * Thaw BMDMA controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	ap->ops->irq_on(ap);
}
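
/*
 * Illustrative sketch: the freeze/thaw helpers above pair with the
 * stock BMDMA error handling below, so a typical driver's
 * ata_port_operations ends up carrying:
 *
 *	.freeze			= ata_bmdma_freeze,
 *	.thaw			= ata_bmdma_thaw,
 *	.error_handler		= ata_bmdma_error_handler,
 *	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 *
 * While frozen (ATA_NIEN set) the port keeps its interrupts masked and
 * EH polls for completion; thawing re-enables them through ->irq_on().
 */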

/**
 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 * @ap: port to handle error for
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * Handle error for ATA BMDMA controller.  It can handle both
 * PATA and SATA controllers.  Many controllers should be able to
 * use this EH as-is or with some added handling before and
 * after.
 *
 * This function is intended to be used by low-level drivers for
 * constructing their ->error_handler callback.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATAPI_PROT_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(&ap->link))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
}

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				 BMDMA controller
 * @qc: internal command to clean up
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
}

/**
 * ata_sff_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table if the device
 * is DMA capable SFF.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

int ata_sff_port_start(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}

#ifdef CONFIG_PCI

static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check the PCI resources for this channel are enabled */
	port = port * 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return 0;
	}
	return 1;
}
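
/*
 * For reference, the standard SFF BAR layout checked above (and mapped
 * by ata_pci_init_sff_host() below) is:
 *
 *	BAR 0: primary channel command block	(port 0)
 *	BAR 1: primary channel control block	(port 0)
 *	BAR 2: secondary channel command block	(port 1)
 *	BAR 3: secondary channel control block	(port 1)
 *	BAR 4: bus master DMA registers, 8 bytes per channel
 *
 * hence the "port * 2" arithmetic: port i uses BARs 2*i and 2*i + 1.
 */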

/**
 * ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
 * @host: target ATA host
 *
 * Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_init_bmdma(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0)
		return 0;

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}

	return 0;
}
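
/*
 * Example (illustrative sketch): ata_pci_prepare_sff_host() below
 * performs this pairing for the common case; a driver that allocates
 * its host by hand would do roughly:
 *
 *	rc = ata_pci_init_sff_host(host);
 *	if (rc)
 *		return rc;
 *	rc = ata_pci_init_bmdma(host);	// no-op if BAR4 is absent
 *	if (rc)
 *		return rc;
 *
 * Note that a missing BAR4 is not an error: the host simply runs
 * PIO-only with bmdma_addr left unset, and ata_pci_default_filter()
 * at the end of this file then masks out the DMA modes.
 */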

/**
 * ata_pci_init_sff_host - acquire native PCI ATA resources and init host
 * @host: target ATA host
 *
 * Acquire native PCI ATA resources for @host and initialize the
 * first two ports of @host accordingly.  Ports marked dummy are
 * skipped and allocation failure makes the port dummy.
 *
 * Note that native PCI resources are valid even for legacy hosts
 * as we fix up pdev resources array early in boot, so this
 * function can be used for both native and legacy SFF hosts.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if at least one port is initialized, -ENODEV if no port is
 * available.
 */
int ata_pci_init_sff_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
 * @pdev: target PCI device
 * @ppi: array of port_info, must be enough for two ports
 * @r_host: out argument for the initialized ATA host
 *
 * Helper to allocate ATA host for @pdev, acquire all native PCI
 * resources and initialize it accordingly in one go.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_prepare_sff_host(struct pci_dev *pdev,
			     const struct ata_port_info * const * ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_init_sff_host(host);
	if (rc)
		goto err_out;

	/* init DMA related stuff */
	rc = ata_pci_init_bmdma(host);
	if (rc)
		goto err_bmdma;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

 err_bmdma:
	/* This is necessary because PCI and iomap resources are
	 * merged and releasing the top group won't release the
	 * acquired resources if some of those have been acquired
	 * before entering this function.
	 */
	pcim_iounmap_regions(pdev, 0xf);
 err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}
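
/*
 * Example (illustrative sketch; the foo_* names are hypothetical, and
 * ppi, foo_interrupt and foo_sht are assumed defined elsewhere): the
 * prepare / activate split lets a driver touch the mapped resources
 * between the two steps, e.g. to apply a controller quirk:
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 *		if (rc)
 *			return rc;
 *		foo_fixup_regs(host);		// hypothetical quirk
 *		pci_set_master(pdev);
 *		return ata_pci_activate_sff_host(host, foo_interrupt,
 *						 &foo_sht);
 *	}
 *
 * Drivers with no such needs can use ata_pci_init_one() below instead.
 */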

/**
 * ata_pci_activate_sff_host - start SFF host, request IRQ and register it
 * @host: target SFF ATA host
 * @irq_handler: irq_handler used when requesting IRQ(s)
 * @sht: scsi_host_template to use when registering the host
 *
 * This is the counterpart of ata_host_activate() for SFF ATA
 * hosts.  This separate helper is necessary because SFF hosts
 * use two separate interrupts in legacy mode.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_pci_activate_sff_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space.  In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			return -EOPNOTSUPP;
		}
#endif
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
 out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
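
/*
 * For reference, the programming-interface byte tested above encodes
 * the channel modes as follows (per the PCI IDE controller spec):
 *
 *	bit 0: primary channel is in native mode
 *	bit 2: secondary channel is in native mode
 *
 * so mask = (1 << 2) | (1 << 0) asks "are both channels native?";
 * anything less drops the controller into legacy handling, which
 * requests the traditional per-channel interrupts (ATA_PRIMARY_IRQ /
 * ATA_SECONDARY_IRQ, usually IRQ 14 and 15) as seen above.
 */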

/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @ppi: array of port_info, must be enough for two ports
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pci_enable_device(), reserves its register
 * regions, sets the dma mask, enables bus master mode, and
 * registers the resulting ATA host.
 *
 * ASSUMPTION:
 * Nobody makes a single channel controller that appears solely as
 * the secondary legacy port on PCI.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno-based value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev,
		     const struct ata_port_info * const * ppi)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi = NULL;
	struct ata_host *host = NULL;
	int i, rc;

	DPRINTK("ENTER\n");

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++) {
		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
			pi = ppi[i];
			break;
		}
	}

	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

	/* prepare and activate SFF host */
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		goto out;

	pci_set_master(pdev);
	rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler,
				       pi->sht);
 out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}

/**
 * ata_pci_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non simplex mode.  This implements the necessary logic to
 * perform the task on such devices.  Calling it on other devices will
 * have -undefined- behaviour.
 */

int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}

unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */

	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}

#endif /* CONFIG_PCI */
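
/*
 * Example (illustrative sketch; every foo_* identifier is hypothetical,
 * and foo_port_ops is assumed to carry .irq_handler plus the taskfile,
 * BMDMA and EH hooks shown earlier in this file): a minimal PCI SFF
 * driver built on these helpers reduces to a port_info table and a
 * one-line probe:
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.sht		= &foo_sht,
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= 0x1f,		// PIO 0-4
 *		.mwdma_mask	= 0x07,		// MWDMA 0-2
 *		.udma_mask	= ATA_UDMA5,	// UDMA 0-5
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *
 *		return ata_pci_init_one(pdev, ppi);
 *	}
 */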