/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}

u8 ata_dummy_irq_on(struct ata_port *ap)	{ return 0; }

/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *	@chk_drq: Also wait for ATA_DRQ to clear, not just ATA_BUSY.
 *
 *	Wait up to 10 ms for the legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain DMA status and port status from the
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 */
u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat = 0, post_stat = 0, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	if (ap->ioaddr.bmdma_addr) {
		/* get controller status; clear intr, err bits */
		host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
			 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}
	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
		       __FUNCTION__,
		       host_stat, post_stat, status);
	return status;
}

u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; }
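/*
 * Example (illustrative sketch, not part of the original file): a low
 * level driver normally plugs the helpers above into its
 * ata_port_operations table.  "my_port_ops" is hypothetical and only
 * the interrupt-related entries are shown:
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		.irq_on		= ata_irq_on,
 *		.irq_ack	= ata_irq_ack,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *	};
 *
 * Controllers whose interrupt status needs no such handling can plug in
 * ata_dummy_irq_on()/ata_dummy_irq_ack() instead.
 */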
/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = ioread8(ioaddr->error_addr);
		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}
}
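/*
 * Example (illustrative sketch, not part of the original file): how the
 * two output helpers above combine to issue a simple PIO command.  The
 * command choice is arbitrary and error handling is elided:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);			(dev: target ata_device)
 *	tf.command = ATA_CMD_ID_ATA;		(IDENTIFY DEVICE)
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_DEVICE;
 *
 *	ap->ops->tf_load(ap, &tf);		(write shadow registers)
 *	ap->ops->exec_command(ap, &tf);		(write Command register)
 *
 * On completion, ata_tf_read() recovers the result taskfile, selecting
 * the HOB registers via ATA_HOB when the command was LBA48.
 */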
/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}

/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}
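/*
 * Example (illustrative sketch, not part of the original file): for a
 * DMA protocol command, the core invokes the helpers above in the
 * order mandated by the SFF-8038i programming model:
 *
 *	ap->ops->tf_load(ap, &qc->tf);		load shadow registers
 *	ap->ops->bmdma_setup(qc);		PRD pointer, direction, command
 *	ap->ops->bmdma_start(qc);		set ATA_DMA_START
 *
 * Note that ata_bmdma_setup() itself issues the ATA command via
 * ->exec_command(), so drivers wrapping it must not issue it again.
 */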
/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

/**
 *	ata_bmdma_freeze - Freeze BMDMA controller port
 *	@ap: port to freeze
 *
 *	Freeze BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	ap->ops->irq_on(ap);
}

/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used for constructing the
 *	->error_handler callback by low level drivers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}
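/*
 * Example (illustrative sketch, not part of the original file): as the
 * kerneldoc above suggests, a driver with its own reset method can
 * build its ->error_handler on ata_bmdma_drive_eh().  "my_softreset"
 * is hypothetical:
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, my_softreset,
 *				   NULL, ata_std_postreset);
 *	}
 */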
/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(ap))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
}

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
}

/**
 *	ata_sff_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for the PRD table if the
 *	controller is DMA-capable SFF.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_sff_port_start(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}
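/*
 * Example (illustrative sketch, not part of the original file): the
 * error-handling entries above are typically wired together in a
 * driver's ata_port_operations table (hypothetical excerpt):
 *
 *	.freeze			= ata_bmdma_freeze,
 *	.thaw			= ata_bmdma_thaw,
 *	.error_handler		= ata_bmdma_error_handler,
 *	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 *	.port_start		= ata_sff_port_start,
 */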
#ifdef CONFIG_PCI

static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check that the PCI resources for this channel are enabled */
	port = port * 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return 0;
	}
	return 1;
}

/**
 *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_init_bmdma(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;
	}

	return 0;
}

/**
 *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up the pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_init_sff_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_std_ports(&ap->ioaddr);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}
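/*
 * Example (illustrative sketch, not part of the original file): the two
 * helpers above assume the standard SFF BAR layout (BAR0/1 primary
 * command/control blocks, BAR2/3 secondary, BAR4 bus master DMA).  A
 * driver that allocates its own host could call them directly; error
 * handling is elided and "ppi" is a hypothetical port_info array:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	rc = ata_pci_init_sff_host(host);	command/control BARs 0-3
 *	rc = ata_pci_init_bmdma(host);		BMDMA BAR 4
 *
 * ata_pci_prepare_sff_host() below wraps exactly this sequence.
 */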
/**
 *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate ATA host for @pdev, acquire all native PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_prepare_sff_host(struct pci_dev *pdev,
			     const struct ata_port_info * const * ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_init_sff_host(host);
	if (rc)
		goto err_out;

	/* init DMA related stuff */
	rc = ata_pci_init_bmdma(host);
	if (rc)
		goto err_bmdma;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

 err_bmdma:
	/* This is necessary because PCI and iomap resources are
	 * merged and releasing the top group won't release the
	 * acquired resources if some of those have been acquired
	 * before entering this function.
	 */
	pcim_iounmap_regions(pdev, 0xf);
 err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}
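/*
 * Example (illustrative sketch, not part of the original file): drivers
 * with nonstandard IRQ routing can use ata_pci_prepare_sff_host() and
 * activate the host themselves rather than call ata_pci_init_one()
 * below.  "my_interrupt" and "my_sht" are hypothetical; error handling
 * is elided:
 *
 *	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	rc = ata_host_start(host);
 *	rc = devm_request_irq(&pdev->dev, pdev->irq, my_interrupt,
 *			      IRQF_SHARED, DRV_NAME, host);
 *	rc = ata_host_register(host, &my_sht);
 */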
/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and
 *	registers the resulting ATA host.
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev,
		     const struct ata_port_info * const * ppi)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi = NULL;
	struct ata_host *host = NULL;
	u8 mask;
	int legacy_mode = 0;
	int i, rc;

	DPRINTK("ENTER\n");

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++) {
		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
			pi = ppi[i];
			break;
		}
	}

	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled.  Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	 */

	rc = pcim_enable_device(pdev);
	if (rc)
		goto err_out;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space.  In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			rc = -EOPNOTSUPP;
			goto err_out;
		}
#endif
	}

	/* prepare host */
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	/* start host and request IRQ */
	rc = ata_host_start(host);
	if (rc)
		goto err_out;

	if (!legacy_mode) {
		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
				      IRQF_SHARED, DRV_NAME, host);
		if (rc)
			goto err_out;
		host->irq = pdev->irq;
	} else {
		if (!ata_port_is_dummy(host->ports[0])) {
			host->irq = ATA_PRIMARY_IRQ(pdev);
			rc = devm_request_irq(dev, host->irq,
					      pi->port_ops->irq_handler,
					      IRQF_SHARED, DRV_NAME, host);
			if (rc)
				goto err_out;
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			host->irq2 = ATA_SECONDARY_IRQ(pdev);
			rc = devm_request_irq(dev, host->irq2,
					      pi->port_ops->irq_handler,
					      IRQF_SHARED, DRV_NAME, host);
			if (rc)
				goto err_out;
		}
	}

	/* register */
	rc = ata_host_register(host, pi->sht);
	if (rc)
		goto err_out;

	devres_remove_group(dev, NULL);
	return 0;

 err_out:
	devres_release_group(dev, NULL);
	return rc;
}

/**
 *	ata_pci_clear_simplex - attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non simplex mode.  This implements the necessary logic to
 *	perform the task on such devices.  Calling it on other devices will
 *	have -undefined- behaviour.
 */
int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}

unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */

	if (adev->ap->ioaddr.bmdma_addr == 0)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}

#endif /* CONFIG_PCI */
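/*
 * Example (illustrative sketch, not part of the original file): a driver
 * with additional cable or BIOS restrictions can chain its own mode
 * filter in front of ata_pci_default_filter() above.  The "my_" names
 * are hypothetical:
 *
 *	static unsigned long my_mode_filter(struct ata_device *adev,
 *					    unsigned long xfer_mask)
 *	{
 *		if (my_board_limits_udma(adev->ap))
 *			xfer_mask &= ~ATA_MASK_UDMA;
 *		return ata_pci_default_filter(adev, xfer_mask);
 *	}
 */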