--- sata_inic162x.c (b3f677e501a494aa1582d4ff35fb3ac6f0a59b08)
+++ sata_inic162x.c (f8b0685a8ea8e3974f8953378ede2111f8d49d22)
 /*
  * sata_inic162x.c - Driver for Initio 162x SATA controllers
  *
  * Copyright 2006 SUSE Linux Products GmbH
  * Copyright 2006 Tejun Heo <teheo@novell.com>
  *
  * This file is released under GPL v2.
  *
--- 87 unchanged lines hidden ---
 	PIRQ_ONLINE		= (1 << 1),	/* device plugged */
 	PIRQ_COMPLETE		= (1 << 2),	/* completion interrupt */
 	PIRQ_FATAL		= (1 << 3),	/* fatal error */
 	PIRQ_ATA		= (1 << 4),	/* ATA interrupt */
 	PIRQ_REPLY		= (1 << 5),	/* reply FIFO not empty */
 	PIRQ_PENDING		= (1 << 7),	/* port IRQ pending (STAT only) */
 
 	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
-	PIRQ_MASK_DEFAULT	= PIRQ_REPLY,
+	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
 	PIRQ_MASK_FREEZE	= 0xff,
 
 	/* PORT_PRD_CTL bits */
 	PRD_CTL_START		= (1 << 0),
 	PRD_CTL_WR		= (1 << 3),
 	PRD_CTL_DMAEN		= (1 << 7),	/* DMA enable */
 
 	/* PORT_IDMA_CTL bits */
--- 109 unchanged lines hidden ---
 static void __iomem *inic_port_base(struct ata_port *ap)
 {
 	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
 }
 
 static void inic_reset_port(void __iomem *port_base)
 {
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
-	u16 ctl;
 
-	ctl = readw(idma_ctl);
-	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+	/* stop IDMA engine */
+	readw(idma_ctl); /* flush */
+	msleep(1);
 
 	/* mask IRQ and assert reset */
-	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(IDMA_CTL_RST_IDMA, idma_ctl);
 	readw(idma_ctl); /* flush */
-
-	/* give it some time */
 	msleep(1);
 
 	/* release reset */
-	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(0, idma_ctl);
 
 	/* clear irq */
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	/* reenable ATA IRQ, turn off IDMA mode */
-	writew(ctl, idma_ctl);
 }
 
 static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 	void __iomem *addr;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
 		return -EINVAL;
 
 	addr = scr_addr + scr_map[sc_reg] * 4;
 	*val = readl(scr_addr + scr_map[sc_reg] * 4);
 
 	/* this controller has stuck DIAG.N, ignore it */
 	if (sc_reg == SCR_ERROR)
 		*val &= ~SERR_PHYRDY_CHG;
 	return 0;
 }
 
 static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
 		return -EINVAL;
 
 	writel(val, scr_addr + scr_map[sc_reg] * 4);
 	return 0;
 }
 
--- 72 unchanged lines hidden ---
 	/* read and clear IRQ status */
 	irq_stat = readb(port_base + PORT_IRQ_STAT);
 	writeb(irq_stat, port_base + PORT_IRQ_STAT);
 	idma_stat = readw(port_base + PORT_IDMA_STAT);
 
 	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
 		inic_host_err_intr(ap, irq_stat, idma_stat);
 
-	if (unlikely(!qc)) {
-		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
+	if (unlikely(!qc))
 		goto spurious;
-	}
 
 	if (likely(idma_stat & IDMA_STAT_DONE)) {
 		inic_stop_idma(ap);
 
 		/* Depending on circumstances, device error
 		 * isn't reported by IDMA, check it explicitly.
 		 */
 		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
 			     (ATA_DF | ATA_ERR)))
 			qc->err_mask |= AC_ERR_DEV;
 
 		ata_qc_complete(qc);
 		return;
 	}
 
  spurious:
-	ap->ops->sff_check_status(ap); /* clear ATA interrupt */
+	ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+			"cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+			qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
 }
 
 static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
 	void __iomem *mmio_base = host->iomap[MMIO_BAR];
 	u16 host_irq_stat;
 	int i, handled = 0;;
--- 174 unchanged lines hidden ---
 	return true;
 }
 
 static void inic_freeze(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
 	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
 }
 
 static void inic_thaw(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
 	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
 }
 
 static int inic_check_ready(struct ata_link *link)
 {
 	void __iomem *port_base = inic_port_base(link->ap);
 
--- 6 unchanged lines hidden ---
  */
 static int inic_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline)
 {
 	struct ata_port *ap = link->ap;
 	void __iomem *port_base = inic_port_base(ap);
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	u16 val;
 	int rc;
 
 	/* hammer it into sane state */
 	inic_reset_port(port_base);
 
-	val = readw(idma_ctl);
-	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+	writew(IDMA_CTL_RST_ATA, idma_ctl);
 	readw(idma_ctl); /* flush */
 	msleep(1);
-	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+	writew(0, idma_ctl);
 
 	rc = sata_link_resume(link, timing, deadline);
 	if (rc) {
 		ata_link_printk(link, KERN_WARNING, "failed to resume "
 				"link after reset (errno=%d)\n", rc);
 		return rc;
 	}
 
--- 15 unchanged lines hidden ---
 	}
 
 	return 0;
 }
 
 static void inic_error_handler(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	unsigned long flags;
 
-	/* reset PIO HSM and stop DMA engine */
 	inic_reset_port(port_base);
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->hsm_task_state = HSM_ST_IDLE;
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	/* PIO and DMA engines have been stopped, perform recovery */
 	ata_std_error_handler(ap);
 }
 
 static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
 {
 	/* make DMA engine forget about the failed command */
 	if (qc->flags & ATA_QCFLAG_FAILED)
 		inic_reset_port(inic_port_base(qc->ap));
--- 47 unchanged lines hidden ---
 		return -ENOMEM;
 
 	init_port(ap);
 
 	return 0;
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &sata_port_ops,
 
 	.check_atapi_dma	= inic_check_atapi_dma,
 	.qc_prep		= inic_qc_prep,
 	.qc_issue		= inic_qc_issue,
 	.qc_fill_rtf		= inic_qc_fill_rtf,
 
 	.freeze			= inic_freeze,
 	.thaw			= inic_thaw,
-	.softreset		= ATA_OP_NULL,	/* softreset is broken */
 	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
 
 	.scr_read		= inic_scr_read,
 	.scr_write		= inic_scr_write,
 
 	.port_resume		= inic_port_resume,
--- 92 unchanged lines hidden ---
 
 	host->private_data = hpriv;
 
 	/* acquire resources and fill host */
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	rc = pcim_iomap_regions(pdev, 1 << MMIO_BAR, DRV_NAME);
 	if (rc)
 		return rc;
 	host->iomap = iomap = pcim_iomap_table(pdev);
+	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
 
 	for (i = 0; i < NR_PORTS; i++) {
 		struct ata_port *ap = host->ports[i];
-		struct ata_ioports *port = &ap->ioaddr;
-		unsigned int offset = i * PORT_SIZE;
 
-		port->cmd_addr = iomap[2 * i];
-		port->altstatus_addr =
-		port->ctl_addr = (void __iomem *)
-			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
-		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
-
-		ata_sff_std_ports(port);
-
 		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
-		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
-		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
-			      (unsigned long long)pci_resource_start(pdev, 2 * i),
-			      (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
-			      ATA_PCI_CTL_OFS);
+		ata_port_pbar_desc(ap, MMIO_BAR, i * PORT_SIZE, "port");
 	}
 
-	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
-
 	/* Set dma_mask. This devices doesn't support 64bit addressing. */
 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 	if (rc) {
 		dev_printk(KERN_ERR, &pdev->dev,
 			   "32-bit DMA enable failed\n");
 		return rc;
 	}
 
--- 65 unchanged lines hidden ---
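
A note on the probe-path change above: `pcim_iomap_regions()` takes a bitmask of BARs to request and ioremap, so `1 << MMIO_BAR` maps only the controller's MMIO BAR, whereas the old `0x3f` mapped BARs 0-5 for the taskfile/ctl accesses that the removed `port->cmd_addr`/`port->ctl_addr` setup needed. Below is a minimal sketch of that managed-mapping idiom, not code from this driver; `example_probe()`, `DRV_NAME`, and the `MMIO_BAR` value are illustrative assumptions.

```c
#include <linux/pci.h>

#define DRV_NAME "example"	/* illustrative */
#define MMIO_BAR 5		/* illustrative BAR index */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int rc;

	/* managed enable: automatically undone when the device is detached */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* request and ioremap only the BARs named in the mask */
	rc = pcim_iomap_regions(pdev, 1 << MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* per-device table of BAR mappings; unmapped BARs are NULL */
	iomap = pcim_iomap_table(pdev);
	if (!iomap[MMIO_BAR])
		return -ENOMEM;

	/* iomap[MMIO_BAR] is now usable with readw()/writew() and friends */
	return 0;
}
```

Because the mappings are device-managed, error paths and the remove routine need no explicit iounmap or region release.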