/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at initio's website but it only
 * documents registers (not programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
201fd7a697STejun Heo */ 211fd7a697STejun Heo 221fd7a697STejun Heo #include <linux/kernel.h> 231fd7a697STejun Heo #include <linux/module.h> 241fd7a697STejun Heo #include <linux/pci.h> 251fd7a697STejun Heo #include <scsi/scsi_host.h> 261fd7a697STejun Heo #include <linux/libata.h> 271fd7a697STejun Heo #include <linux/blkdev.h> 281fd7a697STejun Heo #include <scsi/scsi_device.h> 291fd7a697STejun Heo 301fd7a697STejun Heo #define DRV_NAME "sata_inic162x" 312a3103ceSJeff Garzik #define DRV_VERSION "0.3" 321fd7a697STejun Heo 331fd7a697STejun Heo enum { 341fd7a697STejun Heo MMIO_BAR = 5, 351fd7a697STejun Heo 361fd7a697STejun Heo NR_PORTS = 2, 371fd7a697STejun Heo 38b0dd9b8eSTejun Heo HOST_ACTRL = 0x08, 391fd7a697STejun Heo HOST_CTL = 0x7c, 401fd7a697STejun Heo HOST_STAT = 0x7e, 411fd7a697STejun Heo HOST_IRQ_STAT = 0xbc, 421fd7a697STejun Heo HOST_IRQ_MASK = 0xbe, 431fd7a697STejun Heo 441fd7a697STejun Heo PORT_SIZE = 0x40, 451fd7a697STejun Heo 461fd7a697STejun Heo /* registers for ATA TF operation */ 47b0dd9b8eSTejun Heo PORT_TF_DATA = 0x00, 48b0dd9b8eSTejun Heo PORT_TF_FEATURE = 0x01, 49b0dd9b8eSTejun Heo PORT_TF_NSECT = 0x02, 50b0dd9b8eSTejun Heo PORT_TF_LBAL = 0x03, 51b0dd9b8eSTejun Heo PORT_TF_LBAM = 0x04, 52b0dd9b8eSTejun Heo PORT_TF_LBAH = 0x05, 53b0dd9b8eSTejun Heo PORT_TF_DEVICE = 0x06, 54b0dd9b8eSTejun Heo PORT_TF_COMMAND = 0x07, 55b0dd9b8eSTejun Heo PORT_TF_ALT_STAT = 0x08, 561fd7a697STejun Heo PORT_IRQ_STAT = 0x09, 571fd7a697STejun Heo PORT_IRQ_MASK = 0x0a, 581fd7a697STejun Heo PORT_PRD_CTL = 0x0b, 591fd7a697STejun Heo PORT_PRD_ADDR = 0x0c, 601fd7a697STejun Heo PORT_PRD_XFERLEN = 0x10, 61b0dd9b8eSTejun Heo PORT_CPB_CPBLAR = 0x18, 62b0dd9b8eSTejun Heo PORT_CPB_PTQFIFO = 0x1c, 631fd7a697STejun Heo 641fd7a697STejun Heo /* IDMA register */ 651fd7a697STejun Heo PORT_IDMA_CTL = 0x14, 66b0dd9b8eSTejun Heo PORT_IDMA_STAT = 0x16, 67b0dd9b8eSTejun Heo 68b0dd9b8eSTejun Heo PORT_RPQ_FIFO = 0x1e, 69b0dd9b8eSTejun Heo PORT_RPQ_CNT = 0x1f, 701fd7a697STejun Heo 711fd7a697STejun Heo 
PORT_SCR = 0x20, 721fd7a697STejun Heo 731fd7a697STejun Heo /* HOST_CTL bits */ 741fd7a697STejun Heo HCTL_IRQOFF = (1 << 8), /* global IRQ off */ 75b0dd9b8eSTejun Heo HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */ 76b0dd9b8eSTejun Heo HCTL_FTHD1 = (1 << 11), /* fifo threshold 1*/ 77b0dd9b8eSTejun Heo HCTL_PWRDWN = (1 << 12), /* power down PHYs */ 781fd7a697STejun Heo HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */ 791fd7a697STejun Heo HCTL_RPGSEL = (1 << 15), /* register page select */ 801fd7a697STejun Heo 811fd7a697STejun Heo HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST | 821fd7a697STejun Heo HCTL_RPGSEL, 831fd7a697STejun Heo 841fd7a697STejun Heo /* HOST_IRQ_(STAT|MASK) bits */ 851fd7a697STejun Heo HIRQ_PORT0 = (1 << 0), 861fd7a697STejun Heo HIRQ_PORT1 = (1 << 1), 871fd7a697STejun Heo HIRQ_SOFT = (1 << 14), 881fd7a697STejun Heo HIRQ_GLOBAL = (1 << 15), /* STAT only */ 891fd7a697STejun Heo 901fd7a697STejun Heo /* PORT_IRQ_(STAT|MASK) bits */ 911fd7a697STejun Heo PIRQ_OFFLINE = (1 << 0), /* device unplugged */ 921fd7a697STejun Heo PIRQ_ONLINE = (1 << 1), /* device plugged */ 931fd7a697STejun Heo PIRQ_COMPLETE = (1 << 2), /* completion interrupt */ 941fd7a697STejun Heo PIRQ_FATAL = (1 << 3), /* fatal error */ 951fd7a697STejun Heo PIRQ_ATA = (1 << 4), /* ATA interrupt */ 961fd7a697STejun Heo PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */ 971fd7a697STejun Heo PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */ 981fd7a697STejun Heo 991fd7a697STejun Heo PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL, 1001fd7a697STejun Heo 1011fd7a697STejun Heo PIRQ_MASK_DMA_READ = PIRQ_REPLY | PIRQ_ATA, 1021fd7a697STejun Heo PIRQ_MASK_OTHER = PIRQ_REPLY | PIRQ_COMPLETE, 1031fd7a697STejun Heo PIRQ_MASK_FREEZE = 0xff, 1041fd7a697STejun Heo 1051fd7a697STejun Heo /* PORT_PRD_CTL bits */ 1061fd7a697STejun Heo PRD_CTL_START = (1 << 0), 1071fd7a697STejun Heo PRD_CTL_WR = (1 << 3), 1081fd7a697STejun Heo PRD_CTL_DMAEN = (1 << 7), /* DMA enable */ 
1091fd7a697STejun Heo 1101fd7a697STejun Heo /* PORT_IDMA_CTL bits */ 1111fd7a697STejun Heo IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */ 1121fd7a697STejun Heo IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinary */ 1131fd7a697STejun Heo IDMA_CTL_GO = (1 << 7), /* IDMA mode go */ 1141fd7a697STejun Heo IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */ 115b0dd9b8eSTejun Heo 116b0dd9b8eSTejun Heo /* PORT_IDMA_STAT bits */ 117b0dd9b8eSTejun Heo IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */ 118b0dd9b8eSTejun Heo IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */ 119b0dd9b8eSTejun Heo IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */ 120b0dd9b8eSTejun Heo IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */ 121b0dd9b8eSTejun Heo IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */ 122b0dd9b8eSTejun Heo IDMA_STAT_PSD = (1 << 6), /* ADMA pause */ 123b0dd9b8eSTejun Heo IDMA_STAT_DONE = (1 << 7), /* ADMA done */ 124b0dd9b8eSTejun Heo 125b0dd9b8eSTejun Heo IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR, 126b0dd9b8eSTejun Heo 127b0dd9b8eSTejun Heo /* CPB Control Flags*/ 128b0dd9b8eSTejun Heo CPB_CTL_VALID = (1 << 0), /* CPB valid */ 129b0dd9b8eSTejun Heo CPB_CTL_QUEUED = (1 << 1), /* queued command */ 130b0dd9b8eSTejun Heo CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */ 131b0dd9b8eSTejun Heo CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */ 132b0dd9b8eSTejun Heo CPB_CTL_DEVDIR = (1 << 4), /* device direction control */ 133b0dd9b8eSTejun Heo 134b0dd9b8eSTejun Heo /* CPB Response Flags */ 135b0dd9b8eSTejun Heo CPB_RESP_DONE = (1 << 0), /* ATA command complete */ 136b0dd9b8eSTejun Heo CPB_RESP_REL = (1 << 1), /* ATA release */ 137b0dd9b8eSTejun Heo CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */ 138b0dd9b8eSTejun Heo CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */ 139b0dd9b8eSTejun Heo CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */ 140b0dd9b8eSTejun Heo CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */ 141b0dd9b8eSTejun Heo 
CPB_RESP_OVERFLOW = (1 << 6), /* APRD exccess length error */ 142b0dd9b8eSTejun Heo CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */ 143b0dd9b8eSTejun Heo 144b0dd9b8eSTejun Heo /* PRD Control Flags */ 145b0dd9b8eSTejun Heo PRD_DRAIN = (1 << 1), /* ignore data excess */ 146b0dd9b8eSTejun Heo PRD_CDB = (1 << 2), /* atapi packet command pointer */ 147b0dd9b8eSTejun Heo PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */ 148b0dd9b8eSTejun Heo PRD_DMA = (1 << 4), /* data transfer method */ 149b0dd9b8eSTejun Heo PRD_WRITE = (1 << 5), /* data dir, rsvd in datasheet */ 150b0dd9b8eSTejun Heo PRD_IOM = (1 << 6), /* io/memory transfer */ 151b0dd9b8eSTejun Heo PRD_END = (1 << 7), /* APRD chain end */ 1521fd7a697STejun Heo }; 1531fd7a697STejun Heo 1541fd7a697STejun Heo struct inic_host_priv { 1551fd7a697STejun Heo u16 cached_hctl; 1561fd7a697STejun Heo }; 1571fd7a697STejun Heo 1581fd7a697STejun Heo struct inic_port_priv { 1591fd7a697STejun Heo u8 dfl_prdctl; 1601fd7a697STejun Heo u8 cached_prdctl; 1611fd7a697STejun Heo u8 cached_pirq_mask; 1621fd7a697STejun Heo }; 1631fd7a697STejun Heo 1641fd7a697STejun Heo static struct scsi_host_template inic_sht = { 16568d1d07bSTejun Heo ATA_BMDMA_SHT(DRV_NAME), 1661fd7a697STejun Heo }; 1671fd7a697STejun Heo 1681fd7a697STejun Heo static const int scr_map[] = { 1691fd7a697STejun Heo [SCR_STATUS] = 0, 1701fd7a697STejun Heo [SCR_ERROR] = 1, 1711fd7a697STejun Heo [SCR_CONTROL] = 2, 1721fd7a697STejun Heo }; 1731fd7a697STejun Heo 1741fd7a697STejun Heo static void __iomem *inic_port_base(struct ata_port *ap) 1751fd7a697STejun Heo { 1760d5ff566STejun Heo return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE; 1771fd7a697STejun Heo } 1781fd7a697STejun Heo 1791fd7a697STejun Heo static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask) 1801fd7a697STejun Heo { 1811fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 1821fd7a697STejun Heo struct inic_port_priv *pp = ap->private_data; 1831fd7a697STejun Heo 1841fd7a697STejun Heo 
writeb(mask, port_base + PORT_IRQ_MASK); 1851fd7a697STejun Heo pp->cached_pirq_mask = mask; 1861fd7a697STejun Heo } 1871fd7a697STejun Heo 1881fd7a697STejun Heo static void inic_set_pirq_mask(struct ata_port *ap, u8 mask) 1891fd7a697STejun Heo { 1901fd7a697STejun Heo struct inic_port_priv *pp = ap->private_data; 1911fd7a697STejun Heo 1921fd7a697STejun Heo if (pp->cached_pirq_mask != mask) 1931fd7a697STejun Heo __inic_set_pirq_mask(ap, mask); 1941fd7a697STejun Heo } 1951fd7a697STejun Heo 1961fd7a697STejun Heo static void inic_reset_port(void __iomem *port_base) 1971fd7a697STejun Heo { 1981fd7a697STejun Heo void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; 1991fd7a697STejun Heo u16 ctl; 2001fd7a697STejun Heo 2011fd7a697STejun Heo ctl = readw(idma_ctl); 2021fd7a697STejun Heo ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO); 2031fd7a697STejun Heo 2041fd7a697STejun Heo /* mask IRQ and assert reset */ 2051fd7a697STejun Heo writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl); 2061fd7a697STejun Heo readw(idma_ctl); /* flush */ 2071fd7a697STejun Heo 2081fd7a697STejun Heo /* give it some time */ 2091fd7a697STejun Heo msleep(1); 2101fd7a697STejun Heo 2111fd7a697STejun Heo /* release reset */ 2121fd7a697STejun Heo writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl); 2131fd7a697STejun Heo 2141fd7a697STejun Heo /* clear irq */ 2151fd7a697STejun Heo writeb(0xff, port_base + PORT_IRQ_STAT); 2161fd7a697STejun Heo 2171fd7a697STejun Heo /* reenable ATA IRQ, turn off IDMA mode */ 2181fd7a697STejun Heo writew(ctl, idma_ctl); 2191fd7a697STejun Heo } 2201fd7a697STejun Heo 221da3dbb17STejun Heo static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val) 2221fd7a697STejun Heo { 22359f99880SJeff Garzik void __iomem *scr_addr = ap->ioaddr.scr_addr; 2241fd7a697STejun Heo void __iomem *addr; 2251fd7a697STejun Heo 2261fd7a697STejun Heo if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 227da3dbb17STejun Heo return -EINVAL; 2281fd7a697STejun Heo 2291fd7a697STejun Heo addr = 
scr_addr + scr_map[sc_reg] * 4; 230da3dbb17STejun Heo *val = readl(scr_addr + scr_map[sc_reg] * 4); 2311fd7a697STejun Heo 2321fd7a697STejun Heo /* this controller has stuck DIAG.N, ignore it */ 2331fd7a697STejun Heo if (sc_reg == SCR_ERROR) 234da3dbb17STejun Heo *val &= ~SERR_PHYRDY_CHG; 235da3dbb17STejun Heo return 0; 2361fd7a697STejun Heo } 2371fd7a697STejun Heo 238da3dbb17STejun Heo static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 2391fd7a697STejun Heo { 24059f99880SJeff Garzik void __iomem *scr_addr = ap->ioaddr.scr_addr; 2411fd7a697STejun Heo 2421fd7a697STejun Heo if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) 243da3dbb17STejun Heo return -EINVAL; 2441fd7a697STejun Heo 2451fd7a697STejun Heo writel(val, scr_addr + scr_map[sc_reg] * 4); 246da3dbb17STejun Heo return 0; 2471fd7a697STejun Heo } 2481fd7a697STejun Heo 2491fd7a697STejun Heo /* 2501fd7a697STejun Heo * In TF mode, inic162x is very similar to SFF device. TF registers 2511fd7a697STejun Heo * function the same. DMA engine behaves similary using the same PRD 2521fd7a697STejun Heo * format as BMDMA but different command register, interrupt and event 2531fd7a697STejun Heo * notification methods are used. The following inic_bmdma_*() 2541fd7a697STejun Heo * functions do the impedance matching. 
2551fd7a697STejun Heo */ 2561fd7a697STejun Heo static void inic_bmdma_setup(struct ata_queued_cmd *qc) 2571fd7a697STejun Heo { 2581fd7a697STejun Heo struct ata_port *ap = qc->ap; 2591fd7a697STejun Heo struct inic_port_priv *pp = ap->private_data; 2601fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 2611fd7a697STejun Heo int rw = qc->tf.flags & ATA_TFLAG_WRITE; 2621fd7a697STejun Heo 2631fd7a697STejun Heo /* make sure device sees PRD table writes */ 2641fd7a697STejun Heo wmb(); 2651fd7a697STejun Heo 2661fd7a697STejun Heo /* load transfer length */ 2671fd7a697STejun Heo writel(qc->nbytes, port_base + PORT_PRD_XFERLEN); 2681fd7a697STejun Heo 2691fd7a697STejun Heo /* turn on DMA and specify data direction */ 2701fd7a697STejun Heo pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN; 2711fd7a697STejun Heo if (!rw) 2721fd7a697STejun Heo pp->cached_prdctl |= PRD_CTL_WR; 2731fd7a697STejun Heo writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); 2741fd7a697STejun Heo 2751fd7a697STejun Heo /* issue r/w command */ 2765682ed33STejun Heo ap->ops->sff_exec_command(ap, &qc->tf); 2771fd7a697STejun Heo } 2781fd7a697STejun Heo 2791fd7a697STejun Heo static void inic_bmdma_start(struct ata_queued_cmd *qc) 2801fd7a697STejun Heo { 2811fd7a697STejun Heo struct ata_port *ap = qc->ap; 2821fd7a697STejun Heo struct inic_port_priv *pp = ap->private_data; 2831fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 2841fd7a697STejun Heo 2851fd7a697STejun Heo /* start host DMA transaction */ 2861fd7a697STejun Heo pp->cached_prdctl |= PRD_CTL_START; 2871fd7a697STejun Heo writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL); 2881fd7a697STejun Heo } 2891fd7a697STejun Heo 2901fd7a697STejun Heo static void inic_bmdma_stop(struct ata_queued_cmd *qc) 2911fd7a697STejun Heo { 2921fd7a697STejun Heo struct ata_port *ap = qc->ap; 2931fd7a697STejun Heo struct inic_port_priv *pp = ap->private_data; 2941fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 2951fd7a697STejun Heo 
2961fd7a697STejun Heo /* stop DMA engine */ 2971fd7a697STejun Heo writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); 2981fd7a697STejun Heo } 2991fd7a697STejun Heo 3001fd7a697STejun Heo static u8 inic_bmdma_status(struct ata_port *ap) 3011fd7a697STejun Heo { 3021fd7a697STejun Heo /* event is already verified by the interrupt handler */ 3031fd7a697STejun Heo return ATA_DMA_INTR; 3041fd7a697STejun Heo } 3051fd7a697STejun Heo 3061fd7a697STejun Heo static void inic_host_intr(struct ata_port *ap) 3071fd7a697STejun Heo { 3081fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 3099af5c9c9STejun Heo struct ata_eh_info *ehi = &ap->link.eh_info; 3101fd7a697STejun Heo u8 irq_stat; 3111fd7a697STejun Heo 3121fd7a697STejun Heo /* fetch and clear irq */ 3131fd7a697STejun Heo irq_stat = readb(port_base + PORT_IRQ_STAT); 3141fd7a697STejun Heo writeb(irq_stat, port_base + PORT_IRQ_STAT); 3151fd7a697STejun Heo 3161fd7a697STejun Heo if (likely(!(irq_stat & PIRQ_ERR))) { 3179af5c9c9STejun Heo struct ata_queued_cmd *qc = 3189af5c9c9STejun Heo ata_qc_from_tag(ap, ap->link.active_tag); 3191fd7a697STejun Heo 3201fd7a697STejun Heo if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 3215682ed33STejun Heo ap->ops->sff_check_status(ap); /* clear ATA interrupt */ 3221fd7a697STejun Heo return; 3231fd7a697STejun Heo } 3241fd7a697STejun Heo 3259363c382STejun Heo if (likely(ata_sff_host_intr(ap, qc))) 3261fd7a697STejun Heo return; 3271fd7a697STejun Heo 3285682ed33STejun Heo ap->ops->sff_check_status(ap); /* clear ATA interrupt */ 3291fd7a697STejun Heo ata_port_printk(ap, KERN_WARNING, "unhandled " 3301fd7a697STejun Heo "interrupt, irq_stat=%x\n", irq_stat); 3311fd7a697STejun Heo return; 3321fd7a697STejun Heo } 3331fd7a697STejun Heo 3341fd7a697STejun Heo /* error */ 3351fd7a697STejun Heo ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat); 3361fd7a697STejun Heo 3371fd7a697STejun Heo if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { 3381fd7a697STejun Heo ata_ehi_hotplugged(ehi); 
3391fd7a697STejun Heo ata_port_freeze(ap); 3401fd7a697STejun Heo } else 3411fd7a697STejun Heo ata_port_abort(ap); 3421fd7a697STejun Heo } 3431fd7a697STejun Heo 3441fd7a697STejun Heo static irqreturn_t inic_interrupt(int irq, void *dev_instance) 3451fd7a697STejun Heo { 3461fd7a697STejun Heo struct ata_host *host = dev_instance; 3470d5ff566STejun Heo void __iomem *mmio_base = host->iomap[MMIO_BAR]; 3481fd7a697STejun Heo u16 host_irq_stat; 3491fd7a697STejun Heo int i, handled = 0;; 3501fd7a697STejun Heo 3511fd7a697STejun Heo host_irq_stat = readw(mmio_base + HOST_IRQ_STAT); 3521fd7a697STejun Heo 3531fd7a697STejun Heo if (unlikely(!(host_irq_stat & HIRQ_GLOBAL))) 3541fd7a697STejun Heo goto out; 3551fd7a697STejun Heo 3561fd7a697STejun Heo spin_lock(&host->lock); 3571fd7a697STejun Heo 3581fd7a697STejun Heo for (i = 0; i < NR_PORTS; i++) { 3591fd7a697STejun Heo struct ata_port *ap = host->ports[i]; 3601fd7a697STejun Heo 3611fd7a697STejun Heo if (!(host_irq_stat & (HIRQ_PORT0 << i))) 3621fd7a697STejun Heo continue; 3631fd7a697STejun Heo 3641fd7a697STejun Heo if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) { 3651fd7a697STejun Heo inic_host_intr(ap); 3661fd7a697STejun Heo handled++; 3671fd7a697STejun Heo } else { 3681fd7a697STejun Heo if (ata_ratelimit()) 3691fd7a697STejun Heo dev_printk(KERN_ERR, host->dev, "interrupt " 3701fd7a697STejun Heo "from disabled port %d (0x%x)\n", 3711fd7a697STejun Heo i, host_irq_stat); 3721fd7a697STejun Heo } 3731fd7a697STejun Heo } 3741fd7a697STejun Heo 3751fd7a697STejun Heo spin_unlock(&host->lock); 3761fd7a697STejun Heo 3771fd7a697STejun Heo out: 3781fd7a697STejun Heo return IRQ_RETVAL(handled); 3791fd7a697STejun Heo } 3801fd7a697STejun Heo 3811fd7a697STejun Heo static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) 3821fd7a697STejun Heo { 3831fd7a697STejun Heo struct ata_port *ap = qc->ap; 3841fd7a697STejun Heo 3851fd7a697STejun Heo /* ATA IRQ doesn't wait for DMA transfer completion and vice 3861fd7a697STejun Heo * versa. 
Mask IRQ selectively to detect command completion. 3871fd7a697STejun Heo * Without it, ATA DMA read command can cause data corruption. 3881fd7a697STejun Heo * 3891fd7a697STejun Heo * Something similar might be needed for ATAPI writes. I 3901fd7a697STejun Heo * tried a lot of combinations but couldn't find the solution. 3911fd7a697STejun Heo */ 3921fd7a697STejun Heo if (qc->tf.protocol == ATA_PROT_DMA && 3931fd7a697STejun Heo !(qc->tf.flags & ATA_TFLAG_WRITE)) 3941fd7a697STejun Heo inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ); 3951fd7a697STejun Heo else 3961fd7a697STejun Heo inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); 3971fd7a697STejun Heo 3981fd7a697STejun Heo /* Issuing a command to yet uninitialized port locks up the 3991fd7a697STejun Heo * controller. Most of the time, this happens for the first 4001fd7a697STejun Heo * command after reset which are ATA and ATAPI IDENTIFYs. 4011fd7a697STejun Heo * Fast fail if stat is 0x7f or 0xff for those commands. 4021fd7a697STejun Heo */ 4031fd7a697STejun Heo if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || 4041fd7a697STejun Heo qc->tf.command == ATA_CMD_ID_ATAPI)) { 4055682ed33STejun Heo u8 stat = ap->ops->sff_check_status(ap); 4061fd7a697STejun Heo if (stat == 0x7f || stat == 0xff) 4071fd7a697STejun Heo return AC_ERR_HSM; 4081fd7a697STejun Heo } 4091fd7a697STejun Heo 4109363c382STejun Heo return ata_sff_qc_issue(qc); 4111fd7a697STejun Heo } 4121fd7a697STejun Heo 413*364fac0eSTejun Heo static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 414*364fac0eSTejun Heo { 415*364fac0eSTejun Heo void __iomem *port_base = inic_port_base(ap); 416*364fac0eSTejun Heo 417*364fac0eSTejun Heo tf->feature = readb(port_base + PORT_TF_FEATURE); 418*364fac0eSTejun Heo tf->nsect = readb(port_base + PORT_TF_NSECT); 419*364fac0eSTejun Heo tf->lbal = readb(port_base + PORT_TF_LBAL); 420*364fac0eSTejun Heo tf->lbam = readb(port_base + PORT_TF_LBAM); 421*364fac0eSTejun Heo tf->lbah = readb(port_base + PORT_TF_LBAH); 422*364fac0eSTejun Heo 
tf->device = readb(port_base + PORT_TF_DEVICE); 423*364fac0eSTejun Heo tf->command = readb(port_base + PORT_TF_COMMAND); 424*364fac0eSTejun Heo } 425*364fac0eSTejun Heo 426*364fac0eSTejun Heo static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) 427*364fac0eSTejun Heo { 428*364fac0eSTejun Heo struct ata_taskfile *rtf = &qc->result_tf; 429*364fac0eSTejun Heo struct ata_taskfile tf; 430*364fac0eSTejun Heo 431*364fac0eSTejun Heo /* FIXME: Except for status and error, result TF access 432*364fac0eSTejun Heo * doesn't work. I tried reading from BAR0/2, CPB and BAR5. 433*364fac0eSTejun Heo * None works regardless of which command interface is used. 434*364fac0eSTejun Heo * For now return true iff status indicates device error. 435*364fac0eSTejun Heo * This means that we're reporting bogus sector for RW 436*364fac0eSTejun Heo * failures. Eeekk.... 437*364fac0eSTejun Heo */ 438*364fac0eSTejun Heo inic_tf_read(qc->ap, &tf); 439*364fac0eSTejun Heo 440*364fac0eSTejun Heo if (!(tf.command & ATA_ERR)) 441*364fac0eSTejun Heo return false; 442*364fac0eSTejun Heo 443*364fac0eSTejun Heo rtf->command = tf.command; 444*364fac0eSTejun Heo rtf->feature = tf.feature; 445*364fac0eSTejun Heo return true; 446*364fac0eSTejun Heo } 447*364fac0eSTejun Heo 4481fd7a697STejun Heo static void inic_freeze(struct ata_port *ap) 4491fd7a697STejun Heo { 4501fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 4511fd7a697STejun Heo 4521fd7a697STejun Heo __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE); 4531fd7a697STejun Heo 4545682ed33STejun Heo ap->ops->sff_check_status(ap); 4551fd7a697STejun Heo writeb(0xff, port_base + PORT_IRQ_STAT); 4561fd7a697STejun Heo } 4571fd7a697STejun Heo 4581fd7a697STejun Heo static void inic_thaw(struct ata_port *ap) 4591fd7a697STejun Heo { 4601fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 4611fd7a697STejun Heo 4625682ed33STejun Heo ap->ops->sff_check_status(ap); 4631fd7a697STejun Heo writeb(0xff, port_base + PORT_IRQ_STAT); 4641fd7a697STejun Heo 
4651fd7a697STejun Heo __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); 4661fd7a697STejun Heo } 4671fd7a697STejun Heo 468*364fac0eSTejun Heo static int inic_check_ready(struct ata_link *link) 469*364fac0eSTejun Heo { 470*364fac0eSTejun Heo void __iomem *port_base = inic_port_base(link->ap); 471*364fac0eSTejun Heo 472*364fac0eSTejun Heo return ata_check_ready(readb(port_base + PORT_TF_COMMAND)); 473*364fac0eSTejun Heo } 474*364fac0eSTejun Heo 4751fd7a697STejun Heo /* 4761fd7a697STejun Heo * SRST and SControl hardreset don't give valid signature on this 4771fd7a697STejun Heo * controller. Only controller specific hardreset mechanism works. 4781fd7a697STejun Heo */ 479cc0680a5STejun Heo static int inic_hardreset(struct ata_link *link, unsigned int *class, 480d4b2bab4STejun Heo unsigned long deadline) 4811fd7a697STejun Heo { 482cc0680a5STejun Heo struct ata_port *ap = link->ap; 4831fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 4841fd7a697STejun Heo void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; 485cc0680a5STejun Heo const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4861fd7a697STejun Heo u16 val; 4871fd7a697STejun Heo int rc; 4881fd7a697STejun Heo 4891fd7a697STejun Heo /* hammer it into sane state */ 4901fd7a697STejun Heo inic_reset_port(port_base); 4911fd7a697STejun Heo 4921fd7a697STejun Heo val = readw(idma_ctl); 4931fd7a697STejun Heo writew(val | IDMA_CTL_RST_ATA, idma_ctl); 4941fd7a697STejun Heo readw(idma_ctl); /* flush */ 4951fd7a697STejun Heo msleep(1); 4961fd7a697STejun Heo writew(val & ~IDMA_CTL_RST_ATA, idma_ctl); 4971fd7a697STejun Heo 498cc0680a5STejun Heo rc = sata_link_resume(link, timing, deadline); 4991fd7a697STejun Heo if (rc) { 500cc0680a5STejun Heo ata_link_printk(link, KERN_WARNING, "failed to resume " 501fe334602STejun Heo "link after reset (errno=%d)\n", rc); 5021fd7a697STejun Heo return rc; 5031fd7a697STejun Heo } 5041fd7a697STejun Heo 5051fd7a697STejun Heo *class = ATA_DEV_NONE; 506cc0680a5STejun Heo if 
(ata_link_online(link)) { 5071fd7a697STejun Heo struct ata_taskfile tf; 5081fd7a697STejun Heo 509705e76beSTejun Heo /* wait for link to become ready */ 510*364fac0eSTejun Heo rc = ata_wait_after_reset(link, deadline, inic_check_ready); 5119b89391cSTejun Heo /* link occupied, -ENODEV too is an error */ 5129b89391cSTejun Heo if (rc) { 513cc0680a5STejun Heo ata_link_printk(link, KERN_WARNING, "device not ready " 514d4b2bab4STejun Heo "after hardreset (errno=%d)\n", rc); 515d4b2bab4STejun Heo return rc; 5161fd7a697STejun Heo } 5171fd7a697STejun Heo 518*364fac0eSTejun Heo inic_tf_read(ap, &tf); 5191fd7a697STejun Heo *class = ata_dev_classify(&tf); 5201fd7a697STejun Heo } 5211fd7a697STejun Heo 5221fd7a697STejun Heo return 0; 5231fd7a697STejun Heo } 5241fd7a697STejun Heo 5251fd7a697STejun Heo static void inic_error_handler(struct ata_port *ap) 5261fd7a697STejun Heo { 5271fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 5281fd7a697STejun Heo struct inic_port_priv *pp = ap->private_data; 5291fd7a697STejun Heo unsigned long flags; 5301fd7a697STejun Heo 5311fd7a697STejun Heo /* reset PIO HSM and stop DMA engine */ 5321fd7a697STejun Heo inic_reset_port(port_base); 5331fd7a697STejun Heo 5341fd7a697STejun Heo spin_lock_irqsave(ap->lock, flags); 5351fd7a697STejun Heo ap->hsm_task_state = HSM_ST_IDLE; 5361fd7a697STejun Heo writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL); 5371fd7a697STejun Heo spin_unlock_irqrestore(ap->lock, flags); 5381fd7a697STejun Heo 5391fd7a697STejun Heo /* PIO and DMA engines have been stopped, perform recovery */ 540a1efdabaSTejun Heo ata_std_error_handler(ap); 5411fd7a697STejun Heo } 5421fd7a697STejun Heo 5431fd7a697STejun Heo static void inic_post_internal_cmd(struct ata_queued_cmd *qc) 5441fd7a697STejun Heo { 5451fd7a697STejun Heo /* make DMA engine forget about the failed command */ 546a51d644aSTejun Heo if (qc->flags & ATA_QCFLAG_FAILED) 5471fd7a697STejun Heo inic_reset_port(inic_port_base(qc->ap)); 5481fd7a697STejun Heo } 
5491fd7a697STejun Heo 550cd0d3bbcSAlan static void inic_dev_config(struct ata_device *dev) 5511fd7a697STejun Heo { 5521fd7a697STejun Heo /* inic can only handle upto LBA28 max sectors */ 5531fd7a697STejun Heo if (dev->max_sectors > ATA_MAX_SECTORS) 5541fd7a697STejun Heo dev->max_sectors = ATA_MAX_SECTORS; 55590c93785STejun Heo 55690c93785STejun Heo if (dev->n_sectors >= 1 << 28) { 55790c93785STejun Heo ata_dev_printk(dev, KERN_ERR, 55890c93785STejun Heo "ERROR: This driver doesn't support LBA48 yet and may cause\n" 55990c93785STejun Heo " data corruption on such devices. Disabling.\n"); 56090c93785STejun Heo ata_dev_disable(dev); 56190c93785STejun Heo } 5621fd7a697STejun Heo } 5631fd7a697STejun Heo 5641fd7a697STejun Heo static void init_port(struct ata_port *ap) 5651fd7a697STejun Heo { 5661fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 5671fd7a697STejun Heo 5681fd7a697STejun Heo /* Setup PRD address */ 5691fd7a697STejun Heo writel(ap->prd_dma, port_base + PORT_PRD_ADDR); 5701fd7a697STejun Heo } 5711fd7a697STejun Heo 5721fd7a697STejun Heo static int inic_port_resume(struct ata_port *ap) 5731fd7a697STejun Heo { 5741fd7a697STejun Heo init_port(ap); 5751fd7a697STejun Heo return 0; 5761fd7a697STejun Heo } 5771fd7a697STejun Heo 5781fd7a697STejun Heo static int inic_port_start(struct ata_port *ap) 5791fd7a697STejun Heo { 5801fd7a697STejun Heo void __iomem *port_base = inic_port_base(ap); 5811fd7a697STejun Heo struct inic_port_priv *pp; 5821fd7a697STejun Heo u8 tmp; 5831fd7a697STejun Heo int rc; 5841fd7a697STejun Heo 5851fd7a697STejun Heo /* alloc and initialize private data */ 58624dc5f33STejun Heo pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL); 5871fd7a697STejun Heo if (!pp) 5881fd7a697STejun Heo return -ENOMEM; 5891fd7a697STejun Heo ap->private_data = pp; 5901fd7a697STejun Heo 5911fd7a697STejun Heo /* default PRD_CTL value, DMAEN, WR and START off */ 5921fd7a697STejun Heo tmp = readb(port_base + PORT_PRD_CTL); 5931fd7a697STejun Heo tmp &= 
	      ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
	pp->dfl_prdctl = tmp;

	/* Alloc resources */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.bmdma_setup		= inic_bmdma_setup,
	.bmdma_start		= inic_bmdma_start,
	.bmdma_stop		= inic_bmdma_stop,
	.bmdma_status		= inic_bmdma_status,
	.qc_issue		= inic_qc_issue,
	.qc_fill_rtf		= inic_qc_fill_rtf,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.softreset		= ATA_OP_NULL,	/* softreset is broken */
	.hardreset		= inic_hardreset,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.port_resume		= inic_port_resume,
	.port_start		= inic_port_start,
};

static struct ata_port_info inic_port_info = {
	/* For some reason, ATAPI_PROT_PIO is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason and ignoring ireason
	 * results in machine lock up.  Tell libata to always prefer
	 * DMA.
	 */
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= 0x1f,	/* pio0-4 */
	.mwdma_mask		= 0x07, /* mwdma0-2 */
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};

/* Bring the controller to a known state: soft-reset the whole chip,
 * mask and reset both ports, then unmask the global port IRQs.
 * Returns 0 on success, -EIO if the soft-reset bit fails to clear
 * within ~10ms.
 */
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}

#ifdef CONFIG_PM
/* PCI resume callback: restore PCI state, and after a real suspend
 * (not hibernation resume paths with state preserved) re-run full
 * controller initialization before resuming the libata host.
 */
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif

/* PCI probe: allocate a two-port libata host, map BARs 0-5 (managed),
 * describe the per-port TF/ctl/SCR addresses, force 32-bit DMA,
 * initialize the controller and register with libata.  All resources
 * are devres-managed, so error paths simply return.
 */
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *port = &ap->ioaddr;
		unsigned int offset = i * PORT_SIZE;

		/* BARs 2*i / 2*i+1 carry the legacy cmd/ctl blocks,
		 * SCR registers live in the MMIO BAR per-port area.
		 */
		port->cmd_addr = iomap[2 * i];
		port->altstatus_addr =
		port->ctl_addr = (void __iomem *)
			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;

		ata_sff_std_ports(port);

		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
		  (unsigned long long)pci_resource_start(pdev, 2 * i),
		  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
				      ATA_PCI_CTL_OFS);
	}

	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

	/* Set dma_mask.  This device doesn't support 64bit addressing. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * 65536 byte PRD entry is fed.  Reduce maximum segment size.
	 */
	rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to set the maximum segment size.\n");
		return rc;
	}

	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};
static struct pci_driver inic_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	/* custom resume: re-runs controller init after a full suspend */
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

/* Module entry: just register the PCI driver; probing does the work. */
static int __init inic_init(void)
{
	return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
	pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);