/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at initio's website but it only
 * documents registers (not programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.3"

enum {
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	IDMA_CPB_TBL_SIZE	= 4 * 32,

	INIC_DMA_BOUNDARY	= 0xffffff,

	HOST_ACTRL		= 0x08,
	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

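	/* The PORT_* offsets below are relative to each port's
	 * PORT_SIZE (0x40) byte register window in BAR5, see
	 * inic_port_base().
	 */
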
	/* registers for ATA TF operation */
	PORT_TF_DATA		= 0x00,
	PORT_TF_FEATURE		= 0x01,
	PORT_TF_NSECT		= 0x02,
	PORT_TF_LBAL		= 0x03,
	PORT_TF_LBAM		= 0x04,
	PORT_TF_LBAH		= 0x05,
	PORT_TF_DEVICE		= 0x06,
	PORT_TF_COMMAND		= 0x07,
	PORT_TF_ALT_STAT	= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,
	PORT_CPB_CPBLAR		= 0x18,
	PORT_CPB_PTQFIFO	= 0x1c,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,
	PORT_IDMA_STAT		= 0x16,

	PORT_RPQ_FIFO		= 0x1e,
	PORT_RPQ_CNT		= 0x1f,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
	PIRQ_MASK_DEFAULT	= PIRQ_REPLY,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */

	/* PORT_IDMA_STAT bits */
	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */

	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,

	/* CPB Control Flags */
	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */

	/* CPB Response Flags */
	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
	CPB_RESP_REL		= (1 << 1),  /* ATA release */
	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */

	/* PRD Control Flags */
	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
	PRD_DMA			= (1 << 4),  /* data transfer method */
	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
	PRD_IOM			= (1 << 6),  /* io/memory transfer */
	PRD_END			= (1 << 7),  /* APRD chain end */
};

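/* The IDMA engine works on 32-byte CPBs.  resp_flags, error and status
 * are response fields written back by the controller; resp_flags is
 * what inic_host_err_intr() inspects after a CPB error.
 */
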
/* Command Parameter Block */
struct inic_cpb {
	u8		resp_flags;	/* Response Flags */
	u8		error;		/* ATA Error */
	u8		status;		/* ATA Status */
	u8		ctl_flags;	/* Control Flags */
	__le32		len;		/* Total Transfer Length */
	__le32		prd;		/* First PRD pointer */
	u8		rsvd[4];
	/* 16 bytes */
	u8		feature;	/* ATA Feature */
	u8		hob_feature;	/* ATA Ex. Feature */
	u8		device;		/* ATA Device/Head */
	u8		mirctl;		/* Mirror Control */
	u8		nsect;		/* ATA Sector Count */
	u8		hob_nsect;	/* ATA Ex. Sector Count */
	u8		lbal;		/* ATA Sector Number */
	u8		hob_lbal;	/* ATA Ex. Sector Number */
	u8		lbam;		/* ATA Cylinder Low */
	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
	u8		lbah;		/* ATA Cylinder High */
	u8		hob_lbah;	/* ATA Ex. Cylinder High */
	u8		command;	/* ATA Command */
	u8		ctl;		/* ATA Control */
	u8		slave_error;	/* Slave ATA Error */
	u8		slave_status;	/* Slave ATA Status */
	/* 32 bytes */
} __packed;

/* Physical Region Descriptor */
struct inic_prd {
	__le32		mad;		/* Physical Memory Address */
	__le16		len;		/* Transfer Length */
	u8		rsvd;
	u8		flags;		/* Control Flags */
} __packed;

struct inic_pkt {
	struct inic_cpb	cpb;
	struct inic_prd	prd[LIBATA_MAX_PRD];
} __packed;

struct inic_host_priv {
	u16		cached_hctl;
};

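/* Per-port DMA buffers: one inic_pkt (a single CPB immediately followed
 * by its PRD table) and the 32-entry CPB lookup table, both allocated
 * coherently in inic_port_start().
 */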
struct inic_port_priv {
	struct inic_pkt	*pkt;
	dma_addr_t	pkt_dma;
	u32		*cpb_tbl;
	dma_addr_t	cpb_tbl_dma;
};

static struct scsi_host_template inic_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
	.dma_boundary	= INIC_DMA_BOUNDARY,
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}

static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = ap->ioaddr.scr_addr;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	addr = scr_addr + scr_map[sc_reg] * 4;
	*val = readl(addr);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		*val &= ~SERR_PHYRDY_CHG;
	return 0;
}

static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = ap->ioaddr.scr_addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	writel(val, scr_addr + scr_map[sc_reg] * 4);
	return 0;
}

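/* Read (and discard) the reply FIFO and its count register, then clear
 * PORT_IDMA_CTL to take the port out of IDMA mode.  Used both on normal
 * ADMA completion and from the error interrupt path.
 */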
static void inic_stop_idma(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	readb(port_base + PORT_RPQ_FIFO);
	readb(port_base + PORT_RPQ_CNT);
	writew(0, port_base + PORT_IDMA_CTL);
}

static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct inic_port_priv *pp = ap->private_data;
	struct inic_cpb *cpb = &pp->pkt->cpb;
	bool freeze = false;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
			  irq_stat, idma_stat);

	inic_stop_idma(ap);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_push_desc(ehi, "hotplug");
		ata_ehi_hotplugged(ehi);
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_PERR) {
		ata_ehi_push_desc(ehi, "PCI error");
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_CPBERR) {
		ata_ehi_push_desc(ehi, "CPB error");

		if (cpb->resp_flags & CPB_RESP_IGNORED) {
			__ata_ehi_push_desc(ehi, " ignored");
			ehi->err_mask |= AC_ERR_INVALID;
			freeze = true;
		}

		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
			ehi->err_mask |= AC_ERR_DEV;

		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
			__ata_ehi_push_desc(ehi, " spurious-intr");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}

		if (cpb->resp_flags &
		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
			__ata_ehi_push_desc(ehi, " data-over/underflow");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}
	}

	if (freeze)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

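/* Per-port interrupt handler.  Reads and acks PORT_IRQ_STAT, hands error
 * bits to inic_host_err_intr(), completes ADMA commands when
 * IDMA_STAT_DONE is set, and falls back to ata_sff_host_intr() for
 * non-DMA protocols.
 */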
static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 irq_stat;
	u16 idma_stat;

	/* read and clear IRQ status */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);
	idma_stat = readw(port_base + PORT_IDMA_STAT);

	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
		inic_host_err_intr(ap, irq_stat, idma_stat);

	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
		goto spurious;
	}

	if (qc->tf.protocol == ATA_PROT_DMA) {
		if (likely(idma_stat & IDMA_STAT_DONE)) {
			inic_stop_idma(ap);

			/* Depending on circumstances, device error
			 * isn't reported by IDMA, check it explicitly.
			 */
			if (unlikely(readb(port_base + PORT_TF_COMMAND) &
				     (ATA_DF | ATA_ERR)))
				qc->err_mask |= AC_ERR_DEV;

			ata_qc_complete(qc);
			return;
		}
	} else {
		if (likely(ata_sff_host_intr(ap, qc)))
			return;
	}

 spurious:
	ap->ops->sff_check_status(ap); /* clear ATA interrupt */
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
			continue;

		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
			inic_host_intr(ap);
			handled++;
		} else {
			if (ata_ratelimit())
				dev_printk(KERN_ERR, host->dev, "interrupt "
					   "from disabled port %d (0x%x)\n",
					   i, host_irq_stat);
		}
	}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int si;
	u8 flags = PRD_DMA;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= PRD_WRITE;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		prd->mad = cpu_to_le32(sg_dma_address(sg));
		prd->len = cpu_to_le16(sg_dma_len(sg));
		prd->flags = flags;
		prd++;
	}

	WARN_ON(!si);
	prd[-1].flags |= PRD_END;
}

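/* Build the CPB + PRD packet for ATA DMA commands.  The taskfile is
 * copied into the CPB field by field (ctl is deliberately left alone,
 * matching the initio reference driver) and the packet's DMA address is
 * loaded into slot 0 of the CPB lookup table.  Non-DMA protocols skip
 * this and go through the regular SFF taskfile path in inic_qc_issue().
 */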
static void inic_qc_prep(struct ata_queued_cmd *qc)
{
	struct inic_port_priv *pp = qc->ap->private_data;
	struct inic_pkt *pkt = pp->pkt;
	struct inic_cpb *cpb = &pkt->cpb;
	struct inic_prd *prd = pkt->prd;

	VPRINTK("ENTER\n");

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* prepare packet, based on initio driver */
	memset(pkt, 0, sizeof(struct inic_pkt));

	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN | CPB_CTL_DATA;

	cpb->len = cpu_to_le32(qc->nbytes);
	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));

	cpb->device = qc->tf.device;
	cpb->feature = qc->tf.feature;
	cpb->nsect = qc->tf.nsect;
	cpb->lbal = qc->tf.lbal;
	cpb->lbam = qc->tf.lbam;
	cpb->lbah = qc->tf.lbah;

	if (qc->tf.flags & ATA_TFLAG_LBA48) {
		cpb->hob_feature = qc->tf.hob_feature;
		cpb->hob_nsect = qc->tf.hob_nsect;
		cpb->hob_lbal = qc->tf.hob_lbal;
		cpb->hob_lbam = qc->tf.hob_lbam;
		cpb->hob_lbah = qc->tf.hob_lbah;
	}

	cpb->command = qc->tf.command;
	/* don't load ctl - dunno why.  it's like that in the initio driver */

	/* setup sg table */
	inic_fill_sg(prd, qc);

	pp->cpb_tbl[0] = pp->pkt_dma;
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_base = inic_port_base(ap);

	if (qc->tf.protocol == ATA_PROT_DMA) {
		/* fire up the ADMA engine */
		writew(HCTL_FTHD0, port_base + HOST_CTL);
		writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
		writeb(0, port_base + PORT_CPB_PTQFIFO);

		return 0;
	}

	/* Issuing a command to a yet uninitialized port locks up the
	 * controller.  Most of the time this happens for the first
	 * command after reset, which is an ATA or ATAPI IDENTIFY.
	 * Fast fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ap->ops->sff_check_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_sff_qc_issue(qc);
}

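/* Read the shadow taskfile from the port's BAR5 window rather than the
 * SFF command block; used by inic_qc_fill_rtf() and by inic_hardreset()
 * for device classification.
 */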
static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	void __iomem *port_base = inic_port_base(ap);

	tf->feature	= readb(port_base + PORT_TF_FEATURE);
	tf->nsect	= readb(port_base + PORT_TF_NSECT);
	tf->lbal	= readb(port_base + PORT_TF_LBAL);
	tf->lbam	= readb(port_base + PORT_TF_LBAM);
	tf->lbah	= readb(port_base + PORT_TF_LBAH);
	tf->device	= readb(port_base + PORT_TF_DEVICE);
	tf->command	= readb(port_base + PORT_TF_COMMAND);
}

static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *rtf = &qc->result_tf;
	struct ata_taskfile tf;

	/* FIXME: Except for status and error, result TF access
	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
	 * None works regardless of which command interface is used.
	 * For now return true iff status indicates device error.
	 * This means that we're reporting bogus sector for RW
	 * failures.  Eeekk....
	 */
	inic_tf_read(qc->ap, &tf);

	if (!(tf.command & ATA_ERR))
		return false;

	rtf->command = tf.command;
	rtf->feature = tf.feature;
	return true;
}

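/* Freezing masks every port interrupt (PIRQ_MASK_FREEZE) and acks
 * whatever is pending; thawing re-enables only the reply FIFO interrupt
 * (PIRQ_MASK_DEFAULT).
 */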
static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
	ap->ops->sff_check_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);
}

static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	ap->ops->sff_check_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);
	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
}

static int inic_check_ready(struct ata_link *link)
{
	void __iomem *port_base = inic_port_base(link->ap);

	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
}

/*
 * SRST and SControl hardreset don't give valid signature on this
 * controller.  Only controller specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	u16 val;
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	val = readw(idma_ctl);
	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

	rc = sata_link_resume(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_WARNING, "failed to resume "
				"link after reset (errno=%d)\n", rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_link_online(link)) {
		struct ata_taskfile tf;

		/* wait for link to become ready */
		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"after hardreset (errno=%d)\n", rc);
			return rc;
		}

		inic_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
	}

	return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	unsigned long flags;

	/* reset PIO HSM and stop DMA engine */
	inic_reset_port(port_base);

	spin_lock_irqsave(ap->lock, flags);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irqrestore(ap->lock, flags);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_std_error_handler(ap);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}

static void inic_dev_config(struct ata_device *dev)
{
	/* inic can only handle up to LBA28 max sectors */
	if (dev->max_sectors > ATA_MAX_SECTORS)
		dev->max_sectors = ATA_MAX_SECTORS;

	if (dev->n_sectors >= 1 << 28) {
		ata_dev_printk(dev, KERN_ERR,
	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
	"       data corruption on such devices.  Disabling.\n");
		ata_dev_disable(dev);
	}
}

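/* Program the per-port PRD and CPB lookup table addresses.  The
 * controller presumably loses these across suspend, hence
 * inic_port_resume() simply calls init_port() again.
 */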
static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	/* clear packet and CPB table */
	memset(pp->pkt, 0, sizeof(struct inic_pkt));
	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);

	/* setup PRD and CPB lookup table addresses */
	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}

static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}

static int inic_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct inic_port_priv *pp;
	int rc;

	/* alloc and initialize private data */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* Alloc resources */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
				      &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
					  &pp->cpb_tbl_dma, GFP_KERNEL);
	if (!pp->cpb_tbl)
		return -ENOMEM;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= inic_qc_prep,
	.qc_issue		= inic_qc_issue,
	.qc_fill_rtf		= inic_qc_fill_rtf,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.softreset		= ATA_OP_NULL,	/* softreset is broken */
	.hardreset		= inic_hardreset,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.port_resume		= inic_port_resume,
	.port_start		= inic_port_start,
};

static struct ata_port_info inic_port_info = {
	/* For some reason, ATAPI_PROT_PIO is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason and ignoring ireason
	 * results in machine lock up.  Tell libata to always prefer
	 * DMA.
	 */
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= 0x1f,	/* pio0-4 */
	.mwdma_mask		= 0x07,	/* mwdma0-2 */
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};

static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}

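/* Resume re-runs init_controller() with the HOST_CTL value cached at
 * probe time, so the soft reset and IRQ unmasking are redone after
 * suspend-to-RAM.
 */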
#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *port = &ap->ioaddr;
		unsigned int offset = i * PORT_SIZE;

		port->cmd_addr = iomap[2 * i];
		port->altstatus_addr =
		port->ctl_addr = (void __iomem *)
			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;

		ata_sff_std_ports(port);

		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
		  (unsigned long long)pci_resource_start(pdev, 2 * i),
		  (unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
				      ATA_PCI_CTL_OFS);
	}

	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

	/* Set dma_mask.  This device doesn't support 64bit addressing. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * 65536 byte PRD entry is fed.  Reduce maximum segment size.
	 */
	rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to set the maximum segment size.\n");
		return rc;
	}

	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};

static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init inic_init(void)
{
	return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
	pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);