/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006 SUSE Linux Products GmbH
 * Copyright 2006 Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at initio's website but it only
 * documents registers (not the programming model).
 *
 * This driver has an interesting history.  The first version was
 * written from the documentation and a 2.4 IDE driver posted by a
 * Taiwanese company, which didn't use any IDMA features and couldn't
 * handle LBA48.  The resulting driver couldn't handle LBA48 devices
 * either, making it pretty useless.
 *
 * After a while, initio picked the driver up, renamed it to
 * sata_initio162x, updated it to use IDMA for ATA DMA commands and
 * posted it on their website.  It only used ATA_PROT_DMA for IDMA and
 * attaching both devices and issuing IDMA and !IDMA commands
 * simultaneously broke it due to PIRQ masking interaction, but it did
 * show how to use the IDMA (ADMA + some initio specific twists)
 * engine.
 *
 * Then, I picked up their changes again and here's the usable driver
 * which uses IDMA for everything.  Everything works now including
 * LBA48, CD/DVD burning, suspend/resume and hotplug.  There are some
 * issues though.  Result TF is not reported properly, NCQ isn't
 * supported yet and CD/DVD writing works with DMA assisted PIO
 * protocol (which, for native SATA devices, shouldn't cause any
 * noticeable difference).
 *
 * Anyway, here's finally a working driver for the inic162x.  Enjoy!
 *
 * initio: If you guys wanna improve the driver regarding result TF
 * access and other stuff, please feel free to contact me.  I'll be
 * happy to assist.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.4"

enum {
	MMIO_BAR_PCI		= 5,
	MMIO_BAR_CARDBUS	= 1,

	NR_PORTS		= 2,

	IDMA_CPB_TBL_SIZE	= 4 * 32,

	INIC_DMA_BOUNDARY	= 0xffffff,

	HOST_ACTRL		= 0x08,
	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF_DATA		= 0x00,
	PORT_TF_FEATURE		= 0x01,
	PORT_TF_NSECT		= 0x02,
	PORT_TF_LBAL		= 0x03,
	PORT_TF_LBAM		= 0x04,
	PORT_TF_LBAH		= 0x05,
	PORT_TF_DEVICE		= 0x06,
	PORT_TF_COMMAND		= 0x07,
	PORT_TF_ALT_STAT	= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,
	PORT_CPB_CPBLAR		= 0x18,
	PORT_CPB_PTQFIFO	= 0x1c,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,
	PORT_IDMA_STAT		= 0x16,

	PORT_RPQ_FIFO		= 0x1e,
	PORT_RPQ_CNT		= 0x1f,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_LEDEN		= (1 << 3),	/* enable LED operation */
	HCTL_IRQOFF		= (1 << 8),	/* global IRQ off */
	HCTL_FTHD0		= (1 << 10),	/* fifo threshold 0 */
	HCTL_FTHD1		= (1 << 11),	/* fifo threshold 1 */
	HCTL_PWRDWN		= (1 << 12),	/* power down PHYs */
	HCTL_SOFTRST		= (1 << 13),	/* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15),	/* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15),	/* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),	/* device unplugged */
	PIRQ_ONLINE		= (1 << 1),	/* device plugged */
	PIRQ_COMPLETE		= (1 << 2),	/* completion interrupt */
	PIRQ_FATAL		= (1 << 3),	/* fatal error */
	PIRQ_ATA		= (1 << 4),	/* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),	/* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),	/* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),	/* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),	/* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),	/* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),	/* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),	/* ATA IRQ disable */

	/* PORT_IDMA_STAT bits */
	IDMA_STAT_PERR		= (1 << 0),	/* PCI error mode */
	IDMA_STAT_CPBERR	= (1 << 1),	/* ADMA CPB error */
	IDMA_STAT_LGCY		= (1 << 3),	/* ADMA legacy */
	IDMA_STAT_UIRQ		= (1 << 4),	/* ADMA unsolicited irq */
	IDMA_STAT_STPD		= (1 << 5),	/* ADMA stopped */
	IDMA_STAT_PSD		= (1 << 6),	/* ADMA paused */
	IDMA_STAT_DONE		= (1 << 7),	/* ADMA done */

	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,

	/* CPB Control Flags */
	CPB_CTL_VALID		= (1 << 0),	/* CPB valid */
	CPB_CTL_QUEUED		= (1 << 1),	/* queued command */
	CPB_CTL_DATA		= (1 << 2),	/* data, rsvd in datasheet */
	CPB_CTL_IEN		= (1 << 3),	/* PCI interrupt enable */
	CPB_CTL_DEVDIR		= (1 << 4),	/* device direction control */

	/* CPB Response Flags */
	CPB_RESP_DONE		= (1 << 0),	/* ATA command complete */
	CPB_RESP_REL		= (1 << 1),	/* ATA release */
	CPB_RESP_IGNORED	= (1 << 2),	/* CPB ignored */
	CPB_RESP_ATA_ERR	= (1 << 3),	/* ATA command error */
	CPB_RESP_SPURIOUS	= (1 << 4),	/* ATA spurious interrupt error */
	CPB_RESP_UNDERFLOW	= (1 << 5),	/* APRD deficiency length error */
	CPB_RESP_OVERFLOW	= (1 << 6),	/* APRD excess length error */
	CPB_RESP_CPB_ERR	= (1 << 7),	/* CPB error flag */

	/* PRD Control Flags */
	PRD_DRAIN		= (1 << 1),	/* ignore data excess */
	PRD_CDB			= (1 << 2),	/* atapi packet command pointer */
	PRD_DIRECT_INTR		= (1 << 3),	/* direct interrupt */
	PRD_DMA			= (1 << 4),	/* data transfer method */
	PRD_WRITE		= (1 << 5),	/* data dir, rsvd in datasheet */
	PRD_IOM			= (1 << 6),	/* io/memory transfer */
	PRD_END			= (1 << 7),	/* APRD chain end */
};

/* Command Parameter Block */
struct inic_cpb {
	u8		resp_flags;	/* Response Flags */
	u8		error;		/* ATA Error */
	u8		status;		/* ATA Status */
	u8		ctl_flags;	/* Control Flags */
	__le32		len;		/* Total Transfer Length */
	__le32		prd;		/* First PRD pointer */
	u8		rsvd[4];
	/* 16 bytes */
	u8		feature;	/* ATA Feature */
	u8		hob_feature;	/* ATA Ex. Feature */
	u8		device;		/* ATA Device/Head */
	u8		mirctl;		/* Mirror Control */
	u8		nsect;		/* ATA Sector Count */
	u8		hob_nsect;	/* ATA Ex. Sector Count */
	u8		lbal;		/* ATA Sector Number */
	u8		hob_lbal;	/* ATA Ex. Sector Number */
	u8		lbam;		/* ATA Cylinder Low */
	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
	u8		lbah;		/* ATA Cylinder High */
	u8		hob_lbah;	/* ATA Ex. Cylinder High */
	u8		command;	/* ATA Command */
	u8		ctl;		/* ATA Control */
	u8		slave_error;	/* Slave ATA Error */
	u8		slave_status;	/* Slave ATA Status */
	/* 32 bytes */
} __packed;

/* Physical Region Descriptor */
struct inic_prd {
	__le32		mad;		/* Physical Memory Address */
	__le16		len;		/* Transfer Length */
	u8		rsvd;
	u8		flags;		/* Control Flags */
} __packed;

struct inic_pkt {
	struct inic_cpb	cpb;
	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for cdb */
	u8		cdb[ATAPI_CDB_LEN];
} __packed;

struct inic_host_priv {
	void __iomem	*mmio_base;
	u16		cached_hctl;
};

struct inic_port_priv {
	struct inic_pkt	*pkt;
	dma_addr_t	pkt_dma;
	u32		*cpb_tbl;
	dma_addr_t	cpb_tbl_dma;
};

static struct scsi_host_template inic_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
	.dma_boundary	= INIC_DMA_BOUNDARY,
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
	struct inic_host_priv *hpriv = ap->host->private_data;

	return hpriv->mmio_base + ap->port_no * PORT_SIZE;
}

static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;

	/* stop IDMA engine */
	readw(idma_ctl); /* flush */
	msleep(1);

	/* mask IRQ and assert reset */
	writew(IDMA_CTL_RST_IDMA, idma_ctl);
	readw(idma_ctl); /* flush */
	msleep(1);

	/* release reset */
	writew(0, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);
}

static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	addr = scr_addr + scr_map[sc_reg] * 4;
	*val = readl(addr);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		*val &= ~SERR_PHYRDY_CHG;
	return 0;
}

static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return -EINVAL;

	writel(val, scr_addr + scr_map[sc_reg] * 4);
	return 0;
}

static void inic_stop_idma(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	readb(port_base + PORT_RPQ_FIFO);
	readb(port_base + PORT_RPQ_CNT);
	writew(0, port_base + PORT_IDMA_CTL);
}

static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct inic_port_priv *pp = ap->private_data;
	struct inic_cpb *cpb = &pp->pkt->cpb;
	bool freeze = false;
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
			  irq_stat, idma_stat);

	inic_stop_idma(ap);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_push_desc(ehi, "hotplug");
		ata_ehi_hotplugged(ehi);
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_PERR) {
		ata_ehi_push_desc(ehi, "PCI error");
		freeze = true;
	}

	if (idma_stat & IDMA_STAT_CPBERR) {
		ata_ehi_push_desc(ehi, "CPB error");

		if (cpb->resp_flags & CPB_RESP_IGNORED) {
			__ata_ehi_push_desc(ehi, " ignored");
			ehi->err_mask |= AC_ERR_INVALID;
			freeze = true;
		}

		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
			ehi->err_mask |= AC_ERR_DEV;

		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
			__ata_ehi_push_desc(ehi, " spurious-intr");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}

		if (cpb->resp_flags &
		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
			__ata_ehi_push_desc(ehi, " data-over/underflow");
			ehi->err_mask |= AC_ERR_HSM;
			freeze = true;
		}
	}

	if (freeze)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
	u8 irq_stat;
	u16 idma_stat;

	/* read and clear IRQ status */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);
	idma_stat = readw(port_base + PORT_IDMA_STAT);

	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
		inic_host_err_intr(ap, irq_stat, idma_stat);

	if (unlikely(!qc))
		goto spurious;

	if (likely(idma_stat & IDMA_STAT_DONE)) {
		inic_stop_idma(ap);

		/* Depending on circumstances, device error
		 * isn't reported by IDMA, check it explicitly.
		 */
		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
			     (ATA_DF | ATA_ERR)))
			qc->err_mask |= AC_ERR_DEV;

		ata_qc_complete(qc);
		return;
	}

 spurious:
	ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
			"cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
			qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct inic_host_priv *hpriv = host->private_data;
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
			continue;

		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
			inic_host_intr(ap);
			handled++;
		} else {
			if (ata_ratelimit())
				dev_printk(KERN_ERR, host->dev, "interrupt "
					   "from disabled port %d (0x%x)\n",
					   i, host_irq_stat);
		}
	}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* For some reason ATAPI_PROT_DMA doesn't work for some
	 * commands including writes and other misc ops.  Use PIO
	 * protocol instead, which BTW is driven by the DMA engine
	 * anyway, so it shouldn't make much difference for native
	 * SATA devices.
	 */
	if (atapi_cmd_type(qc->cdb[0]) == READ)
		return 0;
	return 1;
}

static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int si;
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= PRD_WRITE;

	if (ata_is_dma(qc->tf.protocol))
		flags |= PRD_DMA;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		prd->mad = cpu_to_le32(sg_dma_address(sg));
		prd->len = cpu_to_le16(sg_dma_len(sg));
		prd->flags = flags;
		prd++;
	}

	WARN_ON(!si);
	prd[-1].flags |= PRD_END;
}

static void inic_qc_prep(struct ata_queued_cmd *qc)
{
	struct inic_port_priv *pp = qc->ap->private_data;
	struct inic_pkt *pkt = pp->pkt;
	struct inic_cpb *cpb = &pkt->cpb;
	struct inic_prd *prd = pkt->prd;
	bool is_atapi = ata_is_atapi(qc->tf.protocol);
	bool is_data = ata_is_data(qc->tf.protocol);
	unsigned int cdb_len = 0;

	VPRINTK("ENTER\n");

	if (is_atapi)
		cdb_len = qc->dev->cdb_len;

	/* prepare packet, based on initio driver */
	memset(pkt, 0, sizeof(struct inic_pkt));

	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
	if (is_atapi || is_data)
		cpb->ctl_flags |= CPB_CTL_DATA;

	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));

	cpb->device = qc->tf.device;
	cpb->feature = qc->tf.feature;
	cpb->nsect = qc->tf.nsect;
	cpb->lbal = qc->tf.lbal;
	cpb->lbam = qc->tf.lbam;
	cpb->lbah = qc->tf.lbah;

	if (qc->tf.flags & ATA_TFLAG_LBA48) {
		cpb->hob_feature = qc->tf.hob_feature;
		cpb->hob_nsect = qc->tf.hob_nsect;
		cpb->hob_lbal = qc->tf.hob_lbal;
		cpb->hob_lbam = qc->tf.hob_lbam;
		cpb->hob_lbah = qc->tf.hob_lbah;
	}

	cpb->command = qc->tf.command;
	/* don't load ctl - dunno why.  it's like that in the initio driver */

	/* setup PRD for CDB */
	if (is_atapi) {
		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
		prd->mad = cpu_to_le32(pp->pkt_dma +
				       offsetof(struct inic_pkt, cdb));
		prd->len = cpu_to_le16(cdb_len);
		prd->flags = PRD_CDB | PRD_WRITE;
		if (!is_data)
			prd->flags |= PRD_END;
		prd++;
	}

	/* setup sg table */
	if (is_data)
		inic_fill_sg(prd, qc);

	pp->cpb_tbl[0] = pp->pkt_dma;
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_base = inic_port_base(ap);

	/* fire up the ADMA engine */
	writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL);
	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
	writeb(0, port_base + PORT_CPB_PTQFIFO);

	return 0;
}

static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	void __iomem *port_base = inic_port_base(ap);

	tf->feature	= readb(port_base + PORT_TF_FEATURE);
	tf->nsect	= readb(port_base + PORT_TF_NSECT);
	tf->lbal	= readb(port_base + PORT_TF_LBAL);
	tf->lbam	= readb(port_base + PORT_TF_LBAM);
	tf->lbah	= readb(port_base + PORT_TF_LBAH);
	tf->device	= readb(port_base + PORT_TF_DEVICE);
	tf->command	= readb(port_base + PORT_TF_COMMAND);
}

static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *rtf = &qc->result_tf;
	struct ata_taskfile tf;

	/* FIXME: Except for status and error, result TF access
	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
	 * None works regardless of which command interface is used.
	 * For now return true iff status indicates device error.
	 * This means that we're reporting bogus sector for RW
	 * failures.  Eeekk....
	 */
	inic_tf_read(qc->ap, &tf);

	if (!(tf.command & ATA_ERR))
		return false;

	rtf->command = tf.command;
	rtf->feature = tf.feature;
	return true;
}

static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
	writeb(0xff, port_base + PORT_IRQ_STAT);
}

static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	writeb(0xff, port_base + PORT_IRQ_STAT);
	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
}

static int inic_check_ready(struct ata_link *link)
{
	void __iomem *port_base = inic_port_base(link->ap);

	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
}

/*
 * SRST and SControl hardreset don't give valid signature on this
 * controller.  Only controller specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	writew(IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(0, idma_ctl);

	rc = sata_link_resume(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_WARNING, "failed to resume "
				"link after reset (errno=%d)\n", rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_link_online(link)) {
		struct ata_taskfile tf;

		/* wait for link to become ready */
		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
		/* link occupied, -ENODEV too is an error */
		if (rc) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"after hardreset (errno=%d)\n", rc);
			return rc;
		}

		inic_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
	}

	return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	inic_reset_port(port_base);
	ata_std_error_handler(ap);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}

static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	/* clear packet and CPB table */
	memset(pp->pkt, 0, sizeof(struct inic_pkt));
	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);

	/* setup PRD and CPB lookup table addresses */
	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
}

static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}

static int inic_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct inic_port_priv *pp;
	int rc;

	/* alloc and initialize private data */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* Alloc resources */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
				      &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
					  &pp->cpb_tbl_dma, GFP_KERNEL);
	if (!pp->cpb_tbl)
		return -ENOMEM;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.inherits		= &sata_port_ops,

	.check_atapi_dma	= inic_check_atapi_dma,
	.qc_prep		= inic_qc_prep,
	.qc_issue		= inic_qc_issue,
	.qc_fill_rtf		= inic_qc_fill_rtf,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.hardreset		= inic_hardreset,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.port_resume		= inic_port_resume,
	.port_start		= inic_port_start,
};

static struct ata_port_info inic_port_info = {
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= ATA_PIO4,
	.mwdma_mask		= ATA_MWDMA2,
	.udma_mask		= ATA_UDMA6,
	.port_ops		= &inic_port_ops
};

static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}

#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int mmio_bar;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* Acquire resources and fill host.  Note that PCI and cardbus
	 * use different BARs.
	 */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
		mmio_bar = MMIO_BAR_PCI;
	else
		mmio_bar = MMIO_BAR_CARDBUS;

	rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);
	hpriv->mmio_base = iomap[mmio_bar];
	hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
		ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
	}

	/* Set dma_mask.  This device doesn't support 64bit addressing. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	/*
	 * This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * a 65536 byte PRD entry is fed.  Reduce maximum segment size.
	 */
	rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to set the maximum segment size.\n");
		return rc;
	}

	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};

static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init inic_init(void)
{
	return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
	pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);