/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at initio's website but it only
 * documents registers (not programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME        "sata_inic162x"
#define DRV_VERSION     "0.1"

enum {
        MMIO_BAR                = 5,

        NR_PORTS                = 2,

        HOST_CTL                = 0x7c,
        HOST_STAT               = 0x7e,
        HOST_IRQ_STAT           = 0xbc,
        HOST_IRQ_MASK           = 0xbe,

        PORT_SIZE               = 0x40,

        /* registers for ATA TF operation */
        PORT_TF                 = 0x00,
        PORT_ALT_STAT           = 0x08,
        PORT_IRQ_STAT           = 0x09,
        PORT_IRQ_MASK           = 0x0a,
        PORT_PRD_CTL            = 0x0b,
        PORT_PRD_ADDR           = 0x0c,
        PORT_PRD_XFERLEN        = 0x10,

        /* IDMA register */
        PORT_IDMA_CTL           = 0x14,

        PORT_SCR                = 0x20,

        /* HOST_CTL bits */
        HCTL_IRQOFF             = (1 << 8),     /* global IRQ off */
        HCTL_PWRDWN             = (1 << 13),    /* power down PHYs */
        HCTL_SOFTRST            = (1 << 13),    /* global reset (no phy reset) */
        HCTL_RPGSEL             = (1 << 15),    /* register page select */

        HCTL_KNOWN_BITS         = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
                                  HCTL_RPGSEL,

        /* HOST_IRQ_(STAT|MASK) bits */
        HIRQ_PORT0              = (1 << 0),
        HIRQ_PORT1              = (1 << 1),
        HIRQ_SOFT               = (1 << 14),
        HIRQ_GLOBAL             = (1 << 15),    /* STAT only */

        /* PORT_IRQ_(STAT|MASK) bits */
        PIRQ_OFFLINE            = (1 << 0),     /* device unplugged */
        PIRQ_ONLINE             = (1 << 1),     /* device plugged */
        PIRQ_COMPLETE           = (1 << 2),     /* completion interrupt */
        PIRQ_FATAL              = (1 << 3),     /* fatal error */
        PIRQ_ATA                = (1 << 4),     /* ATA interrupt */
        PIRQ_REPLY              = (1 << 5),     /* reply FIFO not empty */
        PIRQ_PENDING            = (1 << 7),     /* port IRQ pending (STAT only) */

        PIRQ_ERR                = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

        PIRQ_MASK_DMA_READ      = PIRQ_REPLY | PIRQ_ATA,
        PIRQ_MASK_OTHER         = PIRQ_REPLY | PIRQ_COMPLETE,
        PIRQ_MASK_FREEZE        = 0xff,

        /* PORT_PRD_CTL bits */
        PRD_CTL_START           = (1 << 0),
        PRD_CTL_WR              = (1 << 3),
        PRD_CTL_DMAEN           = (1 << 7),     /* DMA enable */

        /* PORT_IDMA_CTL bits */
        IDMA_CTL_RST_ATA        = (1 << 2),     /* hardreset ATA bus */
        IDMA_CTL_RST_IDMA       = (1 << 5),     /* reset IDMA machinery */
        IDMA_CTL_GO             = (1 << 7),     /* IDMA mode go */
        IDMA_CTL_ATA_NIEN       = (1 << 8),     /* ATA IRQ disable */
};

struct inic_host_priv {
        u16     cached_hctl;
};

struct inic_port_priv {
        u8      dfl_prdctl;
        u8      cached_prdctl;
        u8      cached_pirq_mask;
};

static int inic_slave_config(struct scsi_device *sdev)
{
        /* This controller is braindamaged.  dma_boundary is 0xffff
         * like others but it will lock up the whole machine HARD if
         * a 65536 byte PRD entry is fed.  Reduce maximum segment size.
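         * Capping segments at 65536 - 512 keeps every PRD byte count
         * strictly below 0x10000 while staying a multiple of the
         * 512-byte sector size.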
         */
        blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);

        return ata_scsi_slave_config(sdev);
}

static struct scsi_host_template inic_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = inic_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
        .suspend                = ata_scsi_device_suspend,
        .resume                 = ata_scsi_device_resume,
};

static const int scr_map[] = {
        [SCR_STATUS]    = 0,
        [SCR_ERROR]     = 1,
        [SCR_CONTROL]   = 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
        return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
        void __iomem *port_base = inic_port_base(ap);
        struct inic_port_priv *pp = ap->private_data;

        writeb(mask, port_base + PORT_IRQ_MASK);
        pp->cached_pirq_mask = mask;
}

static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
        struct inic_port_priv *pp = ap->private_data;

        if (pp->cached_pirq_mask != mask)
                __inic_set_pirq_mask(ap, mask);
}

static void inic_reset_port(void __iomem *port_base)
{
        void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
        u16 ctl;

        ctl = readw(idma_ctl);
        ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

        /* mask IRQ and assert reset */
        writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
        readw(idma_ctl); /* flush */

        /* give it some time */
        msleep(1);

        /* release reset */
        writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

        /* clear irq */
        writeb(0xff, port_base + PORT_IRQ_STAT);

        /* reenable ATA IRQ, turn off IDMA mode */
        writew(ctl, idma_ctl);
}
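
/*
 * SCR access: the per-port SCR registers live in a small window at
 * PORT_SCR; scr_map[] translates libata's SCR_* indices into 32-bit
 * slots inside that window.
 */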
static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
{
        void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
        void __iomem *addr;
        u32 val;

        if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
                return 0xffffffffU;

        addr = scr_addr + scr_map[sc_reg] * 4;
        val = readl(addr);

        /* this controller has stuck DIAG.N, ignore it */
        if (sc_reg == SCR_ERROR)
                val &= ~SERR_PHYRDY_CHG;
        return val;
}

static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
        void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
        void __iomem *addr;

        if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
                return;

        addr = scr_addr + scr_map[sc_reg] * 4;
        writel(val, addr);
}

/*
 * In TF mode, inic162x is very similar to an SFF device.  TF registers
 * function the same.  The DMA engine behaves similarly, using the same
 * PRD format as BMDMA, but different command register, interrupt and
 * event notification methods are used.  The following inic_bmdma_*()
 * functions do the impedance matching.
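 *
 * Unlike BMDMA, the transfer length is written to PORT_PRD_XFERLEN and
 * the direction/start bits live in PORT_PRD_CTL; see inic_bmdma_setup()
 * and inic_bmdma_start() below.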
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct inic_port_priv *pp = ap->private_data;
        void __iomem *port_base = inic_port_base(ap);
        int rw = qc->tf.flags & ATA_TFLAG_WRITE;

        /* make sure device sees PRD table writes */
        wmb();

        /* load transfer length */
        writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

        /* turn on DMA and specify data direction */
        pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
        if (!rw)
                pp->cached_prdctl |= PRD_CTL_WR;
        writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

        /* issue r/w command */
        ap->ops->exec_command(ap, &qc->tf);
}

static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct inic_port_priv *pp = ap->private_data;
        void __iomem *port_base = inic_port_base(ap);

        /* start host DMA transaction */
        pp->cached_prdctl |= PRD_CTL_START;
        writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}

static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct inic_port_priv *pp = ap->private_data;
        void __iomem *port_base = inic_port_base(ap);

        /* stop DMA engine */
        writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}

static u8 inic_bmdma_status(struct ata_port *ap)
{
        /* event is already verified by the interrupt handler */
        return ATA_DMA_INTR;
}

static void inic_irq_clear(struct ata_port *ap)
{
        /* noop */
}

static void inic_host_intr(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);
        struct ata_eh_info *ehi = &ap->eh_info;
        u8 irq_stat;

        /* fetch and clear irq */
        irq_stat = readb(port_base + PORT_IRQ_STAT);
        writeb(irq_stat, port_base + PORT_IRQ_STAT);

        if (likely(!(irq_stat & PIRQ_ERR))) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

                if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                        ata_chk_status(ap);     /* clear ATA interrupt */
                        return;
                }

                if (likely(ata_host_intr(ap, qc)))
                        return;

                ata_chk_status(ap);     /* clear ATA interrupt */
                ata_port_printk(ap, KERN_WARNING, "unhandled "
                                "interrupt, irq_stat=%x\n", irq_stat);
                return;
        }

        /* error */
        ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

        if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
                ata_ehi_hotplugged(ehi);
                ata_port_freeze(ap);
        } else
                ata_port_abort(ap);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        void __iomem *mmio_base = host->iomap[MMIO_BAR];
        u16 host_irq_stat;
        int i, handled = 0;

        host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);

        if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
                goto out;

        spin_lock(&host->lock);

        for (i = 0; i < NR_PORTS; i++) {
                struct ata_port *ap = host->ports[i];

                if (!(host_irq_stat & (HIRQ_PORT0 << i)))
                        continue;

                if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
                        inic_host_intr(ap);
                        handled++;
                } else {
                        if (ata_ratelimit())
                                dev_printk(KERN_ERR, host->dev, "interrupt "
                                           "from disabled port %d (0x%x)\n",
                                           i, host_irq_stat);
                }
        }

        spin_unlock(&host->lock);

 out:
        return IRQ_RETVAL(handled);
}

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        /* ATA IRQ doesn't wait for DMA transfer completion and vice
         * versa.  Mask IRQ selectively to detect command completion.
         * Without it, ATA DMA read command can cause data corruption.
         *
         * Something similar might be needed for ATAPI writes.  I
         * tried a lot of combinations but couldn't find the solution.
         */
        if (qc->tf.protocol == ATA_PROT_DMA &&
            !(qc->tf.flags & ATA_TFLAG_WRITE))
                inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
        else
                inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

        /* Issuing a command to a yet uninitialized port locks up the
         * controller.  Most of the time, this happens for the first
         * commands after reset, which are the ATA and ATAPI IDENTIFYs.
         * Fast fail if stat is 0x7f or 0xff for those commands.
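         * (0x7f and 0xff are the status values typically seen when
         * nothing is driving the taskfile registers.)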
         */
        if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
                     qc->tf.command == ATA_CMD_ID_ATAPI)) {
                u8 stat = ata_chk_status(ap);
                if (stat == 0x7f || stat == 0xff)
                        return AC_ERR_HSM;
        }

        return ata_qc_issue_prot(qc);
}

static void inic_freeze(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);

        __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

        ata_chk_status(ap);
        writeb(0xff, port_base + PORT_IRQ_STAT);

        readb(port_base + PORT_IRQ_STAT); /* flush */
}

static void inic_thaw(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);

        ata_chk_status(ap);
        writeb(0xff, port_base + PORT_IRQ_STAT);

        __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

        readb(port_base + PORT_IRQ_STAT); /* flush */
}

/*
 * SRST and SControl hardreset don't give a valid signature on this
 * controller.  Only the controller-specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_port *ap, unsigned int *class)
{
        void __iomem *port_base = inic_port_base(ap);
        void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
        const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
        u16 val;
        int rc;

        /* hammer it into sane state */
        inic_reset_port(port_base);

        val = readw(idma_ctl);
        writew(val | IDMA_CTL_RST_ATA, idma_ctl);
        readw(idma_ctl);        /* flush */
        msleep(1);
        writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

        rc = sata_phy_resume(ap, timing);
        if (rc) {
                ata_port_printk(ap, KERN_WARNING, "failed to resume "
                                "link after reset (errno=%d)\n", rc);
                return rc;
        }

        *class = ATA_DEV_NONE;
        if (ata_port_online(ap)) {
                struct ata_taskfile tf;

                /* wait a while before checking status */
                msleep(150);

                if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                        ata_port_printk(ap, KERN_WARNING,
                                        "device busy after hardreset\n");
                        return -EIO;
                }

                ata_tf_read(ap, &tf);
                *class = ata_dev_classify(&tf);
                if (*class == ATA_DEV_UNKNOWN)
                        *class = ATA_DEV_NONE;
        }

        return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);
        struct inic_port_priv *pp = ap->private_data;
        unsigned long flags;

        /* reset PIO HSM and stop DMA engine */
        inic_reset_port(port_base);

        spin_lock_irqsave(ap->lock, flags);
        ap->hsm_task_state = HSM_ST_IDLE;
        writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
        spin_unlock_irqrestore(ap->lock, flags);

        /* PIO and DMA engines have been stopped, perform recovery */
        ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
                  ata_std_postreset);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
        /* make DMA engine forget about the failed command */
        if (qc->err_mask)
                inic_reset_port(inic_port_base(qc->ap));
}

static void inic_dev_config(struct ata_port *ap, struct ata_device *dev)
{
        /* inic can only handle up to LBA28 max sectors */
        if (dev->max_sectors > ATA_MAX_SECTORS)
                dev->max_sectors = ATA_MAX_SECTORS;
}

static void init_port(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);

        /* Setup PRD address */
        writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
}

static int inic_port_resume(struct ata_port *ap)
{
        init_port(ap);
        return 0;
}

static int inic_port_start(struct ata_port *ap)
{
        void __iomem *port_base = inic_port_base(ap);
        struct inic_port_priv *pp;
        u8 tmp;
        int rc;

        /* alloc and initialize private data */
        pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        ap->private_data = pp;

        /* default PRD_CTL value, DMAEN, WR and START off */
        tmp = readb(port_base + PORT_PRD_CTL);
        tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
        pp->dfl_prdctl = tmp;

        /* Alloc resources.  pp is devm-managed, don't free it here on
         * failure.
         */
        rc = ata_port_start(ap);
        if (rc)
                return rc;

        init_port(ap);

        return 0;
}
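
/*
 * Taskfile access uses the stock SFF helpers; only the operations that
 * differ on this controller (SCR access, DMA programming, interrupt and
 * error handling) get inic_* implementations.
 */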
static struct ata_port_operations inic_port_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_status           = ata_check_status,
        .exec_command           = ata_exec_command,
        .dev_select             = ata_std_dev_select,

        .scr_read               = inic_scr_read,
        .scr_write              = inic_scr_write,

        .bmdma_setup            = inic_bmdma_setup,
        .bmdma_start            = inic_bmdma_start,
        .bmdma_stop             = inic_bmdma_stop,
        .bmdma_status           = inic_bmdma_status,

        .irq_handler            = inic_interrupt,
        .irq_clear              = inic_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,

        .qc_prep                = ata_qc_prep,
        .qc_issue               = inic_qc_issue,
        .data_xfer              = ata_data_xfer,

        .freeze                 = inic_freeze,
        .thaw                   = inic_thaw,
        .error_handler          = inic_error_handler,
        .post_internal_cmd      = inic_post_internal_cmd,
        .dev_config             = inic_dev_config,

        .port_resume            = inic_port_resume,

        .port_start             = inic_port_start,
};

static struct ata_port_info inic_port_info = {
        .sht                    = &inic_sht,
        /* For some reason, ATA_PROT_ATAPI is broken on this
         * controller, and no, PIO_POLLING doesn't fix it.  It somehow
         * manages to report the wrong ireason, and ignoring ireason
         * results in machine lockup.  Tell libata to always prefer
         * DMA.
         */
        .flags                  = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
        .pio_mask               = 0x1f, /* pio0-4 */
        .mwdma_mask             = 0x07, /* mwdma0-2 */
        .udma_mask              = 0x7f, /* udma0-6 */
        .port_ops               = &inic_port_ops
};

static int init_controller(void __iomem *mmio_base, u16 hctl)
{
        int i;
        u16 val;

        hctl &= ~HCTL_KNOWN_BITS;

        /* Soft reset whole controller.  Spec says reset duration is 3
         * PCI clocks, be generous and give it 10ms.
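         * The loop below polls HCTL_SOFTRST once per millisecond, so
         * the total wait is bounded at roughly 10ms.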
         */
        writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
        readw(mmio_base + HOST_CTL); /* flush */

        for (i = 0; i < 10; i++) {
                msleep(1);
                val = readw(mmio_base + HOST_CTL);
                if (!(val & HCTL_SOFTRST))
                        break;
        }

        if (val & HCTL_SOFTRST)
                return -EIO;

        /* mask all interrupts and reset ports */
        for (i = 0; i < NR_PORTS; i++) {
                void __iomem *port_base = mmio_base + i * PORT_SIZE;

                writeb(0xff, port_base + PORT_IRQ_MASK);
                inic_reset_port(port_base);
        }

        /* port IRQ is masked now, unmask global IRQ */
        writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
        val = readw(mmio_base + HOST_IRQ_MASK);
        val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
        writew(val, mmio_base + HOST_IRQ_MASK);

        return 0;
}

static int inic_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct inic_host_priv *hpriv = host->private_data;
        void __iomem *mmio_base = host->iomap[MMIO_BAR];
        int rc;

        ata_pci_device_do_resume(pdev);

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                rc = init_controller(mmio_base, hpriv->cached_hctl);
                if (rc)
                        return rc;
        }

        ata_host_resume(host);

        return 0;
}

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        struct ata_port_info *pinfo = &inic_port_info;
        struct ata_probe_ent *probe_ent;
        struct inic_host_priv *hpriv;
        void __iomem * const *iomap;
        int i, rc;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
        if (rc)
                return rc;
        iomap = pcim_iomap_table(pdev);

        /* Set dma_mask.  This device doesn't support 64-bit addressing.
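         * Both the streaming and the consistent DMA masks are capped
         * at 32 bits below.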
         */
        rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "32-bit DMA enable failed\n");
                return rc;
        }

        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "32-bit consistent DMA enable failed\n");
                return rc;
        }

        probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!probe_ent || !hpriv)
                return -ENOMEM;

        probe_ent->dev = &pdev->dev;
        INIT_LIST_HEAD(&probe_ent->node);

        probe_ent->sht          = pinfo->sht;
        probe_ent->port_flags   = pinfo->flags;
        probe_ent->pio_mask     = pinfo->pio_mask;
        probe_ent->mwdma_mask   = pinfo->mwdma_mask;
        probe_ent->udma_mask    = pinfo->udma_mask;
        probe_ent->port_ops     = pinfo->port_ops;
        probe_ent->n_ports      = NR_PORTS;

        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;

        probe_ent->iomap = iomap;

        for (i = 0; i < NR_PORTS; i++) {
                struct ata_ioports *port = &probe_ent->port[i];
                void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;

                port->cmd_addr = iomap[2 * i];
                port->altstatus_addr =
                port->ctl_addr = (void __iomem *)
                        ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
                port->scr_addr = port_base + PORT_SCR;

                ata_std_ports(port);
        }

        probe_ent->private_data = hpriv;
        hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

        rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "failed to initialize controller\n");
                return rc;
        }

        pci_set_master(pdev);

        if (!ata_device_add(probe_ent))
                return -ENODEV;

        devm_kfree(&pdev->dev, probe_ent);

        return 0;
}

static const struct pci_device_id inic_pci_tbl[] = {
        { PCI_VDEVICE(INIT, 0x1622), },
        { },
};

static struct pci_driver inic_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = inic_pci_tbl,
        .suspend        = ata_pci_device_suspend,
        .resume         = inic_pci_device_resume,
        .probe          = inic_init_one,
        .remove         = ata_pci_remove_one,
};

static int __init inic_init(void)
{
        return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
        pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);