/*
 * sata_inic162x.c - Driver for Initio 162x SATA controllers
 *
 * Copyright 2006  SUSE Linux Products GmbH
 * Copyright 2006  Tejun Heo <teheo@novell.com>
 *
 * This file is released under GPL v2.
 *
 * This controller is eccentric and easily locks up if something isn't
 * right.  Documentation is available at Initio's website but it only
 * documents registers (not the programming model).
 *
 * - ATA disks work.
 * - Hotplug works.
 * - ATAPI read works but burning doesn't.  This thing is really
 *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
 *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
 *   my guest.
 * - Both STR and STD work.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

#define DRV_NAME	"sata_inic162x"
#define DRV_VERSION	"0.1"

enum {
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF			= 0x00,
	PORT_ALT_STAT		= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinery */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
};
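
/*
 * How the MMIO register file is addressed by this driver (inferred
 * from the offsets above and from inic_port_base() below, not from
 * Initio documentation): the HOST_* constants are absolute offsets
 * into BAR 5, while the PORT_* constants are offsets into a per-port
 * window of PORT_SIZE bytes starting at port_no * PORT_SIZE.  For
 * example, port 1's SControl register ends up at BAR 5 offset
 * 1 * PORT_SIZE + PORT_SCR + 4 * scr_map[SCR_CONTROL] = 0x68, which
 * is exactly what the SCR accessors below compute.
 */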

struct inic_host_priv {
	u16	cached_hctl;
};

struct inic_port_priv {
	u8	dfl_prdctl;
	u8	cached_prdctl;
	u8	cached_pirq_mask;
};

static int inic_slave_config(struct scsi_device *sdev)
{
	/* This controller is braindamaged.  dma_boundary is 0xffff
	 * like others but it will lock up the whole machine HARD if
	 * a 65536 byte PRD entry is fed.  Reduce the maximum segment size.
	 */
	blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);

	return ata_scsi_slave_config(sdev);
}

static struct scsi_host_template inic_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= inic_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
#ifdef CONFIG_PM
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
#endif
};

static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};

static void __iomem *inic_port_base(struct ata_port *ap)
{
	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;

	writeb(mask, port_base + PORT_IRQ_MASK);
	pp->cached_pirq_mask = mask;
}

static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
{
	struct inic_port_priv *pp = ap->private_data;

	if (pp->cached_pirq_mask != mask)
		__inic_set_pirq_mask(ap, mask);
}

static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}
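
/*
 * SCR access.  The SATA status/control registers live in the per-port
 * window at PORT_SCR, one 32-bit register each, indexed through
 * scr_map[] above; by the driver's own arithmetic that puts SStatus
 * at PORT_SCR + 0x0, SError at +0x4 and SControl at +0x8.  DIAG.N in
 * SError is stuck on this controller, so it is filtered out on read.
 */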

static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
{
	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
	void __iomem *addr;
	u32 val;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return 0xffffffffU;

	addr = scr_addr + scr_map[sc_reg] * 4;
	val = readl(addr);

	/* this controller has stuck DIAG.N, ignore it */
	if (sc_reg == SCR_ERROR)
		val &= ~SERR_PHYRDY_CHG;
	return val;
}

static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
	void __iomem *addr;

	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
		return;

	addr = scr_addr + scr_map[sc_reg] * 4;
	writel(val, addr);
}

/*
 * In TF mode, inic162x is very similar to an SFF device.  TF
 * registers function the same.  The DMA engine behaves similarly,
 * using the same PRD format as BMDMA, but a different command
 * register and different interrupt and event notification methods
 * are used.  The following inic_bmdma_*() functions do the impedance
 * matching.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);
	int rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* make sure device sees PRD table writes */
	wmb();

	/* load transfer length */
	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

	/* turn on DMA and specify data direction */
	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
	if (!rw)
		pp->cached_prdctl |= PRD_CTL_WR;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* start host DMA transaction */
	pp->cached_prdctl |= PRD_CTL_START;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}

static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* stop DMA engine */
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}

static u8 inic_bmdma_status(struct ata_port *ap)
{
	/* event is already verified by the interrupt handler */
	return ATA_DMA_INTR;
}
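
/*
 * Note on the PRD table: .qc_prep is the stock ata_qc_prep, so the
 * scatter/gather list handed to the chip is the standard BMDMA PRD
 * table libata builds at ap->prd / ap->prd_dma.  init_port() points
 * PORT_PRD_ADDR at it once per port; the shims above only supply the
 * total transfer length and the direction/start bits in PORT_PRD_CTL.
 */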

static void inic_irq_clear(struct ata_port *ap)
{
	/* noop */
}

static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 irq_stat;

	/* fetch and clear irq */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);

	if (likely(!(irq_stat & PIRQ_ERR))) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			ata_chk_status(ap);	/* clear ATA interrupt */
			return;
		}

		if (likely(ata_host_intr(ap, qc)))
			return;

		ata_chk_status(ap);	/* clear ATA interrupt */
		ata_port_printk(ap, KERN_WARNING, "unhandled "
				"interrupt, irq_stat=%x\n", irq_stat);
		return;
	}

	/* error */
	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else
		ata_port_abort(ap);
}

static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	u16 host_irq_stat;
	int i, handled = 0;

	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);

	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_port *ap = host->ports[i];

		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
			continue;

		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
			inic_host_intr(ap);
			handled++;
		} else {
			if (ata_ratelimit())
				dev_printk(KERN_ERR, host->dev, "interrupt "
					   "from disabled port %d (0x%x)\n",
					   i, host_irq_stat);
		}
	}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}
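
/*
 * Per-command PORT_IRQ_MASK selection used by inic_qc_issue() below.
 * Judging from inic_freeze(), which writes 0xff to quiesce a port, a
 * set bit disables the corresponding interrupt, so:
 *
 *	ATA DMA read:	 REPLY and ATA masked, completion IRQ signals done
 *	everything else: REPLY and COMPLETE masked, ATA IRQ signals done
 *
 * The ATA interrupt can arrive before the IDMA engine has finished
 * writing read data to memory, which is presumably why DMA reads wait
 * for the completion interrupt instead.
 */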

static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ATA IRQ doesn't wait for DMA transfer completion and vice
	 * versa.  Mask IRQs selectively to detect command completion.
	 * Without it, an ATA DMA read command can cause data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to a yet-uninitialized port locks up the
	 * controller.  Most of the time this happens for the first
	 * command after reset, which is an ATA or ATAPI IDENTIFY.
	 * Fast-fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ata_chk_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_qc_issue_prot(qc);
}

static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}

static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}
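
/*
 * Freeze/thaw: inic_freeze() masks every port interrupt and acks
 * whatever is pending, so a frozen port stays silent until EH runs;
 * inic_thaw() acks again and restores the default command mask.  The
 * ata_chk_status() reads clear a possibly asserted ATA interrupt
 * around the mask change.
 */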

/*
 * SRST and SControl hardreset don't give a valid signature on this
 * controller.  Only the controller-specific hardreset mechanism works.
 */
static int inic_hardreset(struct ata_port *ap, unsigned int *class,
			  unsigned long deadline)
{
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	u16 val;
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	val = readw(idma_ctl);
	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

	rc = sata_phy_resume(ap, timing, deadline);
	if (rc) {
		ata_port_printk(ap, KERN_WARNING, "failed to resume "
				"link after reset (errno=%d)\n", rc);
		return rc;
	}

	*class = ATA_DEV_NONE;
	if (ata_port_online(ap)) {
		struct ata_taskfile tf;

		/* wait a while before checking status */
		msleep(150);

		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_port_printk(ap, KERN_WARNING, "device not ready "
					"after hardreset (errno=%d)\n", rc);
			return rc;
		}

		ata_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
		if (*class == ATA_DEV_UNKNOWN)
			*class = ATA_DEV_NONE;
	}

	return 0;
}

static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;
	unsigned long flags;

	/* reset PIO HSM and stop DMA engine */
	inic_reset_port(port_base);

	spin_lock_irqsave(ap->lock, flags);
	ap->hsm_task_state = HSM_ST_IDLE;
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
	spin_unlock_irqrestore(ap->lock, flags);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
		  ata_std_postreset);
}

static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		inic_reset_port(inic_port_base(qc->ap));
}

static void inic_dev_config(struct ata_device *dev)
{
	/* inic can only handle up to LBA28 max sectors */
	if (dev->max_sectors > ATA_MAX_SECTORS)
		dev->max_sectors = ATA_MAX_SECTORS;
}

static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	/* Setup PRD address */
	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
}

static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}

static int inic_port_start(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp;
	u8 tmp;
	int rc;

	/* alloc and initialize private data */
	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* default PRD_CTL value, DMAEN, WR and START off */
	tmp = readb(port_base + PORT_PRD_CTL);
	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
	pp->dfl_prdctl = tmp;

	/* alloc resources; pp is devm-managed, no explicit free on failure */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	init_port(ap);

	return 0;
}

static struct ata_port_operations inic_port_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.bmdma_setup		= inic_bmdma_setup,
	.bmdma_start		= inic_bmdma_start,
	.bmdma_stop		= inic_bmdma_stop,
	.bmdma_status		= inic_bmdma_status,

	.irq_clear		= inic_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= inic_qc_issue,
	.data_xfer		= ata_data_xfer,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.port_resume		= inic_port_resume,

	.port_start		= inic_port_start,
};

static struct ata_port_info inic_port_info = {
	/* For some reason, ATA_PROT_ATAPI is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason, and ignoring ireason
	 * results in a machine lock up.  Tell libata to always prefer
	 * DMA.
	 */
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= 0x1f,	/* pio0-4 */
	.mwdma_mask		= 0x07,	/* mwdma0-2 */
	.udma_mask		= 0x7f,	/* udma0-6 */
	.port_ops		= &inic_port_ops
};
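
/*
 * init_controller() is shared by the probe path (inic_init_one) and
 * the PM resume path (inic_pci_device_resume): soft reset the whole
 * chip, mask and reset both ports, then unmask the global and
 * per-port IRQ bits.
 */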

static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}

#ifdef CONFIG_PM
static int inic_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct inic_host_priv *hpriv = host->private_data;
	void __iomem *mmio_base = host->iomap[MMIO_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = init_controller(mmio_base, hpriv->cached_hctl);
		if (rc)
			return rc;
	}

	ata_host_resume(host);

	return 0;
}
#endif
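
/*
 * PCI resources as this probe routine wires them up: BARs 0/1 and 2/3
 * provide the SFF taskfile and control blocks for ports 0 and 1, and
 * BAR 5 (MMIO_BAR) the MMIO register file, hence pcim_iomap_regions()
 * with mask 0x3f to map BARs 0-5.  BAR 4 is mapped but not otherwise
 * used by this driver.
 */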

static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
	struct ata_host *host;
	struct inic_host_priv *hpriv;
	void __iomem * const *iomap;
	int i, rc;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = iomap = pcim_iomap_table(pdev);

	for (i = 0; i < NR_PORTS; i++) {
		struct ata_ioports *port = &host->ports[i]->ioaddr;
		void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;

		port->cmd_addr = iomap[2 * i];
		port->altstatus_addr =
		port->ctl_addr = (void __iomem *)
			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
		port->scr_addr = port_base + PORT_SCR;

		ata_std_ports(port);
	}

	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

	/* Set dma_mask.  This device doesn't support 64-bit addressing. */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
		return rc;
	}

	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to initialize controller\n");
		return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED,
				 &inic_sht);
}

static const struct pci_device_id inic_pci_tbl[] = {
	{ PCI_VDEVICE(INIT, 0x1622), },
	{ },
};

static struct pci_driver inic_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= inic_pci_tbl,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= inic_pci_device_resume,
#endif
	.probe		= inic_init_one,
	.remove		= ata_pci_remove_one,
};

static int __init inic_init(void)
{
	return pci_register_driver(&inic_pci_driver);
}

static void __exit inic_exit(void)
{
	pci_unregister_driver(&inic_pci_driver);
}

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(inic_init);
module_exit(inic_exit);