xref: /openbmc/linux/drivers/ata/sata_inic162x.c (revision 1fd7a697a37bcd484b130a71326e43cd68ced90c)
1*1fd7a697STejun Heo /*
2*1fd7a697STejun Heo  * sata_inic162x.c - Driver for Initio 162x SATA controllers
3*1fd7a697STejun Heo  *
4*1fd7a697STejun Heo  * Copyright 2006  SUSE Linux Products GmbH
5*1fd7a697STejun Heo  * Copyright 2006  Tejun Heo <teheo@novell.com>
6*1fd7a697STejun Heo  *
7*1fd7a697STejun Heo  * This file is released under GPL v2.
8*1fd7a697STejun Heo  *
9*1fd7a697STejun Heo  * This controller is eccentric and easily locks up if something isn't
10*1fd7a697STejun Heo  * right.  Documentation is available at initio's website but it only
11*1fd7a697STejun Heo  * documents registers (not programming model).
12*1fd7a697STejun Heo  *
13*1fd7a697STejun Heo  * - ATA disks work.
14*1fd7a697STejun Heo  * - Hotplug works.
15*1fd7a697STejun Heo  * - ATAPI read works but burning doesn't.  This thing is really
16*1fd7a697STejun Heo  *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
17*1fd7a697STejun Heo  *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
18*1fd7a697STejun Heo  *   my guest.
19*1fd7a697STejun Heo  * - Both STR and STD work.
20*1fd7a697STejun Heo  */
21*1fd7a697STejun Heo 
22*1fd7a697STejun Heo #include <linux/kernel.h>
23*1fd7a697STejun Heo #include <linux/module.h>
24*1fd7a697STejun Heo #include <linux/pci.h>
25*1fd7a697STejun Heo #include <scsi/scsi_host.h>
26*1fd7a697STejun Heo #include <linux/libata.h>
27*1fd7a697STejun Heo #include <linux/blkdev.h>
28*1fd7a697STejun Heo #include <scsi/scsi_device.h>
29*1fd7a697STejun Heo 
30*1fd7a697STejun Heo #define DRV_NAME	"sata_inic162x"
31*1fd7a697STejun Heo #define DRV_VERSION	"0.1"
32*1fd7a697STejun Heo 
enum {
	MMIO_BAR		= 5,

	NR_PORTS		= 2,

	HOST_CTL		= 0x7c,
	HOST_STAT		= 0x7e,
	HOST_IRQ_STAT		= 0xbc,
	HOST_IRQ_MASK		= 0xbe,

	PORT_SIZE		= 0x40,

	/* registers for ATA TF operation */
	PORT_TF			= 0x00,
	PORT_ALT_STAT		= 0x08,
	PORT_IRQ_STAT		= 0x09,
	PORT_IRQ_MASK		= 0x0a,
	PORT_PRD_CTL		= 0x0b,
	PORT_PRD_ADDR		= 0x0c,
	PORT_PRD_XFERLEN	= 0x10,

	/* IDMA register */
	PORT_IDMA_CTL		= 0x14,

	PORT_SCR		= 0x20,

	/* HOST_CTL bits */
	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
	HCTL_RPGSEL		= (1 << 15), /* register page select */

	HCTL_KNOWN_BITS		= HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
				  HCTL_RPGSEL,

	/* HOST_IRQ_(STAT|MASK) bits */
	HIRQ_PORT0		= (1 << 0),
	HIRQ_PORT1		= (1 << 1),
	HIRQ_SOFT		= (1 << 14),
	HIRQ_GLOBAL		= (1 << 15), /* STAT only */

	/* PORT_IRQ_(STAT|MASK) bits */
	PIRQ_OFFLINE		= (1 << 0),  /* device unplugged */
	PIRQ_ONLINE		= (1 << 1),  /* device plugged */
	PIRQ_COMPLETE		= (1 << 2),  /* completion interrupt */
	PIRQ_FATAL		= (1 << 3),  /* fatal error */
	PIRQ_ATA		= (1 << 4),  /* ATA interrupt */
	PIRQ_REPLY		= (1 << 5),  /* reply FIFO not empty */
	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */

	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,

	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
	PIRQ_MASK_FREEZE	= 0xff,

	/* PORT_PRD_CTL bits */
	PRD_CTL_START		= (1 << 0),
	PRD_CTL_WR		= (1 << 3),
	PRD_CTL_DMAEN		= (1 << 7),  /* DMA enable */

	/* PORT_IDMA_CTL bits */
	IDMA_CTL_RST_ATA	= (1 << 2),  /* hardreset ATA bus */
	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinary */
	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
};
100*1fd7a697STejun Heo 
/* Host-wide private data, hung off ata_host->private_data. */
struct inic_host_priv {
	u16	cached_hctl;	/* HOST_CTL value saved at probe, reused on resume */
};
104*1fd7a697STejun Heo 
/* Per-port private data, hung off ata_port->private_data. */
struct inic_port_priv {
	u8	dfl_prdctl;		/* PRD_CTL with DMAEN/WR/START cleared */
	u8	cached_prdctl;		/* last PRD_CTL value written to the port */
	u8	cached_pirq_mask;	/* last PORT_IRQ_MASK value written */
};
110*1fd7a697STejun Heo 
111*1fd7a697STejun Heo static int inic_slave_config(struct scsi_device *sdev)
112*1fd7a697STejun Heo {
113*1fd7a697STejun Heo 	/* This controller is braindamaged.  dma_boundary is 0xffff
114*1fd7a697STejun Heo 	 * like others but it will lock up the whole machine HARD if
115*1fd7a697STejun Heo 	 * 65536 byte PRD entry is fed.  Reduce maximum segment size.
116*1fd7a697STejun Heo 	 */
117*1fd7a697STejun Heo 	blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);
118*1fd7a697STejun Heo 
119*1fd7a697STejun Heo 	return ata_scsi_slave_config(sdev);
120*1fd7a697STejun Heo }
121*1fd7a697STejun Heo 
/* SCSI host template: standard libata glue except slave_configure,
 * which caps the max segment size to dodge a controller lockup.
 */
static struct scsi_host_template inic_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= inic_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};
141*1fd7a697STejun Heo 
/* Map libata SCR indices to 32-bit word offsets from PORT_SCR.
 * Indices beyond SCR_CONTROL are unmapped and rejected by the
 * scr_read/scr_write callbacks.
 */
static const int scr_map[] = {
	[SCR_STATUS]	= 0,
	[SCR_ERROR]	= 1,
	[SCR_CONTROL]	= 2,
};
147*1fd7a697STejun Heo 
148*1fd7a697STejun Heo static void __iomem * inic_port_base(struct ata_port *ap)
149*1fd7a697STejun Heo {
150*1fd7a697STejun Heo 	return ap->host->mmio_base + ap->port_no * PORT_SIZE;
151*1fd7a697STejun Heo }
152*1fd7a697STejun Heo 
153*1fd7a697STejun Heo static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
154*1fd7a697STejun Heo {
155*1fd7a697STejun Heo 	void __iomem *port_base = inic_port_base(ap);
156*1fd7a697STejun Heo 	struct inic_port_priv *pp = ap->private_data;
157*1fd7a697STejun Heo 
158*1fd7a697STejun Heo 	writeb(mask, port_base + PORT_IRQ_MASK);
159*1fd7a697STejun Heo 	pp->cached_pirq_mask = mask;
160*1fd7a697STejun Heo }
161*1fd7a697STejun Heo 
162*1fd7a697STejun Heo static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
163*1fd7a697STejun Heo {
164*1fd7a697STejun Heo 	struct inic_port_priv *pp = ap->private_data;
165*1fd7a697STejun Heo 
166*1fd7a697STejun Heo 	if (pp->cached_pirq_mask != mask)
167*1fd7a697STejun Heo 		__inic_set_pirq_mask(ap, mask);
168*1fd7a697STejun Heo }
169*1fd7a697STejun Heo 
/* Reset the port's IDMA machinery and leave the port quiesced: IDMA
 * mode off, ATA IRQ enabled, all port IRQ status bits cleared.  Safe
 * to call on a wedged port.
 */
static void inic_reset_port(void __iomem *port_base)
{
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	u16 ctl;

	ctl = readw(idma_ctl);
	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);

	/* mask IRQ and assert reset */
	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
	readw(idma_ctl); /* flush */

	/* give it some time */
	msleep(1);

	/* release reset */
	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);

	/* clear irq */
	writeb(0xff, port_base + PORT_IRQ_STAT);

	/* reenable ATA IRQ, turn off IDMA mode */
	writew(ctl, idma_ctl);
}
194*1fd7a697STejun Heo 
195*1fd7a697STejun Heo static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
196*1fd7a697STejun Heo {
197*1fd7a697STejun Heo 	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
198*1fd7a697STejun Heo 	void __iomem *addr;
199*1fd7a697STejun Heo 	u32 val;
200*1fd7a697STejun Heo 
201*1fd7a697STejun Heo 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
202*1fd7a697STejun Heo 		return 0xffffffffU;
203*1fd7a697STejun Heo 
204*1fd7a697STejun Heo 	addr = scr_addr + scr_map[sc_reg] * 4;
205*1fd7a697STejun Heo 	val = readl(scr_addr + scr_map[sc_reg] * 4);
206*1fd7a697STejun Heo 
207*1fd7a697STejun Heo 	/* this controller has stuck DIAG.N, ignore it */
208*1fd7a697STejun Heo 	if (sc_reg == SCR_ERROR)
209*1fd7a697STejun Heo 		val &= ~SERR_PHYRDY_CHG;
210*1fd7a697STejun Heo 	return val;
211*1fd7a697STejun Heo }
212*1fd7a697STejun Heo 
213*1fd7a697STejun Heo static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
214*1fd7a697STejun Heo {
215*1fd7a697STejun Heo 	void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
216*1fd7a697STejun Heo 	void __iomem *addr;
217*1fd7a697STejun Heo 
218*1fd7a697STejun Heo 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
219*1fd7a697STejun Heo 		return;
220*1fd7a697STejun Heo 
221*1fd7a697STejun Heo 	addr = scr_addr + scr_map[sc_reg] * 4;
222*1fd7a697STejun Heo 	writel(val, scr_addr + scr_map[sc_reg] * 4);
223*1fd7a697STejun Heo }
224*1fd7a697STejun Heo 
/*
 * In TF mode, inic162x is very similar to SFF device.  TF registers
 * function the same.  DMA engine behaves similarly using the same PRD
 * format as BMDMA but different command register, interrupt and event
 * notification methods are used.  The following inic_bmdma_*()
 * functions do the impedance matching.
 */
/* bmdma_setup callback: program transfer length and direction into
 * the port's PRD engine, then issue the taskfile command.  Mirrors
 * SFF BMDMA setup but uses the inic-specific PORT_PRD_* registers.
 */
static void inic_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);
	int rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* make sure device sees PRD table writes */
	wmb();

	/* load transfer length */
	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);

	/* turn on DMA and specify data direction */
	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
	if (!rw)
		/* ATA read: PRD_CTL_WR makes the engine write to memory */
		pp->cached_prdctl |= PRD_CTL_WR;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
254*1fd7a697STejun Heo 
/* bmdma_start callback: set the START bit to kick off the DMA
 * transaction programmed by inic_bmdma_setup().
 */
static void inic_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* start host DMA transaction */
	pp->cached_prdctl |= PRD_CTL_START;
	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
}
265*1fd7a697STejun Heo 
/* bmdma_stop callback: restore the default PRD_CTL value, which has
 * DMAEN/WR/START cleared, thereby stopping the DMA engine.
 */
static void inic_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct inic_port_priv *pp = ap->private_data;
	void __iomem *port_base = inic_port_base(ap);

	/* stop DMA engine */
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
}
275*1fd7a697STejun Heo 
/* bmdma_status callback.  The interrupt handler has already verified
 * the completion event, so just report the DMA interrupt as asserted.
 */
static u8 inic_bmdma_status(struct ata_port *ap)
{
	/* event is already verified by the interrupt handler */
	return ATA_DMA_INTR;
}
281*1fd7a697STejun Heo 
/* irq_clear callback: nothing to do here — port IRQ status is
 * fetched and cleared directly in inic_host_intr().
 */
static void inic_irq_clear(struct ata_port *ap)
{
	/* noop */
}
286*1fd7a697STejun Heo 
/* Per-port interrupt handler.  Fetches and acks PORT_IRQ_STAT, then
 * either completes the active qc through ata_host_intr() or, if any
 * PIRQ_ERR bit is set, kicks EH: freeze on hotplug events, abort on
 * other errors.  Called with host lock held.
 */
static void inic_host_intr(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 irq_stat;

	/* fetch and clear irq */
	irq_stat = readb(port_base + PORT_IRQ_STAT);
	writeb(irq_stat, port_base + PORT_IRQ_STAT);

	if (likely(!(irq_stat & PIRQ_ERR))) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			ata_chk_status(ap);	/* clear ATA interrupt */
			return;
		}

		if (likely(ata_host_intr(ap, qc)))
			return;

		ata_chk_status(ap);	/* clear ATA interrupt */
		ata_port_printk(ap, KERN_WARNING, "unhandled "
				"interrupt, irq_stat=%x\n", irq_stat);
		return;
	}

	/* error */
	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);

	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else
		ata_port_abort(ap);
}
323*1fd7a697STejun Heo 
324*1fd7a697STejun Heo static irqreturn_t inic_interrupt(int irq, void *dev_instance)
325*1fd7a697STejun Heo {
326*1fd7a697STejun Heo 	struct ata_host *host = dev_instance;
327*1fd7a697STejun Heo 	void __iomem *mmio_base = host->mmio_base;
328*1fd7a697STejun Heo 	u16 host_irq_stat;
329*1fd7a697STejun Heo 	int i, handled = 0;;
330*1fd7a697STejun Heo 
331*1fd7a697STejun Heo 	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
332*1fd7a697STejun Heo 
333*1fd7a697STejun Heo 	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
334*1fd7a697STejun Heo 		goto out;
335*1fd7a697STejun Heo 
336*1fd7a697STejun Heo 	spin_lock(&host->lock);
337*1fd7a697STejun Heo 
338*1fd7a697STejun Heo 	for (i = 0; i < NR_PORTS; i++) {
339*1fd7a697STejun Heo 		struct ata_port *ap = host->ports[i];
340*1fd7a697STejun Heo 
341*1fd7a697STejun Heo 		if (!(host_irq_stat & (HIRQ_PORT0 << i)))
342*1fd7a697STejun Heo 			continue;
343*1fd7a697STejun Heo 
344*1fd7a697STejun Heo 		if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
345*1fd7a697STejun Heo 			inic_host_intr(ap);
346*1fd7a697STejun Heo 			handled++;
347*1fd7a697STejun Heo 		} else {
348*1fd7a697STejun Heo 			if (ata_ratelimit())
349*1fd7a697STejun Heo 				dev_printk(KERN_ERR, host->dev, "interrupt "
350*1fd7a697STejun Heo 					   "from disabled port %d (0x%x)\n",
351*1fd7a697STejun Heo 					   i, host_irq_stat);
352*1fd7a697STejun Heo 		}
353*1fd7a697STejun Heo 	}
354*1fd7a697STejun Heo 
355*1fd7a697STejun Heo 	spin_unlock(&host->lock);
356*1fd7a697STejun Heo 
357*1fd7a697STejun Heo  out:
358*1fd7a697STejun Heo 	return IRQ_RETVAL(handled);
359*1fd7a697STejun Heo }
360*1fd7a697STejun Heo 
/* qc_issue callback.  Adjusts the port IRQ mask to match the command
 * type, fast-fails IDENTIFYs issued to a not-yet-initialized port,
 * then falls through to the standard SFF issue path.
 */
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ATA IRQ doesn't wait for DMA transfer completion and vice
	 * versa.  Mask IRQ selectively to detect command completion.
	 * Without it, ATA DMA read command can cause data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to yet uninitialized port locks up the
	 * controller.  Most of the time, this happens for the first
	 * command after reset which are ATA and ATAPI IDENTIFYs.
	 * Fast fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ata_chk_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_qc_issue_prot(qc);
}
392*1fd7a697STejun Heo 
/* freeze callback: mask all port IRQs, then clear anything already
 * pending (ATA status read plus PORT_IRQ_STAT ack).
 */
static void inic_freeze(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}
404*1fd7a697STejun Heo 
/* thaw callback: clear stale IRQ state first, then unmask the
 * default (non-DMA-read) interrupt set.
 */
static void inic_thaw(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	ata_chk_status(ap);
	writeb(0xff, port_base + PORT_IRQ_STAT);

	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	readb(port_base + PORT_IRQ_STAT); /* flush */
}
416*1fd7a697STejun Heo 
417*1fd7a697STejun Heo /*
418*1fd7a697STejun Heo  * SRST and SControl hardreset don't give valid signature on this
419*1fd7a697STejun Heo  * controller.  Only controller specific hardreset mechanism works.
420*1fd7a697STejun Heo  */
/* hardreset callback.  Quiesces the port, pulses IDMA_CTL_RST_ATA to
 * reset the ATA bus, resumes the phy and classifies the attached
 * device.  *class is set to ATA_DEV_NONE when the port is offline or
 * the signature is unrecognized.  Returns 0 on success, -errno on
 * failure.
 */
static int inic_hardreset(struct ata_port *ap, unsigned int *class)
{
	void __iomem *port_base = inic_port_base(ap);
	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	u16 val;
	int rc;

	/* hammer it into sane state */
	inic_reset_port(port_base);

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return 0;
	}

	/* pulse the controller-specific ATA bus reset bit */
	val = readw(idma_ctl);
	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
	readw(idma_ctl);	/* flush */
	msleep(1);
	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);

	rc = sata_phy_resume(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_WARNING, "failed to resume "
				"link for reset (errno=%d)\n", rc);
		return rc;
	}

	/* give the device time to present its signature */
	msleep(150);

	*class = ATA_DEV_NONE;
	if (ata_port_online(ap)) {
		struct ata_taskfile tf;

		if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
			ata_port_printk(ap, KERN_WARNING,
					"device busy after hardreset\n");
			return -EIO;
		}

		/* classify from the post-reset signature taskfile */
		ata_tf_read(ap, &tf);
		*class = ata_dev_classify(&tf);
		if (*class == ATA_DEV_UNKNOWN)
			*class = ATA_DEV_NONE;
	}

	return 0;
}
470*1fd7a697STejun Heo 
/* error_handler callback.  Quiesces the hardware (IDMA reset, PIO HSM
 * idled, DMA engine stopped) before running standard EH with the
 * controller-specific hardreset.
 */
static void inic_error_handler(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp = ap->private_data;
	unsigned long flags;

	/* reset PIO HSM and stop DMA engine */
	inic_reset_port(port_base);

	spin_lock_irqsave(ap->lock, flags);
	ap->hsm_task_state = HSM_ST_IDLE;
	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
	spin_unlock_irqrestore(ap->lock, flags);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
		  ata_std_postreset);
}
489*1fd7a697STejun Heo 
/* post_internal_cmd callback: after a failed internal command, reset
 * the port so stale state can't affect the next command.
 */
static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
	/* make DMA engine forget about the failed command */
	if (qc->err_mask)
		inic_reset_port(inic_port_base(qc->ap));
}
496*1fd7a697STejun Heo 
/* dev_config callback: clamp the per-device transfer size. */
static void inic_dev_config(struct ata_port *ap, struct ata_device *dev)
{
	/* inic can only handle up to LBA28 max sectors */
	if (dev->max_sectors > ATA_MAX_SECTORS)
		dev->max_sectors = ATA_MAX_SECTORS;
}
503*1fd7a697STejun Heo 
/* Program per-port state that must be reloaded after reset/resume:
 * currently just the PRD table bus address.
 */
static void init_port(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);

	/* Setup PRD address */
	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
}
511*1fd7a697STejun Heo 
/* port_resume callback: reprogram port registers lost over suspend. */
static int inic_port_resume(struct ata_port *ap)
{
	init_port(ap);
	return 0;
}
517*1fd7a697STejun Heo 
/* port_start callback: allocate per-port private data, snapshot the
 * default PRD_CTL value (with DMAEN/WR/START cleared), allocate the
 * standard libata port resources and program the port.  Returns 0 or
 * -errno; private data is freed on failure.
 */
static int inic_port_start(struct ata_port *ap)
{
	void __iomem *port_base = inic_port_base(ap);
	struct inic_port_priv *pp;
	u8 tmp;
	int rc;

	/* alloc and initialize private data */
	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* default PRD_CTL value, DMAEN, WR and START off */
	tmp = readb(port_base + PORT_PRD_CTL);
	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
	pp->dfl_prdctl = tmp;

	/* Alloc resources */
	rc = ata_port_start(ap);
	if (rc) {
		kfree(pp);
		return rc;
	}

	init_port(ap);

	return 0;
}
547*1fd7a697STejun Heo 
548*1fd7a697STejun Heo static void inic_port_stop(struct ata_port *ap)
549*1fd7a697STejun Heo {
550*1fd7a697STejun Heo 	ata_port_stop(ap);
551*1fd7a697STejun Heo 	kfree(ap->private_data);
552*1fd7a697STejun Heo }
553*1fd7a697STejun Heo 
/* Port operations: standard SFF taskfile handling plus the
 * inic-specific SCR access, BMDMA impedance matching, interrupt and
 * EH callbacks defined above.
 */
static struct ata_port_operations inic_port_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.scr_read		= inic_scr_read,
	.scr_write		= inic_scr_write,

	.bmdma_setup		= inic_bmdma_setup,
	.bmdma_start		= inic_bmdma_start,
	.bmdma_stop		= inic_bmdma_stop,
	.bmdma_status		= inic_bmdma_status,

	.irq_handler		= inic_interrupt,
	.irq_clear		= inic_irq_clear,

	.qc_prep	 	= ata_qc_prep,
	.qc_issue		= inic_qc_issue,
	.data_xfer		= ata_pio_data_xfer,

	.freeze			= inic_freeze,
	.thaw			= inic_thaw,
	.error_handler		= inic_error_handler,
	.post_internal_cmd	= inic_post_internal_cmd,
	.dev_config		= inic_dev_config,

	.port_resume		= inic_port_resume,

	.port_start		= inic_port_start,
	.port_stop		= inic_port_stop,
	.host_stop		= ata_pci_host_stop
};
589*1fd7a697STejun Heo 
static struct ata_port_info inic_port_info = {
	.sht			= &inic_sht,
	/* For some reason, ATA_PROT_ATAPI is broken on this
	 * controller, and no, PIO_POLLING doesn't fix it.  It somehow
	 * manages to report the wrong ireason and ignoring ireason
	 * results in machine lock up.  Tell libata to always prefer
	 * DMA.
	 */
	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
	.pio_mask		= 0x1f,	/* pio0-4 */
	.mwdma_mask		= 0x07, /* mwdma0-2 */
	.udma_mask		= 0x7f,	/* udma0-6 */
	.port_ops		= &inic_port_ops
};
604*1fd7a697STejun Heo 
/* Bring the whole controller to a known state: soft reset it (polling
 * up to ~10ms for HCTL_SOFTRST to clear), mask and reset both ports,
 * then unmask the global and per-port IRQs at the host level.
 * Returns 0 on success, -EIO if the soft reset never completes.
 */
static int init_controller(void __iomem *mmio_base, u16 hctl)
{
	int i;
	u16 val;

	hctl &= ~HCTL_KNOWN_BITS;

	/* Soft reset whole controller.  Spec says reset duration is 3
	 * PCI clocks, be generous and give it 10ms.
	 */
	writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
	readw(mmio_base + HOST_CTL); /* flush */

	for (i = 0; i < 10; i++) {
		msleep(1);
		val = readw(mmio_base + HOST_CTL);
		if (!(val & HCTL_SOFTRST))
			break;
	}

	if (val & HCTL_SOFTRST)
		return -EIO;

	/* mask all interrupts and reset ports */
	for (i = 0; i < NR_PORTS; i++) {
		void __iomem *port_base = mmio_base + i * PORT_SIZE;

		writeb(0xff, port_base + PORT_IRQ_MASK);
		inic_reset_port(port_base);
	}

	/* port IRQ is masked now, unmask global IRQ */
	writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
	val = readw(mmio_base + HOST_IRQ_MASK);
	val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
	writew(val, mmio_base + HOST_IRQ_MASK);

	return 0;
}
644*1fd7a697STejun Heo 
645*1fd7a697STejun Heo static int inic_pci_device_resume(struct pci_dev *pdev)
646*1fd7a697STejun Heo {
647*1fd7a697STejun Heo 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
648*1fd7a697STejun Heo 	struct inic_host_priv *hpriv = host->private_data;
649*1fd7a697STejun Heo 	void __iomem *mmio_base = host->mmio_base;
650*1fd7a697STejun Heo 	int rc;
651*1fd7a697STejun Heo 
652*1fd7a697STejun Heo 	ata_pci_device_do_resume(pdev);
653*1fd7a697STejun Heo 
654*1fd7a697STejun Heo 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
655*1fd7a697STejun Heo 		printk("XXX\n");
656*1fd7a697STejun Heo 		rc = init_controller(mmio_base, hpriv->cached_hctl);
657*1fd7a697STejun Heo 		if (rc)
658*1fd7a697STejun Heo 			return rc;
659*1fd7a697STejun Heo 	}
660*1fd7a697STejun Heo 
661*1fd7a697STejun Heo 	ata_host_resume(host);
662*1fd7a697STejun Heo 
663*1fd7a697STejun Heo 	return 0;
664*1fd7a697STejun Heo }
665*1fd7a697STejun Heo 
666*1fd7a697STejun Heo static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
667*1fd7a697STejun Heo {
668*1fd7a697STejun Heo 	static int printed_version;
669*1fd7a697STejun Heo 	struct ata_port_info *pinfo = &inic_port_info;
670*1fd7a697STejun Heo 	struct ata_probe_ent *probe_ent;
671*1fd7a697STejun Heo 	struct inic_host_priv *hpriv;
672*1fd7a697STejun Heo 	void __iomem *mmio_base;
673*1fd7a697STejun Heo 	int i, rc;
674*1fd7a697STejun Heo 
675*1fd7a697STejun Heo 	if (!printed_version++)
676*1fd7a697STejun Heo 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
677*1fd7a697STejun Heo 
678*1fd7a697STejun Heo 	rc = pci_enable_device(pdev);
679*1fd7a697STejun Heo 	if (rc)
680*1fd7a697STejun Heo 		return rc;
681*1fd7a697STejun Heo 
682*1fd7a697STejun Heo 	rc = pci_request_regions(pdev, DRV_NAME);
683*1fd7a697STejun Heo 	if (rc)
684*1fd7a697STejun Heo 		goto err_out;
685*1fd7a697STejun Heo 
686*1fd7a697STejun Heo 	rc = -ENOMEM;
687*1fd7a697STejun Heo 	mmio_base = pci_iomap(pdev, MMIO_BAR, 0);
688*1fd7a697STejun Heo 	if (!mmio_base)
689*1fd7a697STejun Heo 		goto err_out_regions;
690*1fd7a697STejun Heo 
691*1fd7a697STejun Heo 	/* Set dma_mask.  This devices doesn't support 64bit addressing. */
692*1fd7a697STejun Heo 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
693*1fd7a697STejun Heo 	if (rc) {
694*1fd7a697STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
695*1fd7a697STejun Heo 			   "32-bit DMA enable failed\n");
696*1fd7a697STejun Heo 		goto err_out_map;
697*1fd7a697STejun Heo 	}
698*1fd7a697STejun Heo 
699*1fd7a697STejun Heo 	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
700*1fd7a697STejun Heo 	if (rc) {
701*1fd7a697STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
702*1fd7a697STejun Heo 			   "32-bit consistent DMA enable failed\n");
703*1fd7a697STejun Heo 		goto err_out_map;
704*1fd7a697STejun Heo 	}
705*1fd7a697STejun Heo 
706*1fd7a697STejun Heo 	rc = -ENOMEM;
707*1fd7a697STejun Heo 	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
708*1fd7a697STejun Heo 	if (!probe_ent)
709*1fd7a697STejun Heo 		goto err_out_map;
710*1fd7a697STejun Heo 
711*1fd7a697STejun Heo 	hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
712*1fd7a697STejun Heo 	if (!hpriv)
713*1fd7a697STejun Heo 		goto err_out_ent;
714*1fd7a697STejun Heo 
715*1fd7a697STejun Heo 	probe_ent->dev = &pdev->dev;
716*1fd7a697STejun Heo 	INIT_LIST_HEAD(&probe_ent->node);
717*1fd7a697STejun Heo 
718*1fd7a697STejun Heo 	probe_ent->sht			= pinfo->sht;
719*1fd7a697STejun Heo 	probe_ent->port_flags		= pinfo->flags;
720*1fd7a697STejun Heo 	probe_ent->pio_mask		= pinfo->pio_mask;
721*1fd7a697STejun Heo 	probe_ent->mwdma_mask		= pinfo->mwdma_mask;
722*1fd7a697STejun Heo 	probe_ent->udma_mask		= pinfo->udma_mask;
723*1fd7a697STejun Heo 	probe_ent->port_ops		= pinfo->port_ops;
724*1fd7a697STejun Heo 	probe_ent->n_ports		= NR_PORTS;
725*1fd7a697STejun Heo 
726*1fd7a697STejun Heo 	probe_ent->irq = pdev->irq;
727*1fd7a697STejun Heo 	probe_ent->irq_flags = SA_SHIRQ;
728*1fd7a697STejun Heo 
729*1fd7a697STejun Heo 	probe_ent->mmio_base = mmio_base;
730*1fd7a697STejun Heo 
731*1fd7a697STejun Heo 	for (i = 0; i < NR_PORTS; i++) {
732*1fd7a697STejun Heo 		struct ata_ioports *port = &probe_ent->port[i];
733*1fd7a697STejun Heo 		unsigned long port_base =
734*1fd7a697STejun Heo 			(unsigned long)mmio_base + i * PORT_SIZE;
735*1fd7a697STejun Heo 
736*1fd7a697STejun Heo 		port->cmd_addr = pci_resource_start(pdev, 2 * i);
737*1fd7a697STejun Heo 		port->altstatus_addr =
738*1fd7a697STejun Heo 		port->ctl_addr =
739*1fd7a697STejun Heo 			pci_resource_start(pdev, 2 * i + 1) | ATA_PCI_CTL_OFS;
740*1fd7a697STejun Heo 		port->scr_addr = port_base + PORT_SCR;
741*1fd7a697STejun Heo 
742*1fd7a697STejun Heo 		ata_std_ports(port);
743*1fd7a697STejun Heo 	}
744*1fd7a697STejun Heo 
745*1fd7a697STejun Heo 	probe_ent->private_data = hpriv;
746*1fd7a697STejun Heo 	hpriv->cached_hctl = readw(mmio_base + HOST_CTL);
747*1fd7a697STejun Heo 
748*1fd7a697STejun Heo 	rc = init_controller(mmio_base, hpriv->cached_hctl);
749*1fd7a697STejun Heo 	if (rc) {
750*1fd7a697STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
751*1fd7a697STejun Heo 			   "failed to initialize controller\n");
752*1fd7a697STejun Heo 		goto err_out_hpriv;
753*1fd7a697STejun Heo 	}
754*1fd7a697STejun Heo 
755*1fd7a697STejun Heo 	pci_set_master(pdev);
756*1fd7a697STejun Heo 
757*1fd7a697STejun Heo 	rc = -ENODEV;
758*1fd7a697STejun Heo 	if (!ata_device_add(probe_ent))
759*1fd7a697STejun Heo 		goto err_out_hpriv;
760*1fd7a697STejun Heo 
761*1fd7a697STejun Heo 	kfree(probe_ent);
762*1fd7a697STejun Heo 
763*1fd7a697STejun Heo 	return 0;
764*1fd7a697STejun Heo 
765*1fd7a697STejun Heo  err_out_hpriv:
766*1fd7a697STejun Heo 	kfree(hpriv);
767*1fd7a697STejun Heo  err_out_ent:
768*1fd7a697STejun Heo 	kfree(probe_ent);
769*1fd7a697STejun Heo  err_out_map:
770*1fd7a697STejun Heo 	pci_iounmap(pdev, mmio_base);
771*1fd7a697STejun Heo  err_out_regions:
772*1fd7a697STejun Heo 	pci_release_regions(pdev);
773*1fd7a697STejun Heo  err_out:
774*1fd7a697STejun Heo 	pci_disable_device(pdev);
775*1fd7a697STejun Heo 	return rc;
776*1fd7a697STejun Heo }
777*1fd7a697STejun Heo 
778*1fd7a697STejun Heo static const struct pci_device_id inic_pci_tbl[] = {
779*1fd7a697STejun Heo 	{ PCI_VDEVICE(INIT, 0x1622), },
780*1fd7a697STejun Heo 	{ },
781*1fd7a697STejun Heo };
782*1fd7a697STejun Heo 
783*1fd7a697STejun Heo static struct pci_driver inic_pci_driver = {
784*1fd7a697STejun Heo 	.name 		= DRV_NAME,
785*1fd7a697STejun Heo 	.id_table	= inic_pci_tbl,
786*1fd7a697STejun Heo 	.suspend	= ata_pci_device_suspend,
787*1fd7a697STejun Heo 	.resume		= inic_pci_device_resume,
788*1fd7a697STejun Heo 	.probe 		= inic_init_one,
789*1fd7a697STejun Heo 	.remove		= ata_pci_remove_one,
790*1fd7a697STejun Heo };
791*1fd7a697STejun Heo 
792*1fd7a697STejun Heo static int __init inic_init(void)
793*1fd7a697STejun Heo {
794*1fd7a697STejun Heo 	return pci_register_driver(&inic_pci_driver);
795*1fd7a697STejun Heo }
796*1fd7a697STejun Heo 
797*1fd7a697STejun Heo static void __exit inic_exit(void)
798*1fd7a697STejun Heo {
799*1fd7a697STejun Heo 	pci_unregister_driver(&inic_pci_driver);
800*1fd7a697STejun Heo }
801*1fd7a697STejun Heo 
802*1fd7a697STejun Heo MODULE_AUTHOR("Tejun Heo");
803*1fd7a697STejun Heo MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
804*1fd7a697STejun Heo MODULE_LICENSE("GPL v2");
805*1fd7a697STejun Heo MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
806*1fd7a697STejun Heo MODULE_VERSION(DRV_VERSION);
807*1fd7a697STejun Heo 
808*1fd7a697STejun Heo module_init(inic_init);
809*1fd7a697STejun Heo module_exit(inic_exit);
810