xref: /openbmc/linux/drivers/ata/sata_mv.c (revision c6fd280766a050b13360d7c2d59a3d6bd3a27d9a)
1*c6fd2807SJeff Garzik /*
2*c6fd2807SJeff Garzik  * sata_mv.c - Marvell SATA support
3*c6fd2807SJeff Garzik  *
4*c6fd2807SJeff Garzik  * Copyright 2005: EMC Corporation, all rights reserved.
5*c6fd2807SJeff Garzik  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6*c6fd2807SJeff Garzik  *
7*c6fd2807SJeff Garzik  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8*c6fd2807SJeff Garzik  *
9*c6fd2807SJeff Garzik  * This program is free software; you can redistribute it and/or modify
10*c6fd2807SJeff Garzik  * it under the terms of the GNU General Public License as published by
11*c6fd2807SJeff Garzik  * the Free Software Foundation; version 2 of the License.
12*c6fd2807SJeff Garzik  *
13*c6fd2807SJeff Garzik  * This program is distributed in the hope that it will be useful,
14*c6fd2807SJeff Garzik  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15*c6fd2807SJeff Garzik  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16*c6fd2807SJeff Garzik  * GNU General Public License for more details.
17*c6fd2807SJeff Garzik  *
18*c6fd2807SJeff Garzik  * You should have received a copy of the GNU General Public License
19*c6fd2807SJeff Garzik  * along with this program; if not, write to the Free Software
20*c6fd2807SJeff Garzik  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21*c6fd2807SJeff Garzik  *
22*c6fd2807SJeff Garzik  */
23*c6fd2807SJeff Garzik 
24*c6fd2807SJeff Garzik #include <linux/kernel.h>
25*c6fd2807SJeff Garzik #include <linux/module.h>
26*c6fd2807SJeff Garzik #include <linux/pci.h>
27*c6fd2807SJeff Garzik #include <linux/init.h>
28*c6fd2807SJeff Garzik #include <linux/blkdev.h>
29*c6fd2807SJeff Garzik #include <linux/delay.h>
30*c6fd2807SJeff Garzik #include <linux/interrupt.h>
31*c6fd2807SJeff Garzik #include <linux/sched.h>
32*c6fd2807SJeff Garzik #include <linux/dma-mapping.h>
33*c6fd2807SJeff Garzik #include <linux/device.h>
34*c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
35*c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
36*c6fd2807SJeff Garzik #include <linux/libata.h>
37*c6fd2807SJeff Garzik #include <asm/io.h>
38*c6fd2807SJeff Garzik 
39*c6fd2807SJeff Garzik #define DRV_NAME	"sata_mv"
40*c6fd2807SJeff Garzik #define DRV_VERSION	"0.7"
41*c6fd2807SJeff Garzik 
/*
 * Chip register map and bit definitions.  All offsets are relative to
 * the start of the primary (memory) BAR unless stated otherwise.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (command request block) control-word fields */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS		= 0x1d58,
	PCI_IRQ_MASK_OFS		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* Main (chip-global) interrupt cause/mask registers */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	/* Main-cause bits the interrupt handler deliberately ignores */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	/* Error conditions treated as fatal (unrecoverable) by the driver */
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
245*c6fd2807SJeff Garzik 
/*
 * Chip-generation tests on hp_flags: 50xx parts (Gen I) set MV_HP_50XX;
 * 60xx parts (Gen II) are identified by its absence; Gen IIE (6042/7042)
 * additionally sets MV_HP_GEN_IIE.
 */
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv) IS_50XX(hpriv)
#define IS_GEN_II(hpriv) IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
251*c6fd2807SJeff Garzik 
/*
 * Separate enum for unsigned constants (the U suffix does not fit in a
 * plain int enum on all compilers alongside the signed values above).
 */
enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* CRQB queue base must be 1KB aligned, hence the low-bit mask */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* CRPB queue base must be 256B aligned */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
262*c6fd2807SJeff Garzik 
/*
 * Board indices; used as driver_data in mv_pci_tbl and as the index
 * into mv_port_info[], so the two must stay in the same order.
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
272*c6fd2807SJeff Garzik 
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* ePRD (SG) table base, low 32 bits */
	__le32			sg_addr_hi;	/* ePRD (SG) table base, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_* control bits */
	__le16			ata_cmd[11];	/* ATA register/command words (CRQB_CMD_*) */
};
280*c6fd2807SJeff Garzik 
/* Gen IIE (6042/7042) variant of the command request block */
struct mv_crqb_iie {
	__le32			addr;		/* ePRD table base, low 32 bits */
	__le32			addr_hi;	/* ePRD table base, high 32 bits */
	__le32			flags;		/* control flags */
	__le32			len;		/* NOTE(review): transfer length? confirm vs datasheet */
	__le32			ata_cmd[4];	/* packed ATA taskfile registers */
};
288*c6fd2807SJeff Garzik 
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;		/* command tag */
	__le16			flags;		/* status in bits 15:8 (CRPB_FLAG_STATUS_SHIFT) */
	__le32			tmstmp;		/* completion timestamp */
};
295*c6fd2807SJeff Garzik 
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* buffer address, low 32 bits */
	__le32			flags_size;	/* byte count + EPRD_FLAG_END_OF_TBL */
	__le32			addr_hi;	/* buffer address, high 32 bits */
	__le32			reserved;
};
303*c6fd2807SJeff Garzik 
/* Per-port private data: queue memory (CPU and DMA views) plus state flags */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue, CPU virtual address */
	dma_addr_t		crqb_dma;	/* request queue, bus address */
	struct mv_crpb		*crpb;		/* response queue, CPU virtual address */
	dma_addr_t		crpb_dma;	/* response queue, bus address */
	struct mv_sg		*sg_tbl;	/* ePRD table, CPU virtual address */
	dma_addr_t		sg_tbl_dma;	/* ePRD table, bus address */
	u32			pp_flags;	/* MV_PP_FLAG_* */
};
313*c6fd2807SJeff Garzik 
/* Per-port PHY signal parameters captured by ->read_preamp() at init */
struct mv_port_signal {
	u32			amps;	/* amplitude setting */
	u32			pre;	/* pre-emphasis setting */
};
318*c6fd2807SJeff Garzik 
struct mv_host_priv;

/*
 * Chip-family-specific hardware hooks; one table per family
 * (mv5xxx_ops / mv6xxx_ops below) selected at probe time.
 */
struct mv_hw_ops {
	/* apply per-port PHY errata workarounds */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	/* configure activity LEDs */
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read pre-amp/signal settings for port @idx into hpriv->signal[] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	/* reset @n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
331*c6fd2807SJeff Garzik 
/* Host-wide private data, hung off the ata host set */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* chip/errata flags */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* family-specific hooks */
};
337*c6fd2807SJeff Garzik 
/* Forward declarations (definitions appear later in this file) */
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

/* 50xx (Gen I) hardware hooks */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

/* 60xx (Gen II/IIE) hardware hooks */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
378*c6fd2807SJeff Garzik 
/* SCSI host template shared by all chip variants */
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	/* half of MV_MAX_SG_CT; presumably leaves room for entry splitting
	 * on 64KB-crossing segments -- TODO confirm */
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
396*c6fd2807SJeff Garzik 
/*
 * Port operations for Gen I (50xx) chips.  SCR access goes through the
 * mv5_* accessors; everything else is shared with the Gen II table.
 */
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
424*c6fd2807SJeff Garzik 
/*
 * Port operations for Gen II (60xx) chips.  Identical to mv5_ops except
 * SCR registers are accessed directly via mv_scr_read/mv_scr_write.
 */
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
452*c6fd2807SJeff Garzik 
453*c6fd2807SJeff Garzik static const struct ata_port_operations mv_iie_ops = {
454*c6fd2807SJeff Garzik 	.port_disable		= ata_port_disable,
455*c6fd2807SJeff Garzik 
456*c6fd2807SJeff Garzik 	.tf_load		= ata_tf_load,
457*c6fd2807SJeff Garzik 	.tf_read		= ata_tf_read,
458*c6fd2807SJeff Garzik 	.check_status		= ata_check_status,
459*c6fd2807SJeff Garzik 	.exec_command		= ata_exec_command,
460*c6fd2807SJeff Garzik 	.dev_select		= ata_std_dev_select,
461*c6fd2807SJeff Garzik 
462*c6fd2807SJeff Garzik 	.phy_reset		= mv_phy_reset,
463*c6fd2807SJeff Garzik 
464*c6fd2807SJeff Garzik 	.qc_prep		= mv_qc_prep_iie,
465*c6fd2807SJeff Garzik 	.qc_issue		= mv_qc_issue,
466*c6fd2807SJeff Garzik 
467*c6fd2807SJeff Garzik 	.eng_timeout		= mv_eng_timeout,
468*c6fd2807SJeff Garzik 
469*c6fd2807SJeff Garzik 	.irq_handler		= mv_interrupt,
470*c6fd2807SJeff Garzik 	.irq_clear		= mv_irq_clear,
471*c6fd2807SJeff Garzik 
472*c6fd2807SJeff Garzik 	.scr_read		= mv_scr_read,
473*c6fd2807SJeff Garzik 	.scr_write		= mv_scr_write,
474*c6fd2807SJeff Garzik 
475*c6fd2807SJeff Garzik 	.port_start		= mv_port_start,
476*c6fd2807SJeff Garzik 	.port_stop		= mv_port_stop,
477*c6fd2807SJeff Garzik 	.host_stop		= mv_host_stop,
478*c6fd2807SJeff Garzik };
479*c6fd2807SJeff Garzik 
/* Per-board capabilities; indexed by enum chip_type -- keep in that order */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
533*c6fd2807SJeff Garzik 
/* PCI ID table; driver_data carries the chip_type board index */
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	/* NOTE(review): chip_7042 exists in mv_port_info but no 7042 PCI
	 * IDs are listed here -- confirm whether that is intentional. */
	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
	{}			/* terminate list */
};
549*c6fd2807SJeff Garzik 
/* PCI driver glue; probe/remove go through the generic libata helpers */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
556*c6fd2807SJeff Garzik 
/* Hardware hooks for Gen I (50xx) chips */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
565*c6fd2807SJeff Garzik 
/* Hardware hooks for Gen II/IIE (60xx/7042) chips */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
574*c6fd2807SJeff Garzik 
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
579*c6fd2807SJeff Garzik 
580*c6fd2807SJeff Garzik 
581*c6fd2807SJeff Garzik /*
582*c6fd2807SJeff Garzik  * Functions
583*c6fd2807SJeff Garzik  */
584*c6fd2807SJeff Garzik 
/*
 * writelfl - "write long, flush": MMIO write followed by a read-back of
 * the same register so the write cannot linger in a PCI posted-write
 * buffer.  The write/read order is essential; do not reorder.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
590*c6fd2807SJeff Garzik 
591*c6fd2807SJeff Garzik static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
592*c6fd2807SJeff Garzik {
593*c6fd2807SJeff Garzik 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
594*c6fd2807SJeff Garzik }
595*c6fd2807SJeff Garzik 
596*c6fd2807SJeff Garzik static inline unsigned int mv_hc_from_port(unsigned int port)
597*c6fd2807SJeff Garzik {
598*c6fd2807SJeff Garzik 	return port >> MV_PORT_HC_SHIFT;
599*c6fd2807SJeff Garzik }
600*c6fd2807SJeff Garzik 
601*c6fd2807SJeff Garzik static inline unsigned int mv_hardport_from_port(unsigned int port)
602*c6fd2807SJeff Garzik {
603*c6fd2807SJeff Garzik 	return port & MV_PORT_MASK;
604*c6fd2807SJeff Garzik }
605*c6fd2807SJeff Garzik 
606*c6fd2807SJeff Garzik static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
607*c6fd2807SJeff Garzik 						 unsigned int port)
608*c6fd2807SJeff Garzik {
609*c6fd2807SJeff Garzik 	return mv_hc_base(base, mv_hc_from_port(port));
610*c6fd2807SJeff Garzik }
611*c6fd2807SJeff Garzik 
612*c6fd2807SJeff Garzik static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
613*c6fd2807SJeff Garzik {
614*c6fd2807SJeff Garzik 	return  mv_hc_base_from_port(base, port) +
615*c6fd2807SJeff Garzik 		MV_SATAHC_ARBTR_REG_SZ +
616*c6fd2807SJeff Garzik 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
617*c6fd2807SJeff Garzik }
618*c6fd2807SJeff Garzik 
619*c6fd2807SJeff Garzik static inline void __iomem *mv_ap_base(struct ata_port *ap)
620*c6fd2807SJeff Garzik {
621*c6fd2807SJeff Garzik 	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
622*c6fd2807SJeff Garzik }
623*c6fd2807SJeff Garzik 
624*c6fd2807SJeff Garzik static inline int mv_get_hc_count(unsigned long host_flags)
625*c6fd2807SJeff Garzik {
626*c6fd2807SJeff Garzik 	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
627*c6fd2807SJeff Garzik }
628*c6fd2807SJeff Garzik 
629*c6fd2807SJeff Garzik static void mv_irq_clear(struct ata_port *ap)
630*c6fd2807SJeff Garzik {
631*c6fd2807SJeff Garzik }
632*c6fd2807SJeff Garzik 
633*c6fd2807SJeff Garzik /**
634*c6fd2807SJeff Garzik  *      mv_start_dma - Enable eDMA engine
635*c6fd2807SJeff Garzik  *      @base: port base address
636*c6fd2807SJeff Garzik  *      @pp: port private data
637*c6fd2807SJeff Garzik  *
638*c6fd2807SJeff Garzik  *      Verify the local cache of the eDMA state is accurate with a
639*c6fd2807SJeff Garzik  *      WARN_ON.
640*c6fd2807SJeff Garzik  *
641*c6fd2807SJeff Garzik  *      LOCKING:
642*c6fd2807SJeff Garzik  *      Inherited from caller.
643*c6fd2807SJeff Garzik  */
644*c6fd2807SJeff Garzik static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
645*c6fd2807SJeff Garzik {
646*c6fd2807SJeff Garzik 	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
647*c6fd2807SJeff Garzik 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
648*c6fd2807SJeff Garzik 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
649*c6fd2807SJeff Garzik 	}
650*c6fd2807SJeff Garzik 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
651*c6fd2807SJeff Garzik }
652*c6fd2807SJeff Garzik 
653*c6fd2807SJeff Garzik /**
654*c6fd2807SJeff Garzik  *      mv_stop_dma - Disable eDMA engine
655*c6fd2807SJeff Garzik  *      @ap: ATA channel to manipulate
656*c6fd2807SJeff Garzik  *
657*c6fd2807SJeff Garzik  *      Verify the local cache of the eDMA state is accurate with a
658*c6fd2807SJeff Garzik  *      WARN_ON.
659*c6fd2807SJeff Garzik  *
660*c6fd2807SJeff Garzik  *      LOCKING:
661*c6fd2807SJeff Garzik  *      Inherited from caller.
662*c6fd2807SJeff Garzik  */
663*c6fd2807SJeff Garzik static void mv_stop_dma(struct ata_port *ap)
664*c6fd2807SJeff Garzik {
665*c6fd2807SJeff Garzik 	void __iomem *port_mmio = mv_ap_base(ap);
666*c6fd2807SJeff Garzik 	struct mv_port_priv *pp	= ap->private_data;
667*c6fd2807SJeff Garzik 	u32 reg;
668*c6fd2807SJeff Garzik 	int i;
669*c6fd2807SJeff Garzik 
670*c6fd2807SJeff Garzik 	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
671*c6fd2807SJeff Garzik 		/* Disable EDMA if active.   The disable bit auto clears.
672*c6fd2807SJeff Garzik 		 */
673*c6fd2807SJeff Garzik 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
674*c6fd2807SJeff Garzik 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
675*c6fd2807SJeff Garzik 	} else {
676*c6fd2807SJeff Garzik 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
677*c6fd2807SJeff Garzik   	}
678*c6fd2807SJeff Garzik 
679*c6fd2807SJeff Garzik 	/* now properly wait for the eDMA to stop */
680*c6fd2807SJeff Garzik 	for (i = 1000; i > 0; i--) {
681*c6fd2807SJeff Garzik 		reg = readl(port_mmio + EDMA_CMD_OFS);
682*c6fd2807SJeff Garzik 		if (!(EDMA_EN & reg)) {
683*c6fd2807SJeff Garzik 			break;
684*c6fd2807SJeff Garzik 		}
685*c6fd2807SJeff Garzik 		udelay(100);
686*c6fd2807SJeff Garzik 	}
687*c6fd2807SJeff Garzik 
688*c6fd2807SJeff Garzik 	if (EDMA_EN & reg) {
689*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
690*c6fd2807SJeff Garzik 		/* FIXME: Consider doing a reset here to recover */
691*c6fd2807SJeff Garzik 	}
692*c6fd2807SJeff Garzik }
693*c6fd2807SJeff Garzik 
694*c6fd2807SJeff Garzik #ifdef ATA_DEBUG
695*c6fd2807SJeff Garzik static void mv_dump_mem(void __iomem *start, unsigned bytes)
696*c6fd2807SJeff Garzik {
697*c6fd2807SJeff Garzik 	int b, w;
698*c6fd2807SJeff Garzik 	for (b = 0; b < bytes; ) {
699*c6fd2807SJeff Garzik 		DPRINTK("%p: ", start + b);
700*c6fd2807SJeff Garzik 		for (w = 0; b < bytes && w < 4; w++) {
701*c6fd2807SJeff Garzik 			printk("%08x ",readl(start + b));
702*c6fd2807SJeff Garzik 			b += sizeof(u32);
703*c6fd2807SJeff Garzik 		}
704*c6fd2807SJeff Garzik 		printk("\n");
705*c6fd2807SJeff Garzik 	}
706*c6fd2807SJeff Garzik }
707*c6fd2807SJeff Garzik #endif
708*c6fd2807SJeff Garzik 
709*c6fd2807SJeff Garzik static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
710*c6fd2807SJeff Garzik {
711*c6fd2807SJeff Garzik #ifdef ATA_DEBUG
712*c6fd2807SJeff Garzik 	int b, w;
713*c6fd2807SJeff Garzik 	u32 dw;
714*c6fd2807SJeff Garzik 	for (b = 0; b < bytes; ) {
715*c6fd2807SJeff Garzik 		DPRINTK("%02x: ", b);
716*c6fd2807SJeff Garzik 		for (w = 0; b < bytes && w < 4; w++) {
717*c6fd2807SJeff Garzik 			(void) pci_read_config_dword(pdev,b,&dw);
718*c6fd2807SJeff Garzik 			printk("%08x ",dw);
719*c6fd2807SJeff Garzik 			b += sizeof(u32);
720*c6fd2807SJeff Garzik 		}
721*c6fd2807SJeff Garzik 		printk("\n");
722*c6fd2807SJeff Garzik 	}
723*c6fd2807SJeff Garzik #endif
724*c6fd2807SJeff Garzik }
725*c6fd2807SJeff Garzik static void mv_dump_all_regs(void __iomem *mmio_base, int port,
726*c6fd2807SJeff Garzik 			     struct pci_dev *pdev)
727*c6fd2807SJeff Garzik {
728*c6fd2807SJeff Garzik #ifdef ATA_DEBUG
729*c6fd2807SJeff Garzik 	void __iomem *hc_base = mv_hc_base(mmio_base,
730*c6fd2807SJeff Garzik 					   port >> MV_PORT_HC_SHIFT);
731*c6fd2807SJeff Garzik 	void __iomem *port_base;
732*c6fd2807SJeff Garzik 	int start_port, num_ports, p, start_hc, num_hcs, hc;
733*c6fd2807SJeff Garzik 
734*c6fd2807SJeff Garzik 	if (0 > port) {
735*c6fd2807SJeff Garzik 		start_hc = start_port = 0;
736*c6fd2807SJeff Garzik 		num_ports = 8;		/* shld be benign for 4 port devs */
737*c6fd2807SJeff Garzik 		num_hcs = 2;
738*c6fd2807SJeff Garzik 	} else {
739*c6fd2807SJeff Garzik 		start_hc = port >> MV_PORT_HC_SHIFT;
740*c6fd2807SJeff Garzik 		start_port = port;
741*c6fd2807SJeff Garzik 		num_ports = num_hcs = 1;
742*c6fd2807SJeff Garzik 	}
743*c6fd2807SJeff Garzik 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
744*c6fd2807SJeff Garzik 		num_ports > 1 ? num_ports - 1 : start_port);
745*c6fd2807SJeff Garzik 
746*c6fd2807SJeff Garzik 	if (NULL != pdev) {
747*c6fd2807SJeff Garzik 		DPRINTK("PCI config space regs:\n");
748*c6fd2807SJeff Garzik 		mv_dump_pci_cfg(pdev, 0x68);
749*c6fd2807SJeff Garzik 	}
750*c6fd2807SJeff Garzik 	DPRINTK("PCI regs:\n");
751*c6fd2807SJeff Garzik 	mv_dump_mem(mmio_base+0xc00, 0x3c);
752*c6fd2807SJeff Garzik 	mv_dump_mem(mmio_base+0xd00, 0x34);
753*c6fd2807SJeff Garzik 	mv_dump_mem(mmio_base+0xf00, 0x4);
754*c6fd2807SJeff Garzik 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
755*c6fd2807SJeff Garzik 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
756*c6fd2807SJeff Garzik 		hc_base = mv_hc_base(mmio_base, hc);
757*c6fd2807SJeff Garzik 		DPRINTK("HC regs (HC %i):\n", hc);
758*c6fd2807SJeff Garzik 		mv_dump_mem(hc_base, 0x1c);
759*c6fd2807SJeff Garzik 	}
760*c6fd2807SJeff Garzik 	for (p = start_port; p < start_port + num_ports; p++) {
761*c6fd2807SJeff Garzik 		port_base = mv_port_base(mmio_base, p);
762*c6fd2807SJeff Garzik 		DPRINTK("EDMA regs (port %i):\n",p);
763*c6fd2807SJeff Garzik 		mv_dump_mem(port_base, 0x54);
764*c6fd2807SJeff Garzik 		DPRINTK("SATA regs (port %i):\n",p);
765*c6fd2807SJeff Garzik 		mv_dump_mem(port_base+0x300, 0x60);
766*c6fd2807SJeff Garzik 	}
767*c6fd2807SJeff Garzik #endif
768*c6fd2807SJeff Garzik }
769*c6fd2807SJeff Garzik 
770*c6fd2807SJeff Garzik static unsigned int mv_scr_offset(unsigned int sc_reg_in)
771*c6fd2807SJeff Garzik {
772*c6fd2807SJeff Garzik 	unsigned int ofs;
773*c6fd2807SJeff Garzik 
774*c6fd2807SJeff Garzik 	switch (sc_reg_in) {
775*c6fd2807SJeff Garzik 	case SCR_STATUS:
776*c6fd2807SJeff Garzik 	case SCR_CONTROL:
777*c6fd2807SJeff Garzik 	case SCR_ERROR:
778*c6fd2807SJeff Garzik 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
779*c6fd2807SJeff Garzik 		break;
780*c6fd2807SJeff Garzik 	case SCR_ACTIVE:
781*c6fd2807SJeff Garzik 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
782*c6fd2807SJeff Garzik 		break;
783*c6fd2807SJeff Garzik 	default:
784*c6fd2807SJeff Garzik 		ofs = 0xffffffffU;
785*c6fd2807SJeff Garzik 		break;
786*c6fd2807SJeff Garzik 	}
787*c6fd2807SJeff Garzik 	return ofs;
788*c6fd2807SJeff Garzik }
789*c6fd2807SJeff Garzik 
790*c6fd2807SJeff Garzik static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
791*c6fd2807SJeff Garzik {
792*c6fd2807SJeff Garzik 	unsigned int ofs = mv_scr_offset(sc_reg_in);
793*c6fd2807SJeff Garzik 
794*c6fd2807SJeff Garzik 	if (0xffffffffU != ofs) {
795*c6fd2807SJeff Garzik 		return readl(mv_ap_base(ap) + ofs);
796*c6fd2807SJeff Garzik 	} else {
797*c6fd2807SJeff Garzik 		return (u32) ofs;
798*c6fd2807SJeff Garzik 	}
799*c6fd2807SJeff Garzik }
800*c6fd2807SJeff Garzik 
801*c6fd2807SJeff Garzik static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
802*c6fd2807SJeff Garzik {
803*c6fd2807SJeff Garzik 	unsigned int ofs = mv_scr_offset(sc_reg_in);
804*c6fd2807SJeff Garzik 
805*c6fd2807SJeff Garzik 	if (0xffffffffU != ofs) {
806*c6fd2807SJeff Garzik 		writelfl(val, mv_ap_base(ap) + ofs);
807*c6fd2807SJeff Garzik 	}
808*c6fd2807SJeff Garzik }
809*c6fd2807SJeff Garzik 
810*c6fd2807SJeff Garzik /**
811*c6fd2807SJeff Garzik  *      mv_host_stop - Host specific cleanup/stop routine.
812*c6fd2807SJeff Garzik  *      @host_set: host data structure
813*c6fd2807SJeff Garzik  *
814*c6fd2807SJeff Garzik  *      Disable ints, cleanup host memory, call general purpose
815*c6fd2807SJeff Garzik  *      host_stop.
816*c6fd2807SJeff Garzik  *
817*c6fd2807SJeff Garzik  *      LOCKING:
818*c6fd2807SJeff Garzik  *      Inherited from caller.
819*c6fd2807SJeff Garzik  */
820*c6fd2807SJeff Garzik static void mv_host_stop(struct ata_host_set *host_set)
821*c6fd2807SJeff Garzik {
822*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv = host_set->private_data;
823*c6fd2807SJeff Garzik 	struct pci_dev *pdev = to_pci_dev(host_set->dev);
824*c6fd2807SJeff Garzik 
825*c6fd2807SJeff Garzik 	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
826*c6fd2807SJeff Garzik 		pci_disable_msi(pdev);
827*c6fd2807SJeff Garzik 	} else {
828*c6fd2807SJeff Garzik 		pci_intx(pdev, 0);
829*c6fd2807SJeff Garzik 	}
830*c6fd2807SJeff Garzik 	kfree(hpriv);
831*c6fd2807SJeff Garzik 	ata_host_stop(host_set);
832*c6fd2807SJeff Garzik }
833*c6fd2807SJeff Garzik 
834*c6fd2807SJeff Garzik static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
835*c6fd2807SJeff Garzik {
836*c6fd2807SJeff Garzik 	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
837*c6fd2807SJeff Garzik }
838*c6fd2807SJeff Garzik 
839*c6fd2807SJeff Garzik static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
840*c6fd2807SJeff Garzik {
841*c6fd2807SJeff Garzik 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
842*c6fd2807SJeff Garzik 
843*c6fd2807SJeff Garzik 	/* set up non-NCQ EDMA configuration */
844*c6fd2807SJeff Garzik 	cfg &= ~0x1f;		/* clear queue depth */
845*c6fd2807SJeff Garzik 	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
846*c6fd2807SJeff Garzik 	cfg &= ~(1 << 9);	/* disable equeue */
847*c6fd2807SJeff Garzik 
848*c6fd2807SJeff Garzik 	if (IS_GEN_I(hpriv))
849*c6fd2807SJeff Garzik 		cfg |= (1 << 8);	/* enab config burst size mask */
850*c6fd2807SJeff Garzik 
851*c6fd2807SJeff Garzik 	else if (IS_GEN_II(hpriv))
852*c6fd2807SJeff Garzik 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
853*c6fd2807SJeff Garzik 
854*c6fd2807SJeff Garzik 	else if (IS_GEN_IIE(hpriv)) {
855*c6fd2807SJeff Garzik 		cfg |= (1 << 23);	/* dis RX PM port mask */
856*c6fd2807SJeff Garzik 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
857*c6fd2807SJeff Garzik 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
858*c6fd2807SJeff Garzik 		cfg |= (1 << 18);	/* enab early completion */
859*c6fd2807SJeff Garzik 		cfg |= (1 << 17);	/* enab host q cache */
860*c6fd2807SJeff Garzik 		cfg |= (1 << 22);	/* enab cutthrough */
861*c6fd2807SJeff Garzik 	}
862*c6fd2807SJeff Garzik 
863*c6fd2807SJeff Garzik 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
864*c6fd2807SJeff Garzik }
865*c6fd2807SJeff Garzik 
866*c6fd2807SJeff Garzik /**
867*c6fd2807SJeff Garzik  *      mv_port_start - Port specific init/start routine.
868*c6fd2807SJeff Garzik  *      @ap: ATA channel to manipulate
869*c6fd2807SJeff Garzik  *
870*c6fd2807SJeff Garzik  *      Allocate and point to DMA memory, init port private memory,
871*c6fd2807SJeff Garzik  *      zero indices.
872*c6fd2807SJeff Garzik  *
873*c6fd2807SJeff Garzik  *      LOCKING:
874*c6fd2807SJeff Garzik  *      Inherited from caller.
875*c6fd2807SJeff Garzik  */
876*c6fd2807SJeff Garzik static int mv_port_start(struct ata_port *ap)
877*c6fd2807SJeff Garzik {
878*c6fd2807SJeff Garzik 	struct device *dev = ap->host_set->dev;
879*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv = ap->host_set->private_data;
880*c6fd2807SJeff Garzik 	struct mv_port_priv *pp;
881*c6fd2807SJeff Garzik 	void __iomem *port_mmio = mv_ap_base(ap);
882*c6fd2807SJeff Garzik 	void *mem;
883*c6fd2807SJeff Garzik 	dma_addr_t mem_dma;
884*c6fd2807SJeff Garzik 	int rc = -ENOMEM;
885*c6fd2807SJeff Garzik 
886*c6fd2807SJeff Garzik 	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
887*c6fd2807SJeff Garzik 	if (!pp)
888*c6fd2807SJeff Garzik 		goto err_out;
889*c6fd2807SJeff Garzik 	memset(pp, 0, sizeof(*pp));
890*c6fd2807SJeff Garzik 
891*c6fd2807SJeff Garzik 	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
892*c6fd2807SJeff Garzik 				 GFP_KERNEL);
893*c6fd2807SJeff Garzik 	if (!mem)
894*c6fd2807SJeff Garzik 		goto err_out_pp;
895*c6fd2807SJeff Garzik 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
896*c6fd2807SJeff Garzik 
897*c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
898*c6fd2807SJeff Garzik 	if (rc)
899*c6fd2807SJeff Garzik 		goto err_out_priv;
900*c6fd2807SJeff Garzik 
901*c6fd2807SJeff Garzik 	/* First item in chunk of DMA memory:
902*c6fd2807SJeff Garzik 	 * 32-slot command request table (CRQB), 32 bytes each in size
903*c6fd2807SJeff Garzik 	 */
904*c6fd2807SJeff Garzik 	pp->crqb = mem;
905*c6fd2807SJeff Garzik 	pp->crqb_dma = mem_dma;
906*c6fd2807SJeff Garzik 	mem += MV_CRQB_Q_SZ;
907*c6fd2807SJeff Garzik 	mem_dma += MV_CRQB_Q_SZ;
908*c6fd2807SJeff Garzik 
909*c6fd2807SJeff Garzik 	/* Second item:
910*c6fd2807SJeff Garzik 	 * 32-slot command response table (CRPB), 8 bytes each in size
911*c6fd2807SJeff Garzik 	 */
912*c6fd2807SJeff Garzik 	pp->crpb = mem;
913*c6fd2807SJeff Garzik 	pp->crpb_dma = mem_dma;
914*c6fd2807SJeff Garzik 	mem += MV_CRPB_Q_SZ;
915*c6fd2807SJeff Garzik 	mem_dma += MV_CRPB_Q_SZ;
916*c6fd2807SJeff Garzik 
917*c6fd2807SJeff Garzik 	/* Third item:
918*c6fd2807SJeff Garzik 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
919*c6fd2807SJeff Garzik 	 */
920*c6fd2807SJeff Garzik 	pp->sg_tbl = mem;
921*c6fd2807SJeff Garzik 	pp->sg_tbl_dma = mem_dma;
922*c6fd2807SJeff Garzik 
923*c6fd2807SJeff Garzik 	mv_edma_cfg(hpriv, port_mmio);
924*c6fd2807SJeff Garzik 
925*c6fd2807SJeff Garzik 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
926*c6fd2807SJeff Garzik 	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
927*c6fd2807SJeff Garzik 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
928*c6fd2807SJeff Garzik 
929*c6fd2807SJeff Garzik 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
930*c6fd2807SJeff Garzik 		writelfl(pp->crqb_dma & 0xffffffff,
931*c6fd2807SJeff Garzik 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
932*c6fd2807SJeff Garzik 	else
933*c6fd2807SJeff Garzik 		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
934*c6fd2807SJeff Garzik 
935*c6fd2807SJeff Garzik 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
936*c6fd2807SJeff Garzik 
937*c6fd2807SJeff Garzik 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
938*c6fd2807SJeff Garzik 		writelfl(pp->crpb_dma & 0xffffffff,
939*c6fd2807SJeff Garzik 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
940*c6fd2807SJeff Garzik 	else
941*c6fd2807SJeff Garzik 		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
942*c6fd2807SJeff Garzik 
943*c6fd2807SJeff Garzik 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
944*c6fd2807SJeff Garzik 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
945*c6fd2807SJeff Garzik 
946*c6fd2807SJeff Garzik 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
947*c6fd2807SJeff Garzik 	 * we'll be unable to send non-data, PIO, etc due to restricted access
948*c6fd2807SJeff Garzik 	 * to shadow regs.
949*c6fd2807SJeff Garzik 	 */
950*c6fd2807SJeff Garzik 	ap->private_data = pp;
951*c6fd2807SJeff Garzik 	return 0;
952*c6fd2807SJeff Garzik 
953*c6fd2807SJeff Garzik err_out_priv:
954*c6fd2807SJeff Garzik 	mv_priv_free(pp, dev);
955*c6fd2807SJeff Garzik err_out_pp:
956*c6fd2807SJeff Garzik 	kfree(pp);
957*c6fd2807SJeff Garzik err_out:
958*c6fd2807SJeff Garzik 	return rc;
959*c6fd2807SJeff Garzik }
960*c6fd2807SJeff Garzik 
961*c6fd2807SJeff Garzik /**
962*c6fd2807SJeff Garzik  *      mv_port_stop - Port specific cleanup/stop routine.
963*c6fd2807SJeff Garzik  *      @ap: ATA channel to manipulate
964*c6fd2807SJeff Garzik  *
965*c6fd2807SJeff Garzik  *      Stop DMA, cleanup port memory.
966*c6fd2807SJeff Garzik  *
967*c6fd2807SJeff Garzik  *      LOCKING:
968*c6fd2807SJeff Garzik  *      This routine uses the host_set lock to protect the DMA stop.
969*c6fd2807SJeff Garzik  */
970*c6fd2807SJeff Garzik static void mv_port_stop(struct ata_port *ap)
971*c6fd2807SJeff Garzik {
972*c6fd2807SJeff Garzik 	struct device *dev = ap->host_set->dev;
973*c6fd2807SJeff Garzik 	struct mv_port_priv *pp = ap->private_data;
974*c6fd2807SJeff Garzik 	unsigned long flags;
975*c6fd2807SJeff Garzik 
976*c6fd2807SJeff Garzik 	spin_lock_irqsave(&ap->host_set->lock, flags);
977*c6fd2807SJeff Garzik 	mv_stop_dma(ap);
978*c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
979*c6fd2807SJeff Garzik 
980*c6fd2807SJeff Garzik 	ap->private_data = NULL;
981*c6fd2807SJeff Garzik 	ata_pad_free(ap, dev);
982*c6fd2807SJeff Garzik 	mv_priv_free(pp, dev);
983*c6fd2807SJeff Garzik 	kfree(pp);
984*c6fd2807SJeff Garzik }
985*c6fd2807SJeff Garzik 
986*c6fd2807SJeff Garzik /**
987*c6fd2807SJeff Garzik  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
988*c6fd2807SJeff Garzik  *      @qc: queued command whose SG list to source from
989*c6fd2807SJeff Garzik  *
990*c6fd2807SJeff Garzik  *      Populate the SG list and mark the last entry.
991*c6fd2807SJeff Garzik  *
992*c6fd2807SJeff Garzik  *      LOCKING:
993*c6fd2807SJeff Garzik  *      Inherited from caller.
994*c6fd2807SJeff Garzik  */
995*c6fd2807SJeff Garzik static void mv_fill_sg(struct ata_queued_cmd *qc)
996*c6fd2807SJeff Garzik {
997*c6fd2807SJeff Garzik 	struct mv_port_priv *pp = qc->ap->private_data;
998*c6fd2807SJeff Garzik 	unsigned int i = 0;
999*c6fd2807SJeff Garzik 	struct scatterlist *sg;
1000*c6fd2807SJeff Garzik 
1001*c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
1002*c6fd2807SJeff Garzik 		dma_addr_t addr;
1003*c6fd2807SJeff Garzik 		u32 sg_len, len, offset;
1004*c6fd2807SJeff Garzik 
1005*c6fd2807SJeff Garzik 		addr = sg_dma_address(sg);
1006*c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
1007*c6fd2807SJeff Garzik 
1008*c6fd2807SJeff Garzik 		while (sg_len) {
1009*c6fd2807SJeff Garzik 			offset = addr & MV_DMA_BOUNDARY;
1010*c6fd2807SJeff Garzik 			len = sg_len;
1011*c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
1012*c6fd2807SJeff Garzik 				len = 0x10000 - offset;
1013*c6fd2807SJeff Garzik 
1014*c6fd2807SJeff Garzik 			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
1015*c6fd2807SJeff Garzik 			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1016*c6fd2807SJeff Garzik 			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
1017*c6fd2807SJeff Garzik 
1018*c6fd2807SJeff Garzik 			sg_len -= len;
1019*c6fd2807SJeff Garzik 			addr += len;
1020*c6fd2807SJeff Garzik 
1021*c6fd2807SJeff Garzik 			if (!sg_len && ata_sg_is_last(sg, qc))
1022*c6fd2807SJeff Garzik 				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1023*c6fd2807SJeff Garzik 
1024*c6fd2807SJeff Garzik 			i++;
1025*c6fd2807SJeff Garzik 		}
1026*c6fd2807SJeff Garzik 	}
1027*c6fd2807SJeff Garzik }
1028*c6fd2807SJeff Garzik 
1029*c6fd2807SJeff Garzik static inline unsigned mv_inc_q_index(unsigned index)
1030*c6fd2807SJeff Garzik {
1031*c6fd2807SJeff Garzik 	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1032*c6fd2807SJeff Garzik }
1033*c6fd2807SJeff Garzik 
1034*c6fd2807SJeff Garzik static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1035*c6fd2807SJeff Garzik {
1036*c6fd2807SJeff Garzik 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1037*c6fd2807SJeff Garzik 		(last ? CRQB_CMD_LAST : 0);
1038*c6fd2807SJeff Garzik 	*cmdw = cpu_to_le16(tmp);
1039*c6fd2807SJeff Garzik }
1040*c6fd2807SJeff Garzik 
1041*c6fd2807SJeff Garzik /**
1042*c6fd2807SJeff Garzik  *      mv_qc_prep - Host specific command preparation.
1043*c6fd2807SJeff Garzik  *      @qc: queued command to prepare
1044*c6fd2807SJeff Garzik  *
1045*c6fd2807SJeff Garzik  *      This routine simply redirects to the general purpose routine
1046*c6fd2807SJeff Garzik  *      if command is not DMA.  Else, it handles prep of the CRQB
1047*c6fd2807SJeff Garzik  *      (command request block), does some sanity checking, and calls
1048*c6fd2807SJeff Garzik  *      the SG load routine.
1049*c6fd2807SJeff Garzik  *
1050*c6fd2807SJeff Garzik  *      LOCKING:
1051*c6fd2807SJeff Garzik  *      Inherited from caller.
1052*c6fd2807SJeff Garzik  */
1053*c6fd2807SJeff Garzik static void mv_qc_prep(struct ata_queued_cmd *qc)
1054*c6fd2807SJeff Garzik {
1055*c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1056*c6fd2807SJeff Garzik 	struct mv_port_priv *pp = ap->private_data;
1057*c6fd2807SJeff Garzik 	__le16 *cw;
1058*c6fd2807SJeff Garzik 	struct ata_taskfile *tf;
1059*c6fd2807SJeff Garzik 	u16 flags = 0;
1060*c6fd2807SJeff Garzik 	unsigned in_index;
1061*c6fd2807SJeff Garzik 
1062*c6fd2807SJeff Garzik  	if (ATA_PROT_DMA != qc->tf.protocol)
1063*c6fd2807SJeff Garzik 		return;
1064*c6fd2807SJeff Garzik 
1065*c6fd2807SJeff Garzik 	/* Fill in command request block
1066*c6fd2807SJeff Garzik 	 */
1067*c6fd2807SJeff Garzik 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1068*c6fd2807SJeff Garzik 		flags |= CRQB_FLAG_READ;
1069*c6fd2807SJeff Garzik 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1070*c6fd2807SJeff Garzik 	flags |= qc->tag << CRQB_TAG_SHIFT;
1071*c6fd2807SJeff Garzik 
1072*c6fd2807SJeff Garzik 	/* get current queue index from hardware */
1073*c6fd2807SJeff Garzik 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1074*c6fd2807SJeff Garzik 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1075*c6fd2807SJeff Garzik 
1076*c6fd2807SJeff Garzik 	pp->crqb[in_index].sg_addr =
1077*c6fd2807SJeff Garzik 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1078*c6fd2807SJeff Garzik 	pp->crqb[in_index].sg_addr_hi =
1079*c6fd2807SJeff Garzik 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1080*c6fd2807SJeff Garzik 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1081*c6fd2807SJeff Garzik 
1082*c6fd2807SJeff Garzik 	cw = &pp->crqb[in_index].ata_cmd[0];
1083*c6fd2807SJeff Garzik 	tf = &qc->tf;
1084*c6fd2807SJeff Garzik 
1085*c6fd2807SJeff Garzik 	/* Sadly, the CRQB cannot accomodate all registers--there are
1086*c6fd2807SJeff Garzik 	 * only 11 bytes...so we must pick and choose required
1087*c6fd2807SJeff Garzik 	 * registers based on the command.  So, we drop feature and
1088*c6fd2807SJeff Garzik 	 * hob_feature for [RW] DMA commands, but they are needed for
1089*c6fd2807SJeff Garzik 	 * NCQ.  NCQ will drop hob_nsect.
1090*c6fd2807SJeff Garzik 	 */
1091*c6fd2807SJeff Garzik 	switch (tf->command) {
1092*c6fd2807SJeff Garzik 	case ATA_CMD_READ:
1093*c6fd2807SJeff Garzik 	case ATA_CMD_READ_EXT:
1094*c6fd2807SJeff Garzik 	case ATA_CMD_WRITE:
1095*c6fd2807SJeff Garzik 	case ATA_CMD_WRITE_EXT:
1096*c6fd2807SJeff Garzik 	case ATA_CMD_WRITE_FUA_EXT:
1097*c6fd2807SJeff Garzik 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1098*c6fd2807SJeff Garzik 		break;
1099*c6fd2807SJeff Garzik #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1100*c6fd2807SJeff Garzik 	case ATA_CMD_FPDMA_READ:
1101*c6fd2807SJeff Garzik 	case ATA_CMD_FPDMA_WRITE:
1102*c6fd2807SJeff Garzik 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1103*c6fd2807SJeff Garzik 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1104*c6fd2807SJeff Garzik 		break;
1105*c6fd2807SJeff Garzik #endif				/* FIXME: remove this line when NCQ added */
1106*c6fd2807SJeff Garzik 	default:
1107*c6fd2807SJeff Garzik 		/* The only other commands EDMA supports in non-queued and
1108*c6fd2807SJeff Garzik 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1109*c6fd2807SJeff Garzik 		 * of which are defined/used by Linux.  If we get here, this
1110*c6fd2807SJeff Garzik 		 * driver needs work.
1111*c6fd2807SJeff Garzik 		 *
1112*c6fd2807SJeff Garzik 		 * FIXME: modify libata to give qc_prep a return value and
1113*c6fd2807SJeff Garzik 		 * return error here.
1114*c6fd2807SJeff Garzik 		 */
1115*c6fd2807SJeff Garzik 		BUG_ON(tf->command);
1116*c6fd2807SJeff Garzik 		break;
1117*c6fd2807SJeff Garzik 	}
1118*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1119*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1120*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1121*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1122*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1123*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1124*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1125*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1126*c6fd2807SJeff Garzik 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1127*c6fd2807SJeff Garzik 
1128*c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1129*c6fd2807SJeff Garzik 		return;
1130*c6fd2807SJeff Garzik 	mv_fill_sg(qc);
1131*c6fd2807SJeff Garzik }
1132*c6fd2807SJeff Garzik 
1133*c6fd2807SJeff Garzik /**
1134*c6fd2807SJeff Garzik  *      mv_qc_prep_iie - Host specific command preparation.
1135*c6fd2807SJeff Garzik  *      @qc: queued command to prepare
1136*c6fd2807SJeff Garzik  *
1137*c6fd2807SJeff Garzik  *      This routine simply redirects to the general purpose routine
1138*c6fd2807SJeff Garzik  *      if command is not DMA.  Else, it handles prep of the CRQB
1139*c6fd2807SJeff Garzik  *      (command request block), does some sanity checking, and calls
1140*c6fd2807SJeff Garzik  *      the SG load routine.
1141*c6fd2807SJeff Garzik  *
1142*c6fd2807SJeff Garzik  *      LOCKING:
1143*c6fd2807SJeff Garzik  *      Inherited from caller.
1144*c6fd2807SJeff Garzik  */
1145*c6fd2807SJeff Garzik static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1146*c6fd2807SJeff Garzik {
1147*c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1148*c6fd2807SJeff Garzik 	struct mv_port_priv *pp = ap->private_data;
1149*c6fd2807SJeff Garzik 	struct mv_crqb_iie *crqb;
1150*c6fd2807SJeff Garzik 	struct ata_taskfile *tf;
1151*c6fd2807SJeff Garzik 	unsigned in_index;
1152*c6fd2807SJeff Garzik 	u32 flags = 0;
1153*c6fd2807SJeff Garzik 
1154*c6fd2807SJeff Garzik  	if (ATA_PROT_DMA != qc->tf.protocol)
1155*c6fd2807SJeff Garzik 		return;
1156*c6fd2807SJeff Garzik 
1157*c6fd2807SJeff Garzik 	/* Fill in Gen IIE command request block
1158*c6fd2807SJeff Garzik 	 */
1159*c6fd2807SJeff Garzik 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1160*c6fd2807SJeff Garzik 		flags |= CRQB_FLAG_READ;
1161*c6fd2807SJeff Garzik 
1162*c6fd2807SJeff Garzik 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1163*c6fd2807SJeff Garzik 	flags |= qc->tag << CRQB_TAG_SHIFT;
1164*c6fd2807SJeff Garzik 
1165*c6fd2807SJeff Garzik 	/* get current queue index from hardware */
1166*c6fd2807SJeff Garzik 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1167*c6fd2807SJeff Garzik 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1168*c6fd2807SJeff Garzik 
1169*c6fd2807SJeff Garzik 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1170*c6fd2807SJeff Garzik 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1171*c6fd2807SJeff Garzik 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1172*c6fd2807SJeff Garzik 	crqb->flags = cpu_to_le32(flags);
1173*c6fd2807SJeff Garzik 
1174*c6fd2807SJeff Garzik 	tf = &qc->tf;
1175*c6fd2807SJeff Garzik 	crqb->ata_cmd[0] = cpu_to_le32(
1176*c6fd2807SJeff Garzik 			(tf->command << 16) |
1177*c6fd2807SJeff Garzik 			(tf->feature << 24)
1178*c6fd2807SJeff Garzik 		);
1179*c6fd2807SJeff Garzik 	crqb->ata_cmd[1] = cpu_to_le32(
1180*c6fd2807SJeff Garzik 			(tf->lbal << 0) |
1181*c6fd2807SJeff Garzik 			(tf->lbam << 8) |
1182*c6fd2807SJeff Garzik 			(tf->lbah << 16) |
1183*c6fd2807SJeff Garzik 			(tf->device << 24)
1184*c6fd2807SJeff Garzik 		);
1185*c6fd2807SJeff Garzik 	crqb->ata_cmd[2] = cpu_to_le32(
1186*c6fd2807SJeff Garzik 			(tf->hob_lbal << 0) |
1187*c6fd2807SJeff Garzik 			(tf->hob_lbam << 8) |
1188*c6fd2807SJeff Garzik 			(tf->hob_lbah << 16) |
1189*c6fd2807SJeff Garzik 			(tf->hob_feature << 24)
1190*c6fd2807SJeff Garzik 		);
1191*c6fd2807SJeff Garzik 	crqb->ata_cmd[3] = cpu_to_le32(
1192*c6fd2807SJeff Garzik 			(tf->nsect << 0) |
1193*c6fd2807SJeff Garzik 			(tf->hob_nsect << 8)
1194*c6fd2807SJeff Garzik 		);
1195*c6fd2807SJeff Garzik 
1196*c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1197*c6fd2807SJeff Garzik 		return;
1198*c6fd2807SJeff Garzik 	mv_fill_sg(qc);
1199*c6fd2807SJeff Garzik }
1200*c6fd2807SJeff Garzik 
1201*c6fd2807SJeff Garzik /**
1202*c6fd2807SJeff Garzik  *      mv_qc_issue - Initiate a command to the host
1203*c6fd2807SJeff Garzik  *      @qc: queued command to start
1204*c6fd2807SJeff Garzik  *
1205*c6fd2807SJeff Garzik  *      This routine simply redirects to the general purpose routine
1206*c6fd2807SJeff Garzik  *      if command is not DMA.  Else, it sanity checks our local
1207*c6fd2807SJeff Garzik  *      caches of the request producer/consumer indices then enables
1208*c6fd2807SJeff Garzik  *      DMA and bumps the request producer index.
1209*c6fd2807SJeff Garzik  *
1210*c6fd2807SJeff Garzik  *      LOCKING:
1211*c6fd2807SJeff Garzik  *      Inherited from caller.
1212*c6fd2807SJeff Garzik  */
1213*c6fd2807SJeff Garzik static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1214*c6fd2807SJeff Garzik {
1215*c6fd2807SJeff Garzik 	void __iomem *port_mmio = mv_ap_base(qc->ap);
1216*c6fd2807SJeff Garzik 	struct mv_port_priv *pp = qc->ap->private_data;
1217*c6fd2807SJeff Garzik 	unsigned in_index;
1218*c6fd2807SJeff Garzik 	u32 in_ptr;
1219*c6fd2807SJeff Garzik 
1220*c6fd2807SJeff Garzik 	if (ATA_PROT_DMA != qc->tf.protocol) {
1221*c6fd2807SJeff Garzik 		/* We're about to send a non-EDMA capable command to the
1222*c6fd2807SJeff Garzik 		 * port.  Turn off EDMA so there won't be problems accessing
1223*c6fd2807SJeff Garzik 		 * shadow block, etc registers.
1224*c6fd2807SJeff Garzik 		 */
1225*c6fd2807SJeff Garzik 		mv_stop_dma(qc->ap);
1226*c6fd2807SJeff Garzik 		return ata_qc_issue_prot(qc);
1227*c6fd2807SJeff Garzik 	}
1228*c6fd2807SJeff Garzik 
1229*c6fd2807SJeff Garzik 	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1230*c6fd2807SJeff Garzik 	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1231*c6fd2807SJeff Garzik 
1232*c6fd2807SJeff Garzik 	/* until we do queuing, the queue should be empty at this point */
1233*c6fd2807SJeff Garzik 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1234*c6fd2807SJeff Garzik 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1235*c6fd2807SJeff Garzik 
1236*c6fd2807SJeff Garzik 	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
1237*c6fd2807SJeff Garzik 
1238*c6fd2807SJeff Garzik 	mv_start_dma(port_mmio, pp);
1239*c6fd2807SJeff Garzik 
1240*c6fd2807SJeff Garzik 	/* and write the request in pointer to kick the EDMA to life */
1241*c6fd2807SJeff Garzik 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1242*c6fd2807SJeff Garzik 	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1243*c6fd2807SJeff Garzik 	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1244*c6fd2807SJeff Garzik 
1245*c6fd2807SJeff Garzik 	return 0;
1246*c6fd2807SJeff Garzik }
1247*c6fd2807SJeff Garzik 
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	/* consumer index lives in the response queue out-pointer register */
	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* CRPB flags are little-endian; the ATA status byte sits above
	 * CRPB_FLAG_STATUS_SHIFT
	 */
	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
1290*c6fd2807SJeff Garzik 
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		/* read SError and write the value back to clear the
		 * latched error bits
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		/* hardware disabled EDMA itself; keep cached flag in sync */
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
1330*c6fd2807SJeff Garzik 
1331*c6fd2807SJeff Garzik /**
1332*c6fd2807SJeff Garzik  *      mv_host_intr - Handle all interrupts on the given host controller
1333*c6fd2807SJeff Garzik  *      @host_set: host specific structure
1334*c6fd2807SJeff Garzik  *      @relevant: port error bits relevant to this host controller
1335*c6fd2807SJeff Garzik  *      @hc: which host controller we're to look at
1336*c6fd2807SJeff Garzik  *
1337*c6fd2807SJeff Garzik  *      Read then write clear the HC interrupt status then walk each
1338*c6fd2807SJeff Garzik  *      port connected to the HC and see if it needs servicing.  Port
1339*c6fd2807SJeff Garzik  *      success ints are reported in the HC interrupt status reg, the
1340*c6fd2807SJeff Garzik  *      port error ints are reported in the higher level main
1341*c6fd2807SJeff Garzik  *      interrupt status register and thus are passed in via the
1342*c6fd2807SJeff Garzik  *      'relevant' argument.
1343*c6fd2807SJeff Garzik  *
1344*c6fd2807SJeff Garzik  *      LOCKING:
1345*c6fd2807SJeff Garzik  *      Inherited from caller.
1346*c6fd2807SJeff Garzik  */
1347*c6fd2807SJeff Garzik static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
1348*c6fd2807SJeff Garzik 			 unsigned int hc)
1349*c6fd2807SJeff Garzik {
1350*c6fd2807SJeff Garzik 	void __iomem *mmio = host_set->mmio_base;
1351*c6fd2807SJeff Garzik 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1352*c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1353*c6fd2807SJeff Garzik 	u32 hc_irq_cause;
1354*c6fd2807SJeff Garzik 	int shift, port, port0, hard_port, handled;
1355*c6fd2807SJeff Garzik 	unsigned int err_mask;
1356*c6fd2807SJeff Garzik 
1357*c6fd2807SJeff Garzik 	if (hc == 0) {
1358*c6fd2807SJeff Garzik 		port0 = 0;
1359*c6fd2807SJeff Garzik 	} else {
1360*c6fd2807SJeff Garzik 		port0 = MV_PORTS_PER_HC;
1361*c6fd2807SJeff Garzik 	}
1362*c6fd2807SJeff Garzik 
1363*c6fd2807SJeff Garzik 	/* we'll need the HC success int register in most cases */
1364*c6fd2807SJeff Garzik 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1365*c6fd2807SJeff Garzik 	if (hc_irq_cause) {
1366*c6fd2807SJeff Garzik 		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1367*c6fd2807SJeff Garzik 	}
1368*c6fd2807SJeff Garzik 
1369*c6fd2807SJeff Garzik 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1370*c6fd2807SJeff Garzik 		hc,relevant,hc_irq_cause);
1371*c6fd2807SJeff Garzik 
1372*c6fd2807SJeff Garzik 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1373*c6fd2807SJeff Garzik 		u8 ata_status = 0;
1374*c6fd2807SJeff Garzik 		struct ata_port *ap = host_set->ports[port];
1375*c6fd2807SJeff Garzik 		struct mv_port_priv *pp = ap->private_data;
1376*c6fd2807SJeff Garzik 
1377*c6fd2807SJeff Garzik 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1378*c6fd2807SJeff Garzik 		handled = 0;	/* ensure ata_status is set if handled++ */
1379*c6fd2807SJeff Garzik 
1380*c6fd2807SJeff Garzik 		/* Note that DEV_IRQ might happen spuriously during EDMA,
1381*c6fd2807SJeff Garzik 		 * and should be ignored in such cases.
1382*c6fd2807SJeff Garzik 		 * The cause of this is still under investigation.
1383*c6fd2807SJeff Garzik 		 */
1384*c6fd2807SJeff Garzik 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1385*c6fd2807SJeff Garzik 			/* EDMA: check for response queue interrupt */
1386*c6fd2807SJeff Garzik 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1387*c6fd2807SJeff Garzik 				ata_status = mv_get_crpb_status(ap);
1388*c6fd2807SJeff Garzik 				handled = 1;
1389*c6fd2807SJeff Garzik 			}
1390*c6fd2807SJeff Garzik 		} else {
1391*c6fd2807SJeff Garzik 			/* PIO: check for device (drive) interrupt */
1392*c6fd2807SJeff Garzik 			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1393*c6fd2807SJeff Garzik 				ata_status = readb((void __iomem *)
1394*c6fd2807SJeff Garzik 					   ap->ioaddr.status_addr);
1395*c6fd2807SJeff Garzik 				handled = 1;
1396*c6fd2807SJeff Garzik 				/* ignore spurious intr if drive still BUSY */
1397*c6fd2807SJeff Garzik 				if (ata_status & ATA_BUSY) {
1398*c6fd2807SJeff Garzik 					ata_status = 0;
1399*c6fd2807SJeff Garzik 					handled = 0;
1400*c6fd2807SJeff Garzik 				}
1401*c6fd2807SJeff Garzik 			}
1402*c6fd2807SJeff Garzik 		}
1403*c6fd2807SJeff Garzik 
1404*c6fd2807SJeff Garzik 		if (ap && (ap->flags & ATA_FLAG_DISABLED))
1405*c6fd2807SJeff Garzik 			continue;
1406*c6fd2807SJeff Garzik 
1407*c6fd2807SJeff Garzik 		err_mask = ac_err_mask(ata_status);
1408*c6fd2807SJeff Garzik 
1409*c6fd2807SJeff Garzik 		shift = port << 1;		/* (port * 2) */
1410*c6fd2807SJeff Garzik 		if (port >= MV_PORTS_PER_HC) {
1411*c6fd2807SJeff Garzik 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1412*c6fd2807SJeff Garzik 		}
1413*c6fd2807SJeff Garzik 		if ((PORT0_ERR << shift) & relevant) {
1414*c6fd2807SJeff Garzik 			mv_err_intr(ap, 1);
1415*c6fd2807SJeff Garzik 			err_mask |= AC_ERR_OTHER;
1416*c6fd2807SJeff Garzik 			handled = 1;
1417*c6fd2807SJeff Garzik 		}
1418*c6fd2807SJeff Garzik 
1419*c6fd2807SJeff Garzik 		if (handled) {
1420*c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, ap->active_tag);
1421*c6fd2807SJeff Garzik 			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1422*c6fd2807SJeff Garzik 				VPRINTK("port %u IRQ found for qc, "
1423*c6fd2807SJeff Garzik 					"ata_status 0x%x\n", port,ata_status);
1424*c6fd2807SJeff Garzik 				/* mark qc status appropriately */
1425*c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1426*c6fd2807SJeff Garzik 					qc->err_mask |= err_mask;
1427*c6fd2807SJeff Garzik 					ata_qc_complete(qc);
1428*c6fd2807SJeff Garzik 				}
1429*c6fd2807SJeff Garzik 			}
1430*c6fd2807SJeff Garzik 		}
1431*c6fd2807SJeff Garzik 	}
1432*c6fd2807SJeff Garzik 	VPRINTK("EXIT\n");
1433*c6fd2807SJeff Garzik }
1434*c6fd2807SJeff Garzik 
/**
 *      mv_interrupt - top-level interrupt handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	/* dispatch each host controller whose bits are pending */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}

	hpriv = host_set->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		/* ack the PCI error cause */
		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
1503*c6fd2807SJeff Garzik 
1504*c6fd2807SJeff Garzik static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1505*c6fd2807SJeff Garzik {
1506*c6fd2807SJeff Garzik 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1507*c6fd2807SJeff Garzik 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1508*c6fd2807SJeff Garzik 
1509*c6fd2807SJeff Garzik 	return hc_mmio + ofs;
1510*c6fd2807SJeff Garzik }
1511*c6fd2807SJeff Garzik 
1512*c6fd2807SJeff Garzik static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1513*c6fd2807SJeff Garzik {
1514*c6fd2807SJeff Garzik 	unsigned int ofs;
1515*c6fd2807SJeff Garzik 
1516*c6fd2807SJeff Garzik 	switch (sc_reg_in) {
1517*c6fd2807SJeff Garzik 	case SCR_STATUS:
1518*c6fd2807SJeff Garzik 	case SCR_ERROR:
1519*c6fd2807SJeff Garzik 	case SCR_CONTROL:
1520*c6fd2807SJeff Garzik 		ofs = sc_reg_in * sizeof(u32);
1521*c6fd2807SJeff Garzik 		break;
1522*c6fd2807SJeff Garzik 	default:
1523*c6fd2807SJeff Garzik 		ofs = 0xffffffffU;
1524*c6fd2807SJeff Garzik 		break;
1525*c6fd2807SJeff Garzik 	}
1526*c6fd2807SJeff Garzik 	return ofs;
1527*c6fd2807SJeff Garzik }
1528*c6fd2807SJeff Garzik 
1529*c6fd2807SJeff Garzik static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1530*c6fd2807SJeff Garzik {
1531*c6fd2807SJeff Garzik 	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1532*c6fd2807SJeff Garzik 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1533*c6fd2807SJeff Garzik 
1534*c6fd2807SJeff Garzik 	if (ofs != 0xffffffffU)
1535*c6fd2807SJeff Garzik 		return readl(mmio + ofs);
1536*c6fd2807SJeff Garzik 	else
1537*c6fd2807SJeff Garzik 		return (u32) ofs;
1538*c6fd2807SJeff Garzik }
1539*c6fd2807SJeff Garzik 
1540*c6fd2807SJeff Garzik static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1541*c6fd2807SJeff Garzik {
1542*c6fd2807SJeff Garzik 	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
1543*c6fd2807SJeff Garzik 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1544*c6fd2807SJeff Garzik 
1545*c6fd2807SJeff Garzik 	if (ofs != 0xffffffffU)
1546*c6fd2807SJeff Garzik 		writelfl(val, mmio + ofs);
1547*c6fd2807SJeff Garzik }
1548*c6fd2807SJeff Garzik 
/* Reset the PCI bus interface on 50xx parts.  All chips except very
 * early 5080 (revision 0) first set bit 0 of the expansion ROM BAR
 * control register; everyone then runs the common mv_reset_pci_bus()
 * sequence.
 */
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	/* device 0x5080 at revision 0 must skip the BAR-control tweak */
	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
1566*c6fd2807SJeff Garzik 
/* Reset the flash controller on 50xx parts by writing a fixed magic
 * value; individual bit meanings are not documented in this driver.
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1571*c6fd2807SJeff Garzik 
/* Cache the 50xx PHY pre-emphasis and signal-amplitude bits for port
 * @idx so mv5_phy_errata() can restore them after its register tweaks.
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1583*c6fd2807SJeff Garzik 
/* LED setup for 50xx parts: clear GPIO port control, then modify the
 * expansion ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets bits 31:1 wholesale.  If the
	 * intent was to clear bit 0 (which mv5_reset_bus() sets), this
	 * should read "&= ~(1 << 0)" — confirm against the 50xx datasheet
	 * before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1596*c6fd2807SJeff Garzik 
/* Apply 50xx PHY errata fixes for @port.  On MV_HP_ERRATA_50XXB0 parts,
 * first adjust the link-training and PHY control registers; then write
 * back the pre-emphasis/amplitude bits previously saved by
 * mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits 12:11 (pre-emphasis) and 7:5 (amplitude) of MV5_PHY_MODE,
	 * matching the masks used in mv5_read_preamp()
	 */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		/* force the low two PHY control bits to 01 */
		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore the saved signal settings into the PHY mode register */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1622*c6fd2807SJeff Garzik 
1623*c6fd2807SJeff Garzik 
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Bring one 50xx SATA port to a known-quiet state: disable EDMA, reset
 * the channel, then zero the port's EDMA queue/irq registers and
 * restore default config and IORDY timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before resetting the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1650*c6fd2807SJeff Garzik 
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero a 50xx host controller's per-HC registers and rewrite register
 * 0x20 with the constants the original driver uses (bit meanings are
 * not documented here).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep bits in 0x1c1c1c1c, force bits 0x03030303 */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1669*c6fd2807SJeff Garzik 
1670*c6fd2807SJeff Garzik static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1671*c6fd2807SJeff Garzik 			unsigned int n_hc)
1672*c6fd2807SJeff Garzik {
1673*c6fd2807SJeff Garzik 	unsigned int hc, port;
1674*c6fd2807SJeff Garzik 
1675*c6fd2807SJeff Garzik 	for (hc = 0; hc < n_hc; hc++) {
1676*c6fd2807SJeff Garzik 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1677*c6fd2807SJeff Garzik 			mv5_reset_hc_port(hpriv, mmio,
1678*c6fd2807SJeff Garzik 					  (hc * MV_PORTS_PER_HC) + port);
1679*c6fd2807SJeff Garzik 
1680*c6fd2807SJeff Garzik 		mv5_reset_one_hc(hpriv, mmio, hc);
1681*c6fd2807SJeff Garzik 	}
1682*c6fd2807SJeff Garzik 
1683*c6fd2807SJeff Garzik 	return 0;
1684*c6fd2807SJeff Garzik }
1685*c6fd2807SJeff Garzik 
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Common PCI interface reset: clear bits 23:16 of MV_PCI_MODE, zero the
 * PCI timer/irq-mask/error registers, and restore the crossbar timeout
 * default.
 */
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);	/* mask all main-cause interrupts */
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
1709*c6fd2807SJeff Garzik 
/* 60xx flash reset: perform the 50xx flash init, then rewrite the GPIO
 * port control register keeping only the low two bits and setting bits
 * 5 and 6.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
1721*c6fd2807SJeff Garzik 
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on success,
 *      1 if any stage of the reset handshake times out.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* busy-wait up to ~1ms for outstanding PCI master cycles to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
1786*c6fd2807SJeff Garzik 
/* Cache the 60xx PHY pre-emphasis and amplitude settings for port @idx.
 * If MV_RESET_CFG bit 0 is clear, use fixed defaults; otherwise read
 * the values back from the port's PHY_MODE2 register.
 */
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		/* defaults: amplitude 0x7 in bits 10:8, pre-emphasis 0x1
		 * in bits 7:5
		 */
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
1806*c6fd2807SJeff Garzik 
/* LED setup for 60xx parts: write 0x60 (bits 5 and 6, the same bits
 * mv6_reset_flash() sets) to the GPIO port control register.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
1811*c6fd2807SJeff Garzik 
1812*c6fd2807SJeff Garzik static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1813*c6fd2807SJeff Garzik 			   unsigned int port)
1814*c6fd2807SJeff Garzik {
1815*c6fd2807SJeff Garzik 	void __iomem *port_mmio = mv_port_base(mmio, port);
1816*c6fd2807SJeff Garzik 
1817*c6fd2807SJeff Garzik 	u32 hp_flags = hpriv->hp_flags;
1818*c6fd2807SJeff Garzik 	int fix_phy_mode2 =
1819*c6fd2807SJeff Garzik 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1820*c6fd2807SJeff Garzik 	int fix_phy_mode4 =
1821*c6fd2807SJeff Garzik 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1822*c6fd2807SJeff Garzik 	u32 m2, tmp;
1823*c6fd2807SJeff Garzik 
1824*c6fd2807SJeff Garzik 	if (fix_phy_mode2) {
1825*c6fd2807SJeff Garzik 		m2 = readl(port_mmio + PHY_MODE2);
1826*c6fd2807SJeff Garzik 		m2 &= ~(1 << 16);
1827*c6fd2807SJeff Garzik 		m2 |= (1 << 31);
1828*c6fd2807SJeff Garzik 		writel(m2, port_mmio + PHY_MODE2);
1829*c6fd2807SJeff Garzik 
1830*c6fd2807SJeff Garzik 		udelay(200);
1831*c6fd2807SJeff Garzik 
1832*c6fd2807SJeff Garzik 		m2 = readl(port_mmio + PHY_MODE2);
1833*c6fd2807SJeff Garzik 		m2 &= ~((1 << 16) | (1 << 31));
1834*c6fd2807SJeff Garzik 		writel(m2, port_mmio + PHY_MODE2);
1835*c6fd2807SJeff Garzik 
1836*c6fd2807SJeff Garzik 		udelay(200);
1837*c6fd2807SJeff Garzik 	}
1838*c6fd2807SJeff Garzik 
1839*c6fd2807SJeff Garzik 	/* who knows what this magic does */
1840*c6fd2807SJeff Garzik 	tmp = readl(port_mmio + PHY_MODE3);
1841*c6fd2807SJeff Garzik 	tmp &= ~0x7F800000;
1842*c6fd2807SJeff Garzik 	tmp |= 0x2A800000;
1843*c6fd2807SJeff Garzik 	writel(tmp, port_mmio + PHY_MODE3);
1844*c6fd2807SJeff Garzik 
1845*c6fd2807SJeff Garzik 	if (fix_phy_mode4) {
1846*c6fd2807SJeff Garzik 		u32 m4;
1847*c6fd2807SJeff Garzik 
1848*c6fd2807SJeff Garzik 		m4 = readl(port_mmio + PHY_MODE4);
1849*c6fd2807SJeff Garzik 
1850*c6fd2807SJeff Garzik 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1851*c6fd2807SJeff Garzik 			tmp = readl(port_mmio + 0x310);
1852*c6fd2807SJeff Garzik 
1853*c6fd2807SJeff Garzik 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
1854*c6fd2807SJeff Garzik 
1855*c6fd2807SJeff Garzik 		writel(m4, port_mmio + PHY_MODE4);
1856*c6fd2807SJeff Garzik 
1857*c6fd2807SJeff Garzik 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1858*c6fd2807SJeff Garzik 			writel(tmp, port_mmio + 0x310);
1859*c6fd2807SJeff Garzik 	}
1860*c6fd2807SJeff Garzik 
1861*c6fd2807SJeff Garzik 	/* Revert values of pre-emphasis and signal amps to the saved ones */
1862*c6fd2807SJeff Garzik 	m2 = readl(port_mmio + PHY_MODE2);
1863*c6fd2807SJeff Garzik 
1864*c6fd2807SJeff Garzik 	m2 &= ~MV_M2_PREAMP_MASK;
1865*c6fd2807SJeff Garzik 	m2 |= hpriv->signal[port].amps;
1866*c6fd2807SJeff Garzik 	m2 |= hpriv->signal[port].pre;
1867*c6fd2807SJeff Garzik 	m2 &= ~(1 << 16);
1868*c6fd2807SJeff Garzik 
1869*c6fd2807SJeff Garzik 	/* according to mvSata 3.6.1, some IIE values are fixed */
1870*c6fd2807SJeff Garzik 	if (IS_GEN_IIE(hpriv)) {
1871*c6fd2807SJeff Garzik 		m2 &= ~0xC30FF01F;
1872*c6fd2807SJeff Garzik 		m2 |= 0x0000900F;
1873*c6fd2807SJeff Garzik 	}
1874*c6fd2807SJeff Garzik 
1875*c6fd2807SJeff Garzik 	writel(m2, port_mmio + PHY_MODE2);
1876*c6fd2807SJeff Garzik }
1877*c6fd2807SJeff Garzik 
1878*c6fd2807SJeff Garzik static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1879*c6fd2807SJeff Garzik 			     unsigned int port_no)
1880*c6fd2807SJeff Garzik {
1881*c6fd2807SJeff Garzik 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
1882*c6fd2807SJeff Garzik 
1883*c6fd2807SJeff Garzik 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1884*c6fd2807SJeff Garzik 
1885*c6fd2807SJeff Garzik 	if (IS_60XX(hpriv)) {
1886*c6fd2807SJeff Garzik 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1887*c6fd2807SJeff Garzik 		ifctl |= (1 << 7);		/* enable gen2i speed */
1888*c6fd2807SJeff Garzik 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1889*c6fd2807SJeff Garzik 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1890*c6fd2807SJeff Garzik 	}
1891*c6fd2807SJeff Garzik 
1892*c6fd2807SJeff Garzik 	udelay(25);		/* allow reset propagation */
1893*c6fd2807SJeff Garzik 
1894*c6fd2807SJeff Garzik 	/* Spec never mentions clearing the bit.  Marvell's driver does
1895*c6fd2807SJeff Garzik 	 * clear the bit, however.
1896*c6fd2807SJeff Garzik 	 */
1897*c6fd2807SJeff Garzik 	writelfl(0, port_mmio + EDMA_CMD_OFS);
1898*c6fd2807SJeff Garzik 
1899*c6fd2807SJeff Garzik 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
1900*c6fd2807SJeff Garzik 
1901*c6fd2807SJeff Garzik 	if (IS_50XX(hpriv))
1902*c6fd2807SJeff Garzik 		mdelay(1);
1903*c6fd2807SJeff Garzik }
1904*c6fd2807SJeff Garzik 
1905*c6fd2807SJeff Garzik static void mv_stop_and_reset(struct ata_port *ap)
1906*c6fd2807SJeff Garzik {
1907*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv = ap->host_set->private_data;
1908*c6fd2807SJeff Garzik 	void __iomem *mmio = ap->host_set->mmio_base;
1909*c6fd2807SJeff Garzik 
1910*c6fd2807SJeff Garzik 	mv_stop_dma(ap);
1911*c6fd2807SJeff Garzik 
1912*c6fd2807SJeff Garzik 	mv_channel_reset(hpriv, mmio, ap->port_no);
1913*c6fd2807SJeff Garzik 
1914*c6fd2807SJeff Garzik 	__mv_phy_reset(ap, 0);
1915*c6fd2807SJeff Garzik }
1916*c6fd2807SJeff Garzik 
1917*c6fd2807SJeff Garzik static inline void __msleep(unsigned int msec, int can_sleep)
1918*c6fd2807SJeff Garzik {
1919*c6fd2807SJeff Garzik 	if (can_sleep)
1920*c6fd2807SJeff Garzik 		msleep(msec);
1921*c6fd2807SJeff Garzik 	else
1922*c6fd2807SJeff Garzik 		mdelay(msec);
1923*c6fd2807SJeff Garzik }
1924*c6fd2807SJeff Garzik 
1925*c6fd2807SJeff Garzik /**
1926*c6fd2807SJeff Garzik  *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
1927*c6fd2807SJeff Garzik  *      @ap: ATA channel to manipulate
1928*c6fd2807SJeff Garzik  *
1929*c6fd2807SJeff Garzik  *      Part of this is taken from __sata_phy_reset and modified to
1930*c6fd2807SJeff Garzik  *      not sleep since this routine gets called from interrupt level.
1931*c6fd2807SJeff Garzik  *
1932*c6fd2807SJeff Garzik  *      LOCKING:
1933*c6fd2807SJeff Garzik  *      Inherited from caller.  This is coded to safe to call at
1934*c6fd2807SJeff Garzik  *      interrupt level, i.e. it does not sleep.
1935*c6fd2807SJeff Garzik  */
1936*c6fd2807SJeff Garzik static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1937*c6fd2807SJeff Garzik {
1938*c6fd2807SJeff Garzik 	struct mv_port_priv *pp	= ap->private_data;
1939*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv = ap->host_set->private_data;
1940*c6fd2807SJeff Garzik 	void __iomem *port_mmio = mv_ap_base(ap);
1941*c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1942*c6fd2807SJeff Garzik 	struct ata_device *dev = &ap->device[0];
1943*c6fd2807SJeff Garzik 	unsigned long timeout;
1944*c6fd2807SJeff Garzik 	int retry = 5;
1945*c6fd2807SJeff Garzik 	u32 sstatus;
1946*c6fd2807SJeff Garzik 
1947*c6fd2807SJeff Garzik 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1948*c6fd2807SJeff Garzik 
1949*c6fd2807SJeff Garzik 	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1950*c6fd2807SJeff Garzik 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1951*c6fd2807SJeff Garzik 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1952*c6fd2807SJeff Garzik 
1953*c6fd2807SJeff Garzik 	/* Issue COMRESET via SControl */
1954*c6fd2807SJeff Garzik comreset_retry:
1955*c6fd2807SJeff Garzik 	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1956*c6fd2807SJeff Garzik 	__msleep(1, can_sleep);
1957*c6fd2807SJeff Garzik 
1958*c6fd2807SJeff Garzik 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1959*c6fd2807SJeff Garzik 	__msleep(20, can_sleep);
1960*c6fd2807SJeff Garzik 
1961*c6fd2807SJeff Garzik 	timeout = jiffies + msecs_to_jiffies(200);
1962*c6fd2807SJeff Garzik 	do {
1963*c6fd2807SJeff Garzik 		sata_scr_read(ap, SCR_STATUS, &sstatus);
1964*c6fd2807SJeff Garzik 		sstatus &= 0x3;
1965*c6fd2807SJeff Garzik 		if ((sstatus == 3) || (sstatus == 0))
1966*c6fd2807SJeff Garzik 			break;
1967*c6fd2807SJeff Garzik 
1968*c6fd2807SJeff Garzik 		__msleep(1, can_sleep);
1969*c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
1970*c6fd2807SJeff Garzik 
1971*c6fd2807SJeff Garzik 	/* work around errata */
1972*c6fd2807SJeff Garzik 	if (IS_60XX(hpriv) &&
1973*c6fd2807SJeff Garzik 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1974*c6fd2807SJeff Garzik 	    (retry-- > 0))
1975*c6fd2807SJeff Garzik 		goto comreset_retry;
1976*c6fd2807SJeff Garzik 
1977*c6fd2807SJeff Garzik 	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
1978*c6fd2807SJeff Garzik 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1979*c6fd2807SJeff Garzik 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1980*c6fd2807SJeff Garzik 
1981*c6fd2807SJeff Garzik 	if (ata_port_online(ap)) {
1982*c6fd2807SJeff Garzik 		ata_port_probe(ap);
1983*c6fd2807SJeff Garzik 	} else {
1984*c6fd2807SJeff Garzik 		sata_scr_read(ap, SCR_STATUS, &sstatus);
1985*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
1986*c6fd2807SJeff Garzik 				"no device found (phy stat %08x)\n", sstatus);
1987*c6fd2807SJeff Garzik 		ata_port_disable(ap);
1988*c6fd2807SJeff Garzik 		return;
1989*c6fd2807SJeff Garzik 	}
1990*c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
1991*c6fd2807SJeff Garzik 
1992*c6fd2807SJeff Garzik 	/* even after SStatus reflects that device is ready,
1993*c6fd2807SJeff Garzik 	 * it seems to take a while for link to be fully
1994*c6fd2807SJeff Garzik 	 * established (and thus Status no longer 0x80/0x7F),
1995*c6fd2807SJeff Garzik 	 * so we poll a bit for that, here.
1996*c6fd2807SJeff Garzik 	 */
1997*c6fd2807SJeff Garzik 	retry = 20;
1998*c6fd2807SJeff Garzik 	while (1) {
1999*c6fd2807SJeff Garzik 		u8 drv_stat = ata_check_status(ap);
2000*c6fd2807SJeff Garzik 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2001*c6fd2807SJeff Garzik 			break;
2002*c6fd2807SJeff Garzik 		__msleep(500, can_sleep);
2003*c6fd2807SJeff Garzik 		if (retry-- <= 0)
2004*c6fd2807SJeff Garzik 			break;
2005*c6fd2807SJeff Garzik 	}
2006*c6fd2807SJeff Garzik 
2007*c6fd2807SJeff Garzik 	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
2008*c6fd2807SJeff Garzik 	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
2009*c6fd2807SJeff Garzik 	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
2010*c6fd2807SJeff Garzik 	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
2011*c6fd2807SJeff Garzik 
2012*c6fd2807SJeff Garzik 	dev->class = ata_dev_classify(&tf);
2013*c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev)) {
2014*c6fd2807SJeff Garzik 		VPRINTK("Port disabled post-sig: No device present.\n");
2015*c6fd2807SJeff Garzik 		ata_port_disable(ap);
2016*c6fd2807SJeff Garzik 	}
2017*c6fd2807SJeff Garzik 
2018*c6fd2807SJeff Garzik 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2019*c6fd2807SJeff Garzik 
2020*c6fd2807SJeff Garzik 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2021*c6fd2807SJeff Garzik 
2022*c6fd2807SJeff Garzik 	VPRINTK("EXIT\n");
2023*c6fd2807SJeff Garzik }
2024*c6fd2807SJeff Garzik 
2025*c6fd2807SJeff Garzik static void mv_phy_reset(struct ata_port *ap)
2026*c6fd2807SJeff Garzik {
2027*c6fd2807SJeff Garzik 	__mv_phy_reset(ap, 1);
2028*c6fd2807SJeff Garzik }
2029*c6fd2807SJeff Garzik 
2030*c6fd2807SJeff Garzik /**
2031*c6fd2807SJeff Garzik  *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
2032*c6fd2807SJeff Garzik  *      @ap: ATA channel to manipulate
2033*c6fd2807SJeff Garzik  *
2034*c6fd2807SJeff Garzik  *      Intent is to clear all pending error conditions, reset the
2035*c6fd2807SJeff Garzik  *      chip/bus, fail the command, and move on.
2036*c6fd2807SJeff Garzik  *
2037*c6fd2807SJeff Garzik  *      LOCKING:
2038*c6fd2807SJeff Garzik  *      This routine holds the host_set lock while failing the command.
2039*c6fd2807SJeff Garzik  */
2040*c6fd2807SJeff Garzik static void mv_eng_timeout(struct ata_port *ap)
2041*c6fd2807SJeff Garzik {
2042*c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
2043*c6fd2807SJeff Garzik 	unsigned long flags;
2044*c6fd2807SJeff Garzik 
2045*c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2046*c6fd2807SJeff Garzik 	DPRINTK("All regs @ start of eng_timeout\n");
2047*c6fd2807SJeff Garzik 	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
2048*c6fd2807SJeff Garzik 			 to_pci_dev(ap->host_set->dev));
2049*c6fd2807SJeff Garzik 
2050*c6fd2807SJeff Garzik 	qc = ata_qc_from_tag(ap, ap->active_tag);
2051*c6fd2807SJeff Garzik         printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2052*c6fd2807SJeff Garzik 	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
2053*c6fd2807SJeff Garzik 	       &qc->scsicmd->cmnd);
2054*c6fd2807SJeff Garzik 
2055*c6fd2807SJeff Garzik 	spin_lock_irqsave(&ap->host_set->lock, flags);
2056*c6fd2807SJeff Garzik 	mv_err_intr(ap, 0);
2057*c6fd2807SJeff Garzik 	mv_stop_and_reset(ap);
2058*c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ap->host_set->lock, flags);
2059*c6fd2807SJeff Garzik 
2060*c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2061*c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_ACTIVE) {
2062*c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_TIMEOUT;
2063*c6fd2807SJeff Garzik 		ata_eh_qc_complete(qc);
2064*c6fd2807SJeff Garzik 	}
2065*c6fd2807SJeff Garzik }
2066*c6fd2807SJeff Garzik 
2067*c6fd2807SJeff Garzik /**
2068*c6fd2807SJeff Garzik  *      mv_port_init - Perform some early initialization on a single port.
2069*c6fd2807SJeff Garzik  *      @port: libata data structure storing shadow register addresses
2070*c6fd2807SJeff Garzik  *      @port_mmio: base address of the port
2071*c6fd2807SJeff Garzik  *
2072*c6fd2807SJeff Garzik  *      Initialize shadow register mmio addresses, clear outstanding
2073*c6fd2807SJeff Garzik  *      interrupts on the port, and unmask interrupts for the future
2074*c6fd2807SJeff Garzik  *      start of the port.
2075*c6fd2807SJeff Garzik  *
2076*c6fd2807SJeff Garzik  *      LOCKING:
2077*c6fd2807SJeff Garzik  *      Inherited from caller.
2078*c6fd2807SJeff Garzik  */
2079*c6fd2807SJeff Garzik static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2080*c6fd2807SJeff Garzik {
2081*c6fd2807SJeff Garzik 	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
2082*c6fd2807SJeff Garzik 	unsigned serr_ofs;
2083*c6fd2807SJeff Garzik 
2084*c6fd2807SJeff Garzik 	/* PIO related setup
2085*c6fd2807SJeff Garzik 	 */
2086*c6fd2807SJeff Garzik 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2087*c6fd2807SJeff Garzik 	port->error_addr =
2088*c6fd2807SJeff Garzik 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2089*c6fd2807SJeff Garzik 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2090*c6fd2807SJeff Garzik 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2091*c6fd2807SJeff Garzik 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2092*c6fd2807SJeff Garzik 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2093*c6fd2807SJeff Garzik 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2094*c6fd2807SJeff Garzik 	port->status_addr =
2095*c6fd2807SJeff Garzik 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2096*c6fd2807SJeff Garzik 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2097*c6fd2807SJeff Garzik 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2098*c6fd2807SJeff Garzik 
2099*c6fd2807SJeff Garzik 	/* unused: */
2100*c6fd2807SJeff Garzik 	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;
2101*c6fd2807SJeff Garzik 
2102*c6fd2807SJeff Garzik 	/* Clear any currently outstanding port interrupt conditions */
2103*c6fd2807SJeff Garzik 	serr_ofs = mv_scr_offset(SCR_ERROR);
2104*c6fd2807SJeff Garzik 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2105*c6fd2807SJeff Garzik 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2106*c6fd2807SJeff Garzik 
2107*c6fd2807SJeff Garzik 	/* unmask all EDMA error interrupts */
2108*c6fd2807SJeff Garzik 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2109*c6fd2807SJeff Garzik 
2110*c6fd2807SJeff Garzik 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2111*c6fd2807SJeff Garzik 		readl(port_mmio + EDMA_CFG_OFS),
2112*c6fd2807SJeff Garzik 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2113*c6fd2807SJeff Garzik 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2114*c6fd2807SJeff Garzik }
2115*c6fd2807SJeff Garzik 
2116*c6fd2807SJeff Garzik static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
2117*c6fd2807SJeff Garzik 		      unsigned int board_idx)
2118*c6fd2807SJeff Garzik {
2119*c6fd2807SJeff Garzik 	u8 rev_id;
2120*c6fd2807SJeff Garzik 	u32 hp_flags = hpriv->hp_flags;
2121*c6fd2807SJeff Garzik 
2122*c6fd2807SJeff Garzik 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2123*c6fd2807SJeff Garzik 
2124*c6fd2807SJeff Garzik 	switch(board_idx) {
2125*c6fd2807SJeff Garzik 	case chip_5080:
2126*c6fd2807SJeff Garzik 		hpriv->ops = &mv5xxx_ops;
2127*c6fd2807SJeff Garzik 		hp_flags |= MV_HP_50XX;
2128*c6fd2807SJeff Garzik 
2129*c6fd2807SJeff Garzik 		switch (rev_id) {
2130*c6fd2807SJeff Garzik 		case 0x1:
2131*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_50XXB0;
2132*c6fd2807SJeff Garzik 			break;
2133*c6fd2807SJeff Garzik 		case 0x3:
2134*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_50XXB2;
2135*c6fd2807SJeff Garzik 			break;
2136*c6fd2807SJeff Garzik 		default:
2137*c6fd2807SJeff Garzik 			dev_printk(KERN_WARNING, &pdev->dev,
2138*c6fd2807SJeff Garzik 			   "Applying 50XXB2 workarounds to unknown rev\n");
2139*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_50XXB2;
2140*c6fd2807SJeff Garzik 			break;
2141*c6fd2807SJeff Garzik 		}
2142*c6fd2807SJeff Garzik 		break;
2143*c6fd2807SJeff Garzik 
2144*c6fd2807SJeff Garzik 	case chip_504x:
2145*c6fd2807SJeff Garzik 	case chip_508x:
2146*c6fd2807SJeff Garzik 		hpriv->ops = &mv5xxx_ops;
2147*c6fd2807SJeff Garzik 		hp_flags |= MV_HP_50XX;
2148*c6fd2807SJeff Garzik 
2149*c6fd2807SJeff Garzik 		switch (rev_id) {
2150*c6fd2807SJeff Garzik 		case 0x0:
2151*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_50XXB0;
2152*c6fd2807SJeff Garzik 			break;
2153*c6fd2807SJeff Garzik 		case 0x3:
2154*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_50XXB2;
2155*c6fd2807SJeff Garzik 			break;
2156*c6fd2807SJeff Garzik 		default:
2157*c6fd2807SJeff Garzik 			dev_printk(KERN_WARNING, &pdev->dev,
2158*c6fd2807SJeff Garzik 			   "Applying B2 workarounds to unknown rev\n");
2159*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_50XXB2;
2160*c6fd2807SJeff Garzik 			break;
2161*c6fd2807SJeff Garzik 		}
2162*c6fd2807SJeff Garzik 		break;
2163*c6fd2807SJeff Garzik 
2164*c6fd2807SJeff Garzik 	case chip_604x:
2165*c6fd2807SJeff Garzik 	case chip_608x:
2166*c6fd2807SJeff Garzik 		hpriv->ops = &mv6xxx_ops;
2167*c6fd2807SJeff Garzik 
2168*c6fd2807SJeff Garzik 		switch (rev_id) {
2169*c6fd2807SJeff Garzik 		case 0x7:
2170*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_60X1B2;
2171*c6fd2807SJeff Garzik 			break;
2172*c6fd2807SJeff Garzik 		case 0x9:
2173*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_60X1C0;
2174*c6fd2807SJeff Garzik 			break;
2175*c6fd2807SJeff Garzik 		default:
2176*c6fd2807SJeff Garzik 			dev_printk(KERN_WARNING, &pdev->dev,
2177*c6fd2807SJeff Garzik 				   "Applying B2 workarounds to unknown rev\n");
2178*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_60X1B2;
2179*c6fd2807SJeff Garzik 			break;
2180*c6fd2807SJeff Garzik 		}
2181*c6fd2807SJeff Garzik 		break;
2182*c6fd2807SJeff Garzik 
2183*c6fd2807SJeff Garzik 	case chip_7042:
2184*c6fd2807SJeff Garzik 	case chip_6042:
2185*c6fd2807SJeff Garzik 		hpriv->ops = &mv6xxx_ops;
2186*c6fd2807SJeff Garzik 
2187*c6fd2807SJeff Garzik 		hp_flags |= MV_HP_GEN_IIE;
2188*c6fd2807SJeff Garzik 
2189*c6fd2807SJeff Garzik 		switch (rev_id) {
2190*c6fd2807SJeff Garzik 		case 0x0:
2191*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_XX42A0;
2192*c6fd2807SJeff Garzik 			break;
2193*c6fd2807SJeff Garzik 		case 0x1:
2194*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_60X1C0;
2195*c6fd2807SJeff Garzik 			break;
2196*c6fd2807SJeff Garzik 		default:
2197*c6fd2807SJeff Garzik 			dev_printk(KERN_WARNING, &pdev->dev,
2198*c6fd2807SJeff Garzik 			   "Applying 60X1C0 workarounds to unknown rev\n");
2199*c6fd2807SJeff Garzik 			hp_flags |= MV_HP_ERRATA_60X1C0;
2200*c6fd2807SJeff Garzik 			break;
2201*c6fd2807SJeff Garzik 		}
2202*c6fd2807SJeff Garzik 		break;
2203*c6fd2807SJeff Garzik 
2204*c6fd2807SJeff Garzik 	default:
2205*c6fd2807SJeff Garzik 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2206*c6fd2807SJeff Garzik 		return 1;
2207*c6fd2807SJeff Garzik 	}
2208*c6fd2807SJeff Garzik 
2209*c6fd2807SJeff Garzik 	hpriv->hp_flags = hp_flags;
2210*c6fd2807SJeff Garzik 
2211*c6fd2807SJeff Garzik 	return 0;
2212*c6fd2807SJeff Garzik }
2213*c6fd2807SJeff Garzik 
2214*c6fd2807SJeff Garzik /**
2215*c6fd2807SJeff Garzik  *      mv_init_host - Perform some early initialization of the host.
2216*c6fd2807SJeff Garzik  *	@pdev: host PCI device
2217*c6fd2807SJeff Garzik  *      @probe_ent: early data struct representing the host
2218*c6fd2807SJeff Garzik  *
2219*c6fd2807SJeff Garzik  *      If possible, do an early global reset of the host.  Then do
2220*c6fd2807SJeff Garzik  *      our port init and clear/unmask all/relevant host interrupts.
2221*c6fd2807SJeff Garzik  *
2222*c6fd2807SJeff Garzik  *      LOCKING:
2223*c6fd2807SJeff Garzik  *      Inherited from caller.
2224*c6fd2807SJeff Garzik  */
2225*c6fd2807SJeff Garzik static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2226*c6fd2807SJeff Garzik 			unsigned int board_idx)
2227*c6fd2807SJeff Garzik {
2228*c6fd2807SJeff Garzik 	int rc = 0, n_hc, port, hc;
2229*c6fd2807SJeff Garzik 	void __iomem *mmio = probe_ent->mmio_base;
2230*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv = probe_ent->private_data;
2231*c6fd2807SJeff Garzik 
2232*c6fd2807SJeff Garzik 	/* global interrupt mask */
2233*c6fd2807SJeff Garzik 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2234*c6fd2807SJeff Garzik 
2235*c6fd2807SJeff Garzik 	rc = mv_chip_id(pdev, hpriv, board_idx);
2236*c6fd2807SJeff Garzik 	if (rc)
2237*c6fd2807SJeff Garzik 		goto done;
2238*c6fd2807SJeff Garzik 
2239*c6fd2807SJeff Garzik 	n_hc = mv_get_hc_count(probe_ent->host_flags);
2240*c6fd2807SJeff Garzik 	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
2241*c6fd2807SJeff Garzik 
2242*c6fd2807SJeff Garzik 	for (port = 0; port < probe_ent->n_ports; port++)
2243*c6fd2807SJeff Garzik 		hpriv->ops->read_preamp(hpriv, port, mmio);
2244*c6fd2807SJeff Garzik 
2245*c6fd2807SJeff Garzik 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2246*c6fd2807SJeff Garzik 	if (rc)
2247*c6fd2807SJeff Garzik 		goto done;
2248*c6fd2807SJeff Garzik 
2249*c6fd2807SJeff Garzik 	hpriv->ops->reset_flash(hpriv, mmio);
2250*c6fd2807SJeff Garzik 	hpriv->ops->reset_bus(pdev, mmio);
2251*c6fd2807SJeff Garzik 	hpriv->ops->enable_leds(hpriv, mmio);
2252*c6fd2807SJeff Garzik 
2253*c6fd2807SJeff Garzik 	for (port = 0; port < probe_ent->n_ports; port++) {
2254*c6fd2807SJeff Garzik 		if (IS_60XX(hpriv)) {
2255*c6fd2807SJeff Garzik 			void __iomem *port_mmio = mv_port_base(mmio, port);
2256*c6fd2807SJeff Garzik 
2257*c6fd2807SJeff Garzik 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2258*c6fd2807SJeff Garzik 			ifctl |= (1 << 7);		/* enable gen2i speed */
2259*c6fd2807SJeff Garzik 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2260*c6fd2807SJeff Garzik 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2261*c6fd2807SJeff Garzik 		}
2262*c6fd2807SJeff Garzik 
2263*c6fd2807SJeff Garzik 		hpriv->ops->phy_errata(hpriv, mmio, port);
2264*c6fd2807SJeff Garzik 	}
2265*c6fd2807SJeff Garzik 
2266*c6fd2807SJeff Garzik 	for (port = 0; port < probe_ent->n_ports; port++) {
2267*c6fd2807SJeff Garzik 		void __iomem *port_mmio = mv_port_base(mmio, port);
2268*c6fd2807SJeff Garzik 		mv_port_init(&probe_ent->port[port], port_mmio);
2269*c6fd2807SJeff Garzik 	}
2270*c6fd2807SJeff Garzik 
2271*c6fd2807SJeff Garzik 	for (hc = 0; hc < n_hc; hc++) {
2272*c6fd2807SJeff Garzik 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2273*c6fd2807SJeff Garzik 
2274*c6fd2807SJeff Garzik 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2275*c6fd2807SJeff Garzik 			"(before clear)=0x%08x\n", hc,
2276*c6fd2807SJeff Garzik 			readl(hc_mmio + HC_CFG_OFS),
2277*c6fd2807SJeff Garzik 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2278*c6fd2807SJeff Garzik 
2279*c6fd2807SJeff Garzik 		/* Clear any currently outstanding hc interrupt conditions */
2280*c6fd2807SJeff Garzik 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2281*c6fd2807SJeff Garzik 	}
2282*c6fd2807SJeff Garzik 
2283*c6fd2807SJeff Garzik 	/* Clear any currently outstanding host interrupt conditions */
2284*c6fd2807SJeff Garzik 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2285*c6fd2807SJeff Garzik 
2286*c6fd2807SJeff Garzik 	/* and unmask interrupt generation for host regs */
2287*c6fd2807SJeff Garzik 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2288*c6fd2807SJeff Garzik 	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2289*c6fd2807SJeff Garzik 
2290*c6fd2807SJeff Garzik 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2291*c6fd2807SJeff Garzik 		"PCI int cause/mask=0x%08x/0x%08x\n",
2292*c6fd2807SJeff Garzik 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2293*c6fd2807SJeff Garzik 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2294*c6fd2807SJeff Garzik 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2295*c6fd2807SJeff Garzik 		readl(mmio + PCI_IRQ_MASK_OFS));
2296*c6fd2807SJeff Garzik 
2297*c6fd2807SJeff Garzik done:
2298*c6fd2807SJeff Garzik 	return rc;
2299*c6fd2807SJeff Garzik }
2300*c6fd2807SJeff Garzik 
2301*c6fd2807SJeff Garzik /**
2302*c6fd2807SJeff Garzik  *      mv_print_info - Dump key info to kernel log for perusal.
2303*c6fd2807SJeff Garzik  *      @probe_ent: early data struct representing the host
2304*c6fd2807SJeff Garzik  *
2305*c6fd2807SJeff Garzik  *      FIXME: complete this.
2306*c6fd2807SJeff Garzik  *
2307*c6fd2807SJeff Garzik  *      LOCKING:
2308*c6fd2807SJeff Garzik  *      Inherited from caller.
2309*c6fd2807SJeff Garzik  */
2310*c6fd2807SJeff Garzik static void mv_print_info(struct ata_probe_ent *probe_ent)
2311*c6fd2807SJeff Garzik {
2312*c6fd2807SJeff Garzik 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
2313*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv = probe_ent->private_data;
2314*c6fd2807SJeff Garzik 	u8 rev_id, scc;
2315*c6fd2807SJeff Garzik 	const char *scc_s;
2316*c6fd2807SJeff Garzik 
2317*c6fd2807SJeff Garzik 	/* Use this to determine the HW stepping of the chip so we know
2318*c6fd2807SJeff Garzik 	 * what errata to workaround
2319*c6fd2807SJeff Garzik 	 */
2320*c6fd2807SJeff Garzik 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2321*c6fd2807SJeff Garzik 
2322*c6fd2807SJeff Garzik 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2323*c6fd2807SJeff Garzik 	if (scc == 0)
2324*c6fd2807SJeff Garzik 		scc_s = "SCSI";
2325*c6fd2807SJeff Garzik 	else if (scc == 0x01)
2326*c6fd2807SJeff Garzik 		scc_s = "RAID";
2327*c6fd2807SJeff Garzik 	else
2328*c6fd2807SJeff Garzik 		scc_s = "unknown";
2329*c6fd2807SJeff Garzik 
2330*c6fd2807SJeff Garzik 	dev_printk(KERN_INFO, &pdev->dev,
2331*c6fd2807SJeff Garzik 	       "%u slots %u ports %s mode IRQ via %s\n",
2332*c6fd2807SJeff Garzik 	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
2333*c6fd2807SJeff Garzik 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2334*c6fd2807SJeff Garzik }
2335*c6fd2807SJeff Garzik 
2336*c6fd2807SJeff Garzik /**
2337*c6fd2807SJeff Garzik  *      mv_init_one - handle a positive probe of a Marvell host
2338*c6fd2807SJeff Garzik  *      @pdev: PCI device found
2339*c6fd2807SJeff Garzik  *      @ent: PCI device ID entry for the matched host
2340*c6fd2807SJeff Garzik  *
2341*c6fd2807SJeff Garzik  *      LOCKING:
2342*c6fd2807SJeff Garzik  *      Inherited from caller.
2343*c6fd2807SJeff Garzik  */
2344*c6fd2807SJeff Garzik static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2345*c6fd2807SJeff Garzik {
2346*c6fd2807SJeff Garzik 	static int printed_version = 0;
2347*c6fd2807SJeff Garzik 	struct ata_probe_ent *probe_ent = NULL;
2348*c6fd2807SJeff Garzik 	struct mv_host_priv *hpriv;
2349*c6fd2807SJeff Garzik 	unsigned int board_idx = (unsigned int)ent->driver_data;
2350*c6fd2807SJeff Garzik 	void __iomem *mmio_base;
2351*c6fd2807SJeff Garzik 	int pci_dev_busy = 0, rc;
2352*c6fd2807SJeff Garzik 
2353*c6fd2807SJeff Garzik 	if (!printed_version++)
2354*c6fd2807SJeff Garzik 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2355*c6fd2807SJeff Garzik 
2356*c6fd2807SJeff Garzik 	rc = pci_enable_device(pdev);
2357*c6fd2807SJeff Garzik 	if (rc) {
2358*c6fd2807SJeff Garzik 		return rc;
2359*c6fd2807SJeff Garzik 	}
2360*c6fd2807SJeff Garzik 	pci_set_master(pdev);
2361*c6fd2807SJeff Garzik 
2362*c6fd2807SJeff Garzik 	rc = pci_request_regions(pdev, DRV_NAME);
2363*c6fd2807SJeff Garzik 	if (rc) {
2364*c6fd2807SJeff Garzik 		pci_dev_busy = 1;
2365*c6fd2807SJeff Garzik 		goto err_out;
2366*c6fd2807SJeff Garzik 	}
2367*c6fd2807SJeff Garzik 
2368*c6fd2807SJeff Garzik 	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
2369*c6fd2807SJeff Garzik 	if (probe_ent == NULL) {
2370*c6fd2807SJeff Garzik 		rc = -ENOMEM;
2371*c6fd2807SJeff Garzik 		goto err_out_regions;
2372*c6fd2807SJeff Garzik 	}
2373*c6fd2807SJeff Garzik 
2374*c6fd2807SJeff Garzik 	memset(probe_ent, 0, sizeof(*probe_ent));
2375*c6fd2807SJeff Garzik 	probe_ent->dev = pci_dev_to_dev(pdev);
2376*c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&probe_ent->node);
2377*c6fd2807SJeff Garzik 
2378*c6fd2807SJeff Garzik 	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
2379*c6fd2807SJeff Garzik 	if (mmio_base == NULL) {
2380*c6fd2807SJeff Garzik 		rc = -ENOMEM;
2381*c6fd2807SJeff Garzik 		goto err_out_free_ent;
2382*c6fd2807SJeff Garzik 	}
2383*c6fd2807SJeff Garzik 
2384*c6fd2807SJeff Garzik 	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2385*c6fd2807SJeff Garzik 	if (!hpriv) {
2386*c6fd2807SJeff Garzik 		rc = -ENOMEM;
2387*c6fd2807SJeff Garzik 		goto err_out_iounmap;
2388*c6fd2807SJeff Garzik 	}
2389*c6fd2807SJeff Garzik 	memset(hpriv, 0, sizeof(*hpriv));
2390*c6fd2807SJeff Garzik 
2391*c6fd2807SJeff Garzik 	probe_ent->sht = mv_port_info[board_idx].sht;
2392*c6fd2807SJeff Garzik 	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
2393*c6fd2807SJeff Garzik 	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
2394*c6fd2807SJeff Garzik 	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
2395*c6fd2807SJeff Garzik 	probe_ent->port_ops = mv_port_info[board_idx].port_ops;
2396*c6fd2807SJeff Garzik 
2397*c6fd2807SJeff Garzik 	probe_ent->irq = pdev->irq;
2398*c6fd2807SJeff Garzik 	probe_ent->irq_flags = IRQF_SHARED;
2399*c6fd2807SJeff Garzik 	probe_ent->mmio_base = mmio_base;
2400*c6fd2807SJeff Garzik 	probe_ent->private_data = hpriv;
2401*c6fd2807SJeff Garzik 
2402*c6fd2807SJeff Garzik 	/* initialize adapter */
2403*c6fd2807SJeff Garzik 	rc = mv_init_host(pdev, probe_ent, board_idx);
2404*c6fd2807SJeff Garzik 	if (rc) {
2405*c6fd2807SJeff Garzik 		goto err_out_hpriv;
2406*c6fd2807SJeff Garzik 	}
2407*c6fd2807SJeff Garzik 
2408*c6fd2807SJeff Garzik 	/* Enable interrupts */
2409*c6fd2807SJeff Garzik 	if (msi && pci_enable_msi(pdev) == 0) {
2410*c6fd2807SJeff Garzik 		hpriv->hp_flags |= MV_HP_FLAG_MSI;
2411*c6fd2807SJeff Garzik 	} else {
2412*c6fd2807SJeff Garzik 		pci_intx(pdev, 1);
2413*c6fd2807SJeff Garzik 	}
2414*c6fd2807SJeff Garzik 
2415*c6fd2807SJeff Garzik 	mv_dump_pci_cfg(pdev, 0x68);
2416*c6fd2807SJeff Garzik 	mv_print_info(probe_ent);
2417*c6fd2807SJeff Garzik 
2418*c6fd2807SJeff Garzik 	if (ata_device_add(probe_ent) == 0) {
2419*c6fd2807SJeff Garzik 		rc = -ENODEV;		/* No devices discovered */
2420*c6fd2807SJeff Garzik 		goto err_out_dev_add;
2421*c6fd2807SJeff Garzik 	}
2422*c6fd2807SJeff Garzik 
2423*c6fd2807SJeff Garzik 	kfree(probe_ent);
2424*c6fd2807SJeff Garzik 	return 0;
2425*c6fd2807SJeff Garzik 
2426*c6fd2807SJeff Garzik err_out_dev_add:
2427*c6fd2807SJeff Garzik 	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2428*c6fd2807SJeff Garzik 		pci_disable_msi(pdev);
2429*c6fd2807SJeff Garzik 	} else {
2430*c6fd2807SJeff Garzik 		pci_intx(pdev, 0);
2431*c6fd2807SJeff Garzik 	}
2432*c6fd2807SJeff Garzik err_out_hpriv:
2433*c6fd2807SJeff Garzik 	kfree(hpriv);
2434*c6fd2807SJeff Garzik err_out_iounmap:
2435*c6fd2807SJeff Garzik 	pci_iounmap(pdev, mmio_base);
2436*c6fd2807SJeff Garzik err_out_free_ent:
2437*c6fd2807SJeff Garzik 	kfree(probe_ent);
2438*c6fd2807SJeff Garzik err_out_regions:
2439*c6fd2807SJeff Garzik 	pci_release_regions(pdev);
2440*c6fd2807SJeff Garzik err_out:
2441*c6fd2807SJeff Garzik 	if (!pci_dev_busy) {
2442*c6fd2807SJeff Garzik 		pci_disable_device(pdev);
2443*c6fd2807SJeff Garzik 	}
2444*c6fd2807SJeff Garzik 
2445*c6fd2807SJeff Garzik 	return rc;
2446*c6fd2807SJeff Garzik }
2447*c6fd2807SJeff Garzik 
2448*c6fd2807SJeff Garzik static int __init mv_init(void)
2449*c6fd2807SJeff Garzik {
2450*c6fd2807SJeff Garzik 	return pci_register_driver(&mv_pci_driver);
2451*c6fd2807SJeff Garzik }
2452*c6fd2807SJeff Garzik 
2453*c6fd2807SJeff Garzik static void __exit mv_exit(void)
2454*c6fd2807SJeff Garzik {
2455*c6fd2807SJeff Garzik 	pci_unregister_driver(&mv_pci_driver);
2456*c6fd2807SJeff Garzik }
2457*c6fd2807SJeff Garzik 
2458*c6fd2807SJeff Garzik MODULE_AUTHOR("Brett Russ");
2459*c6fd2807SJeff Garzik MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2460*c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
2461*c6fd2807SJeff Garzik MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2462*c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
2463*c6fd2807SJeff Garzik 
2464*c6fd2807SJeff Garzik module_param(msi, int, 0444);
2465*c6fd2807SJeff Garzik MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2466*c6fd2807SJeff Garzik 
2467*c6fd2807SJeff Garzik module_init(mv_init);
2468*c6fd2807SJeff Garzik module_exit(mv_exit);
2469