xref: /openbmc/linux/drivers/ata/sata_mv.c (revision a1e58bbd)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25   sata_mv TODO list:
26 
27   1) Needs a full errata audit for all chipsets.  I implemented most
28   of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember a couple workarounds (one related to PCI-X)
30   are still needed.
31 
32   2) Improve/fix IRQ and error handling sequences.
33 
34   3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35 
36   4) Think about TCQ support here, and for libata in general
37   with controllers that support it via host-queuing hardware
38   (a software-only implementation could be a nightmare).
39 
40   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41 
42   6) Add port multiplier support (intermediate).
43 
44   7) Develop a low-power-consumption strategy, and implement it.
45 
46   8) [Experiment, low priority] See if ATAPI can be supported using
47   "unknown FIS" or "vendor-specific FIS" support, or something creative
48   like that.
49 
50   9) [Experiment, low priority] Investigate interrupt coalescing.
51   Quite often, especially with PCI Message Signalled Interrupts (MSI),
52   the overhead reduced by interrupt mitigation is not worth the
53   latency cost.
54 
55   10) [Experiment, Marvell value added] Is it possible to use target
56   mode to cross-connect two Linux boxes with Marvell cards?  If so,
57   creating LibATA target mode support would be very interesting.
58 
59   Target mode, for those without docs, is the ability to directly
60   connect two SATA controllers.
61 
62 */
63 
64 
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
81 
82 #define DRV_NAME	"sata_mv"
83 #define DRV_VERSION	"1.20"
84 
85 enum {
86 	/* BARs are enumerated in pci_resource_start() terms */
87 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
88 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
89 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
90 
91 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
92 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
93 
94 	MV_PCI_REG_BASE		= 0,
95 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
96 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
97 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
98 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
99 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
100 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
101 
102 	MV_SATAHC0_REG_BASE	= 0x20000,
103 	MV_FLASH_CTL		= 0x1046c,
104 	MV_GPIO_PORT_CTL	= 0x104f0,
105 	MV_RESET_CFG		= 0x180d8,
106 
107 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
108 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
109 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
110 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
111 
112 	MV_MAX_Q_DEPTH		= 32,
113 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
114 
115 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 	 * CRPB needs alignment on a 256B boundary. Size == 256B
117 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 	 */
119 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
120 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
121 	MV_MAX_SG_CT		= 256,
122 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
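	/* With these entry sizes and MV_MAX_Q_DEPTH == 32, the totals work
	 * out to: CRQB queue 32 * 32 == 1024 bytes (matching the 1KB
	 * alignment above), CRPB queue 8 * 32 == 256 bytes, and one
	 * 16 * 256 == 4096 byte SG table per command tag.
	 */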
123 
124 	MV_PORTS_PER_HC		= 4,
125 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 	MV_PORT_HC_SHIFT	= 2,
127 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 	MV_PORT_MASK		= 3,
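	/* For example, port 6 maps to HC 1 (6 >> MV_PORT_HC_SHIFT) and
	 * hard port 2 (6 & MV_PORT_MASK).
	 */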
129 
130 	/* Host Flags */
131 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
132 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
133 	/* SoC integrated controllers, no PCI interface */
134 	MV_FLAG_SOC = (1 << 28),
135 
136 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 				  ATA_FLAG_PIO_POLLING,
139 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
140 
141 	CRQB_FLAG_READ		= (1 << 0),
142 	CRQB_TAG_SHIFT		= 1,
143 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
144 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
145 	CRQB_CMD_ADDR_SHIFT	= 8,
146 	CRQB_CMD_CS		= (0x2 << 11),
147 	CRQB_CMD_LAST		= (1 << 15),
148 
149 	CRPB_FLAG_STATUS_SHIFT	= 8,
150 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
151 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
152 
153 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
154 
155 	/* PCI interface registers */
156 
157 	PCI_COMMAND_OFS		= 0xc00,
158 
159 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
160 	STOP_PCI_MASTER		= (1 << 2),
161 	PCI_MASTER_EMPTY	= (1 << 3),
162 	GLOB_SFT_RST		= (1 << 4),
163 
164 	MV_PCI_MODE		= 0xd00,
165 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
166 	MV_PCI_DISC_TIMER	= 0xd04,
167 	MV_PCI_MSI_TRIGGER	= 0xc38,
168 	MV_PCI_SERR_MASK	= 0xc28,
169 	MV_PCI_XBAR_TMOUT	= 0x1d04,
170 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
171 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
172 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
173 	MV_PCI_ERR_COMMAND	= 0x1d50,
174 
175 	PCI_IRQ_CAUSE_OFS	= 0x1d58,
176 	PCI_IRQ_MASK_OFS	= 0x1d5c,
177 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
178 
179 	PCIE_IRQ_CAUSE_OFS	= 0x1900,
180 	PCIE_IRQ_MASK_OFS	= 0x1910,
181 	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
182 
183 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
184 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
185 	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 	PORT0_ERR		= (1 << 0),	/* shift by port # */
188 	PORT0_DONE		= (1 << 1),	/* shift by port # */
189 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
190 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
191 	PCI_ERR			= (1 << 18),
192 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
193 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
194 	PORTS_0_3_COAL_DONE	= (1 << 8),
195 	PORTS_4_7_COAL_DONE	= (1 << 17),
196 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
197 	GPIO_INT		= (1 << 22),
198 	SELF_INT		= (1 << 23),
199 	TWSI_INT		= (1 << 24),
200 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
201 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
202 	HC_MAIN_RSVD_SOC 	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
203 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
204 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 				   HC_MAIN_RSVD),
206 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 				   HC_MAIN_RSVD_5),
208 	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209 
210 	/* SATAHC registers */
211 	HC_CFG_OFS		= 0,
212 
213 	HC_IRQ_CAUSE_OFS	= 0x14,
214 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
215 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
216 	DEV_IRQ			= (1 << 8),	/* shift by port # */
217 
218 	/* Shadow block registers */
219 	SHD_BLK_OFS		= 0x100,
220 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
221 
222 	/* SATA registers */
223 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
224 	SATA_ACTIVE_OFS		= 0x350,
225 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
226 	PHY_MODE3		= 0x310,
227 	PHY_MODE4		= 0x314,
228 	PHY_MODE2		= 0x330,
229 	MV5_PHY_MODE		= 0x74,
230 	MV5_LT_MODE		= 0x30,
231 	MV5_PHY_CTL		= 0x0C,
232 	SATA_INTERFACE_CTL	= 0x050,
233 
234 	MV_M2_PREAMP_MASK	= 0x7e0,
235 
236 	/* Port registers */
237 	EDMA_CFG_OFS		= 0,
238 	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
239 	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
240 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
241 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
242 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
243 
244 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
245 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
246 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
247 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
248 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
249 	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
250 	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
251 	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
252 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
253 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
254 	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
255 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
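	/* The three (1 << 8) flags above are the same bit; its meaning
	 * appears to differ per controller generation (Gen I, II, IIE).
	 */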
256 	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
257 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
258 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
259 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
260 
261 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
262 	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
263 	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
264 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
265 	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
266 
267 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
268 
269 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
270 	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
271 	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
272 	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
273 	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
274 	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
275 
276 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
277 
278 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
279 	EDMA_ERR_OVERRUN_5	= (1 << 5),
280 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
281 
282 	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
283 				  EDMA_ERR_LNK_CTRL_RX_1 |
284 				  EDMA_ERR_LNK_CTRL_RX_3 |
285 				  EDMA_ERR_LNK_CTRL_TX,
286 
287 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
288 				  EDMA_ERR_PRD_PAR |
289 				  EDMA_ERR_DEV_DCON |
290 				  EDMA_ERR_DEV_CON |
291 				  EDMA_ERR_SERR |
292 				  EDMA_ERR_SELF_DIS |
293 				  EDMA_ERR_CRQB_PAR |
294 				  EDMA_ERR_CRPB_PAR |
295 				  EDMA_ERR_INTRL_PAR |
296 				  EDMA_ERR_IORDY |
297 				  EDMA_ERR_LNK_CTRL_RX_2 |
298 				  EDMA_ERR_LNK_DATA_RX |
299 				  EDMA_ERR_LNK_DATA_TX |
300 				  EDMA_ERR_TRANS_PROTO,
301 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
302 				  EDMA_ERR_PRD_PAR |
303 				  EDMA_ERR_DEV_DCON |
304 				  EDMA_ERR_DEV_CON |
305 				  EDMA_ERR_OVERRUN_5 |
306 				  EDMA_ERR_UNDERRUN_5 |
307 				  EDMA_ERR_SELF_DIS_5 |
308 				  EDMA_ERR_CRQB_PAR |
309 				  EDMA_ERR_CRPB_PAR |
310 				  EDMA_ERR_INTRL_PAR |
311 				  EDMA_ERR_IORDY,
312 
313 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
314 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
315 
316 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
317 	EDMA_REQ_Q_PTR_SHIFT	= 5,
318 
319 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
320 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
321 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
322 	EDMA_RSP_Q_PTR_SHIFT	= 3,
323 
324 	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
325 	EDMA_EN			= (1 << 0),	/* enable EDMA */
326 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
327 	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
328 
329 	EDMA_IORDY_TMOUT	= 0x34,
330 	EDMA_ARB_CFG		= 0x38,
331 
332 	/* Host private flags (hp_flags) */
333 	MV_HP_FLAG_MSI		= (1 << 0),
334 	MV_HP_ERRATA_50XXB0	= (1 << 1),
335 	MV_HP_ERRATA_50XXB2	= (1 << 2),
336 	MV_HP_ERRATA_60X1B2	= (1 << 3),
337 	MV_HP_ERRATA_60X1C0	= (1 << 4),
338 	MV_HP_ERRATA_XX42A0	= (1 << 5),
339 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
340 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
341 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
342 	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
343 
344 	/* Port private flags (pp_flags) */
345 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
346 	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
347 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
348 };
349 
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354 
355 enum {
356 	/* DMA boundary 0xffff is required by the s/g splitting
357 	 * we need on /length/ in mv_fill_sg().
358 	 */
359 	MV_DMA_BOUNDARY		= 0xffffU,
360 
361 	/* mask of register bits containing lower 32 bits
362 	 * of EDMA request queue DMA address
363 	 */
364 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
365 
366 	/* ditto, for response queue */
367 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
368 };
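/* These masks follow from the queue alignment rules: the 1KB-aligned CRQB
 * queue leaves its low 10 address bits zero, and the 256B-aligned CRPB
 * queue leaves its low 8 bits zero, so the hardware can pack the ring
 * index into the same registers that hold the low base address.
 */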
369 
370 enum chip_type {
371 	chip_504x,
372 	chip_508x,
373 	chip_5080,
374 	chip_604x,
375 	chip_608x,
376 	chip_6042,
377 	chip_7042,
378 	chip_soc,
379 };
380 
381 /* Command ReQuest Block: 32B */
382 struct mv_crqb {
383 	__le32			sg_addr;
384 	__le32			sg_addr_hi;
385 	__le16			ctrl_flags;
386 	__le16			ata_cmd[11];
387 };
388 
389 struct mv_crqb_iie {
390 	__le32			addr;
391 	__le32			addr_hi;
392 	__le32			flags;
393 	__le32			len;
394 	__le32			ata_cmd[4];
395 };
396 
397 /* Command ResPonse Block: 8B */
398 struct mv_crpb {
399 	__le16			id;
400 	__le16			flags;
401 	__le32			tmstmp;
402 };
403 
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405 struct mv_sg {
406 	__le32			addr;
407 	__le32			flags_size;
408 	__le32			addr_hi;
409 	__le32			reserved;
410 };
411 
412 struct mv_port_priv {
413 	struct mv_crqb		*crqb;
414 	dma_addr_t		crqb_dma;
415 	struct mv_crpb		*crpb;
416 	dma_addr_t		crpb_dma;
417 	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
418 	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
419 
420 	unsigned int		req_idx;
421 	unsigned int		resp_idx;
422 
423 	u32			pp_flags;
424 };
425 
426 struct mv_port_signal {
427 	u32			amps;
428 	u32			pre;
429 };
430 
431 struct mv_host_priv {
432 	u32			hp_flags;
433 	struct mv_port_signal	signal[8];
434 	const struct mv_hw_ops	*ops;
435 	int			n_ports;
436 	void __iomem		*base;
437 	void __iomem		*main_cause_reg_addr;
438 	void __iomem		*main_mask_reg_addr;
439 	u32			irq_cause_ofs;
440 	u32			irq_mask_ofs;
441 	u32			unmask_all_irqs;
442 	/*
443 	 * These consistent DMA memory pools give us guaranteed
444 	 * alignment for hardware-accessed data structures,
445 	 * and less memory waste in accomplishing the alignment.
446 	 */
447 	struct dma_pool		*crqb_pool;
448 	struct dma_pool		*crpb_pool;
449 	struct dma_pool		*sg_tbl_pool;
450 };
451 
452 struct mv_hw_ops {
453 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 			   unsigned int port);
455 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 			   void __iomem *mmio);
458 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 			unsigned int n_hc);
460 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462 };
463 
464 static void mv_irq_clear(struct ata_port *ap);
465 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
466 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
467 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
468 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
469 static int mv_port_start(struct ata_port *ap);
470 static void mv_port_stop(struct ata_port *ap);
471 static void mv_qc_prep(struct ata_queued_cmd *qc);
472 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
473 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
474 static void mv_error_handler(struct ata_port *ap);
475 static void mv_eh_freeze(struct ata_port *ap);
476 static void mv_eh_thaw(struct ata_port *ap);
477 static void mv6_dev_config(struct ata_device *dev);
478 
479 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
480 			   unsigned int port);
481 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
482 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
483 			   void __iomem *mmio);
484 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
485 			unsigned int n_hc);
486 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
487 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
488 
489 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
490 			   unsigned int port);
491 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
492 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
493 			   void __iomem *mmio);
494 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
495 			unsigned int n_hc);
496 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
497 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
498 				      void __iomem *mmio);
499 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
500 				      void __iomem *mmio);
501 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
502 				  void __iomem *mmio, unsigned int n_hc);
503 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
504 				      void __iomem *mmio);
505 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
506 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
507 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
508 			     unsigned int port_no);
509 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
510 			void __iomem *port_mmio, int want_ncq);
511 static int __mv_stop_dma(struct ata_port *ap);
512 
513 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
514  * because we have to allow room for worst case splitting of
515  * PRDs for 64K boundaries in mv_fill_sg().
516  */
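/* In other words: each of the (MV_MAX_SG_CT / 2) == 128 S/G entries
 * accepted from the SCSI layer may be split in two by mv_fill_sg(),
 * which still fits within the MV_MAX_SG_CT == 256 ePRD slots of a
 * single SG table.
 */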
517 static struct scsi_host_template mv5_sht = {
518 	.module			= THIS_MODULE,
519 	.name			= DRV_NAME,
520 	.ioctl			= ata_scsi_ioctl,
521 	.queuecommand		= ata_scsi_queuecmd,
522 	.can_queue		= ATA_DEF_QUEUE,
523 	.this_id		= ATA_SHT_THIS_ID,
524 	.sg_tablesize		= MV_MAX_SG_CT / 2,
525 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
526 	.emulated		= ATA_SHT_EMULATED,
527 	.use_clustering		= 1,
528 	.proc_name		= DRV_NAME,
529 	.dma_boundary		= MV_DMA_BOUNDARY,
530 	.slave_configure	= ata_scsi_slave_config,
531 	.slave_destroy		= ata_scsi_slave_destroy,
532 	.bios_param		= ata_std_bios_param,
533 };
534 
535 static struct scsi_host_template mv6_sht = {
536 	.module			= THIS_MODULE,
537 	.name			= DRV_NAME,
538 	.ioctl			= ata_scsi_ioctl,
539 	.queuecommand		= ata_scsi_queuecmd,
540 	.change_queue_depth	= ata_scsi_change_queue_depth,
541 	.can_queue		= MV_MAX_Q_DEPTH - 1,
542 	.this_id		= ATA_SHT_THIS_ID,
543 	.sg_tablesize		= MV_MAX_SG_CT / 2,
544 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
545 	.emulated		= ATA_SHT_EMULATED,
546 	.use_clustering		= 1,
547 	.proc_name		= DRV_NAME,
548 	.dma_boundary		= MV_DMA_BOUNDARY,
549 	.slave_configure	= ata_scsi_slave_config,
550 	.slave_destroy		= ata_scsi_slave_destroy,
551 	.bios_param		= ata_std_bios_param,
552 };
553 
554 static const struct ata_port_operations mv5_ops = {
555 	.tf_load		= ata_tf_load,
556 	.tf_read		= ata_tf_read,
557 	.check_status		= ata_check_status,
558 	.exec_command		= ata_exec_command,
559 	.dev_select		= ata_std_dev_select,
560 
561 	.cable_detect		= ata_cable_sata,
562 
563 	.qc_prep		= mv_qc_prep,
564 	.qc_issue		= mv_qc_issue,
565 	.data_xfer		= ata_data_xfer,
566 
567 	.irq_clear		= mv_irq_clear,
568 	.irq_on			= ata_irq_on,
569 
570 	.error_handler		= mv_error_handler,
571 	.freeze			= mv_eh_freeze,
572 	.thaw			= mv_eh_thaw,
573 
574 	.scr_read		= mv5_scr_read,
575 	.scr_write		= mv5_scr_write,
576 
577 	.port_start		= mv_port_start,
578 	.port_stop		= mv_port_stop,
579 };
580 
581 static const struct ata_port_operations mv6_ops = {
582 	.dev_config             = mv6_dev_config,
583 	.tf_load		= ata_tf_load,
584 	.tf_read		= ata_tf_read,
585 	.check_status		= ata_check_status,
586 	.exec_command		= ata_exec_command,
587 	.dev_select		= ata_std_dev_select,
588 
589 	.cable_detect		= ata_cable_sata,
590 
591 	.qc_prep		= mv_qc_prep,
592 	.qc_issue		= mv_qc_issue,
593 	.data_xfer		= ata_data_xfer,
594 
595 	.irq_clear		= mv_irq_clear,
596 	.irq_on			= ata_irq_on,
597 
598 	.error_handler		= mv_error_handler,
599 	.freeze			= mv_eh_freeze,
600 	.thaw			= mv_eh_thaw,
601 	.qc_defer		= ata_std_qc_defer,
602 
603 	.scr_read		= mv_scr_read,
604 	.scr_write		= mv_scr_write,
605 
606 	.port_start		= mv_port_start,
607 	.port_stop		= mv_port_stop,
608 };
609 
610 static const struct ata_port_operations mv_iie_ops = {
611 	.tf_load		= ata_tf_load,
612 	.tf_read		= ata_tf_read,
613 	.check_status		= ata_check_status,
614 	.exec_command		= ata_exec_command,
615 	.dev_select		= ata_std_dev_select,
616 
617 	.cable_detect		= ata_cable_sata,
618 
619 	.qc_prep		= mv_qc_prep_iie,
620 	.qc_issue		= mv_qc_issue,
621 	.data_xfer		= ata_data_xfer,
622 
623 	.irq_clear		= mv_irq_clear,
624 	.irq_on			= ata_irq_on,
625 
626 	.error_handler		= mv_error_handler,
627 	.freeze			= mv_eh_freeze,
628 	.thaw			= mv_eh_thaw,
629 	.qc_defer		= ata_std_qc_defer,
630 
631 	.scr_read		= mv_scr_read,
632 	.scr_write		= mv_scr_write,
633 
634 	.port_start		= mv_port_start,
635 	.port_stop		= mv_port_stop,
636 };
637 
638 static const struct ata_port_info mv_port_info[] = {
639 	{  /* chip_504x */
640 		.flags		= MV_COMMON_FLAGS,
641 		.pio_mask	= 0x1f,	/* pio0-4 */
642 		.udma_mask	= ATA_UDMA6,
643 		.port_ops	= &mv5_ops,
644 	},
645 	{  /* chip_508x */
646 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
647 		.pio_mask	= 0x1f,	/* pio0-4 */
648 		.udma_mask	= ATA_UDMA6,
649 		.port_ops	= &mv5_ops,
650 	},
651 	{  /* chip_5080 */
652 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
653 		.pio_mask	= 0x1f,	/* pio0-4 */
654 		.udma_mask	= ATA_UDMA6,
655 		.port_ops	= &mv5_ops,
656 	},
657 	{  /* chip_604x */
658 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
659 				  ATA_FLAG_NCQ,
660 		.pio_mask	= 0x1f,	/* pio0-4 */
661 		.udma_mask	= ATA_UDMA6,
662 		.port_ops	= &mv6_ops,
663 	},
664 	{  /* chip_608x */
665 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
666 				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
667 		.pio_mask	= 0x1f,	/* pio0-4 */
668 		.udma_mask	= ATA_UDMA6,
669 		.port_ops	= &mv6_ops,
670 	},
671 	{  /* chip_6042 */
672 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
673 				  ATA_FLAG_NCQ,
674 		.pio_mask	= 0x1f,	/* pio0-4 */
675 		.udma_mask	= ATA_UDMA6,
676 		.port_ops	= &mv_iie_ops,
677 	},
678 	{  /* chip_7042 */
679 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
680 				  ATA_FLAG_NCQ,
681 		.pio_mask	= 0x1f,	/* pio0-4 */
682 		.udma_mask	= ATA_UDMA6,
683 		.port_ops	= &mv_iie_ops,
684 	},
685 	{  /* chip_soc */
686 		.flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
687 		.pio_mask = 0x1f,      /* pio0-4 */
688 		.udma_mask = ATA_UDMA6,
689 		.port_ops = &mv_iie_ops,
690 	},
691 };
692 
693 static const struct pci_device_id mv_pci_tbl[] = {
694 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
695 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
696 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
697 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
698 	/* RocketRAID 1740/174x have different identifiers */
699 	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
700 	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
701 
702 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
703 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
704 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
705 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
706 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
707 
708 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
709 
710 	/* Adaptec 1430SA */
711 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
712 
713 	/* Marvell 7042 support */
714 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
715 
716 	/* Highpoint RocketRAID PCIe series */
717 	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
718 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
719 
720 	{ }			/* terminate list */
721 };
722 
723 static const struct mv_hw_ops mv5xxx_ops = {
724 	.phy_errata		= mv5_phy_errata,
725 	.enable_leds		= mv5_enable_leds,
726 	.read_preamp		= mv5_read_preamp,
727 	.reset_hc		= mv5_reset_hc,
728 	.reset_flash		= mv5_reset_flash,
729 	.reset_bus		= mv5_reset_bus,
730 };
731 
732 static const struct mv_hw_ops mv6xxx_ops = {
733 	.phy_errata		= mv6_phy_errata,
734 	.enable_leds		= mv6_enable_leds,
735 	.read_preamp		= mv6_read_preamp,
736 	.reset_hc		= mv6_reset_hc,
737 	.reset_flash		= mv6_reset_flash,
738 	.reset_bus		= mv_reset_pci_bus,
739 };
740 
741 static const struct mv_hw_ops mv_soc_ops = {
742 	.phy_errata		= mv6_phy_errata,
743 	.enable_leds		= mv_soc_enable_leds,
744 	.read_preamp		= mv_soc_read_preamp,
745 	.reset_hc		= mv_soc_reset_hc,
746 	.reset_flash		= mv_soc_reset_flash,
747 	.reset_bus		= mv_soc_reset_bus,
748 };
749 
750 /*
751  * Functions
752  */
753 
754 static inline void writelfl(unsigned long data, void __iomem *addr)
755 {
756 	writel(data, addr);
757 	(void) readl(addr);	/* flush to avoid PCI posted write */
758 }
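/* writelfl() is just writel() followed by a read-back of the same
 * register; it is used wherever a write must be flushed past PCI write
 * posting before the code continues.
 */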
759 
760 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
761 {
762 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
763 }
764 
765 static inline unsigned int mv_hc_from_port(unsigned int port)
766 {
767 	return port >> MV_PORT_HC_SHIFT;
768 }
769 
770 static inline unsigned int mv_hardport_from_port(unsigned int port)
771 {
772 	return port & MV_PORT_MASK;
773 }
774 
775 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
776 						 unsigned int port)
777 {
778 	return mv_hc_base(base, mv_hc_from_port(port));
779 }
780 
781 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
782 {
783 	return  mv_hc_base_from_port(base, port) +
784 		MV_SATAHC_ARBTR_REG_SZ +
785 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
786 }
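/* Worked example: for port 5, mv_hc_from_port(5) == 1 and
 * mv_hardport_from_port(5) == 1, so mv_port_base() returns
 * base + 0x20000 + 1 * 0x10000 + 0x2000 + 1 * 0x2000 == base + 0x34000.
 */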
787 
788 static inline void __iomem *mv_host_base(struct ata_host *host)
789 {
790 	struct mv_host_priv *hpriv = host->private_data;
791 	return hpriv->base;
792 }
793 
794 static inline void __iomem *mv_ap_base(struct ata_port *ap)
795 {
796 	return mv_port_base(mv_host_base(ap->host), ap->port_no);
797 }
798 
799 static inline int mv_get_hc_count(unsigned long port_flags)
800 {
801 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
802 }
803 
804 static void mv_irq_clear(struct ata_port *ap)
805 {
806 }
807 
808 static void mv_set_edma_ptrs(void __iomem *port_mmio,
809 			     struct mv_host_priv *hpriv,
810 			     struct mv_port_priv *pp)
811 {
812 	u32 index;
813 
814 	/*
815 	 * initialize request queue
816 	 */
817 	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
818 
819 	WARN_ON(pp->crqb_dma & 0x3ff);
820 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
821 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
822 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
823 
824 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
825 		writelfl((pp->crqb_dma & 0xffffffff) | index,
826 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
827 	else
828 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
829 
830 	/*
831 	 * initialize response queue
832 	 */
833 	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
834 
835 	WARN_ON(pp->crpb_dma & 0xff);
836 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
837 
838 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
839 		writelfl((pp->crpb_dma & 0xffffffff) | index,
840 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
841 	else
842 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
843 
844 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
845 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
846 }
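/* Notes on the above: "(dma >> 16) >> 16" extracts the upper 32 bits of
 * a queue address without a shift by 32, which would be undefined when
 * dma_addr_t is a 32-bit type.  The low base address and the ring index
 * can share one register because the queue alignment keeps the index
 * bits clear in the base (see EDMA_REQ_Q_BASE_LO_MASK and friends).
 */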
847 
848 /**
849  *      mv_start_dma - Enable eDMA engine
850  *      @ap: ATA channel to manipulate
851  *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
852  *
853  *      Verify the local cache of the eDMA state is accurate with a
854  *      WARN_ON.
855  *
856  *      LOCKING:
857  *      Inherited from caller.
858  */
859 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
860 			 struct mv_port_priv *pp, u8 protocol)
861 {
862 	int want_ncq = (protocol == ATA_PROT_NCQ);
863 
864 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
865 		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
866 		if (want_ncq != using_ncq)
867 			__mv_stop_dma(ap);
868 	}
869 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
870 		struct mv_host_priv *hpriv = ap->host->private_data;
871 		int hard_port = mv_hardport_from_port(ap->port_no);
872 		void __iomem *hc_mmio = mv_hc_base_from_port(
873 					mv_host_base(ap->host), hard_port);
874 		u32 hc_irq_cause, ipending;
875 
876 		/* clear EDMA event indicators, if any */
877 		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
878 
879 		/* clear EDMA interrupt indicator, if any */
880 		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
881 		ipending = (DEV_IRQ << hard_port) |
882 				(CRPB_DMA_DONE << hard_port);
883 		if (hc_irq_cause & ipending) {
884 			writelfl(hc_irq_cause & ~ipending,
885 				 hc_mmio + HC_IRQ_CAUSE_OFS);
886 		}
887 
888 		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
889 
890 		/* clear FIS IRQ Cause */
891 		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
892 
893 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
894 
895 		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
896 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
897 	}
898 	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
899 }
900 
901 /**
902  *      __mv_stop_dma - Disable eDMA engine
903  *      @ap: ATA channel to manipulate
904  *
905  *      Verify the local cache of the eDMA state is accurate with a
906  *      WARN_ON.
907  *
908  *      LOCKING:
909  *      Inherited from caller.
910  */
911 static int __mv_stop_dma(struct ata_port *ap)
912 {
913 	void __iomem *port_mmio = mv_ap_base(ap);
914 	struct mv_port_priv *pp	= ap->private_data;
915 	u32 reg;
916 	int i, err = 0;
917 
918 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
919 		/* Disable EDMA if active.   The disable bit auto clears.
920 		 */
921 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
922 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
923 	} else {
924 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
925 	}
926 
927 	/* now properly wait for the eDMA to stop */
928 	for (i = 1000; i > 0; i--) {
929 		reg = readl(port_mmio + EDMA_CMD_OFS);
930 		if (!(reg & EDMA_EN))
931 			break;
932 
933 		udelay(100);
934 	}
935 
936 	if (reg & EDMA_EN) {
937 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
938 		err = -EIO;
939 	}
940 
941 	return err;
942 }
943 
944 static int mv_stop_dma(struct ata_port *ap)
945 {
946 	unsigned long flags;
947 	int rc;
948 
949 	spin_lock_irqsave(&ap->host->lock, flags);
950 	rc = __mv_stop_dma(ap);
951 	spin_unlock_irqrestore(&ap->host->lock, flags);
952 
953 	return rc;
954 }
955 
956 #ifdef ATA_DEBUG
957 static void mv_dump_mem(void __iomem *start, unsigned bytes)
958 {
959 	int b, w;
960 	for (b = 0; b < bytes; ) {
961 		DPRINTK("%p: ", start + b);
962 		for (w = 0; b < bytes && w < 4; w++) {
963 			printk("%08x ", readl(start + b));
964 			b += sizeof(u32);
965 		}
966 		printk("\n");
967 	}
968 }
969 #endif
970 
971 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
972 {
973 #ifdef ATA_DEBUG
974 	int b, w;
975 	u32 dw;
976 	for (b = 0; b < bytes; ) {
977 		DPRINTK("%02x: ", b);
978 		for (w = 0; b < bytes && w < 4; w++) {
979 			(void) pci_read_config_dword(pdev, b, &dw);
980 			printk("%08x ", dw);
981 			b += sizeof(u32);
982 		}
983 		printk("\n");
984 	}
985 #endif
986 }
987 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
988 			     struct pci_dev *pdev)
989 {
990 #ifdef ATA_DEBUG
991 	void __iomem *hc_base = mv_hc_base(mmio_base,
992 					   port >> MV_PORT_HC_SHIFT);
993 	void __iomem *port_base;
994 	int start_port, num_ports, p, start_hc, num_hcs, hc;
995 
996 	if (0 > port) {
997 		start_hc = start_port = 0;
998 		num_ports = 8;		/* should be benign for 4-port devs */
999 		num_hcs = 2;
1000 	} else {
1001 		start_hc = port >> MV_PORT_HC_SHIFT;
1002 		start_port = port;
1003 		num_ports = num_hcs = 1;
1004 	}
1005 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1006 		num_ports > 1 ? num_ports - 1 : start_port);
1007 
1008 	if (NULL != pdev) {
1009 		DPRINTK("PCI config space regs:\n");
1010 		mv_dump_pci_cfg(pdev, 0x68);
1011 	}
1012 	DPRINTK("PCI regs:\n");
1013 	mv_dump_mem(mmio_base+0xc00, 0x3c);
1014 	mv_dump_mem(mmio_base+0xd00, 0x34);
1015 	mv_dump_mem(mmio_base+0xf00, 0x4);
1016 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
1017 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1018 		hc_base = mv_hc_base(mmio_base, hc);
1019 		DPRINTK("HC regs (HC %i):\n", hc);
1020 		mv_dump_mem(hc_base, 0x1c);
1021 	}
1022 	for (p = start_port; p < start_port + num_ports; p++) {
1023 		port_base = mv_port_base(mmio_base, p);
1024 		DPRINTK("EDMA regs (port %i):\n", p);
1025 		mv_dump_mem(port_base, 0x54);
1026 		DPRINTK("SATA regs (port %i):\n", p);
1027 		mv_dump_mem(port_base+0x300, 0x60);
1028 	}
1029 #endif
1030 }
1031 
1032 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1033 {
1034 	unsigned int ofs;
1035 
1036 	switch (sc_reg_in) {
1037 	case SCR_STATUS:
1038 	case SCR_CONTROL:
1039 	case SCR_ERROR:
1040 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1041 		break;
1042 	case SCR_ACTIVE:
1043 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
1044 		break;
1045 	default:
1046 		ofs = 0xffffffffU;
1047 		break;
1048 	}
1049 	return ofs;
1050 }
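/* Assuming the standard libata SCR indices (SCR_STATUS 0, SCR_ERROR 1,
 * SCR_CONTROL 2, SCR_ACTIVE 3), this places SStatus at port base + 0x300,
 * SError at +0x304, SControl at +0x308 and SActive at +0x350.
 */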
1051 
1052 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1053 {
1054 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1055 
1056 	if (ofs != 0xffffffffU) {
1057 		*val = readl(mv_ap_base(ap) + ofs);
1058 		return 0;
1059 	} else
1060 		return -EINVAL;
1061 }
1062 
1063 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1064 {
1065 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1066 
1067 	if (ofs != 0xffffffffU) {
1068 		writelfl(val, mv_ap_base(ap) + ofs);
1069 		return 0;
1070 	} else
1071 		return -EINVAL;
1072 }
1073 
1074 static void mv6_dev_config(struct ata_device *adev)
1075 {
1076 	/*
1077 	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1078 	 * See mv_qc_prep() for more info.
1079 	 */
1080 	if (adev->flags & ATA_DFLAG_NCQ)
1081 		if (adev->max_sectors > ATA_MAX_SECTORS)
1082 			adev->max_sectors = ATA_MAX_SECTORS;
1083 }
1084 
1085 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1086 			void __iomem *port_mmio, int want_ncq)
1087 {
1088 	u32 cfg;
1089 
1090 	/* set up non-NCQ EDMA configuration */
1091 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1092 
1093 	if (IS_GEN_I(hpriv))
1094 		cfg |= (1 << 8);	/* enab config burst size mask */
1095 
1096 	else if (IS_GEN_II(hpriv))
1097 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1098 
1099 	else if (IS_GEN_IIE(hpriv)) {
1100 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1101 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1102 		cfg |= (1 << 18);	/* enab early completion */
1103 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1104 	}
1105 
1106 	if (want_ncq) {
1107 		cfg |= EDMA_CFG_NCQ;
1108 		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1109 	} else
1110 		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1111 
1112 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1113 }
1114 
1115 static void mv_port_free_dma_mem(struct ata_port *ap)
1116 {
1117 	struct mv_host_priv *hpriv = ap->host->private_data;
1118 	struct mv_port_priv *pp = ap->private_data;
1119 	int tag;
1120 
1121 	if (pp->crqb) {
1122 		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1123 		pp->crqb = NULL;
1124 	}
1125 	if (pp->crpb) {
1126 		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1127 		pp->crpb = NULL;
1128 	}
1129 	/*
1130 	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1131 	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1132 	 */
1133 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1134 		if (pp->sg_tbl[tag]) {
1135 			if (tag == 0 || !IS_GEN_I(hpriv))
1136 				dma_pool_free(hpriv->sg_tbl_pool,
1137 					      pp->sg_tbl[tag],
1138 					      pp->sg_tbl_dma[tag]);
1139 			pp->sg_tbl[tag] = NULL;
1140 		}
1141 	}
1142 }
1143 
1144 /**
1145  *      mv_port_start - Port specific init/start routine.
1146  *      @ap: ATA channel to manipulate
1147  *
1148  *      Allocate and point to DMA memory, init port private memory,
1149  *      zero indices.
1150  *
1151  *      LOCKING:
1152  *      Inherited from caller.
1153  */
1154 static int mv_port_start(struct ata_port *ap)
1155 {
1156 	struct device *dev = ap->host->dev;
1157 	struct mv_host_priv *hpriv = ap->host->private_data;
1158 	struct mv_port_priv *pp;
1159 	void __iomem *port_mmio = mv_ap_base(ap);
1160 	unsigned long flags;
1161 	int tag;
1162 
1163 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1164 	if (!pp)
1165 		return -ENOMEM;
1166 	ap->private_data = pp;
1167 
1168 	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1169 	if (!pp->crqb)
1170 		return -ENOMEM;
1171 	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1172 
1173 	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1174 	if (!pp->crpb)
1175 		goto out_port_free_dma_mem;
1176 	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1177 
1178 	/*
1179 	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1180 	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1181 	 */
1182 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1183 		if (tag == 0 || !IS_GEN_I(hpriv)) {
1184 			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1185 					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1186 			if (!pp->sg_tbl[tag])
1187 				goto out_port_free_dma_mem;
1188 		} else {
1189 			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1190 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1191 		}
1192 	}
1193 
1194 	spin_lock_irqsave(&ap->host->lock, flags);
1195 
1196 	mv_edma_cfg(pp, hpriv, port_mmio, 0);
1197 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
1198 
1199 	spin_unlock_irqrestore(&ap->host->lock, flags);
1200 
1201 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
1202 	 * we'll be unable to send non-data, PIO, etc. due to restricted access
1203 	 * to shadow regs.
1204 	 */
1205 	return 0;
1206 
1207 out_port_free_dma_mem:
1208 	mv_port_free_dma_mem(ap);
1209 	return -ENOMEM;
1210 }
1211 
1212 /**
1213  *      mv_port_stop - Port specific cleanup/stop routine.
1214  *      @ap: ATA channel to manipulate
1215  *
1216  *      Stop DMA, cleanup port memory.
1217  *
1218  *      LOCKING:
1219  *      This routine uses the host lock to protect the DMA stop.
1220  */
1221 static void mv_port_stop(struct ata_port *ap)
1222 {
1223 	mv_stop_dma(ap);
1224 	mv_port_free_dma_mem(ap);
1225 }
1226 
1227 /**
1228  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1229  *      @qc: queued command whose SG list to source from
1230  *
1231  *      Populate the SG list and mark the last entry.
1232  *
1233  *      LOCKING:
1234  *      Inherited from caller.
1235  */
1236 static void mv_fill_sg(struct ata_queued_cmd *qc)
1237 {
1238 	struct mv_port_priv *pp = qc->ap->private_data;
1239 	struct scatterlist *sg;
1240 	struct mv_sg *mv_sg, *last_sg = NULL;
1241 	unsigned int si;
1242 
1243 	mv_sg = pp->sg_tbl[qc->tag];
1244 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1245 		dma_addr_t addr = sg_dma_address(sg);
1246 		u32 sg_len = sg_dma_len(sg);
1247 
1248 		while (sg_len) {
1249 			u32 offset = addr & 0xffff;
1250 			u32 len = sg_len;
1251 
1252 			if ((offset + sg_len > 0x10000))
1253 				len = 0x10000 - offset;
1254 
1255 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1256 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1257 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1258 
1259 			sg_len -= len;
1260 			addr += len;
1261 
1262 			last_sg = mv_sg;
1263 			mv_sg++;
1264 		}
1265 	}
1266 
1267 	if (likely(last_sg))
1268 		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1269 }
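/* Example of the length split above: a 0xf000-byte segment at DMA address
 * 0x1234f000 sits at offset 0xf000 within its 64K window, so the first
 * ePRD covers only 0x1000 bytes and a second ePRD picks up the remaining
 * 0xe000 bytes starting at 0x12350000.
 */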
1270 
1271 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1272 {
1273 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1274 		(last ? CRQB_CMD_LAST : 0);
1275 	*cmdw = cpu_to_le16(tmp);
1276 }
1277 
1278 /**
1279  *      mv_qc_prep - Host specific command preparation.
1280  *      @qc: queued command to prepare
1281  *
1282  *      This routine simply redirects to the general purpose routine
1283  *      if command is not DMA.  Else, it handles prep of the CRQB
1284  *      (command request block), does some sanity checking, and calls
1285  *      the SG load routine.
1286  *
1287  *      LOCKING:
1288  *      Inherited from caller.
1289  */
1290 static void mv_qc_prep(struct ata_queued_cmd *qc)
1291 {
1292 	struct ata_port *ap = qc->ap;
1293 	struct mv_port_priv *pp = ap->private_data;
1294 	__le16 *cw;
1295 	struct ata_taskfile *tf;
1296 	u16 flags = 0;
1297 	unsigned in_index;
1298 
1299 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1300 	    (qc->tf.protocol != ATA_PROT_NCQ))
1301 		return;
1302 
1303 	/* Fill in command request block
1304 	 */
1305 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1306 		flags |= CRQB_FLAG_READ;
1307 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1308 	flags |= qc->tag << CRQB_TAG_SHIFT;
1309 
1310 	/* get current queue index from software */
1311 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1312 
1313 	pp->crqb[in_index].sg_addr =
1314 		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1315 	pp->crqb[in_index].sg_addr_hi =
1316 		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1317 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1318 
1319 	cw = &pp->crqb[in_index].ata_cmd[0];
1320 	tf = &qc->tf;
1321 
1322 	/* Sadly, the CRQB cannot accommodate all registers--there are
1323 	 * only 11 bytes...so we must pick and choose required
1324 	 * registers based on the command.  So, we drop feature and
1325 	 * hob_feature for [RW] DMA commands, but they are needed for
1326 	 * NCQ.  NCQ will drop hob_nsect.
1327 	 */
1328 	switch (tf->command) {
1329 	case ATA_CMD_READ:
1330 	case ATA_CMD_READ_EXT:
1331 	case ATA_CMD_WRITE:
1332 	case ATA_CMD_WRITE_EXT:
1333 	case ATA_CMD_WRITE_FUA_EXT:
1334 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1335 		break;
1336 	case ATA_CMD_FPDMA_READ:
1337 	case ATA_CMD_FPDMA_WRITE:
1338 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1339 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1340 		break;
1341 	default:
1342 		/* The only other commands EDMA supports in non-queued and
1343 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1344 		 * of which are defined/used by Linux.  If we get here, this
1345 		 * driver needs work.
1346 		 *
1347 		 * FIXME: modify libata to give qc_prep a return value and
1348 		 * return error here.
1349 		 */
1350 		BUG_ON(tf->command);
1351 		break;
1352 	}
1353 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1354 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1355 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1356 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1357 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1358 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1359 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1360 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1361 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1362 
1363 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1364 		return;
1365 	mv_fill_sg(qc);
1366 }
1367 
1368 /**
1369  *      mv_qc_prep_iie - Host specific command preparation.
1370  *      @qc: queued command to prepare
1371  *
1372  *      This routine simply redirects to the general purpose routine
1373  *      if command is not DMA.  Else, it handles prep of the CRQB
1374  *      (command request block), does some sanity checking, and calls
1375  *      the SG load routine.
1376  *
1377  *      LOCKING:
1378  *      Inherited from caller.
1379  */
1380 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1381 {
1382 	struct ata_port *ap = qc->ap;
1383 	struct mv_port_priv *pp = ap->private_data;
1384 	struct mv_crqb_iie *crqb;
1385 	struct ata_taskfile *tf;
1386 	unsigned in_index;
1387 	u32 flags = 0;
1388 
1389 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1390 	    (qc->tf.protocol != ATA_PROT_NCQ))
1391 		return;
1392 
1393 	/* Fill in Gen IIE command request block
1394 	 */
1395 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1396 		flags |= CRQB_FLAG_READ;
1397 
1398 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1399 	flags |= qc->tag << CRQB_TAG_SHIFT;
1400 	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1401 
1402 	/* get current queue index from software */
1403 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1404 
1405 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1406 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1407 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1408 	crqb->flags = cpu_to_le32(flags);
1409 
1410 	tf = &qc->tf;
1411 	crqb->ata_cmd[0] = cpu_to_le32(
1412 			(tf->command << 16) |
1413 			(tf->feature << 24)
1414 		);
1415 	crqb->ata_cmd[1] = cpu_to_le32(
1416 			(tf->lbal << 0) |
1417 			(tf->lbam << 8) |
1418 			(tf->lbah << 16) |
1419 			(tf->device << 24)
1420 		);
1421 	crqb->ata_cmd[2] = cpu_to_le32(
1422 			(tf->hob_lbal << 0) |
1423 			(tf->hob_lbam << 8) |
1424 			(tf->hob_lbah << 16) |
1425 			(tf->hob_feature << 24)
1426 		);
1427 	crqb->ata_cmd[3] = cpu_to_le32(
1428 			(tf->nsect << 0) |
1429 			(tf->hob_nsect << 8)
1430 		);
1431 
1432 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1433 		return;
1434 	mv_fill_sg(qc);
1435 }
1436 
1437 /**
1438  *      mv_qc_issue - Initiate a command to the host
1439  *      @qc: queued command to start
1440  *
1441  *      This routine simply redirects to the general purpose routine
1442  *      if command is not DMA.  Else, it sanity checks our local
1443  *      caches of the request producer/consumer indices then enables
1444  *      DMA and bumps the request producer index.
1445  *
1446  *      LOCKING:
1447  *      Inherited from caller.
1448  */
1449 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1450 {
1451 	struct ata_port *ap = qc->ap;
1452 	void __iomem *port_mmio = mv_ap_base(ap);
1453 	struct mv_port_priv *pp = ap->private_data;
1454 	u32 in_index;
1455 
1456 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1457 	    (qc->tf.protocol != ATA_PROT_NCQ)) {
1458 		/* We're about to send a non-EDMA capable command to the
1459 		 * port.  Turn off EDMA so there won't be problems accessing
1460 		 * the shadow block and other registers.
1461 		 */
1462 		__mv_stop_dma(ap);
1463 		return ata_qc_issue_prot(qc);
1464 	}
1465 
1466 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1467 
1468 	pp->req_idx++;
1469 
1470 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1471 
1472 	/* and write the request in pointer to kick the EDMA to life */
1473 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1474 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1475 
1476 	return 0;
1477 }
1478 
1479 /**
1480  *      mv_err_intr - Handle error interrupts on the port
1481  *      @ap: ATA channel to manipulate
1482  *      @qc: affected command, or NULL if none is active
1483  *
1484  *      In most cases, just clear the interrupt and move on.  However,
1485  *      some cases require an eDMA reset, which is done right before
1486  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1487  *      clear of pending errors in the SATA SERROR register.  Finally,
1488  *      if the port disabled DMA, update our cached copy to match.
1489  *
1490  *      LOCKING:
1491  *      Inherited from caller.
1492  */
1493 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1494 {
1495 	void __iomem *port_mmio = mv_ap_base(ap);
1496 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1497 	struct mv_port_priv *pp = ap->private_data;
1498 	struct mv_host_priv *hpriv = ap->host->private_data;
1499 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1500 	unsigned int action = 0, err_mask = 0;
1501 	struct ata_eh_info *ehi = &ap->link.eh_info;
1502 
1503 	ata_ehi_clear_desc(ehi);
1504 
1505 	if (!edma_enabled) {
1506 		/* just a guess: do we need to do this? should we
1507 		 * expand this, and do it in all cases?
1508 		 */
1509 		sata_scr_read(&ap->link, SCR_ERROR, &serr);
1510 		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1511 	}
1512 
1513 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1514 
1515 	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1516 
1517 	/*
1518 	 * all generations share these EDMA error cause bits
1519 	 */
1520 
1521 	if (edma_err_cause & EDMA_ERR_DEV)
1522 		err_mask |= AC_ERR_DEV;
1523 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1524 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1525 			EDMA_ERR_INTRL_PAR)) {
1526 		err_mask |= AC_ERR_ATA_BUS;
1527 		action |= ATA_EH_HARDRESET;
1528 		ata_ehi_push_desc(ehi, "parity error");
1529 	}
1530 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1531 		ata_ehi_hotplugged(ehi);
1532 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1533 			"dev disconnect" : "dev connect");
1534 		action |= ATA_EH_HARDRESET;
1535 	}
1536 
1537 	if (IS_GEN_I(hpriv)) {
1538 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1539 
1540 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1541 			pp = ap->private_data;
1542 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1543 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1544 		}
1545 	} else {
1546 		eh_freeze_mask = EDMA_EH_FREEZE;
1547 
1548 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1549 			pp = ap->private_data;
1550 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1551 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1552 		}
1553 
1554 		if (edma_err_cause & EDMA_ERR_SERR) {
1555 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
1556 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1557 			err_mask = AC_ERR_ATA_BUS;
1558 			action |= ATA_EH_HARDRESET;
1559 		}
1560 	}
1561 
1562 	/* Clear EDMA now that SERR cleanup done */
1563 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1564 
1565 	if (!err_mask) {
1566 		err_mask = AC_ERR_OTHER;
1567 		action |= ATA_EH_HARDRESET;
1568 	}
1569 
1570 	ehi->serror |= serr;
1571 	ehi->action |= action;
1572 
1573 	if (qc)
1574 		qc->err_mask |= err_mask;
1575 	else
1576 		ehi->err_mask |= err_mask;
1577 
1578 	if (edma_err_cause & eh_freeze_mask)
1579 		ata_port_freeze(ap);
1580 	else
1581 		ata_port_abort(ap);
1582 }
1583 
1584 static void mv_intr_pio(struct ata_port *ap)
1585 {
1586 	struct ata_queued_cmd *qc;
1587 	u8 ata_status;
1588 
1589 	/* ignore spurious intr if drive still BUSY */
1590 	ata_status = readb(ap->ioaddr.status_addr);
1591 	if (unlikely(ata_status & ATA_BUSY))
1592 		return;
1593 
1594 	/* get active ATA command */
1595 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1596 	if (unlikely(!qc))			/* no active tag */
1597 		return;
1598 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1599 		return;
1600 
1601 	/* and finally, complete the ATA command */
1602 	qc->err_mask |= ac_err_mask(ata_status);
1603 	ata_qc_complete(qc);
1604 }
1605 
1606 static void mv_intr_edma(struct ata_port *ap)
1607 {
1608 	void __iomem *port_mmio = mv_ap_base(ap);
1609 	struct mv_host_priv *hpriv = ap->host->private_data;
1610 	struct mv_port_priv *pp = ap->private_data;
1611 	struct ata_queued_cmd *qc;
1612 	u32 out_index, in_index;
1613 	bool work_done = false;
1614 
1615 	/* get h/w response queue pointer */
1616 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1617 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1618 
1619 	while (1) {
1620 		u16 status;
1621 		unsigned int tag;
1622 
1623 		/* get s/w response queue last-read pointer, and compare */
1624 		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1625 		if (in_index == out_index)
1626 			break;
1627 
1628 		/* 50xx: get active ATA command */
1629 		if (IS_GEN_I(hpriv))
1630 			tag = ap->link.active_tag;
1631 
1632 		/* Gen II/IIE: get active ATA command via tag, to enable
1633 		 * support for queueing.  this works transparently for
1634 		 * queued and non-queued modes.
1635 		 */
1636 		else
1637 			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1638 
1639 		qc = ata_qc_from_tag(ap, tag);
1640 
1641 		/* For non-NCQ mode, the lower 8 bits of status
1642 		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1643 		 * which should be zero if all went well.
1644 		 */
1645 		status = le16_to_cpu(pp->crpb[out_index].flags);
1646 		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1647 			mv_err_intr(ap, qc);
1648 			return;
1649 		}
1650 
1651 		/* and finally, complete the ATA command */
1652 		if (qc) {
1653 			qc->err_mask |=
1654 				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1655 			ata_qc_complete(qc);
1656 		}
1657 
1658 		/* advance software response queue pointer, to
1659 		 * indicate (after the loop completes) to hardware
1660 		 * that we have consumed a response queue entry.
1661 		 */
1662 		work_done = true;
1663 		pp->resp_idx++;
1664 	}
1665 
1666 	if (work_done)
1667 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1668 			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1669 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1670 }
1671 
1672 /**
1673  *      mv_host_intr - Handle all interrupts on the given host controller
1674  *      @host: host specific structure
1675  *      @relevant: port error bits relevant to this host controller
1676  *      @hc: which host controller we're to look at
1677  *
1678  *      Read then write clear the HC interrupt status then walk each
1679  *      port connected to the HC and see if it needs servicing.  Port
1680  *      success ints are reported in the HC interrupt status reg, while the
1681  *      port error ints are reported in the higher level main
1682  *      interrupt status register and thus are passed in via the
1683  *      'relevant' argument.
1684  *
1685  *      LOCKING:
1686  *      Inherited from caller.
1687  */
1688 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1689 {
1690 	struct mv_host_priv *hpriv = host->private_data;
1691 	void __iomem *mmio = hpriv->base;
1692 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1693 	u32 hc_irq_cause;
1694 	int port, port0, last_port;
1695 
1696 	if (hc == 0)
1697 		port0 = 0;
1698 	else
1699 		port0 = MV_PORTS_PER_HC;
1700 
1701 	if (HAS_PCI(host))
1702 		last_port = port0 + MV_PORTS_PER_HC;
1703 	else
1704 		last_port = port0 + hpriv->n_ports;
1705 	/* we'll need the HC success int register in most cases */
1706 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1707 	if (!hc_irq_cause)
1708 		return;
1709 
1710 	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1711 
1712 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1713 		hc, relevant, hc_irq_cause);
1714 
1715 	for (port = port0; port < last_port; port++) {
1716 		struct ata_port *ap = host->ports[port];
1717 		struct mv_port_priv *pp;
1718 		int have_err_bits, hard_port, shift;
1719 
1720 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1721 			continue;
1722 
1723 		pp = ap->private_data;
1724 
1725 		shift = port << 1;		/* (port * 2) */
1726 		if (port >= MV_PORTS_PER_HC) {
1727 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1728 		}
1729 		have_err_bits = ((PORT0_ERR << shift) & relevant);
1730 
1731 		if (unlikely(have_err_bits)) {
1732 			struct ata_queued_cmd *qc;
1733 
1734 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1735 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1736 				continue;
1737 
1738 			mv_err_intr(ap, qc);
1739 			continue;
1740 		}
1741 
1742 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1743 
1744 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1745 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1746 				mv_intr_edma(ap);
1747 		} else {
1748 			if ((DEV_IRQ << hard_port) & hc_irq_cause)
1749 				mv_intr_pio(ap);
1750 		}
1751 	}
1752 	VPRINTK("EXIT\n");
1753 }
1754 
1755 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1756 {
1757 	struct mv_host_priv *hpriv = host->private_data;
1758 	struct ata_port *ap;
1759 	struct ata_queued_cmd *qc;
1760 	struct ata_eh_info *ehi;
1761 	unsigned int i, err_mask, printed = 0;
1762 	u32 err_cause;
1763 
1764 	err_cause = readl(mmio + hpriv->irq_cause_ofs);
1765 
1766 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1767 		   err_cause);
1768 
1769 	DPRINTK("All regs @ PCI error\n");
1770 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1771 
1772 	writelfl(0, mmio + hpriv->irq_cause_ofs);
1773 
1774 	for (i = 0; i < host->n_ports; i++) {
1775 		ap = host->ports[i];
1776 		if (!ata_link_offline(&ap->link)) {
1777 			ehi = &ap->link.eh_info;
1778 			ata_ehi_clear_desc(ehi);
1779 			if (!printed++)
1780 				ata_ehi_push_desc(ehi,
1781 					"PCI err cause 0x%08x", err_cause);
1782 			err_mask = AC_ERR_HOST_BUS;
1783 			ehi->action = ATA_EH_HARDRESET;
1784 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1785 			if (qc)
1786 				qc->err_mask |= err_mask;
1787 			else
1788 				ehi->err_mask |= err_mask;
1789 
1790 			ata_port_freeze(ap);
1791 		}
1792 	}
1793 }
1794 
1795 /**
1796  *      mv_interrupt - Main interrupt event handler
1797  *      @irq: unused
1798  *      @dev_instance: private data; in this case the host structure
1799  *
1800  *      Read the read-only register to determine if any host
1801  *      controllers have pending interrupts.  If so, call lower level
1802  *      routine to handle.  Also check for PCI errors which are only
1803  *      reported here.
1804  *
1805  *      LOCKING:
1806  *      This routine holds the host lock while processing pending
1807  *      interrupts.
1808  */
1809 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1810 {
1811 	struct ata_host *host = dev_instance;
1812 	struct mv_host_priv *hpriv = host->private_data;
1813 	unsigned int hc, handled = 0, n_hcs;
1814 	void __iomem *mmio = hpriv->base;
1815 	u32 irq_stat, irq_mask;
1816 
1817 	spin_lock(&host->lock);
1818 
1819 	irq_stat = readl(hpriv->main_cause_reg_addr);
1820 	irq_mask = readl(hpriv->main_mask_reg_addr);
1821 
1822 	/* check the cases where we either have nothing pending or have read
1823 	 * a bogus register value which can indicate HW removal or PCI fault
1824 	 */
1825 	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1826 		goto out_unlock;
1827 
1828 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1829 
1830 	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1831 		mv_pci_error(host, mmio);
1832 		handled = 1;
1833 		goto out_unlock;	/* skip all other HC irq handling */
1834 	}
1835 
1836 	for (hc = 0; hc < n_hcs; hc++) {
1837 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1838 		if (relevant) {
1839 			mv_host_intr(host, relevant, hc);
1840 			handled = 1;
1841 		}
1842 	}
1843 
1844 out_unlock:
1845 	spin_unlock(&host->lock);
1846 
1847 	return IRQ_RETVAL(handled);
1848 }
1849 
1850 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1851 {
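	/* Each port's PHY registers occupy a 0x100-byte window immediately
	 * after the HC's own register block, hence the +1 below.
	 */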
1852 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1853 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1854 
1855 	return hc_mmio + ofs;
1856 }
1857 
1858 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1859 {
1860 	unsigned int ofs;
1861 
1862 	switch (sc_reg_in) {
1863 	case SCR_STATUS:
1864 	case SCR_ERROR:
1865 	case SCR_CONTROL:
1866 		ofs = sc_reg_in * sizeof(u32);
1867 		break;
1868 	default:
1869 		ofs = 0xffffffffU;
1870 		break;
1871 	}
1872 	return ofs;
1873 }
1874 
1875 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1876 {
1877 	struct mv_host_priv *hpriv = ap->host->private_data;
1878 	void __iomem *mmio = hpriv->base;
1879 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1880 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1881 
1882 	if (ofs != 0xffffffffU) {
1883 		*val = readl(addr + ofs);
1884 		return 0;
1885 	} else
1886 		return -EINVAL;
1887 }
1888 
1889 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1890 {
1891 	struct mv_host_priv *hpriv = ap->host->private_data;
1892 	void __iomem *mmio = hpriv->base;
1893 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1894 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1895 
1896 	if (ofs != 0xffffffffU) {
1897 		writelfl(val, addr + ofs);
1898 		return 0;
1899 	} else
1900 		return -EINVAL;
1901 }
1902 
1903 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1904 {
1905 	struct pci_dev *pdev = to_pci_dev(host->dev);
1906 	int early_5080;
1907 
1908 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1909 
1910 	if (!early_5080) {
1911 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1912 		tmp |= (1 << 0);
1913 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1914 	}
1915 
1916 	mv_reset_pci_bus(host, mmio);
1917 }
1918 
1919 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1920 {
1921 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1922 }
1923 
1924 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1925 			   void __iomem *mmio)
1926 {
1927 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1928 	u32 tmp;
1929 
1930 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1931 
1932 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1933 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1934 }
1935 
1936 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1937 {
1938 	u32 tmp;
1939 
1940 	writel(0, mmio + MV_GPIO_PORT_CTL);
1941 
1942 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1943 
1944 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1945 	tmp |= ~(1 << 0);
1946 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1947 }
1948 
1949 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1950 			   unsigned int port)
1951 {
1952 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1953 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1954 	u32 tmp;
1955 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1956 
1957 	if (fix_apm_sq) {
1958 		tmp = readl(phy_mmio + MV5_LT_MODE);
1959 		tmp |= (1 << 19);
1960 		writel(tmp, phy_mmio + MV5_LT_MODE);
1961 
1962 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1963 		tmp &= ~0x3;
1964 		tmp |= 0x1;
1965 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1966 	}
1967 
1968 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1969 	tmp &= ~mask;
1970 	tmp |= hpriv->signal[port].pre;
1971 	tmp |= hpriv->signal[port].amps;
1972 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1973 }
1974 
1975 
1976 #undef ZERO
1977 #define ZERO(reg) writel(0, port_mmio + (reg))
1978 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1979 			     unsigned int port)
1980 {
1981 	void __iomem *port_mmio = mv_port_base(mmio, port);
1982 
1983 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1984 
1985 	mv_channel_reset(hpriv, mmio, port);
1986 
1987 	ZERO(0x028);	/* command */
1988 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1989 	ZERO(0x004);	/* timer */
1990 	ZERO(0x008);	/* irq err cause */
1991 	ZERO(0x00c);	/* irq err mask */
1992 	ZERO(0x010);	/* rq bah */
1993 	ZERO(0x014);	/* rq inp */
1994 	ZERO(0x018);	/* rq outp */
1995 	ZERO(0x01c);	/* respq bah */
1996 	ZERO(0x024);	/* respq outp */
1997 	ZERO(0x020);	/* respq inp */
1998 	ZERO(0x02c);	/* test control */
1999 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2000 }
2001 #undef ZERO
2002 
2003 #define ZERO(reg) writel(0, hc_mmio + (reg))
2004 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2005 			unsigned int hc)
2006 {
2007 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2008 	u32 tmp;
2009 
2010 	ZERO(0x00c);
2011 	ZERO(0x010);
2012 	ZERO(0x014);
2013 	ZERO(0x018);
2014 
2015 	tmp = readl(hc_mmio + 0x20);
2016 	tmp &= 0x1c1c1c1c;
2017 	tmp |= 0x03030303;
2018 	writel(tmp, hc_mmio + 0x20);
2019 }
2020 #undef ZERO
2021 
2022 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2023 			unsigned int n_hc)
2024 {
2025 	unsigned int hc, port;
2026 
2027 	for (hc = 0; hc < n_hc; hc++) {
2028 		for (port = 0; port < MV_PORTS_PER_HC; port++)
2029 			mv5_reset_hc_port(hpriv, mmio,
2030 					  (hc * MV_PORTS_PER_HC) + port);
2031 
2032 		mv5_reset_one_hc(hpriv, mmio, hc);
2033 	}
2034 
2035 	return 0;
2036 }
2037 
2038 #undef ZERO
2039 #define ZERO(reg) writel(0, mmio + (reg))
2040 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2041 {
2042 	struct mv_host_priv *hpriv = host->private_data;
2043 	u32 tmp;
2044 
2045 	tmp = readl(mmio + MV_PCI_MODE);
2046 	tmp &= 0xff00ffff;
2047 	writel(tmp, mmio + MV_PCI_MODE);
2048 
2049 	ZERO(MV_PCI_DISC_TIMER);
2050 	ZERO(MV_PCI_MSI_TRIGGER);
2051 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2052 	ZERO(HC_MAIN_IRQ_MASK_OFS);
2053 	ZERO(MV_PCI_SERR_MASK);
2054 	ZERO(hpriv->irq_cause_ofs);
2055 	ZERO(hpriv->irq_mask_ofs);
2056 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
2057 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2058 	ZERO(MV_PCI_ERR_ATTRIBUTE);
2059 	ZERO(MV_PCI_ERR_COMMAND);
2060 }
2061 #undef ZERO
2062 
2063 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2064 {
2065 	u32 tmp;
2066 
2067 	mv5_reset_flash(hpriv, mmio);
2068 
2069 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
2070 	tmp &= 0x3;
2071 	tmp |= (1 << 5) | (1 << 6);
2072 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
2073 }
2074 
2075 /**
2076  *      mv6_reset_hc - Perform the 6xxx global soft reset
2077  *      @mmio: base address of the HBA
2078  *
2079  *      This routine only applies to 6xxx parts.
2080  *
2081  *      LOCKING:
2082  *      Inherited from caller.
2083  */
2084 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2085 			unsigned int n_hc)
2086 {
2087 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2088 	int i, rc = 0;
2089 	u32 t;
2090 
2091 	/* Following procedure defined in PCI "main command and status
2092 	 * register" table.
2093 	 */
2094 	t = readl(reg);
2095 	writel(t | STOP_PCI_MASTER, reg);
2096 
2097 	for (i = 0; i < 1000; i++) {
2098 		udelay(1);
2099 		t = readl(reg);
2100 		if (PCI_MASTER_EMPTY & t)
2101 			break;
2102 	}
2103 	if (!(PCI_MASTER_EMPTY & t)) {
2104 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2105 		rc = 1;
2106 		goto done;
2107 	}
2108 
2109 	/* set reset */
2110 	i = 5;
2111 	do {
2112 		writel(t | GLOB_SFT_RST, reg);
2113 		t = readl(reg);
2114 		udelay(1);
2115 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
2116 
2117 	if (!(GLOB_SFT_RST & t)) {
2118 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2119 		rc = 1;
2120 		goto done;
2121 	}
2122 
2123 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2124 	i = 5;
2125 	do {
2126 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2127 		t = readl(reg);
2128 		udelay(1);
2129 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2130 
2131 	if (GLOB_SFT_RST & t) {
2132 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2133 		rc = 1;
2134 	}
2135 done:
2136 	return rc;
2137 }
2138 
2139 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2140 			   void __iomem *mmio)
2141 {
2142 	void __iomem *port_mmio;
2143 	u32 tmp;
2144 
2145 	tmp = readl(mmio + MV_RESET_CFG);
2146 	if ((tmp & (1 << 0)) == 0) {
2147 		hpriv->signal[idx].amps = 0x7 << 8;
2148 		hpriv->signal[idx].pre = 0x1 << 5;
2149 		return;
2150 	}
2151 
2152 	port_mmio = mv_port_base(mmio, idx);
2153 	tmp = readl(port_mmio + PHY_MODE2);
2154 
2155 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2156 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2157 }
2158 
2159 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2160 {
2161 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2162 }
2163 
2164 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2165 			   unsigned int port)
2166 {
2167 	void __iomem *port_mmio = mv_port_base(mmio, port);
2168 
2169 	u32 hp_flags = hpriv->hp_flags;
2170 	int fix_phy_mode2 =
2171 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2172 	int fix_phy_mode4 =
2173 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2174 	u32 m2, tmp;
2175 
2176 	if (fix_phy_mode2) {
2177 		m2 = readl(port_mmio + PHY_MODE2);
2178 		m2 &= ~(1 << 16);
2179 		m2 |= (1 << 31);
2180 		writel(m2, port_mmio + PHY_MODE2);
2181 
2182 		udelay(200);
2183 
2184 		m2 = readl(port_mmio + PHY_MODE2);
2185 		m2 &= ~((1 << 16) | (1 << 31));
2186 		writel(m2, port_mmio + PHY_MODE2);
2187 
2188 		udelay(200);
2189 	}
2190 
2191 	/* who knows what this magic does */
2192 	tmp = readl(port_mmio + PHY_MODE3);
2193 	tmp &= ~0x7F800000;
2194 	tmp |= 0x2A800000;
2195 	writel(tmp, port_mmio + PHY_MODE3);
2196 
2197 	if (fix_phy_mode4) {
2198 		u32 m4;
2199 
2200 		m4 = readl(port_mmio + PHY_MODE4);
2201 
2202 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2203 			tmp = readl(port_mmio + 0x310);
2204 
2205 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2206 
2207 		writel(m4, port_mmio + PHY_MODE4);
2208 
2209 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2210 			writel(tmp, port_mmio + 0x310);
2211 	}
2212 
2213 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2214 	m2 = readl(port_mmio + PHY_MODE2);
2215 
2216 	m2 &= ~MV_M2_PREAMP_MASK;
2217 	m2 |= hpriv->signal[port].amps;
2218 	m2 |= hpriv->signal[port].pre;
2219 	m2 &= ~(1 << 16);
2220 
2221 	/* according to mvSata 3.6.1, some IIE values are fixed */
2222 	if (IS_GEN_IIE(hpriv)) {
2223 		m2 &= ~0xC30FF01F;
2224 		m2 |= 0x0000900F;
2225 	}
2226 
2227 	writel(m2, port_mmio + PHY_MODE2);
2228 }
2229 
2230 /* TODO: use the generic LED interface to configure the SATA Presence */
2231 /* & Activity LEDs on the board */
2232 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2233 				      void __iomem *mmio)
2234 {
2235 	return;
2236 }
2237 
2238 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2239 			   void __iomem *mmio)
2240 {
2241 	void __iomem *port_mmio;
2242 	u32 tmp;
2243 
2244 	port_mmio = mv_port_base(mmio, idx);
2245 	tmp = readl(port_mmio + PHY_MODE2);
2246 
2247 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2248 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2249 }
2250 
2251 #undef ZERO
2252 #define ZERO(reg) writel(0, port_mmio + (reg))
2253 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2254 					void __iomem *mmio, unsigned int port)
2255 {
2256 	void __iomem *port_mmio = mv_port_base(mmio, port);
2257 
2258 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2259 
2260 	mv_channel_reset(hpriv, mmio, port);
2261 
2262 	ZERO(0x028);		/* command */
2263 	writel(0x101f, port_mmio + EDMA_CFG_OFS);
2264 	ZERO(0x004);		/* timer */
2265 	ZERO(0x008);		/* irq err cause */
2266 	ZERO(0x00c);		/* irq err mask */
2267 	ZERO(0x010);		/* rq bah */
2268 	ZERO(0x014);		/* rq inp */
2269 	ZERO(0x018);		/* rq outp */
2270 	ZERO(0x01c);		/* respq bah */
2271 	ZERO(0x024);		/* respq outp */
2272 	ZERO(0x020);		/* respq inp */
2273 	ZERO(0x02c);		/* test control */
2274 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2275 }
2276 
2277 #undef ZERO
2278 
2279 #define ZERO(reg) writel(0, hc_mmio + (reg))
2280 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2281 				       void __iomem *mmio)
2282 {
2283 	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2284 
2285 	ZERO(0x00c);
2286 	ZERO(0x010);
2287 	ZERO(0x014);
2288 
2289 }
2290 
2291 #undef ZERO
2292 
2293 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2294 				  void __iomem *mmio, unsigned int n_hc)
2295 {
2296 	unsigned int port;
2297 
2298 	for (port = 0; port < hpriv->n_ports; port++)
2299 		mv_soc_reset_hc_port(hpriv, mmio, port);
2300 
2301 	mv_soc_reset_one_hc(hpriv, mmio);
2302 
2303 	return 0;
2304 }
2305 
2306 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2307 				      void __iomem *mmio)
2308 {
2309 	return;
2310 }
2311 
2312 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2313 {
2314 	return;
2315 }
2316 
2317 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2318 			     unsigned int port_no)
2319 {
2320 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2321 
2322 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2323 
2324 	if (IS_GEN_II(hpriv)) {
2325 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2326 		ifctl |= (1 << 7);		/* enable gen2i speed */
2327 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2328 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2329 	}
2330 
2331 	udelay(25);		/* allow reset propagation */
2332 
2333 	/* Spec never mentions clearing the bit.  Marvell's driver does
2334 	 * clear the bit, however.
2335 	 */
2336 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2337 
2338 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2339 
2340 	if (IS_GEN_I(hpriv))
2341 		mdelay(1);
2342 }
2343 
2344 /**
2345  *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2346  *      @ap: ATA channel to manipulate
2347  *
2348  *      Part of this is taken from __sata_phy_reset and modified to
2349  *      not sleep since this routine gets called from interrupt level.
2350  *
2351  *      LOCKING:
2352  *      Inherited from caller.  This is coded to safe to call at
2353  *      Inherited from caller.  This is coded to be safe to call at
2354  */
2355 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2356 			 unsigned long deadline)
2357 {
2358 	struct mv_port_priv *pp	= ap->private_data;
2359 	struct mv_host_priv *hpriv = ap->host->private_data;
2360 	void __iomem *port_mmio = mv_ap_base(ap);
2361 	int retry = 5;
2362 	u32 sstatus;
2363 
2364 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2365 
2366 #ifdef DEBUG
2367 	{
2368 		u32 sstatus, serror, scontrol;
2369 
2370 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2371 		mv_scr_read(ap, SCR_ERROR, &serror);
2372 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2373 		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2374 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2375 	}
2376 #endif
2377 
2378 	/* Issue COMRESET via SControl */
2379 comreset_retry:
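	/* SControl 0x301 (DET=1) starts COMRESET, 0x300 (DET=0) releases it;
	 * IPM=3 in both values keeps partial/slumber transitions disabled.
	 */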
2380 	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2381 	msleep(1);
2382 
2383 	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2384 	msleep(20);
2385 
2386 	do {
2387 		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2388 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2389 			break;
2390 
2391 		msleep(1);
2392 	} while (time_before(jiffies, deadline));
2393 
2394 	/* work around errata */
2395 	if (IS_GEN_II(hpriv) &&
2396 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2397 	    (retry-- > 0))
2398 		goto comreset_retry;
2399 
2400 #ifdef DEBUG
2401 	{
2402 		u32 sstatus, serror, scontrol;
2403 
2404 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2405 		mv_scr_read(ap, SCR_ERROR, &serror);
2406 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2407 		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2408 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2409 	}
2410 #endif
2411 
2412 	if (ata_link_offline(&ap->link)) {
2413 		*class = ATA_DEV_NONE;
2414 		return;
2415 	}
2416 
2417 	/* even after SStatus reflects that the device is ready,
2418 	 * it seems to take a while for the link to be fully
2419 	 * established (and thus Status no longer 0x80/0x7F),
2420 	 * so we poll a bit for that here.
2421 	 */
2422 	retry = 20;
2423 	while (1) {
2424 		u8 drv_stat = ata_check_status(ap);
2425 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2426 			break;
2427 		msleep(500);
2428 		if (retry-- <= 0)
2429 			break;
2430 		if (time_after(jiffies, deadline))
2431 			break;
2432 	}
2433 
2434 	/* FIXME: if we passed the deadline, the following
2435 	 * code probably produces an invalid result
2436 	 */
2437 
2438 	/* finally, read device signature from TF registers */
2439 	*class = ata_dev_try_classify(ap->link.device, 1, NULL);
2440 
2441 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2442 
2443 	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2444 
2445 	VPRINTK("EXIT\n");
2446 }
2447 
2448 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2449 {
2450 	struct ata_port *ap = link->ap;
2451 	struct mv_port_priv *pp	= ap->private_data;
2452 	struct ata_eh_context *ehc = &link->eh_context;
2453 	int rc;
2454 
2455 	rc = mv_stop_dma(ap);
2456 	if (rc)
2457 		ehc->i.action |= ATA_EH_HARDRESET;
2458 
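	/* the first EH pass on each port always escalates to a hardreset */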
2459 	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2460 		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2461 		ehc->i.action |= ATA_EH_HARDRESET;
2462 	}
2463 
2464 	/* if we're about to do hardreset, nothing more to do */
2465 	if (ehc->i.action & ATA_EH_HARDRESET)
2466 		return 0;
2467 
2468 	if (ata_link_online(link))
2469 		rc = ata_wait_ready(ap, deadline);
2470 	else
2471 		rc = -ENODEV;
2472 
2473 	return rc;
2474 }
2475 
2476 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2477 			unsigned long deadline)
2478 {
2479 	struct ata_port *ap = link->ap;
2480 	struct mv_host_priv *hpriv = ap->host->private_data;
2481 	void __iomem *mmio = hpriv->base;
2482 
2483 	mv_stop_dma(ap);
2484 
2485 	mv_channel_reset(hpriv, mmio, ap->port_no);
2486 
2487 	mv_phy_reset(ap, class, deadline);
2488 
2489 	return 0;
2490 }
2491 
2492 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2493 {
2494 	struct ata_port *ap = link->ap;
2495 	u32 serr;
2496 
2497 	/* print link status */
2498 	sata_print_link_status(link);
2499 
2500 	/* clear SError */
2501 	sata_scr_read(link, SCR_ERROR, &serr);
2502 	sata_scr_write_flush(link, SCR_ERROR, serr);
2503 
2504 	/* bail out if no device is present */
2505 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2506 		DPRINTK("EXIT, no device\n");
2507 		return;
2508 	}
2509 
2510 	/* set up device control */
2511 	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2512 }
2513 
2514 static void mv_error_handler(struct ata_port *ap)
2515 {
2516 	ata_do_eh(ap, mv_prereset, ata_std_softreset,
2517 		  mv_hardreset, mv_postreset);
2518 }
2519 
2520 static void mv_eh_freeze(struct ata_port *ap)
2521 {
2522 	struct mv_host_priv *hpriv = ap->host->private_data;
2523 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2524 	u32 tmp, mask;
2525 	unsigned int shift;
2526 
2527 	/* FIXME: handle coalescing completion events properly */
2528 
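	/* each port owns an err/done bit pair in the main mask register;
	 * ports on the second HC skip bit 8, as in mv_host_intr().
	 */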
2529 	shift = ap->port_no * 2;
2530 	if (hc > 0)
2531 		shift++;
2532 
2533 	mask = 0x3 << shift;
2534 
2535 	/* disable assertion of portN err, done events */
2536 	tmp = readl(hpriv->main_mask_reg_addr);
2537 	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2538 }
2539 
2540 static void mv_eh_thaw(struct ata_port *ap)
2541 {
2542 	struct mv_host_priv *hpriv = ap->host->private_data;
2543 	void __iomem *mmio = hpriv->base;
2544 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2545 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2546 	void __iomem *port_mmio = mv_ap_base(ap);
2547 	u32 tmp, mask, hc_irq_cause;
2548 	unsigned int shift, hc_port_no = ap->port_no;
2549 
2550 	/* FIXME: handle coalescing completion events properly */
2551 
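	/* hc_port_no becomes the port's index within its own HC (0..3),
	 * matching the per-HC CRPB-done/device-interrupt bit layout below.
	 */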
2552 	shift = ap->port_no * 2;
2553 	if (hc > 0) {
2554 		shift++;
2555 		hc_port_no -= 4;
2556 	}
2557 
2558 	mask = 0x3 << shift;
2559 
2560 	/* clear EDMA errors on this port */
2561 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2562 
2563 	/* clear pending irq events */
2564 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2565 	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2566 	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2567 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2568 
2569 	/* enable assertion of portN err, done events */
2570 	tmp = readl(hpriv->main_mask_reg_addr);
2571 	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2572 }
2573 
2574 /**
2575  *      mv_port_init - Perform some early initialization on a single port.
2576  *      @port: libata data structure storing shadow register addresses
2577  *      @port_mmio: base address of the port
2578  *
2579  *      Initialize shadow register mmio addresses, clear outstanding
2580  *      interrupts on the port, and unmask interrupts for the future
2581  *      start of the port.
2582  *
2583  *      LOCKING:
2584  *      Inherited from caller.
2585  */
2586 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2587 {
2588 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2589 	unsigned serr_ofs;
2590 
2591 	/* PIO related setup
2592 	 */
2593 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2594 	port->error_addr =
2595 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2596 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2597 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2598 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2599 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2600 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2601 	port->status_addr =
2602 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2603 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2604 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2605 
2606 	/* unused: */
2607 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2608 
2609 	/* Clear any currently outstanding port interrupt conditions */
2610 	serr_ofs = mv_scr_offset(SCR_ERROR);
2611 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2612 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2613 
2614 	/* unmask all non-transient EDMA error interrupts */
2615 	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2616 
2617 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2618 		readl(port_mmio + EDMA_CFG_OFS),
2619 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2620 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2621 }
2622 
2623 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2624 {
2625 	struct pci_dev *pdev = to_pci_dev(host->dev);
2626 	struct mv_host_priv *hpriv = host->private_data;
2627 	u32 hp_flags = hpriv->hp_flags;
2628 
2629 	switch (board_idx) {
2630 	case chip_5080:
2631 		hpriv->ops = &mv5xxx_ops;
2632 		hp_flags |= MV_HP_GEN_I;
2633 
2634 		switch (pdev->revision) {
2635 		case 0x1:
2636 			hp_flags |= MV_HP_ERRATA_50XXB0;
2637 			break;
2638 		case 0x3:
2639 			hp_flags |= MV_HP_ERRATA_50XXB2;
2640 			break;
2641 		default:
2642 			dev_printk(KERN_WARNING, &pdev->dev,
2643 			   "Applying 50XXB2 workarounds to unknown rev\n");
2644 			hp_flags |= MV_HP_ERRATA_50XXB2;
2645 			break;
2646 		}
2647 		break;
2648 
2649 	case chip_504x:
2650 	case chip_508x:
2651 		hpriv->ops = &mv5xxx_ops;
2652 		hp_flags |= MV_HP_GEN_I;
2653 
2654 		switch (pdev->revision) {
2655 		case 0x0:
2656 			hp_flags |= MV_HP_ERRATA_50XXB0;
2657 			break;
2658 		case 0x3:
2659 			hp_flags |= MV_HP_ERRATA_50XXB2;
2660 			break;
2661 		default:
2662 			dev_printk(KERN_WARNING, &pdev->dev,
2663 			   "Applying B2 workarounds to unknown rev\n");
2664 			hp_flags |= MV_HP_ERRATA_50XXB2;
2665 			break;
2666 		}
2667 		break;
2668 
2669 	case chip_604x:
2670 	case chip_608x:
2671 		hpriv->ops = &mv6xxx_ops;
2672 		hp_flags |= MV_HP_GEN_II;
2673 
2674 		switch (pdev->revision) {
2675 		case 0x7:
2676 			hp_flags |= MV_HP_ERRATA_60X1B2;
2677 			break;
2678 		case 0x9:
2679 			hp_flags |= MV_HP_ERRATA_60X1C0;
2680 			break;
2681 		default:
2682 			dev_printk(KERN_WARNING, &pdev->dev,
2683 				   "Applying B2 workarounds to unknown rev\n");
2684 			hp_flags |= MV_HP_ERRATA_60X1B2;
2685 			break;
2686 		}
2687 		break;
2688 
2689 	case chip_7042:
2690 		hp_flags |= MV_HP_PCIE;
2691 		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2692 		    (pdev->device == 0x2300 || pdev->device == 0x2310))
2693 		{
2694 			/*
2695 			 * Highpoint RocketRAID PCIe 23xx series cards:
2696 			 *
2697 			 * Unconfigured drives are treated as "Legacy"
2698 			 * by the BIOS, and it overwrites sector 8 with
2699 			 * a "Lgcy" metadata block prior to Linux boot.
2700 			 *
2701 			 * Configured drives (RAID or JBOD) leave sector 8
2702 			 * alone, but instead overwrite a high numbered
2703 			 * sector for the RAID metadata.  This sector can
2704 			 * be determined exactly, by truncating the physical
2705 			 * drive capacity to a nice even GB value.
2706 			 *
2707 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2708 			 *
2709 			 * Warn the user, lest they think we're just buggy.
2710 			 */
2711 			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2712 				" BIOS CORRUPTS DATA on all attached drives,"
2713 				" regardless of if/how they are configured."
2714 				" BEWARE!\n");
2715 			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2716 				" use sectors 8-9 on \"Legacy\" drives,"
2717 				" and avoid the final two gigabytes on"
2718 				" all RocketRAID BIOS initialized drives.\n");
2719 		}
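		/* drop through: the 7042 is otherwise programmed as a Gen IIE (6042) part */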
2720 	case chip_6042:
2721 		hpriv->ops = &mv6xxx_ops;
2722 		hp_flags |= MV_HP_GEN_IIE;
2723 
2724 		switch (pdev->revision) {
2725 		case 0x0:
2726 			hp_flags |= MV_HP_ERRATA_XX42A0;
2727 			break;
2728 		case 0x1:
2729 			hp_flags |= MV_HP_ERRATA_60X1C0;
2730 			break;
2731 		default:
2732 			dev_printk(KERN_WARNING, &pdev->dev,
2733 			   "Applying 60X1C0 workarounds to unknown rev\n");
2734 			hp_flags |= MV_HP_ERRATA_60X1C0;
2735 			break;
2736 		}
2737 		break;
2738 	case chip_soc:
2739 		hpriv->ops = &mv_soc_ops;
2740 		hp_flags |= MV_HP_ERRATA_60X1C0;
2741 		break;
2742 
2743 	default:
2744 		dev_printk(KERN_ERR, host->dev,
2745 			   "BUG: invalid board index %u\n", board_idx);
2746 		return 1;
2747 	}
2748 
2749 	hpriv->hp_flags = hp_flags;
2750 	if (hp_flags & MV_HP_PCIE) {
2751 		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
2752 		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
2753 		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
2754 	} else {
2755 		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
2756 		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
2757 		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
2758 	}
2759 
2760 	return 0;
2761 }
2762 
2763 /**
2764  *      mv_init_host - Perform some early initialization of the host.
2765  *	@host: ATA host to initialize
2766  *      @board_idx: controller index
2767  *
2768  *      If possible, do an early global reset of the host.  Then do
2769  *      our port init and clear/unmask all/relevant host interrupts.
2770  *
2771  *      LOCKING:
2772  *      Inherited from caller.
2773  */
2774 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2775 {
2776 	int rc = 0, n_hc, port, hc;
2777 	struct mv_host_priv *hpriv = host->private_data;
2778 	void __iomem *mmio = hpriv->base;
2779 
2780 	rc = mv_chip_id(host, board_idx);
2781 	if (rc)
2782 		goto done;
2783 
2784 	if (HAS_PCI(host)) {
2785 		hpriv->main_cause_reg_addr = hpriv->base +
2786 		  HC_MAIN_IRQ_CAUSE_OFS;
2787 		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2788 	} else {
2789 		hpriv->main_cause_reg_addr = hpriv->base +
2790 		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
2791 		hpriv->main_mask_reg_addr = hpriv->base +
2792 		  HC_SOC_MAIN_IRQ_MASK_OFS;
2793 	}
2794 	/* global interrupt mask; unmasked again at the end of this function */
2795 	writel(0, hpriv->main_mask_reg_addr);
2796 
2797 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2798 
2799 	for (port = 0; port < host->n_ports; port++)
2800 		hpriv->ops->read_preamp(hpriv, port, mmio);
2801 
2802 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2803 	if (rc)
2804 		goto done;
2805 
2806 	hpriv->ops->reset_flash(hpriv, mmio);
2807 	hpriv->ops->reset_bus(host, mmio);
2808 	hpriv->ops->enable_leds(hpriv, mmio);
2809 
2810 	for (port = 0; port < host->n_ports; port++) {
2811 		if (IS_GEN_II(hpriv)) {
2812 			void __iomem *port_mmio = mv_port_base(mmio, port);
2813 
2814 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2815 			ifctl |= (1 << 7);		/* enable gen2i speed */
2816 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2817 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2818 		}
2819 
2820 		hpriv->ops->phy_errata(hpriv, mmio, port);
2821 	}
2822 
2823 	for (port = 0; port < host->n_ports; port++) {
2824 		struct ata_port *ap = host->ports[port];
2825 		void __iomem *port_mmio = mv_port_base(mmio, port);
2826 
2827 		mv_port_init(&ap->ioaddr, port_mmio);
2828 
2829 #ifdef CONFIG_PCI
2830 		if (HAS_PCI(host)) {
2831 			unsigned int offset = port_mmio - mmio;
2832 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2833 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2834 		}
2835 #endif
2836 	}
2837 
2838 	for (hc = 0; hc < n_hc; hc++) {
2839 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2840 
2841 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2842 			"(before clear)=0x%08x\n", hc,
2843 			readl(hc_mmio + HC_CFG_OFS),
2844 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2845 
2846 		/* Clear any currently outstanding hc interrupt conditions */
2847 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2848 	}
2849 
2850 	if (HAS_PCI(host)) {
2851 		/* Clear any currently outstanding host interrupt conditions */
2852 		writelfl(0, mmio + hpriv->irq_cause_ofs);
2853 
2854 		/* and unmask interrupt generation for host regs */
2855 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2856 		if (IS_GEN_I(hpriv))
2857 			writelfl(~HC_MAIN_MASKED_IRQS_5,
2858 				 hpriv->main_mask_reg_addr);
2859 		else
2860 			writelfl(~HC_MAIN_MASKED_IRQS,
2861 				 hpriv->main_mask_reg_addr);
2862 
2863 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2864 			"PCI int cause/mask=0x%08x/0x%08x\n",
2865 			readl(hpriv->main_cause_reg_addr),
2866 			readl(hpriv->main_mask_reg_addr),
2867 			readl(mmio + hpriv->irq_cause_ofs),
2868 			readl(mmio + hpriv->irq_mask_ofs));
2869 	} else {
2870 		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2871 			 hpriv->main_mask_reg_addr);
2872 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2873 			readl(hpriv->main_cause_reg_addr),
2874 			readl(hpriv->main_mask_reg_addr));
2875 	}
2876 done:
2877 	return rc;
2878 }
2879 
2880 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2881 {
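	/* dmam_pool_create() returns device-managed pools, so the early
	 * returns below need no explicit cleanup of pools already created.
	 */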
2882 	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2883 							     MV_CRQB_Q_SZ, 0);
2884 	if (!hpriv->crqb_pool)
2885 		return -ENOMEM;
2886 
2887 	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2888 							     MV_CRPB_Q_SZ, 0);
2889 	if (!hpriv->crpb_pool)
2890 		return -ENOMEM;
2891 
2892 	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2893 							     MV_SG_TBL_SZ, 0);
2894 	if (!hpriv->sg_tbl_pool)
2895 		return -ENOMEM;
2896 
2897 	return 0;
2898 }
2899 
2900 /**
2901  *      mv_platform_probe - handle a positive probe of an soc Marvell
2902  *      mv_platform_probe - handle a positive probe of an SoC Marvell
2903  *      @pdev: platform device found
2904  *
2905  *      LOCKING:
2906  *      Inherited from caller.
2907  */
2908 static int mv_platform_probe(struct platform_device *pdev)
2909 {
2910 	static int printed_version;
2911 	const struct mv_sata_platform_data *mv_platform_data;
2912 	const struct ata_port_info *ppi[] =
2913 	    { &mv_port_info[chip_soc], NULL };
2914 	struct ata_host *host;
2915 	struct mv_host_priv *hpriv;
2916 	struct resource *res;
2917 	int n_ports, rc;
2918 
2919 	if (!printed_version++)
2920 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2921 
2922 	/*
2923 	 * Simple resource validation ..
2924 	 */
2925 	if (unlikely(pdev->num_resources != 2)) {
2926 		dev_err(&pdev->dev, "invalid number of resources\n");
2927 		return -EINVAL;
2928 	}
2929 
2930 	/*
2931 	 * Get the register base first
2932 	 */
2933 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2934 	if (res == NULL)
2935 		return -EINVAL;
2936 
2937 	/* allocate host */
2938 	mv_platform_data = pdev->dev.platform_data;
2939 	n_ports = mv_platform_data->n_ports;
2940 
2941 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2942 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2943 
2944 	if (!host || !hpriv)
2945 		return -ENOMEM;
2946 	host->private_data = hpriv;
2947 	hpriv->n_ports = n_ports;
2948 
2949 	host->iomap = NULL;
2950 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
2951 				   res->end - res->start + 1);
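	/* The platform resource maps the SATAHC block itself, so bias the base
	 * back so that the chip-relative offsets used throughout the driver
	 * (which assume MV_SATAHC0_REG_BASE) still resolve correctly.
	 */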
2952 	hpriv->base -= MV_SATAHC0_REG_BASE;
2953 
2954 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
2955 	if (rc)
2956 		return rc;
2957 
2958 	/* initialize adapter */
2959 	rc = mv_init_host(host, chip_soc);
2960 	if (rc)
2961 		return rc;
2962 
2963 	dev_printk(KERN_INFO, &pdev->dev,
2964 		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2965 		   host->n_ports);
2966 
2967 	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2968 				 IRQF_SHARED, &mv6_sht);
2969 }
2970 
2971 /**
2972  *
2973  *      mv_platform_remove - unplug a platform interface
2974  *      @pdev: platform device
2975  *
2976  *      A platform bus SATA device has been unplugged. Perform the needed
2977  *      cleanup. Also called on module unload for any active devices.
2978  */
2979 static int __devexit mv_platform_remove(struct platform_device *pdev)
2980 {
2981 	struct device *dev = &pdev->dev;
2982 	struct ata_host *host = dev_get_drvdata(dev);
2983 
2984 	ata_host_detach(host);
2985 	return 0;
2986 }
2987 
2988 static struct platform_driver mv_platform_driver = {
2989 	.probe			= mv_platform_probe,
2990 	.remove			= __devexit_p(mv_platform_remove),
2991 	.driver			= {
2992 				   .name = DRV_NAME,
2993 				   .owner = THIS_MODULE,
2994 				  },
2995 };
2996 
2997 
2998 #ifdef CONFIG_PCI
2999 static int mv_pci_init_one(struct pci_dev *pdev,
3000 			   const struct pci_device_id *ent);
3001 
3002 
3003 static struct pci_driver mv_pci_driver = {
3004 	.name			= DRV_NAME,
3005 	.id_table		= mv_pci_tbl,
3006 	.probe			= mv_pci_init_one,
3007 	.remove			= ata_pci_remove_one,
3008 };
3009 
3010 /*
3011  * module options
3012  */
3013 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
3014 
3015 
3016 /* move to PCI layer or libata core? */
3017 static int pci_go_64(struct pci_dev *pdev)
3018 {
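	/* Prefer 64-bit DMA masks; fall back to 32-bit masks when the device
	 * or platform cannot support them.
	 */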
3019 	int rc;
3020 
3021 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3022 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3023 		if (rc) {
3024 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3025 			if (rc) {
3026 				dev_printk(KERN_ERR, &pdev->dev,
3027 					   "64-bit DMA enable failed\n");
3028 				return rc;
3029 			}
3030 		}
3031 	} else {
3032 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3033 		if (rc) {
3034 			dev_printk(KERN_ERR, &pdev->dev,
3035 				   "32-bit DMA enable failed\n");
3036 			return rc;
3037 		}
3038 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3039 		if (rc) {
3040 			dev_printk(KERN_ERR, &pdev->dev,
3041 				   "32-bit consistent DMA enable failed\n");
3042 			return rc;
3043 		}
3044 	}
3045 
3046 	return rc;
3047 }
3048 
3049 /**
3050  *      mv_print_info - Dump key info to kernel log for perusal.
3051  *      @host: ATA host to print info about
3052  *
3053  *      FIXME: complete this.
3054  *
3055  *      LOCKING:
3056  *      Inherited from caller.
3057  */
3058 static void mv_print_info(struct ata_host *host)
3059 {
3060 	struct pci_dev *pdev = to_pci_dev(host->dev);
3061 	struct mv_host_priv *hpriv = host->private_data;
3062 	u8 scc;
3063 	const char *scc_s, *gen;
3064 
3065 	/* Read the PCI device class byte to report whether the chip presents
3066 	 * itself as a SCSI or a RAID controller.
3067 	 */
3068 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3069 	if (scc == 0)
3070 		scc_s = "SCSI";
3071 	else if (scc == 0x01)
3072 		scc_s = "RAID";
3073 	else
3074 		scc_s = "?";
3075 
3076 	if (IS_GEN_I(hpriv))
3077 		gen = "I";
3078 	else if (IS_GEN_II(hpriv))
3079 		gen = "II";
3080 	else if (IS_GEN_IIE(hpriv))
3081 		gen = "IIE";
3082 	else
3083 		gen = "?";
3084 
3085 	dev_printk(KERN_INFO, &pdev->dev,
3086 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3087 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3088 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3089 }
3090 
3091 /**
3092  *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
3093  *      @pdev: PCI device found
3094  *      @ent: PCI device ID entry for the matched host
3095  *
3096  *      LOCKING:
3097  *      Inherited from caller.
3098  */
3099 static int mv_pci_init_one(struct pci_dev *pdev,
3100 			   const struct pci_device_id *ent)
3101 {
3102 	static int printed_version;
3103 	unsigned int board_idx = (unsigned int)ent->driver_data;
3104 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3105 	struct ata_host *host;
3106 	struct mv_host_priv *hpriv;
3107 	int n_ports, rc;
3108 
3109 	if (!printed_version++)
3110 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3111 
3112 	/* allocate host */
3113 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3114 
3115 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3116 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3117 	if (!host || !hpriv)
3118 		return -ENOMEM;
3119 	host->private_data = hpriv;
3120 	hpriv->n_ports = n_ports;
3121 
3122 	/* acquire resources */
3123 	rc = pcim_enable_device(pdev);
3124 	if (rc)
3125 		return rc;
3126 
3127 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3128 	if (rc == -EBUSY)
3129 		pcim_pin_device(pdev);
3130 	if (rc)
3131 		return rc;
3132 	host->iomap = pcim_iomap_table(pdev);
3133 	hpriv->base = host->iomap[MV_PRIMARY_BAR];
3134 
3135 	rc = pci_go_64(pdev);
3136 	if (rc)
3137 		return rc;
3138 
3139 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
3140 	if (rc)
3141 		return rc;
3142 
3143 	/* initialize adapter */
3144 	rc = mv_init_host(host, board_idx);
3145 	if (rc)
3146 		return rc;
3147 
3148 	/* Enable interrupts */
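	/* fall back to legacy INTx if MSI was requested but cannot be enabled */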
3149 	if (msi && pci_enable_msi(pdev))
3150 		pci_intx(pdev, 1);
3151 
3152 	mv_dump_pci_cfg(pdev, 0x68);
3153 	mv_print_info(host);
3154 
3155 	pci_set_master(pdev);
3156 	pci_try_set_mwi(pdev);
3157 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3158 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3159 }
3160 #endif
3161 
3162 static int mv_platform_probe(struct platform_device *pdev);
3163 static int __devexit mv_platform_remove(struct platform_device *pdev);
3164 
3165 static int __init mv_init(void)
3166 {
3167 	int rc = -ENODEV;
3168 #ifdef CONFIG_PCI
3169 	rc = pci_register_driver(&mv_pci_driver);
3170 	if (rc < 0)
3171 		return rc;
3172 #endif
3173 	rc = platform_driver_register(&mv_platform_driver);
3174 
3175 #ifdef CONFIG_PCI
3176 	if (rc < 0)
3177 		pci_unregister_driver(&mv_pci_driver);
3178 #endif
3179 	return rc;
3180 }
3181 
3182 static void __exit mv_exit(void)
3183 {
3184 #ifdef CONFIG_PCI
3185 	pci_unregister_driver(&mv_pci_driver);
3186 #endif
3187 	platform_driver_unregister(&mv_platform_driver);
3188 }
3189 
3190 MODULE_AUTHOR("Brett Russ");
3191 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3192 MODULE_LICENSE("GPL");
3193 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3194 MODULE_VERSION(DRV_VERSION);
3195 MODULE_ALIAS("platform:sata_mv");
3196 
3197 #ifdef CONFIG_PCI
3198 module_param(msi, int, 0444);
3199 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3200 #endif
3201 
3202 module_init(mv_init);
3203 module_exit(mv_exit);
3204