xref: /openbmc/linux/drivers/ata/sata_mv.c (revision 8b4a4080)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25   sata_mv TODO list:
26 
27   1) Needs a full errata audit for all chipsets.  I implemented most
28   of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember that a couple of workarounds (one related
30   to PCI-X) are still needed.
31 
32   4) Add NCQ support (easy to intermediate, once new-EH support appears)
33 
34   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35 
36   6) Add port multiplier support (intermediate)
37 
38   7) Test and verify 3.0 Gbps support
39 
40   8) Develop a low-power-consumption strategy, and implement it.
41 
42   9) [Experiment, low priority] See if ATAPI can be supported using
43   "unknown FIS" or "vendor-specific FIS" support, or something creative
44   like that.
45 
46   10) [Experiment, low priority] Investigate interrupt coalescing.
47   Quite often, especially with PCI Message Signalled Interrupts (MSI),
48   the overhead saved by interrupt mitigation is not worth the
49   latency cost.
50 
51   11) [Experiment, Marvell value added] Is it possible to use target
52   mode to cross-connect two Linux boxes with Marvell cards?  If so,
53   creating LibATA target mode support would be very interesting.
54 
55   Target mode, for those without docs, is the ability to directly
56   connect two SATA controllers.
57 
58   13) Verify that 7042 is fully supported.  I only have a 6042.
59 
60 */
61 
62 
63 #include <linux/kernel.h>
64 #include <linux/module.h>
65 #include <linux/pci.h>
66 #include <linux/init.h>
67 #include <linux/blkdev.h>
68 #include <linux/delay.h>
69 #include <linux/interrupt.h>
70 #include <linux/dma-mapping.h>
71 #include <linux/device.h>
72 #include <scsi/scsi_host.h>
73 #include <scsi/scsi_cmnd.h>
74 #include <linux/libata.h>
75 
76 #define DRV_NAME	"sata_mv"
77 #define DRV_VERSION	"0.81"
78 
79 enum {
80 	/* BAR's are enumerated in terms of pci_resource_start() terms */
81 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
82 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
83 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
84 
85 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
86 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
87 
88 	MV_PCI_REG_BASE		= 0,
89 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
90 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
91 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
92 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
93 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
94 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
95 
96 	MV_SATAHC0_REG_BASE	= 0x20000,
97 	MV_FLASH_CTL		= 0x1046c,
98 	MV_GPIO_PORT_CTL	= 0x104f0,
99 	MV_RESET_CFG		= 0x180d8,
100 
101 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
102 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
103 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
104 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
105 
106 	MV_MAX_Q_DEPTH		= 32,
107 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
108 
109 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
110 	 * CRPB needs alignment on a 256B boundary. Size == 256B
111 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
112 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
113 	 */
114 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
115 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
116 	MV_MAX_SG_CT		= 176,
117 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
118 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
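	/* Sanity check of the above: 32 * 32B + 32 * 8B + 176 * 16B
	 * = 1024 + 256 + 2816 = 4096 bytes, i.e. one 4KB chunk per port.
	 */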
119 
120 	MV_PORTS_PER_HC		= 4,
121 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
122 	MV_PORT_HC_SHIFT	= 2,
123 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
124 	MV_PORT_MASK		= 3,
125 
126 	/* Host Flags */
127 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
128 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
129 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
130 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
131 				  ATA_FLAG_PIO_POLLING,
132 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
133 
134 	CRQB_FLAG_READ		= (1 << 0),
135 	CRQB_TAG_SHIFT		= 1,
136 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
137 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
138 	CRQB_CMD_ADDR_SHIFT	= 8,
139 	CRQB_CMD_CS		= (0x2 << 11),
140 	CRQB_CMD_LAST		= (1 << 15),
141 
142 	CRPB_FLAG_STATUS_SHIFT	= 8,
143 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
144 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
145 
146 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
147 
148 	/* PCI interface registers */
149 
150 	PCI_COMMAND_OFS		= 0xc00,
151 
152 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
153 	STOP_PCI_MASTER		= (1 << 2),
154 	PCI_MASTER_EMPTY	= (1 << 3),
155 	GLOB_SFT_RST		= (1 << 4),
156 
157 	MV_PCI_MODE		= 0xd00,
158 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
159 	MV_PCI_DISC_TIMER	= 0xd04,
160 	MV_PCI_MSI_TRIGGER	= 0xc38,
161 	MV_PCI_SERR_MASK	= 0xc28,
162 	MV_PCI_XBAR_TMOUT	= 0x1d04,
163 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
164 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
165 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
166 	MV_PCI_ERR_COMMAND	= 0x1d50,
167 
168 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
169 	PCI_IRQ_MASK_OFS		= 0x1d5c,
170 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
171 
172 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
173 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
174 	PORT0_ERR		= (1 << 0),	/* shift by port # */
175 	PORT0_DONE		= (1 << 1),	/* shift by port # */
176 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
177 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
178 	PCI_ERR			= (1 << 18),
179 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
180 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
181 	PORTS_0_3_COAL_DONE	= (1 << 8),
182 	PORTS_4_7_COAL_DONE	= (1 << 17),
183 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
184 	GPIO_INT		= (1 << 22),
185 	SELF_INT		= (1 << 23),
186 	TWSI_INT		= (1 << 24),
187 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
188 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
189 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
190 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
191 				   HC_MAIN_RSVD),
192 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
193 				   HC_MAIN_RSVD_5),
194 
195 	/* SATAHC registers */
196 	HC_CFG_OFS		= 0,
197 
198 	HC_IRQ_CAUSE_OFS	= 0x14,
199 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
200 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
201 	DEV_IRQ			= (1 << 8),	/* shift by port # */
202 
203 	/* Shadow block registers */
204 	SHD_BLK_OFS		= 0x100,
205 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
206 
207 	/* SATA registers */
208 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
209 	SATA_ACTIVE_OFS		= 0x350,
210 	PHY_MODE3		= 0x310,
211 	PHY_MODE4		= 0x314,
212 	PHY_MODE2		= 0x330,
213 	MV5_PHY_MODE		= 0x74,
214 	MV5_LT_MODE		= 0x30,
215 	MV5_PHY_CTL		= 0x0C,
216 	SATA_INTERFACE_CTL	= 0x050,
217 
218 	MV_M2_PREAMP_MASK	= 0x7e0,
219 
220 	/* Port registers */
221 	EDMA_CFG_OFS		= 0,
222 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
223 	EDMA_CFG_NCQ		= (1 << 5),
224 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
225 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
226 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
227 
228 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
229 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
230 	EDMA_ERR_D_PAR		= (1 << 0),
231 	EDMA_ERR_PRD_PAR	= (1 << 1),
232 	EDMA_ERR_DEV		= (1 << 2),
233 	EDMA_ERR_DEV_DCON	= (1 << 3),
234 	EDMA_ERR_DEV_CON	= (1 << 4),
235 	EDMA_ERR_SERR		= (1 << 5),
236 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
237 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
238 	EDMA_ERR_BIST_ASYNC	= (1 << 8),
239 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
240 	EDMA_ERR_CRBQ_PAR	= (1 << 9),
241 	EDMA_ERR_CRPB_PAR	= (1 << 10),
242 	EDMA_ERR_INTRL_PAR	= (1 << 11),
243 	EDMA_ERR_IORDY		= (1 << 12),
244 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
245 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
246 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
247 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
248 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
249 	EDMA_ERR_TRANS_PROTO	= (1 << 31),
250 	EDMA_ERR_OVERRUN_5	= (1 << 5),
251 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
252 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
253 				  EDMA_ERR_PRD_PAR |
254 				  EDMA_ERR_DEV_DCON |
255 				  EDMA_ERR_DEV_CON |
256 				  EDMA_ERR_SERR |
257 				  EDMA_ERR_SELF_DIS |
258 				  EDMA_ERR_CRBQ_PAR |
259 				  EDMA_ERR_CRPB_PAR |
260 				  EDMA_ERR_INTRL_PAR |
261 				  EDMA_ERR_IORDY |
262 				  EDMA_ERR_LNK_CTRL_RX_2 |
263 				  EDMA_ERR_LNK_DATA_RX |
264 				  EDMA_ERR_LNK_DATA_TX |
265 				  EDMA_ERR_TRANS_PROTO,
266 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
267 				  EDMA_ERR_PRD_PAR |
268 				  EDMA_ERR_DEV_DCON |
269 				  EDMA_ERR_DEV_CON |
270 				  EDMA_ERR_OVERRUN_5 |
271 				  EDMA_ERR_UNDERRUN_5 |
272 				  EDMA_ERR_SELF_DIS_5 |
273 				  EDMA_ERR_CRBQ_PAR |
274 				  EDMA_ERR_CRPB_PAR |
275 				  EDMA_ERR_INTRL_PAR |
276 				  EDMA_ERR_IORDY,
277 
278 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
279 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
280 
281 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
282 	EDMA_REQ_Q_PTR_SHIFT	= 5,
283 
284 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
285 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
286 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
287 	EDMA_RSP_Q_PTR_SHIFT	= 3,
288 
289 	EDMA_CMD_OFS		= 0x28,
290 	EDMA_EN			= (1 << 0),
291 	EDMA_DS			= (1 << 1),
292 	ATA_RST			= (1 << 2),
293 
294 	EDMA_IORDY_TMOUT	= 0x34,
295 	EDMA_ARB_CFG		= 0x38,
296 
297 	/* Host private flags (hp_flags) */
298 	MV_HP_FLAG_MSI		= (1 << 0),
299 	MV_HP_ERRATA_50XXB0	= (1 << 1),
300 	MV_HP_ERRATA_50XXB2	= (1 << 2),
301 	MV_HP_ERRATA_60X1B2	= (1 << 3),
302 	MV_HP_ERRATA_60X1C0	= (1 << 4),
303 	MV_HP_ERRATA_XX42A0	= (1 << 5),
304 	MV_HP_GEN_I		= (1 << 6),
305 	MV_HP_GEN_II		= (1 << 7),
306 	MV_HP_GEN_IIE		= (1 << 8),
307 
308 	/* Port private flags (pp_flags) */
309 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
310 	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
311 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
312 };
313 
314 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
315 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
316 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
317 
318 enum {
319 	MV_DMA_BOUNDARY		= 0xffffffffU,
320 
321 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
322 
323 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
324 };
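
/* The BASE_LO masks above work because the CRQB ring is 1KB-aligned and the
 * CRPB ring is 256B-aligned, so the low bits of the shared base/pointer
 * registers are free to carry the queue index (see mv_set_edma_ptrs()).
 */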
325 
326 enum chip_type {
327 	chip_504x,
328 	chip_508x,
329 	chip_5080,
330 	chip_604x,
331 	chip_608x,
332 	chip_6042,
333 	chip_7042,
334 };
335 
336 /* Command ReQuest Block: 32B */
337 struct mv_crqb {
338 	__le32			sg_addr;
339 	__le32			sg_addr_hi;
340 	__le16			ctrl_flags;
341 	__le16			ata_cmd[11];
342 };
343 
344 struct mv_crqb_iie {
345 	__le32			addr;
346 	__le32			addr_hi;
347 	__le32			flags;
348 	__le32			len;
349 	__le32			ata_cmd[4];
350 };
351 
352 /* Command ResPonse Block: 8B */
353 struct mv_crpb {
354 	__le16			id;
355 	__le16			flags;
356 	__le32			tmstmp;
357 };
358 
359 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
360 struct mv_sg {
361 	__le32			addr;
362 	__le32			flags_size;
363 	__le32			addr_hi;
364 	__le32			reserved;
365 };
366 
367 struct mv_port_priv {
368 	struct mv_crqb		*crqb;
369 	dma_addr_t		crqb_dma;
370 	struct mv_crpb		*crpb;
371 	dma_addr_t		crpb_dma;
372 	struct mv_sg		*sg_tbl;
373 	dma_addr_t		sg_tbl_dma;
374 
375 	unsigned int		req_idx;
376 	unsigned int		resp_idx;
377 
378 	u32			pp_flags;
379 };
380 
381 struct mv_port_signal {
382 	u32			amps;
383 	u32			pre;
384 };
385 
386 struct mv_host_priv;
387 struct mv_hw_ops {
388 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
389 			   unsigned int port);
390 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
391 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
392 			   void __iomem *mmio);
393 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 			unsigned int n_hc);
395 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
397 };
398 
399 struct mv_host_priv {
400 	u32			hp_flags;
401 	struct mv_port_signal	signal[8];
402 	const struct mv_hw_ops	*ops;
403 };
404 
405 static void mv_irq_clear(struct ata_port *ap);
406 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
407 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
408 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
409 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
410 static int mv_port_start(struct ata_port *ap);
411 static void mv_port_stop(struct ata_port *ap);
412 static void mv_qc_prep(struct ata_queued_cmd *qc);
413 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
414 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
415 static void mv_error_handler(struct ata_port *ap);
416 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
417 static void mv_eh_freeze(struct ata_port *ap);
418 static void mv_eh_thaw(struct ata_port *ap);
419 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
420 
421 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
422 			   unsigned int port);
423 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
424 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
425 			   void __iomem *mmio);
426 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
427 			unsigned int n_hc);
428 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
429 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
430 
431 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
432 			   unsigned int port);
433 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
434 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
435 			   void __iomem *mmio);
436 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
437 			unsigned int n_hc);
438 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
439 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
440 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
441 			     unsigned int port_no);
442 
443 static struct scsi_host_template mv5_sht = {
444 	.module			= THIS_MODULE,
445 	.name			= DRV_NAME,
446 	.ioctl			= ata_scsi_ioctl,
447 	.queuecommand		= ata_scsi_queuecmd,
448 	.can_queue		= ATA_DEF_QUEUE,
449 	.this_id		= ATA_SHT_THIS_ID,
450 	.sg_tablesize		= MV_MAX_SG_CT,
451 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
452 	.emulated		= ATA_SHT_EMULATED,
453 	.use_clustering		= 1,
454 	.proc_name		= DRV_NAME,
455 	.dma_boundary		= MV_DMA_BOUNDARY,
456 	.slave_configure	= ata_scsi_slave_config,
457 	.slave_destroy		= ata_scsi_slave_destroy,
458 	.bios_param		= ata_std_bios_param,
459 };
460 
461 static struct scsi_host_template mv6_sht = {
462 	.module			= THIS_MODULE,
463 	.name			= DRV_NAME,
464 	.ioctl			= ata_scsi_ioctl,
465 	.queuecommand		= ata_scsi_queuecmd,
466 	.can_queue		= ATA_DEF_QUEUE,
467 	.this_id		= ATA_SHT_THIS_ID,
468 	.sg_tablesize		= MV_MAX_SG_CT,
469 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
470 	.emulated		= ATA_SHT_EMULATED,
471 	.use_clustering		= 1,
472 	.proc_name		= DRV_NAME,
473 	.dma_boundary		= MV_DMA_BOUNDARY,
474 	.slave_configure	= ata_scsi_slave_config,
475 	.slave_destroy		= ata_scsi_slave_destroy,
476 	.bios_param		= ata_std_bios_param,
477 };
478 
479 static const struct ata_port_operations mv5_ops = {
480 	.port_disable		= ata_port_disable,
481 
482 	.tf_load		= ata_tf_load,
483 	.tf_read		= ata_tf_read,
484 	.check_status		= ata_check_status,
485 	.exec_command		= ata_exec_command,
486 	.dev_select		= ata_std_dev_select,
487 
488 	.cable_detect		= ata_cable_sata,
489 
490 	.qc_prep		= mv_qc_prep,
491 	.qc_issue		= mv_qc_issue,
492 	.data_xfer		= ata_data_xfer,
493 
494 	.irq_clear		= mv_irq_clear,
495 	.irq_on			= ata_irq_on,
496 	.irq_ack		= ata_irq_ack,
497 
498 	.error_handler		= mv_error_handler,
499 	.post_internal_cmd	= mv_post_int_cmd,
500 	.freeze			= mv_eh_freeze,
501 	.thaw			= mv_eh_thaw,
502 
503 	.scr_read		= mv5_scr_read,
504 	.scr_write		= mv5_scr_write,
505 
506 	.port_start		= mv_port_start,
507 	.port_stop		= mv_port_stop,
508 };
509 
510 static const struct ata_port_operations mv6_ops = {
511 	.port_disable		= ata_port_disable,
512 
513 	.tf_load		= ata_tf_load,
514 	.tf_read		= ata_tf_read,
515 	.check_status		= ata_check_status,
516 	.exec_command		= ata_exec_command,
517 	.dev_select		= ata_std_dev_select,
518 
519 	.cable_detect		= ata_cable_sata,
520 
521 	.qc_prep		= mv_qc_prep,
522 	.qc_issue		= mv_qc_issue,
523 	.data_xfer		= ata_data_xfer,
524 
525 	.irq_clear		= mv_irq_clear,
526 	.irq_on			= ata_irq_on,
527 	.irq_ack		= ata_irq_ack,
528 
529 	.error_handler		= mv_error_handler,
530 	.post_internal_cmd	= mv_post_int_cmd,
531 	.freeze			= mv_eh_freeze,
532 	.thaw			= mv_eh_thaw,
533 
534 	.scr_read		= mv_scr_read,
535 	.scr_write		= mv_scr_write,
536 
537 	.port_start		= mv_port_start,
538 	.port_stop		= mv_port_stop,
539 };
540 
541 static const struct ata_port_operations mv_iie_ops = {
542 	.port_disable		= ata_port_disable,
543 
544 	.tf_load		= ata_tf_load,
545 	.tf_read		= ata_tf_read,
546 	.check_status		= ata_check_status,
547 	.exec_command		= ata_exec_command,
548 	.dev_select		= ata_std_dev_select,
549 
550 	.cable_detect		= ata_cable_sata,
551 
552 	.qc_prep		= mv_qc_prep_iie,
553 	.qc_issue		= mv_qc_issue,
554 	.data_xfer		= ata_data_xfer,
555 
556 	.irq_clear		= mv_irq_clear,
557 	.irq_on			= ata_irq_on,
558 	.irq_ack		= ata_irq_ack,
559 
560 	.error_handler		= mv_error_handler,
561 	.post_internal_cmd	= mv_post_int_cmd,
562 	.freeze			= mv_eh_freeze,
563 	.thaw			= mv_eh_thaw,
564 
565 	.scr_read		= mv_scr_read,
566 	.scr_write		= mv_scr_write,
567 
568 	.port_start		= mv_port_start,
569 	.port_stop		= mv_port_stop,
570 };
571 
572 static const struct ata_port_info mv_port_info[] = {
573 	{  /* chip_504x */
574 		.flags		= MV_COMMON_FLAGS,
575 		.pio_mask	= 0x1f,	/* pio0-4 */
576 		.udma_mask	= ATA_UDMA6,
577 		.port_ops	= &mv5_ops,
578 	},
579 	{  /* chip_508x */
580 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
581 		.pio_mask	= 0x1f,	/* pio0-4 */
582 		.udma_mask	= ATA_UDMA6,
583 		.port_ops	= &mv5_ops,
584 	},
585 	{  /* chip_5080 */
586 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
587 		.pio_mask	= 0x1f,	/* pio0-4 */
588 		.udma_mask	= ATA_UDMA6,
589 		.port_ops	= &mv5_ops,
590 	},
591 	{  /* chip_604x */
592 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
593 		.pio_mask	= 0x1f,	/* pio0-4 */
594 		.udma_mask	= ATA_UDMA6,
595 		.port_ops	= &mv6_ops,
596 	},
597 	{  /* chip_608x */
598 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
599 				  MV_FLAG_DUAL_HC,
600 		.pio_mask	= 0x1f,	/* pio0-4 */
601 		.udma_mask	= ATA_UDMA6,
602 		.port_ops	= &mv6_ops,
603 	},
604 	{  /* chip_6042 */
605 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
606 		.pio_mask	= 0x1f,	/* pio0-4 */
607 		.udma_mask	= ATA_UDMA6,
608 		.port_ops	= &mv_iie_ops,
609 	},
610 	{  /* chip_7042 */
611 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
612 		.pio_mask	= 0x1f,	/* pio0-4 */
613 		.udma_mask	= ATA_UDMA6,
614 		.port_ops	= &mv_iie_ops,
615 	},
616 };
617 
618 static const struct pci_device_id mv_pci_tbl[] = {
619 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
620 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
621 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
622 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
623 
624 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
625 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
626 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
627 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
628 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
629 
630 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
631 
632 	/* Adaptec 1430SA */
633 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
634 
635 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
636 
637 	/* add Marvell 7042 support */
638 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
639 
640 	{ }			/* terminate list */
641 };
642 
643 static struct pci_driver mv_pci_driver = {
644 	.name			= DRV_NAME,
645 	.id_table		= mv_pci_tbl,
646 	.probe			= mv_init_one,
647 	.remove			= ata_pci_remove_one,
648 };
649 
650 static const struct mv_hw_ops mv5xxx_ops = {
651 	.phy_errata		= mv5_phy_errata,
652 	.enable_leds		= mv5_enable_leds,
653 	.read_preamp		= mv5_read_preamp,
654 	.reset_hc		= mv5_reset_hc,
655 	.reset_flash		= mv5_reset_flash,
656 	.reset_bus		= mv5_reset_bus,
657 };
658 
659 static const struct mv_hw_ops mv6xxx_ops = {
660 	.phy_errata		= mv6_phy_errata,
661 	.enable_leds		= mv6_enable_leds,
662 	.read_preamp		= mv6_read_preamp,
663 	.reset_hc		= mv6_reset_hc,
664 	.reset_flash		= mv6_reset_flash,
665 	.reset_bus		= mv_reset_pci_bus,
666 };
667 
668 /*
669  * module options
670  */
671 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
672 
673 
674 /* move to PCI layer or libata core? */
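/* Try a 64-bit DMA mask first; if the consistent (coherent) mask cannot also
 * be set to 64 bits, fall back to a 32-bit consistent mask.  If even the
 * 64-bit streaming mask is refused, use 32-bit masks for both.
 */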
675 static int pci_go_64(struct pci_dev *pdev)
676 {
677 	int rc;
678 
679 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
680 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
681 		if (rc) {
682 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
683 			if (rc) {
684 				dev_printk(KERN_ERR, &pdev->dev,
685 					   "64-bit DMA enable failed\n");
686 				return rc;
687 			}
688 		}
689 	} else {
690 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
691 		if (rc) {
692 			dev_printk(KERN_ERR, &pdev->dev,
693 				   "32-bit DMA enable failed\n");
694 			return rc;
695 		}
696 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
697 		if (rc) {
698 			dev_printk(KERN_ERR, &pdev->dev,
699 				   "32-bit consistent DMA enable failed\n");
700 			return rc;
701 		}
702 	}
703 
704 	return rc;
705 }
706 
707 /*
708  * Functions
709  */
710 
711 static inline void writelfl(unsigned long data, void __iomem *addr)
712 {
713 	writel(data, addr);
714 	(void) readl(addr);	/* flush to avoid PCI posted write */
715 }
716 
717 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
718 {
719 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
720 }
721 
722 static inline unsigned int mv_hc_from_port(unsigned int port)
723 {
724 	return port >> MV_PORT_HC_SHIFT;
725 }
726 
727 static inline unsigned int mv_hardport_from_port(unsigned int port)
728 {
729 	return port & MV_PORT_MASK;
730 }
731 
732 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
733 						 unsigned int port)
734 {
735 	return mv_hc_base(base, mv_hc_from_port(port));
736 }
737 
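/* Worked example: port 5 sits on HC1 (5 >> MV_PORT_HC_SHIFT) as hard port 1
 * (5 & MV_PORT_MASK), so mv_port_base() below resolves its registers to
 * 0x20000 + 1 * 0x10000 + 0x2000 + 1 * 0x2000 = 0x34000 from the BAR.
 */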
738 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
739 {
740 	return  mv_hc_base_from_port(base, port) +
741 		MV_SATAHC_ARBTR_REG_SZ +
742 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
743 }
744 
745 static inline void __iomem *mv_ap_base(struct ata_port *ap)
746 {
747 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
748 }
749 
750 static inline int mv_get_hc_count(unsigned long port_flags)
751 {
752 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
753 }
754 
755 static void mv_irq_clear(struct ata_port *ap)
756 {
757 }
758 
759 static void mv_set_edma_ptrs(void __iomem *port_mmio,
760 			     struct mv_host_priv *hpriv,
761 			     struct mv_port_priv *pp)
762 {
763 	u32 index;
764 
765 	/*
766 	 * initialize request queue
767 	 */
768 	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
769 
770 	WARN_ON(pp->crqb_dma & 0x3ff);
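	/* "(x >> 16) >> 16" yields the upper 32 bits of the DMA address
	 * without causing a shift-count warning when dma_addr_t is only
	 * 32 bits wide.
	 */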
771 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
772 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
773 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
774 
775 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
776 		writelfl((pp->crqb_dma & 0xffffffff) | index,
777 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
778 	else
779 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
780 
781 	/*
782 	 * initialize response queue
783 	 */
784 	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
785 
786 	WARN_ON(pp->crpb_dma & 0xff);
787 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
788 
789 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
790 		writelfl((pp->crpb_dma & 0xffffffff) | index,
791 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
792 	else
793 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
794 
795 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
796 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
797 }
798 
799 /**
800  *      mv_start_dma - Enable eDMA engine
801  *      @base: port base address
802  *      @pp: port private data
803  *
804  *      Verify the local cache of the eDMA state is accurate with a
805  *      WARN_ON.
806  *
807  *      LOCKING:
808  *      Inherited from caller.
809  */
810 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
811 			 struct mv_port_priv *pp)
812 {
813 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
814 		/* clear EDMA event indicators, if any */
815 		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
816 
817 		mv_set_edma_ptrs(base, hpriv, pp);
818 
819 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
820 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
821 	}
822 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
823 }
824 
825 /**
826  *      mv_stop_dma - Disable eDMA engine
827  *      @ap: ATA channel to manipulate
828  *
829  *      Verify the local cache of the eDMA state is accurate with a
830  *      WARN_ON.
831  *
832  *      LOCKING:
833  *      Inherited from caller.
834  */
835 static int mv_stop_dma(struct ata_port *ap)
836 {
837 	void __iomem *port_mmio = mv_ap_base(ap);
838 	struct mv_port_priv *pp	= ap->private_data;
839 	u32 reg;
840 	int i, err = 0;
841 
842 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
843 		/* Disable EDMA if active.   The disable bit auto clears.
844 		 */
845 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
846 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
847 	} else {
848 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
849   	}
850 
851 	/* now properly wait for the eDMA to stop */
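	/* up to 1000 * udelay(100), i.e. roughly 100ms worst case */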
852 	for (i = 1000; i > 0; i--) {
853 		reg = readl(port_mmio + EDMA_CMD_OFS);
854 		if (!(reg & EDMA_EN))
855 			break;
856 
857 		udelay(100);
858 	}
859 
860 	if (reg & EDMA_EN) {
861 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
862 		err = -EIO;
863 	}
864 
865 	return err;
866 }
867 
868 #ifdef ATA_DEBUG
869 static void mv_dump_mem(void __iomem *start, unsigned bytes)
870 {
871 	int b, w;
872 	for (b = 0; b < bytes; ) {
873 		DPRINTK("%p: ", start + b);
874 		for (w = 0; b < bytes && w < 4; w++) {
875 			printk("%08x ", readl(start + b));
876 			b += sizeof(u32);
877 		}
878 		printk("\n");
879 	}
880 }
881 #endif
882 
883 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
884 {
885 #ifdef ATA_DEBUG
886 	int b, w;
887 	u32 dw;
888 	for (b = 0; b < bytes; ) {
889 		DPRINTK("%02x: ", b);
890 		for (w = 0; b < bytes && w < 4; w++) {
891 			(void) pci_read_config_dword(pdev, b, &dw);
892 			printk("%08x ", dw);
893 			b += sizeof(u32);
894 		}
895 		printk("\n");
896 	}
897 #endif
898 }
899 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
900 			     struct pci_dev *pdev)
901 {
902 #ifdef ATA_DEBUG
903 	void __iomem *hc_base = mv_hc_base(mmio_base,
904 					   port >> MV_PORT_HC_SHIFT);
905 	void __iomem *port_base;
906 	int start_port, num_ports, p, start_hc, num_hcs, hc;
907 
908 	if (0 > port) {
909 		start_hc = start_port = 0;
910 		num_ports = 8;		/* should be benign for 4-port devs */
911 		num_hcs = 2;
912 	} else {
913 		start_hc = port >> MV_PORT_HC_SHIFT;
914 		start_port = port;
915 		num_ports = num_hcs = 1;
916 	}
917 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
918 		num_ports > 1 ? num_ports - 1 : start_port);
919 
920 	if (NULL != pdev) {
921 		DPRINTK("PCI config space regs:\n");
922 		mv_dump_pci_cfg(pdev, 0x68);
923 	}
924 	DPRINTK("PCI regs:\n");
925 	mv_dump_mem(mmio_base+0xc00, 0x3c);
926 	mv_dump_mem(mmio_base+0xd00, 0x34);
927 	mv_dump_mem(mmio_base+0xf00, 0x4);
928 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
929 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
930 		hc_base = mv_hc_base(mmio_base, hc);
931 		DPRINTK("HC regs (HC %i):\n", hc);
932 		mv_dump_mem(hc_base, 0x1c);
933 	}
934 	for (p = start_port; p < start_port + num_ports; p++) {
935 		port_base = mv_port_base(mmio_base, p);
936 		DPRINTK("EDMA regs (port %i):\n", p);
937 		mv_dump_mem(port_base, 0x54);
938 		DPRINTK("SATA regs (port %i):\n", p);
939 		mv_dump_mem(port_base+0x300, 0x60);
940 	}
941 #endif
942 }
943 
944 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
945 {
946 	unsigned int ofs;
947 
948 	switch (sc_reg_in) {
949 	case SCR_STATUS:
950 	case SCR_CONTROL:
951 	case SCR_ERROR:
952 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
953 		break;
954 	case SCR_ACTIVE:
955 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
956 		break;
957 	default:
958 		ofs = 0xffffffffU;
959 		break;
960 	}
961 	return ofs;
962 }
963 
964 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
965 {
966 	unsigned int ofs = mv_scr_offset(sc_reg_in);
967 
968 	if (0xffffffffU != ofs)
969 		return readl(mv_ap_base(ap) + ofs);
970 	else
971 		return (u32) ofs;
972 }
973 
974 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
975 {
976 	unsigned int ofs = mv_scr_offset(sc_reg_in);
977 
978 	if (0xffffffffU != ofs)
979 		writelfl(val, mv_ap_base(ap) + ofs);
980 }
981 
982 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
983 			void __iomem *port_mmio)
984 {
985 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
986 
987 	/* set up non-NCQ EDMA configuration */
988 	cfg &= ~(1 << 9);	/* disable eQue */
989 
990 	if (IS_GEN_I(hpriv)) {
991 		cfg &= ~0x1f;		/* clear queue depth */
992 		cfg |= (1 << 8);	/* enab config burst size mask */
993 	}
994 
995 	else if (IS_GEN_II(hpriv)) {
996 		cfg &= ~0x1f;		/* clear queue depth */
997 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
998 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
999 	}
1000 
1001 	else if (IS_GEN_IIE(hpriv)) {
1002 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1003 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1004 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
1005 		cfg |= (1 << 18);	/* enab early completion */
1006 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1007 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
1008 		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
1009 	}
1010 
1011 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1012 }
1013 
1014 /**
1015  *      mv_port_start - Port specific init/start routine.
1016  *      @ap: ATA channel to manipulate
1017  *
1018  *      Allocate and point to DMA memory, init port private memory,
1019  *      zero indices.
1020  *
1021  *      LOCKING:
1022  *      Inherited from caller.
1023  */
1024 static int mv_port_start(struct ata_port *ap)
1025 {
1026 	struct device *dev = ap->host->dev;
1027 	struct mv_host_priv *hpriv = ap->host->private_data;
1028 	struct mv_port_priv *pp;
1029 	void __iomem *port_mmio = mv_ap_base(ap);
1030 	void *mem;
1031 	dma_addr_t mem_dma;
1032 	int rc;
1033 
1034 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1035 	if (!pp)
1036 		return -ENOMEM;
1037 
1038 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1039 				  GFP_KERNEL);
1040 	if (!mem)
1041 		return -ENOMEM;
1042 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1043 
1044 	rc = ata_pad_alloc(ap, dev);
1045 	if (rc)
1046 		return rc;
1047 
1048 	/* First item in chunk of DMA memory:
1049 	 * 32-slot command request table (CRQB), 32 bytes each in size
1050 	 */
1051 	pp->crqb = mem;
1052 	pp->crqb_dma = mem_dma;
1053 	mem += MV_CRQB_Q_SZ;
1054 	mem_dma += MV_CRQB_Q_SZ;
1055 
1056 	/* Second item:
1057 	 * 32-slot command response table (CRPB), 8 bytes each in size
1058 	 */
1059 	pp->crpb = mem;
1060 	pp->crpb_dma = mem_dma;
1061 	mem += MV_CRPB_Q_SZ;
1062 	mem_dma += MV_CRPB_Q_SZ;
1063 
1064 	/* Third item:
1065 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1066 	 */
1067 	pp->sg_tbl = mem;
1068 	pp->sg_tbl_dma = mem_dma;
1069 
1070 	mv_edma_cfg(ap, hpriv, port_mmio);
1071 
1072 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
1073 
1074 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
1075 	 * we'll be unable to send non-data, PIO, etc due to restricted access
1076 	 * to shadow regs.
1077 	 */
1078 	ap->private_data = pp;
1079 	return 0;
1080 }
1081 
1082 /**
1083  *      mv_port_stop - Port specific cleanup/stop routine.
1084  *      @ap: ATA channel to manipulate
1085  *
1086  *      Stop DMA, cleanup port memory.
1087  *
1088  *      LOCKING:
1089  *      This routine uses the host lock to protect the DMA stop.
1090  */
1091 static void mv_port_stop(struct ata_port *ap)
1092 {
1093 	unsigned long flags;
1094 
1095 	spin_lock_irqsave(&ap->host->lock, flags);
1096 	mv_stop_dma(ap);
1097 	spin_unlock_irqrestore(&ap->host->lock, flags);
1098 }
1099 
1100 /**
1101  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1102  *      @qc: queued command whose SG list to source from
1103  *
1104  *      Populate the SG list and mark the last entry.
1105  *
1106  *      LOCKING:
1107  *      Inherited from caller.
1108  */
1109 static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
1110 {
1111 	struct mv_port_priv *pp = qc->ap->private_data;
1112 	unsigned int n_sg = 0;
1113 	struct scatterlist *sg;
1114 	struct mv_sg *mv_sg;
1115 
1116 	mv_sg = pp->sg_tbl;
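	/* Each ePRD carries a 64-bit buffer address, a byte count in the
	 * low 16 bits of flags_size, and an end-of-table marker in bit 31.
	 */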
1117 	ata_for_each_sg(sg, qc) {
1118 		dma_addr_t addr = sg_dma_address(sg);
1119 		u32 sg_len = sg_dma_len(sg);
1120 
1121 		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1122 		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1123 		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
1124 
1125 		if (ata_sg_is_last(sg, qc))
1126 			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1127 
1128 		mv_sg++;
1129 		n_sg++;
1130 	}
1131 
1132 	return n_sg;
1133 }
1134 
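/* Pack one taskfile register write into a 16-bit CRQB command word: data in
 * the low byte, the register address above it, plus control/last flags.  For
 * example, the final word for READ DMA EXT carries tf->command (0x25) with
 * ATA_REG_CMD and CRQB_CMD_LAST set.
 */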
1135 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1136 {
1137 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1138 		(last ? CRQB_CMD_LAST : 0);
1139 	*cmdw = cpu_to_le16(tmp);
1140 }
1141 
1142 /**
1143  *      mv_qc_prep - Host specific command preparation.
1144  *      @qc: queued command to prepare
1145  *
1146  *      This routine simply redirects to the general purpose routine
1147  *      if command is not DMA.  Else, it handles prep of the CRQB
1148  *      (command request block), does some sanity checking, and calls
1149  *      the SG load routine.
1150  *
1151  *      LOCKING:
1152  *      Inherited from caller.
1153  */
1154 static void mv_qc_prep(struct ata_queued_cmd *qc)
1155 {
1156 	struct ata_port *ap = qc->ap;
1157 	struct mv_port_priv *pp = ap->private_data;
1158 	__le16 *cw;
1159 	struct ata_taskfile *tf;
1160 	u16 flags = 0;
1161 	unsigned in_index;
1162 
1163  	if (qc->tf.protocol != ATA_PROT_DMA)
1164 		return;
1165 
1166 	/* Fill in command request block
1167 	 */
1168 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1169 		flags |= CRQB_FLAG_READ;
1170 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1171 	flags |= qc->tag << CRQB_TAG_SHIFT;
1172 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/
1173 
1174 	/* get current queue index from software */
1175 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1176 
1177 	pp->crqb[in_index].sg_addr =
1178 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1179 	pp->crqb[in_index].sg_addr_hi =
1180 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1181 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1182 
1183 	cw = &pp->crqb[in_index].ata_cmd[0];
1184 	tf = &qc->tf;
1185 
1186 	/* Sadly, the CRQB cannot accommodate all registers--there are
1187 	 * only 11 bytes...so we must pick and choose required
1188 	 * registers based on the command.  So, we drop feature and
1189 	 * hob_feature for [RW] DMA commands, but they are needed for
1190 	 * NCQ.  NCQ will drop hob_nsect.
1191 	 */
1192 	switch (tf->command) {
1193 	case ATA_CMD_READ:
1194 	case ATA_CMD_READ_EXT:
1195 	case ATA_CMD_WRITE:
1196 	case ATA_CMD_WRITE_EXT:
1197 	case ATA_CMD_WRITE_FUA_EXT:
1198 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1199 		break;
1200 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1201 	case ATA_CMD_FPDMA_READ:
1202 	case ATA_CMD_FPDMA_WRITE:
1203 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1204 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1205 		break;
1206 #endif				/* FIXME: remove this line when NCQ added */
1207 	default:
1208 		/* The only other commands EDMA supports in non-queued and
1209 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1210 		 * of which are defined/used by Linux.  If we get here, this
1211 		 * driver needs work.
1212 		 *
1213 		 * FIXME: modify libata to give qc_prep a return value and
1214 		 * return error here.
1215 		 */
1216 		BUG_ON(tf->command);
1217 		break;
1218 	}
1219 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1220 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1221 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1222 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1223 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1224 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1225 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1226 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1227 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1228 
1229 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1230 		return;
1231 	mv_fill_sg(qc);
1232 }
1233 
1234 /**
1235  *      mv_qc_prep_iie - Host specific command preparation.
1236  *      @qc: queued command to prepare
1237  *
1238  *      This routine simply redirects to the general purpose routine
1239  *      if command is not DMA.  Else, it handles prep of the CRQB
1240  *      (command request block), does some sanity checking, and calls
1241  *      the SG load routine.
1242  *
1243  *      LOCKING:
1244  *      Inherited from caller.
1245  */
1246 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1247 {
1248 	struct ata_port *ap = qc->ap;
1249 	struct mv_port_priv *pp = ap->private_data;
1250 	struct mv_crqb_iie *crqb;
1251 	struct ata_taskfile *tf;
1252 	unsigned in_index;
1253 	u32 flags = 0;
1254 
1255  	if (qc->tf.protocol != ATA_PROT_DMA)
1256 		return;
1257 
1258 	/* Fill in Gen IIE command request block
1259 	 */
1260 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1261 		flags |= CRQB_FLAG_READ;
1262 
1263 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1264 	flags |= qc->tag << CRQB_TAG_SHIFT;
1265 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
1266 						   what we use as our tag */
1267 
1268 	/* get current queue index from software */
1269 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1270 
1271 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1272 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1273 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1274 	crqb->flags = cpu_to_le32(flags);
1275 
1276 	tf = &qc->tf;
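	/* Unlike the Gen-I/II CRQB, the Gen-IIE variant carries the taskfile
	 * as four packed 32-bit words (see struct mv_crqb_iie) rather than
	 * eleven register half-words, so no per-register packing is needed.
	 */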
1277 	crqb->ata_cmd[0] = cpu_to_le32(
1278 			(tf->command << 16) |
1279 			(tf->feature << 24)
1280 		);
1281 	crqb->ata_cmd[1] = cpu_to_le32(
1282 			(tf->lbal << 0) |
1283 			(tf->lbam << 8) |
1284 			(tf->lbah << 16) |
1285 			(tf->device << 24)
1286 		);
1287 	crqb->ata_cmd[2] = cpu_to_le32(
1288 			(tf->hob_lbal << 0) |
1289 			(tf->hob_lbam << 8) |
1290 			(tf->hob_lbah << 16) |
1291 			(tf->hob_feature << 24)
1292 		);
1293 	crqb->ata_cmd[3] = cpu_to_le32(
1294 			(tf->nsect << 0) |
1295 			(tf->hob_nsect << 8)
1296 		);
1297 
1298 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1299 		return;
1300 	mv_fill_sg(qc);
1301 }
1302 
1303 /**
1304  *      mv_qc_issue - Initiate a command to the host
1305  *      @qc: queued command to start
1306  *
1307  *      This routine simply redirects to the general purpose routine
1308  *      if command is not DMA.  Else, it sanity checks our local
1309  *      caches of the request producer/consumer indices then enables
1310  *      DMA and bumps the request producer index.
1311  *
1312  *      LOCKING:
1313  *      Inherited from caller.
1314  */
1315 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1316 {
1317 	struct ata_port *ap = qc->ap;
1318 	void __iomem *port_mmio = mv_ap_base(ap);
1319 	struct mv_port_priv *pp = ap->private_data;
1320 	struct mv_host_priv *hpriv = ap->host->private_data;
1321 	u32 in_index;
1322 
1323 	if (qc->tf.protocol != ATA_PROT_DMA) {
1324 		/* We're about to send a non-EDMA capable command to the
1325 		 * port.  Turn off EDMA so there won't be problems accessing
1326 		 * shadow block, etc registers.
1327 		 */
1328 		mv_stop_dma(ap);
1329 		return ata_qc_issue_prot(qc);
1330 	}
1331 
1332 	mv_start_dma(port_mmio, hpriv, pp);
1333 
1334 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1335 
1336 	/* until we do queuing, the queue should be empty at this point */
1337 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1338 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1339 
1340 	pp->req_idx++;
1341 
1342 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1343 
1344 	/* and write the request in pointer to kick the EDMA to life */
1345 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1346 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1347 
1348 	return 0;
1349 }
1350 
1351 /**
1352  *      mv_err_intr - Handle error interrupts on the port
1353  *      @ap: ATA channel to manipulate
1354  *      @reset_allowed: bool: 0 == don't trigger from reset here
1355  *
1356  *      In most cases, just clear the interrupt and move on.  However,
1357  *      some cases require an eDMA reset, which is done right before
1358  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1359  *      clear of pending errors in the SATA SERROR register.  Finally,
1360  *      if the port disabled DMA, update our cached copy to match.
1361  *
1362  *      LOCKING:
1363  *      Inherited from caller.
1364  */
1365 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1366 {
1367 	void __iomem *port_mmio = mv_ap_base(ap);
1368 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1369 	struct mv_port_priv *pp = ap->private_data;
1370 	struct mv_host_priv *hpriv = ap->host->private_data;
1371 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1372 	unsigned int action = 0, err_mask = 0;
1373 	struct ata_eh_info *ehi = &ap->eh_info;
1374 
1375 	ata_ehi_clear_desc(ehi);
1376 
1377 	if (!edma_enabled) {
1378 		/* just a guess: do we need to do this? should we
1379 		 * expand this, and do it in all cases?
1380 		 */
1381 		sata_scr_read(ap, SCR_ERROR, &serr);
1382 		sata_scr_write_flush(ap, SCR_ERROR, serr);
1383 	}
1384 
1385 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1386 
1387 	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1388 
1389 	/*
1390 	 * all generations share these EDMA error cause bits
1391 	 */
1392 
1393 	if (edma_err_cause & EDMA_ERR_DEV)
1394 		err_mask |= AC_ERR_DEV;
1395 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1396 			EDMA_ERR_CRBQ_PAR | EDMA_ERR_CRPB_PAR |
1397 			EDMA_ERR_INTRL_PAR)) {
1398 		err_mask |= AC_ERR_ATA_BUS;
1399 		action |= ATA_EH_HARDRESET;
1400 		ata_ehi_push_desc(ehi, ", parity error");
1401 	}
1402 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1403 		ata_ehi_hotplugged(ehi);
1404 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1405 			", dev disconnect" : ", dev connect");
1406 	}
1407 
1408 	if (IS_GEN_I(hpriv)) {
1409 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1410 
1411 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1412 			struct mv_port_priv *pp	= ap->private_data;
1413 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1414 			ata_ehi_push_desc(ehi, ", EDMA self-disable");
1415 		}
1416 	} else {
1417 		eh_freeze_mask = EDMA_EH_FREEZE;
1418 
1419 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1420 			struct mv_port_priv *pp	= ap->private_data;
1421 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1422 			ata_ehi_push_desc(ehi, ", EDMA self-disable");
1423 		}
1424 
1425 		if (edma_err_cause & EDMA_ERR_SERR) {
1426 			sata_scr_read(ap, SCR_ERROR, &serr);
1427 			sata_scr_write_flush(ap, SCR_ERROR, serr);
1428 			err_mask = AC_ERR_ATA_BUS;
1429 			action |= ATA_EH_HARDRESET;
1430 		}
1431 	}
1432 
1433 	/* Clear EDMA now that SERR cleanup done */
1434 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1435 
1436 	if (!err_mask) {
1437 		err_mask = AC_ERR_OTHER;
1438 		action |= ATA_EH_HARDRESET;
1439 	}
1440 
1441 	ehi->serror |= serr;
1442 	ehi->action |= action;
1443 
1444 	if (qc)
1445 		qc->err_mask |= err_mask;
1446 	else
1447 		ehi->err_mask |= err_mask;
1448 
1449 	if (edma_err_cause & eh_freeze_mask)
1450 		ata_port_freeze(ap);
1451 	else
1452 		ata_port_abort(ap);
1453 }
1454 
1455 static void mv_intr_pio(struct ata_port *ap)
1456 {
1457 	struct ata_queued_cmd *qc;
1458 	u8 ata_status;
1459 
1460 	/* ignore spurious intr if drive still BUSY */
1461 	ata_status = readb(ap->ioaddr.status_addr);
1462 	if (unlikely(ata_status & ATA_BUSY))
1463 		return;
1464 
1465 	/* get active ATA command */
1466 	qc = ata_qc_from_tag(ap, ap->active_tag);
1467 	if (unlikely(!qc))			/* no active tag */
1468 		return;
1469 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1470 		return;
1471 
1472 	/* and finally, complete the ATA command */
1473 	qc->err_mask |= ac_err_mask(ata_status);
1474 	ata_qc_complete(qc);
1475 }
1476 
1477 static void mv_intr_edma(struct ata_port *ap)
1478 {
1479 	void __iomem *port_mmio = mv_ap_base(ap);
1480 	struct mv_host_priv *hpriv = ap->host->private_data;
1481 	struct mv_port_priv *pp = ap->private_data;
1482 	struct ata_queued_cmd *qc;
1483 	u32 out_index, in_index;
1484 	bool work_done = false;
1485 
1486 	/* get h/w response queue pointer */
1487 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1488 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1489 
1490 	while (1) {
1491 		u16 status;
1492 
1493 		/* get s/w response queue last-read pointer, and compare */
1494 		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1495 		if (in_index == out_index)
1496 			break;
1497 
1498 
1499 		/* 50xx: get active ATA command */
1500 		if (IS_GEN_I(hpriv))
1501 			qc = ata_qc_from_tag(ap, ap->active_tag);
1502 
1503 		/* 60xx: get active ATA command via tag, to enable support
1504 		 * for queueing.  this works transparently for queued and
1505 		 * non-queued modes.
1506 		 */
1507 		else {
1508 			unsigned int tag;
1509 
1510 			if (IS_GEN_II(hpriv))
1511 				tag = (le16_to_cpu(pp->crpb[out_index].id)
1512 					>> CRPB_IOID_SHIFT_6) & 0x3f;
1513 			else
1514 				tag = (le16_to_cpu(pp->crpb[out_index].id)
1515 					>> CRPB_IOID_SHIFT_7) & 0x3f;
1516 
1517 			qc = ata_qc_from_tag(ap, tag);
1518 		}
1519 
1520 		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1521 		 * bits (WARNING: might not necessarily be associated
1522 		 * with this command), which -should- be clear
1523 		 * if all is well
1524 		 */
1525 		status = le16_to_cpu(pp->crpb[out_index].flags);
1526 		if (unlikely(status & 0xff)) {
1527 			mv_err_intr(ap, qc);
1528 			return;
1529 		}
1530 
1531 		/* and finally, complete the ATA command */
1532 		if (qc) {
1533 			qc->err_mask |=
1534 				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1535 			ata_qc_complete(qc);
1536 		}
1537 
1538 		/* advance software response queue pointer, to
1539 		 * indicate (after the loop completes) to hardware
1540 		 * that we have consumed a response queue entry.
1541 		 */
1542 		work_done = true;
1543 		pp->resp_idx++;
1544 	}
1545 
1546 	if (work_done)
1547 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1548 			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1549 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1550 }
1551 
1552 /**
1553  *      mv_host_intr - Handle all interrupts on the given host controller
1554  *      @host: host specific structure
1555  *      @relevant: port error bits relevant to this host controller
1556  *      @hc: which host controller we're to look at
1557  *
1558  *      Read then write clear the HC interrupt status then walk each
1559  *      port connected to the HC and see if it needs servicing.  Port
1560  *      success ints are reported in the HC interrupt status reg, the
1561  *      port error ints are reported in the higher level main
1562  *      interrupt status register and thus are passed in via the
1563  *      'relevant' argument.
1564  *
1565  *      LOCKING:
1566  *      Inherited from caller.
1567  */
1568 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1569 {
1570 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1571 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1572 	u32 hc_irq_cause;
1573 	int port, port0;
1574 
1575 	if (hc == 0)
1576 		port0 = 0;
1577 	else
1578 		port0 = MV_PORTS_PER_HC;
1579 
1580 	/* we'll need the HC success int register in most cases */
1581 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1582 	if (!hc_irq_cause)
1583 		return;
1584 
1585 	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1586 
1587 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1588 		hc, relevant, hc_irq_cause);
1589 
1590 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1591 		struct ata_port *ap = host->ports[port];
1592 		struct mv_port_priv *pp = ap->private_data;
1593 		int have_err_bits, hard_port, shift;
1594 
1595 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1596 			continue;
1597 
1598 		shift = port << 1;		/* (port * 2) */
1599 		if (port >= MV_PORTS_PER_HC) {
1600 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1601 		}
1602 		have_err_bits = ((PORT0_ERR << shift) & relevant);
1603 
1604 		if (unlikely(have_err_bits)) {
1605 			struct ata_queued_cmd *qc;
1606 
1607 			qc = ata_qc_from_tag(ap, ap->active_tag);
1608 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1609 				continue;
1610 
1611 			mv_err_intr(ap, qc);
1612 			continue;
1613 		}
1614 
1615 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1616 
1617 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1618 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1619 				mv_intr_edma(ap);
1620 		} else {
1621 			if ((DEV_IRQ << hard_port) & hc_irq_cause)
1622 				mv_intr_pio(ap);
1623 		}
1624 	}
1625 	VPRINTK("EXIT\n");
1626 }
1627 
1628 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1629 {
1630 	struct ata_port *ap;
1631 	struct ata_queued_cmd *qc;
1632 	struct ata_eh_info *ehi;
1633 	unsigned int i, err_mask, printed = 0;
1634 	u32 err_cause;
1635 
1636 	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1637 
1638 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1639 		   err_cause);
1640 
1641 	DPRINTK("All regs @ PCI error\n");
1642 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1643 
1644 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1645 
1646 	for (i = 0; i < host->n_ports; i++) {
1647 		ap = host->ports[i];
1648 		if (!ata_port_offline(ap)) {
1649 			ehi = &ap->eh_info;
1650 			ata_ehi_clear_desc(ehi);
1651 			if (!printed++)
1652 				ata_ehi_push_desc(ehi,
1653 					"PCI err cause 0x%08x", err_cause);
1654 			err_mask = AC_ERR_HOST_BUS;
1655 			ehi->action = ATA_EH_HARDRESET;
1656 			qc = ata_qc_from_tag(ap, ap->active_tag);
1657 			if (qc)
1658 				qc->err_mask |= err_mask;
1659 			else
1660 				ehi->err_mask |= err_mask;
1661 
1662 			ata_port_freeze(ap);
1663 		}
1664 	}
1665 }
1666 
1667 /**
1668  *      mv_interrupt - Main interrupt event handler
1669  *      @irq: unused
1670  *      @dev_instance: private data; in this case the host structure
1671  *
1672  *      Read the read only register to determine if any host
1673  *      controllers have pending interrupts.  If so, call lower level
1674  *      routine to handle.  Also check for PCI errors which are only
1675  *      reported here.
1676  *
1677  *      LOCKING:
1678  *      This routine holds the host lock while processing pending
1679  *      interrupts.
1680  */
1681 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1682 {
1683 	struct ata_host *host = dev_instance;
1684 	unsigned int hc, handled = 0, n_hcs;
1685 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1686 	u32 irq_stat;
1687 
1688 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1689 
1690 	/* check the cases where we either have nothing pending or have read
1691 	 * a bogus register value which can indicate HW removal or PCI fault
1692 	 */
1693 	if (!irq_stat || (0xffffffffU == irq_stat))
1694 		return IRQ_NONE;
1695 
1696 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1697 	spin_lock(&host->lock);
1698 
1699 	if (unlikely(irq_stat & PCI_ERR)) {
1700 		mv_pci_error(host, mmio);
1701 		handled = 1;
1702 		goto out_unlock;	/* skip all other HC irq handling */
1703 	}
1704 
1705 	for (hc = 0; hc < n_hcs; hc++) {
1706 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1707 		if (relevant) {
1708 			mv_host_intr(host, relevant, hc);
1709 			handled = 1;
1710 		}
1711 	}
1712 
1713 out_unlock:
1714 	spin_unlock(&host->lock);
1715 
1716 	return IRQ_RETVAL(handled);
1717 }
1718 
1719 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1720 {
1721 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1722 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1723 
1724 	return hc_mmio + ofs;
1725 }
1726 
1727 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1728 {
1729 	unsigned int ofs;
1730 
1731 	switch (sc_reg_in) {
1732 	case SCR_STATUS:
1733 	case SCR_ERROR:
1734 	case SCR_CONTROL:
1735 		ofs = sc_reg_in * sizeof(u32);
1736 		break;
1737 	default:
1738 		ofs = 0xffffffffU;
1739 		break;
1740 	}
1741 	return ofs;
1742 }
1743 
1744 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1745 {
1746 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1747 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1748 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1749 
1750 	if (ofs != 0xffffffffU)
1751 		return readl(addr + ofs);
1752 	else
1753 		return (u32) ofs;
1754 }
1755 
1756 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1757 {
1758 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1759 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1760 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1761 
1762 	if (ofs != 0xffffffffU)
1763 		writelfl(val, addr + ofs);
1764 }
1765 
1766 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1767 {
1768 	int early_5080;
1769 
1770 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1771 
1772 	if (!early_5080) {
1773 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1774 		tmp |= (1 << 0);
1775 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1776 	}
1777 
1778 	mv_reset_pci_bus(pdev, mmio);
1779 }
1780 
1781 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1782 {
1783 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1784 }
1785 
1786 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1787 			   void __iomem *mmio)
1788 {
1789 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1790 	u32 tmp;
1791 
1792 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1793 
1794 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1795 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1796 }
1797 
1798 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1799 {
1800 	u32 tmp;
1801 
1802 	writel(0, mmio + MV_GPIO_PORT_CTL);
1803 
1804 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1805 
1806 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1807 	tmp |= ~(1 << 0);	/* note: this sets every bit except bit 0 */
1808 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1809 }
1810 
1811 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1812 			   unsigned int port)
1813 {
1814 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1815 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1816 	u32 tmp;
1817 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1818 
1819 	if (fix_apm_sq) {
1820 		tmp = readl(phy_mmio + MV5_LT_MODE);
1821 		tmp |= (1 << 19);
1822 		writel(tmp, phy_mmio + MV5_LT_MODE);
1823 
1824 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1825 		tmp &= ~0x3;
1826 		tmp |= 0x1;
1827 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1828 	}
1829 
1830 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1831 	tmp &= ~mask;
1832 	tmp |= hpriv->signal[port].pre;
1833 	tmp |= hpriv->signal[port].amps;
1834 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1835 }
1836 
1837 
1838 #undef ZERO
1839 #define ZERO(reg) writel(0, port_mmio + (reg))
1840 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1841 			     unsigned int port)
1842 {
1843 	void __iomem *port_mmio = mv_port_base(mmio, port);
1844 
1845 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);	/* disable EDMA */
1846 
1847 	mv_channel_reset(hpriv, mmio, port);
1848 
1849 	ZERO(0x028);	/* command */
1850 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1851 	ZERO(0x004);	/* timer */
1852 	ZERO(0x008);	/* irq err cause */
1853 	ZERO(0x00c);	/* irq err mask */
1854 	ZERO(0x010);	/* rq bah */
1855 	ZERO(0x014);	/* rq inp */
1856 	ZERO(0x018);	/* rq outp */
1857 	ZERO(0x01c);	/* respq bah */
1858 	ZERO(0x024);	/* respq outp */
1859 	ZERO(0x020);	/* respq inp */
1860 	ZERO(0x02c);	/* test control */
1861 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1862 }
1863 #undef ZERO
1864 
1865 #define ZERO(reg) writel(0, hc_mmio + (reg))
1866 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1867 			unsigned int hc)
1868 {
1869 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1870 	u32 tmp;
1871 
1872 	ZERO(0x00c);
1873 	ZERO(0x010);
1874 	ZERO(0x014);
1875 	ZERO(0x018);
1876 
1877 	tmp = readl(hc_mmio + 0x20);
1878 	tmp &= 0x1c1c1c1c;
1879 	tmp |= 0x03030303;
1880 	writel(tmp, hc_mmio + 0x20);
1881 }
1882 #undef ZERO
1883 
1884 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1885 			unsigned int n_hc)
1886 {
1887 	unsigned int hc, port;
1888 
1889 	for (hc = 0; hc < n_hc; hc++) {
1890 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1891 			mv5_reset_hc_port(hpriv, mmio,
1892 					  (hc * MV_PORTS_PER_HC) + port);
1893 
1894 		mv5_reset_one_hc(hpriv, mmio, hc);
1895 	}
1896 
1897 	return 0;
1898 }
1899 
1900 #undef ZERO
1901 #define ZERO(reg) writel(0, mmio + (reg))
1902 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1903 {
1904 	u32 tmp;
1905 
1906 	tmp = readl(mmio + MV_PCI_MODE);
1907 	tmp &= 0xff00ffff;
1908 	writel(tmp, mmio + MV_PCI_MODE);
1909 
1910 	ZERO(MV_PCI_DISC_TIMER);
1911 	ZERO(MV_PCI_MSI_TRIGGER);
1912 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1913 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1914 	ZERO(MV_PCI_SERR_MASK);
1915 	ZERO(PCI_IRQ_CAUSE_OFS);
1916 	ZERO(PCI_IRQ_MASK_OFS);
1917 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1918 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1919 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1920 	ZERO(MV_PCI_ERR_COMMAND);
1921 }
1922 #undef ZERO
1923 
1924 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1925 {
1926 	u32 tmp;
1927 
1928 	mv5_reset_flash(hpriv, mmio);
1929 
1930 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1931 	tmp &= 0x3;
1932 	tmp |= (1 << 5) | (1 << 6);
1933 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1934 }
1935 
1936 /**
1937  *      mv6_reset_hc - Perform the 6xxx global soft reset
1938  *      @mmio: base address of the HBA
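 *      @hpriv: host private data (not used by this routine)
 *      @n_hc: number of host controllers on the HBA (not used by this routine)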
1939  *
1940  *      This routine only applies to 6xxx parts.
1941  *
1942  *      LOCKING:
1943  *      Inherited from caller.
1944  */
1945 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1946 			unsigned int n_hc)
1947 {
1948 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1949 	int i, rc = 0;
1950 	u32 t;
1951 
1952 	/* Follow the procedure defined in the PCI "main command and status
1953 	 * register" table.
1954 	 */
1955 	t = readl(reg);
1956 	writel(t | STOP_PCI_MASTER, reg);
1957 
1958 	for (i = 0; i < 1000; i++) {
1959 		udelay(1);
1960 		t = readl(reg);
1961 		if (PCI_MASTER_EMPTY & t) {
1962 			break;
1963 		}
1964 	}
1965 	if (!(PCI_MASTER_EMPTY & t)) {
1966 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1967 		rc = 1;
1968 		goto done;
1969 	}
1970 
1971 	/* set reset */
1972 	i = 5;
1973 	do {
1974 		writel(t | GLOB_SFT_RST, reg);
1975 		t = readl(reg);
1976 		udelay(1);
1977 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
1978 
1979 	if (!(GLOB_SFT_RST & t)) {
1980 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1981 		rc = 1;
1982 		goto done;
1983 	}
1984 
1985 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
1986 	i = 5;
1987 	do {
1988 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1989 		t = readl(reg);
1990 		udelay(1);
1991 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
1992 
1993 	if (GLOB_SFT_RST & t) {
1994 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1995 		rc = 1;
1996 	}
1997 done:
1998 	return rc;
1999 }
2000 
2001 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2002 			   void __iomem *mmio)
2003 {
2004 	void __iomem *port_mmio;
2005 	u32 tmp;
2006 
2007 	tmp = readl(mmio + MV_RESET_CFG);
2008 	if ((tmp & (1 << 0)) == 0) {
2009 		hpriv->signal[idx].amps = 0x7 << 8;
2010 		hpriv->signal[idx].pre = 0x1 << 5;
2011 		return;
2012 	}
2013 
2014 	port_mmio = mv_port_base(mmio, idx);
2015 	tmp = readl(port_mmio + PHY_MODE2);
2016 
2017 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2018 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2019 }
2020 
2021 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2022 {
2023 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2024 }
2025 
2026 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2027 			   unsigned int port)
2028 {
2029 	void __iomem *port_mmio = mv_port_base(mmio, port);
2030 
2031 	u32 hp_flags = hpriv->hp_flags;
2032 	int fix_phy_mode2 =
2033 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2034 	int fix_phy_mode4 =
2035 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2036 	u32 m2, tmp;
2037 
2038 	if (fix_phy_mode2) {
2039 		m2 = readl(port_mmio + PHY_MODE2);
2040 		m2 &= ~(1 << 16);
2041 		m2 |= (1 << 31);
2042 		writel(m2, port_mmio + PHY_MODE2);
2043 
2044 		udelay(200);
2045 
2046 		m2 = readl(port_mmio + PHY_MODE2);
2047 		m2 &= ~((1 << 16) | (1 << 31));
2048 		writel(m2, port_mmio + PHY_MODE2);
2049 
2050 		udelay(200);
2051 	}
2052 
2053 	/* who knows what this magic does */
2054 	tmp = readl(port_mmio + PHY_MODE3);
2055 	tmp &= ~0x7F800000;
2056 	tmp |= 0x2A800000;
2057 	writel(tmp, port_mmio + PHY_MODE3);
2058 
2059 	if (fix_phy_mode4) {
2060 		u32 m4;
2061 
2062 		m4 = readl(port_mmio + PHY_MODE4);
2063 
2064 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2065 			tmp = readl(port_mmio + 0x310);
2066 
2067 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2068 
2069 		writel(m4, port_mmio + PHY_MODE4);
2070 
2071 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2072 			writel(tmp, port_mmio + 0x310);
2073 	}
2074 
2075 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2076 	m2 = readl(port_mmio + PHY_MODE2);
2077 
2078 	m2 &= ~MV_M2_PREAMP_MASK;
2079 	m2 |= hpriv->signal[port].amps;
2080 	m2 |= hpriv->signal[port].pre;
2081 	m2 &= ~(1 << 16);
2082 
2083 	/* according to mvSata 3.6.1, some IIE values are fixed */
2084 	if (IS_GEN_IIE(hpriv)) {
2085 		m2 &= ~0xC30FF01F;
2086 		m2 |= 0x0000900F;
2087 	}
2088 
2089 	writel(m2, port_mmio + PHY_MODE2);
2090 }
2091 
2092 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2093 			     unsigned int port_no)
2094 {
2095 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2096 
2097 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2098 
2099 	if (IS_GEN_II(hpriv)) {
2100 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2101 		ifctl |= (1 << 7);		/* enable gen2i speed */
2102 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2103 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2104 	}
2105 
2106 	udelay(25);		/* allow reset propagation */
2107 
2108 	/* Spec never mentions clearing the bit.  Marvell's driver does
2109 	 * clear the bit, however.
2110 	 */
2111 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2112 
2113 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2114 
2115 	if (IS_GEN_I(hpriv))
2116 		mdelay(1);
2117 }
2118 
2119 /**
2120  *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2121  *      @ap: ATA channel to manipulate
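 *      @class: resulting ATA device class (written by this routine)
 *      @deadline: deadline jiffies for COMRESET and readiness polling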
2122  *
2123  *      Part of this is taken from __sata_phy_reset.  Note that this
2124  *      routine calls msleep(), so it may sleep and must not be called
2125  *      from interrupt context.
2126  *
2127  *      LOCKING:
2128  *      Inherited from caller.
2129  */
2130 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2131 			 unsigned long deadline)
2132 {
2133 	struct mv_port_priv *pp	= ap->private_data;
2134 	struct mv_host_priv *hpriv = ap->host->private_data;
2135 	void __iomem *port_mmio = mv_ap_base(ap);
2136 	int retry = 5;
2137 	u32 sstatus;
2138 
2139 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2140 
2141 	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2142 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2143 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2144 
2145 	/* Issue COMRESET via SControl */
2146 comreset_retry:
2147 	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);	/* DET=1: assert COMRESET */
2148 	msleep(1);
2149 
2150 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);	/* DET=0: release COMRESET */
2151 	msleep(20);
2152 
2153 	do {
2154 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2155 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2156 			break;
2157 
2158 		msleep(1);
2159 	} while (time_before(jiffies, deadline));
2160 
2161 	/* work around errata */
2162 	if (IS_GEN_II(hpriv) &&
2163 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2164 	    (retry-- > 0))
2165 		goto comreset_retry;
2166 
2167 	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2168 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2169 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2170 
2171 	if (ata_port_offline(ap)) {
2172 		*class = ATA_DEV_NONE;
2173 		return;
2174 	}
2175 
2176 	/* Even after SStatus reflects that the device is ready,
2177 	 * it seems to take a while for the link to be fully
2178 	 * established (and thus Status is no longer 0x80/0x7F),
2179 	 * so we poll for that here.
2180 	 */
2181 	retry = 20;
2182 	while (1) {
2183 		u8 drv_stat = ata_check_status(ap);
2184 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2185 			break;
2186 		msleep(500);
2187 		if (retry-- <= 0)
2188 			break;
2189 		if (time_after(jiffies, deadline))
2190 			break;
2191 	}
2192 
2193 	/* FIXME: if we passed the deadline, the following
2194 	 * code probably produces an invalid result
2195 	 */
2196 
2197 	/* finally, read device signature from TF registers */
2198 	*class = ata_dev_try_classify(ap, 0, NULL);
2199 
2200 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2201 
2202 	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2203 
2204 	VPRINTK("EXIT\n");
2205 }
2206 
2207 static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2208 {
2209 	struct mv_port_priv *pp	= ap->private_data;
2210 	struct ata_eh_context *ehc = &ap->eh_context;
2211 	int rc;
2212 
2213 	rc = mv_stop_dma(ap);
2214 	if (rc)
2215 		ehc->i.action |= ATA_EH_HARDRESET;
2216 
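	/* the first EH pass on this port always forces a hardreset so the
	 * channel gets a full COMRESET
	 */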
2217 	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2218 		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2219 		ehc->i.action |= ATA_EH_HARDRESET;
2220 	}
2221 
2222 	/* if we're about to do hardreset, nothing more to do */
2223 	if (ehc->i.action & ATA_EH_HARDRESET)
2224 		return 0;
2225 
2226 	if (ata_port_online(ap))
2227 		rc = ata_wait_ready(ap, deadline);
2228 	else
2229 		rc = -ENODEV;
2230 
2231 	return rc;
2232 }
2233 
2234 static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2235 			unsigned long deadline)
2236 {
2237 	struct mv_host_priv *hpriv = ap->host->private_data;
2238 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2239 
2240 	mv_stop_dma(ap);
2241 
2242 	mv_channel_reset(hpriv, mmio, ap->port_no);
2243 
2244 	mv_phy_reset(ap, class, deadline);
2245 
2246 	return 0;
2247 }
2248 
2249 static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2250 {
2251 	u32 serr;
2252 
2253 	/* print link status */
2254 	sata_print_link_status(ap);
2255 
2256 	/* clear SError */
2257 	sata_scr_read(ap, SCR_ERROR, &serr);
2258 	sata_scr_write_flush(ap, SCR_ERROR, serr);
2259 
2260 	/* bail out if no device is present */
2261 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2262 		DPRINTK("EXIT, no device\n");
2263 		return;
2264 	}
2265 
2266 	/* set up device control */
2267 	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2268 }
2269 
2270 static void mv_error_handler(struct ata_port *ap)
2271 {
2272 	ata_do_eh(ap, mv_prereset, ata_std_softreset,
2273 		  mv_hardreset, mv_postreset);
2274 }
2275 
2276 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2277 {
2278 	mv_stop_dma(qc->ap);
2279 }
2280 
2281 static void mv_eh_freeze(struct ata_port *ap)
2282 {
2283 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2284 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2285 	u32 tmp, mask;
2286 	unsigned int shift;
2287 
2288 	/* FIXME: handle coalescing completion events properly */
2289 
2290 	shift = ap->port_no * 2;
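	/* each port owns an err/done bit pair in the main IRQ mask; ports
	 * on the second HC are shifted up by one extra bit
	 */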
2291 	if (hc > 0)
2292 		shift++;
2293 
2294 	mask = 0x3 << shift;
2295 
2296 	/* disable assertion of portN err, done events */
2297 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2298 	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2299 }
2300 
2301 static void mv_eh_thaw(struct ata_port *ap)
2302 {
2303 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2304 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2305 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2306 	void __iomem *port_mmio = mv_ap_base(ap);
2307 	u32 tmp, mask, hc_irq_cause;
2308 	unsigned int shift, hc_port_no = ap->port_no;
2309 
2310 	/* FIXME: handle coalescing completion events properly */
2311 
2312 	shift = ap->port_no * 2;
2313 	if (hc > 0) {
2314 		shift++;
2315 		hc_port_no -= 4;
2316 	}
2317 
2318 	mask = 0x3 << shift;
2319 
2320 	/* clear EDMA errors on this port */
2321 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2322 
2323 	/* clear pending irq events */
2324 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2325 	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2326 	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2327 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2328 
2329 	/* enable assertion of portN err, done events */
2330 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2331 	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2332 }
2333 
2334 /**
2335  *      mv_port_init - Perform some early initialization on a single port.
2336  *      @port: libata data structure storing shadow register addresses
2337  *      @port_mmio: base address of the port
2338  *
2339  *      Initialize shadow register mmio addresses, clear outstanding
2340  *      interrupts on the port, and unmask interrupts for the future
2341  *      start of the port.
2342  *
2343  *      LOCKING:
2344  *      Inherited from caller.
2345  */
2346 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2347 {
2348 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2349 	unsigned serr_ofs;
2350 
2351 	/* PIO related setup
2352 	 */
2353 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2354 	port->error_addr =
2355 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2356 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2357 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2358 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2359 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2360 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2361 	port->status_addr =
2362 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2363 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2364 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2365 
2366 	/* unused: */
2367 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2368 
2369 	/* Clear any currently outstanding port interrupt conditions */
2370 	serr_ofs = mv_scr_offset(SCR_ERROR);
2371 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2372 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2373 
2374 	/* unmask all EDMA error interrupts */
2375 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2376 
2377 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2378 		readl(port_mmio + EDMA_CFG_OFS),
2379 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2380 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2381 }
2382 
2383 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2384 {
2385 	struct pci_dev *pdev = to_pci_dev(host->dev);
2386 	struct mv_host_priv *hpriv = host->private_data;
2387 	u32 hp_flags = hpriv->hp_flags;
2388 
2389 	switch (board_idx) {
2390 	case chip_5080:
2391 		hpriv->ops = &mv5xxx_ops;
2392 		hp_flags |= MV_HP_GEN_I;
2393 
2394 		switch (pdev->revision) {
2395 		case 0x1:
2396 			hp_flags |= MV_HP_ERRATA_50XXB0;
2397 			break;
2398 		case 0x3:
2399 			hp_flags |= MV_HP_ERRATA_50XXB2;
2400 			break;
2401 		default:
2402 			dev_printk(KERN_WARNING, &pdev->dev,
2403 			   "Applying 50XXB2 workarounds to unknown rev\n");
2404 			hp_flags |= MV_HP_ERRATA_50XXB2;
2405 			break;
2406 		}
2407 		break;
2408 
2409 	case chip_504x:
2410 	case chip_508x:
2411 		hpriv->ops = &mv5xxx_ops;
2412 		hp_flags |= MV_HP_GEN_I;
2413 
2414 		switch (pdev->revision) {
2415 		case 0x0:
2416 			hp_flags |= MV_HP_ERRATA_50XXB0;
2417 			break;
2418 		case 0x3:
2419 			hp_flags |= MV_HP_ERRATA_50XXB2;
2420 			break;
2421 		default:
2422 			dev_printk(KERN_WARNING, &pdev->dev,
2423 			   "Applying 50XXB2 workarounds to unknown rev\n");
2424 			hp_flags |= MV_HP_ERRATA_50XXB2;
2425 			break;
2426 		}
2427 		break;
2428 
2429 	case chip_604x:
2430 	case chip_608x:
2431 		hpriv->ops = &mv6xxx_ops;
2432 		hp_flags |= MV_HP_GEN_II;
2433 
2434 		switch (pdev->revision) {
2435 		case 0x7:
2436 			hp_flags |= MV_HP_ERRATA_60X1B2;
2437 			break;
2438 		case 0x9:
2439 			hp_flags |= MV_HP_ERRATA_60X1C0;
2440 			break;
2441 		default:
2442 			dev_printk(KERN_WARNING, &pdev->dev,
2443 				   "Applying 60X1B2 workarounds to unknown rev\n");
2444 			hp_flags |= MV_HP_ERRATA_60X1B2;
2445 			break;
2446 		}
2447 		break;
2448 
2449 	case chip_7042:
2450 	case chip_6042:
2451 		hpriv->ops = &mv6xxx_ops;
2452 		hp_flags |= MV_HP_GEN_IIE;
2453 
2454 		switch (pdev->revision) {
2455 		case 0x0:
2456 			hp_flags |= MV_HP_ERRATA_XX42A0;
2457 			break;
2458 		case 0x1:
2459 			hp_flags |= MV_HP_ERRATA_60X1C0;
2460 			break;
2461 		default:
2462 			dev_printk(KERN_WARNING, &pdev->dev,
2463 			   "Applying 60X1C0 workarounds to unknown rev\n");
2464 			hp_flags |= MV_HP_ERRATA_60X1C0;
2465 			break;
2466 		}
2467 		break;
2468 
2469 	default:
2470 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2471 		return 1;
2472 	}
2473 
2474 	hpriv->hp_flags = hp_flags;
2475 
2476 	return 0;
2477 }
2478 
2479 /**
2480  *      mv_init_host - Perform some early initialization of the host.
2481  *      @host: ATA host to initialize
2482  *      @board_idx: controller index
2483  *
2484  *      If possible, do an early global reset of the host.  Then do
2485  *      our port init and clear/unmask all/relevant host interrupts.
2486  *
2487  *      LOCKING:
2488  *      Inherited from caller.
2489  */
2490 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2491 {
2492 	int rc = 0, n_hc, port, hc;
2493 	struct pci_dev *pdev = to_pci_dev(host->dev);
2494 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2495 	struct mv_host_priv *hpriv = host->private_data;
2496 
2497 	/* global interrupt mask */
2498 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2499 
2500 	rc = mv_chip_id(host, board_idx);
2501 	if (rc)
2502 		goto done;
2503 
2504 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2505 
2506 	for (port = 0; port < host->n_ports; port++)
2507 		hpriv->ops->read_preamp(hpriv, port, mmio);
2508 
2509 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2510 	if (rc)
2511 		goto done;
2512 
2513 	hpriv->ops->reset_flash(hpriv, mmio);
2514 	hpriv->ops->reset_bus(pdev, mmio);
2515 	hpriv->ops->enable_leds(hpriv, mmio);
2516 
2517 	for (port = 0; port < host->n_ports; port++) {
2518 		if (IS_GEN_II(hpriv)) {
2519 			void __iomem *port_mmio = mv_port_base(mmio, port);
2520 
2521 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2522 			ifctl |= (1 << 7);		/* enable gen2i speed */
2523 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2524 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2525 		}
2526 
2527 		hpriv->ops->phy_errata(hpriv, mmio, port);
2528 	}
2529 
2530 	for (port = 0; port < host->n_ports; port++) {
2531 		void __iomem *port_mmio = mv_port_base(mmio, port);
2532 		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2533 	}
2534 
2535 	for (hc = 0; hc < n_hc; hc++) {
2536 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2537 
2538 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2539 			"(before clear)=0x%08x\n", hc,
2540 			readl(hc_mmio + HC_CFG_OFS),
2541 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2542 
2543 		/* Clear any currently outstanding hc interrupt conditions */
2544 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2545 	}
2546 
2547 	/* Clear any currently outstanding host interrupt conditions */
2548 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2549 
2550 	/* and unmask interrupt generation for host regs */
2551 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2552 
2553 	if (IS_GEN_I(hpriv))
2554 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2555 	else
2556 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2557 
2558 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2559 		"PCI int cause/mask=0x%08x/0x%08x\n",
2560 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2561 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2562 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2563 		readl(mmio + PCI_IRQ_MASK_OFS));
2564 
2565 done:
2566 	return rc;
2567 }
2568 
2569 /**
2570  *      mv_print_info - Dump key info to kernel log for perusal.
2571  *      @host: ATA host to print info about
2572  *
2573  *      FIXME: complete this.
2574  *
2575  *      LOCKING:
2576  *      Inherited from caller.
2577  */
2578 static void mv_print_info(struct ata_host *host)
2579 {
2580 	struct pci_dev *pdev = to_pci_dev(host->dev);
2581 	struct mv_host_priv *hpriv = host->private_data;
2582 	u8 scc;
2583 	const char *scc_s, *gen;
2584 
2585 	/* Read the PCI class code so we can report whether the controller
2586 	 * presents itself in SCSI or RAID mode.
2587 	 */
2588 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2589 	if (scc == 0)
2590 		scc_s = "SCSI";
2591 	else if (scc == 0x01)
2592 		scc_s = "RAID";
2593 	else
2594 		scc_s = "?";
2595 
2596 	if (IS_GEN_I(hpriv))
2597 		gen = "I";
2598 	else if (IS_GEN_II(hpriv))
2599 		gen = "II";
2600 	else if (IS_GEN_IIE(hpriv))
2601 		gen = "IIE";
2602 	else
2603 		gen = "?";
2604 
2605 	dev_printk(KERN_INFO, &pdev->dev,
2606 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2607 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2608 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2609 }
2610 
2611 /**
2612  *      mv_init_one - handle a positive probe of a Marvell host
2613  *      @pdev: PCI device found
2614  *      @ent: PCI device ID entry for the matched host
2615  *
2616  *      LOCKING:
2617  *      Inherited from caller.
2618  */
2619 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2620 {
2621 	static int printed_version;
2622 	unsigned int board_idx = (unsigned int)ent->driver_data;
2623 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2624 	struct ata_host *host;
2625 	struct mv_host_priv *hpriv;
2626 	int n_ports, rc;
2627 
2628 	if (!printed_version++)
2629 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2630 
2631 	/* allocate host */
2632 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2633 
2634 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2635 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2636 	if (!host || !hpriv)
2637 		return -ENOMEM;
2638 	host->private_data = hpriv;
2639 
2640 	/* acquire resources */
2641 	rc = pcim_enable_device(pdev);
2642 	if (rc)
2643 		return rc;
2644 
2645 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2646 	if (rc == -EBUSY)
2647 		pcim_pin_device(pdev);
2648 	if (rc)
2649 		return rc;
2650 	host->iomap = pcim_iomap_table(pdev);
2651 
2652 	rc = pci_go_64(pdev);
2653 	if (rc)
2654 		return rc;
2655 
2656 	/* initialize adapter */
2657 	rc = mv_init_host(host, board_idx);
2658 	if (rc)
2659 		return rc;
2660 
2661 	/* Enable interrupts */
2662 	if (msi && pci_enable_msi(pdev))
2663 		pci_intx(pdev, 1);
2664 
2665 	mv_dump_pci_cfg(pdev, 0x68);
2666 	mv_print_info(host);
2667 
2668 	pci_set_master(pdev);
2669 	pci_try_set_mwi(pdev);
2670 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2671 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2672 }
2673 
2674 static int __init mv_init(void)
2675 {
2676 	return pci_register_driver(&mv_pci_driver);
2677 }
2678 
2679 static void __exit mv_exit(void)
2680 {
2681 	pci_unregister_driver(&mv_pci_driver);
2682 }
2683 
2684 MODULE_AUTHOR("Brett Russ");
2685 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2686 MODULE_LICENSE("GPL");
2687 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2688 MODULE_VERSION(DRV_VERSION);
2689 
2690 module_param(msi, int, 0444);
2691 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2692 
2693 module_init(mv_init);
2694 module_exit(mv_exit);
2695