xref: /openbmc/linux/drivers/ata/sata_mv.c (revision c21b37f6)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25   sata_mv TODO list:
26 
27   1) Needs a full errata audit for all chipsets.  I implemented most
28   of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember that a couple of workarounds (one related to PCI-X)
30   are still needed.
31 
32   4) Add NCQ support (easy to intermediate, once new-EH support appears)
33 
34   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35 
36   6) Add port multiplier support (intermediate)
37 
38   8) Develop a low-power-consumption strategy, and implement it.
39 
40   9) [Experiment, low priority] See if ATAPI can be supported using
41   "unknown FIS" or "vendor-specific FIS" support, or something creative
42   like that.
43 
44   10) [Experiment, low priority] Investigate interrupt coalescing.
45   Especially with PCI Message Signalled Interrupts (MSI), the
46   overhead saved by interrupt mitigation is quite often not
47   worth the added latency cost.
48 
49   11) [Experiment, Marvell value added] Is it possible to use target
50   mode to cross-connect two Linux boxes with Marvell cards?  If so,
51   creating LibATA target mode support would be very interesting.
52 
53   Target mode, for those without docs, is the ability to directly
54   connect two SATA controllers.
55 
56   13) Verify that 7042 is fully supported.  I only have a 6042.
57 
58 */
59 
60 
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <linux/libata.h>
73 
74 #define DRV_NAME	"sata_mv"
75 #define DRV_VERSION	"0.81"
76 
77 enum {
78 	/* BAR's are enumerated in terms of pci_resource_start() terms */
79 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
80 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
81 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
82 
83 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
84 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
85 
86 	MV_PCI_REG_BASE		= 0,
87 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
88 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
89 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
90 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
91 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
92 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
93 
94 	MV_SATAHC0_REG_BASE	= 0x20000,
95 	MV_FLASH_CTL		= 0x1046c,
96 	MV_GPIO_PORT_CTL	= 0x104f0,
97 	MV_RESET_CFG		= 0x180d8,
98 
99 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
100 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
101 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
102 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
103 
104 	MV_MAX_Q_DEPTH		= 32,
105 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
106 
107 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
108 	 * CRPB needs alignment on a 256B boundary. Size == 256B
109 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB (1KB + 256B + 2816B)
110 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
111 	 */
112 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
113 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
114 	MV_MAX_SG_CT		= 176,
115 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
116 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
117 
118 	MV_PORTS_PER_HC		= 4,
119 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
120 	MV_PORT_HC_SHIFT	= 2,
121 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
122 	MV_PORT_MASK		= 3,
123 
124 	/* Host Flags */
125 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
126 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
127 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
128 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
129 				  ATA_FLAG_PIO_POLLING,
130 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
131 
132 	CRQB_FLAG_READ		= (1 << 0),
133 	CRQB_TAG_SHIFT		= 1,
134 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
135 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
136 	CRQB_CMD_ADDR_SHIFT	= 8,
137 	CRQB_CMD_CS		= (0x2 << 11),
138 	CRQB_CMD_LAST		= (1 << 15),
139 
140 	CRPB_FLAG_STATUS_SHIFT	= 8,
141 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
142 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
143 
144 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
145 
146 	/* PCI interface registers */
147 
148 	PCI_COMMAND_OFS		= 0xc00,
149 
150 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
151 	STOP_PCI_MASTER		= (1 << 2),
152 	PCI_MASTER_EMPTY	= (1 << 3),
153 	GLOB_SFT_RST		= (1 << 4),
154 
155 	MV_PCI_MODE		= 0xd00,
156 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
157 	MV_PCI_DISC_TIMER	= 0xd04,
158 	MV_PCI_MSI_TRIGGER	= 0xc38,
159 	MV_PCI_SERR_MASK	= 0xc28,
160 	MV_PCI_XBAR_TMOUT	= 0x1d04,
161 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
162 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
163 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
164 	MV_PCI_ERR_COMMAND	= 0x1d50,
165 
166 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
167 	PCI_IRQ_MASK_OFS		= 0x1d5c,
168 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
169 
170 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
171 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
172 	PORT0_ERR		= (1 << 0),	/* shift by port # */
173 	PORT0_DONE		= (1 << 1),	/* shift by port # */
174 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
175 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
176 	PCI_ERR			= (1 << 18),
177 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
178 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
179 	PORTS_0_3_COAL_DONE	= (1 << 8),
180 	PORTS_4_7_COAL_DONE	= (1 << 17),
181 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
182 	GPIO_INT		= (1 << 22),
183 	SELF_INT		= (1 << 23),
184 	TWSI_INT		= (1 << 24),
185 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
186 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
187 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
188 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
189 				   HC_MAIN_RSVD),
190 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
191 				   HC_MAIN_RSVD_5),
192 
193 	/* SATAHC registers */
194 	HC_CFG_OFS		= 0,
195 
196 	HC_IRQ_CAUSE_OFS	= 0x14,
197 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
198 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
199 	DEV_IRQ			= (1 << 8),	/* shift by port # */
200 
201 	/* Shadow block registers */
202 	SHD_BLK_OFS		= 0x100,
203 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
204 
205 	/* SATA registers */
206 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
207 	SATA_ACTIVE_OFS		= 0x350,
208 	PHY_MODE3		= 0x310,
209 	PHY_MODE4		= 0x314,
210 	PHY_MODE2		= 0x330,
211 	MV5_PHY_MODE		= 0x74,
212 	MV5_LT_MODE		= 0x30,
213 	MV5_PHY_CTL		= 0x0C,
214 	SATA_INTERFACE_CTL	= 0x050,
215 
216 	MV_M2_PREAMP_MASK	= 0x7e0,
217 
218 	/* Port registers */
219 	EDMA_CFG_OFS		= 0,
220 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
221 	EDMA_CFG_NCQ		= (1 << 5),
222 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
223 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
224 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
225 
226 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
227 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
228 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
229 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
230 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
231 	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
232 	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
233 	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
234 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
235 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
236 	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
237 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
238 	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
239 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
240 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
241 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
242 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
243 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
244 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
245 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
246 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
247 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
248 	EDMA_ERR_OVERRUN_5	= (1 << 5),
249 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
250 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
251 				  EDMA_ERR_PRD_PAR |
252 				  EDMA_ERR_DEV_DCON |
253 				  EDMA_ERR_DEV_CON |
254 				  EDMA_ERR_SERR |
255 				  EDMA_ERR_SELF_DIS |
256 				  EDMA_ERR_CRQB_PAR |
257 				  EDMA_ERR_CRPB_PAR |
258 				  EDMA_ERR_INTRL_PAR |
259 				  EDMA_ERR_IORDY |
260 				  EDMA_ERR_LNK_CTRL_RX_2 |
261 				  EDMA_ERR_LNK_DATA_RX |
262 				  EDMA_ERR_LNK_DATA_TX |
263 				  EDMA_ERR_TRANS_PROTO,
264 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
265 				  EDMA_ERR_PRD_PAR |
266 				  EDMA_ERR_DEV_DCON |
267 				  EDMA_ERR_DEV_CON |
268 				  EDMA_ERR_OVERRUN_5 |
269 				  EDMA_ERR_UNDERRUN_5 |
270 				  EDMA_ERR_SELF_DIS_5 |
271 				  EDMA_ERR_CRQB_PAR |
272 				  EDMA_ERR_CRPB_PAR |
273 				  EDMA_ERR_INTRL_PAR |
274 				  EDMA_ERR_IORDY,
275 
276 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
277 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
278 
279 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
280 	EDMA_REQ_Q_PTR_SHIFT	= 5,
281 
282 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
283 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
284 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
285 	EDMA_RSP_Q_PTR_SHIFT	= 3,
286 
287 	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
288 	EDMA_EN			= (1 << 0),	/* enable EDMA */
289 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
290 	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
291 
292 	EDMA_IORDY_TMOUT	= 0x34,
293 	EDMA_ARB_CFG		= 0x38,
294 
295 	/* Host private flags (hp_flags) */
296 	MV_HP_FLAG_MSI		= (1 << 0),
297 	MV_HP_ERRATA_50XXB0	= (1 << 1),
298 	MV_HP_ERRATA_50XXB2	= (1 << 2),
299 	MV_HP_ERRATA_60X1B2	= (1 << 3),
300 	MV_HP_ERRATA_60X1C0	= (1 << 4),
301 	MV_HP_ERRATA_XX42A0	= (1 << 5),
302 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
303 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
304 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
305 
306 	/* Port private flags (pp_flags) */
307 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
308 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
309 };
310 
311 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
312 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
313 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
314 
315 enum {
316 	MV_DMA_BOUNDARY		= 0xffffffffU,
317 
318 	/* mask of register bits containing lower 32 bits
319 	 * of EDMA request queue DMA address
320 	 */
321 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
322 
323 	/* ditto, for response queue */
324 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
325 };
326 
327 enum chip_type {
328 	chip_504x,
329 	chip_508x,
330 	chip_5080,
331 	chip_604x,
332 	chip_608x,
333 	chip_6042,
334 	chip_7042,
335 };
336 
337 /* Command ReQuest Block: 32B */
338 struct mv_crqb {
339 	__le32			sg_addr;
340 	__le32			sg_addr_hi;
341 	__le16			ctrl_flags;
342 	__le16			ata_cmd[11];
343 };
344 
345 struct mv_crqb_iie {
346 	__le32			addr;
347 	__le32			addr_hi;
348 	__le32			flags;
349 	__le32			len;
350 	__le32			ata_cmd[4];
351 };
352 
353 /* Command ResPonse Block: 8B */
354 struct mv_crpb {
355 	__le16			id;
356 	__le16			flags;
357 	__le32			tmstmp;
358 };
359 
360 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
361 struct mv_sg {
362 	__le32			addr;
363 	__le32			flags_size;
364 	__le32			addr_hi;
365 	__le32			reserved;
366 };
367 
368 struct mv_port_priv {
369 	struct mv_crqb		*crqb;
370 	dma_addr_t		crqb_dma;
371 	struct mv_crpb		*crpb;
372 	dma_addr_t		crpb_dma;
373 	struct mv_sg		*sg_tbl;
374 	dma_addr_t		sg_tbl_dma;
375 
376 	unsigned int		req_idx;
377 	unsigned int		resp_idx;
378 
379 	u32			pp_flags;
380 };
381 
382 struct mv_port_signal {
383 	u32			amps;
384 	u32			pre;
385 };
386 
387 struct mv_host_priv;
388 struct mv_hw_ops {
389 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
390 			   unsigned int port);
391 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
392 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
393 			   void __iomem *mmio);
394 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
395 			unsigned int n_hc);
396 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
397 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
398 };
399 
400 struct mv_host_priv {
401 	u32			hp_flags;
402 	struct mv_port_signal	signal[8];
403 	const struct mv_hw_ops	*ops;
404 };
405 
406 static void mv_irq_clear(struct ata_port *ap);
407 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
408 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
409 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
410 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
411 static int mv_port_start(struct ata_port *ap);
412 static void mv_port_stop(struct ata_port *ap);
413 static void mv_qc_prep(struct ata_queued_cmd *qc);
414 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
415 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
416 static void mv_error_handler(struct ata_port *ap);
417 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
418 static void mv_eh_freeze(struct ata_port *ap);
419 static void mv_eh_thaw(struct ata_port *ap);
420 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
421 
422 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
423 			   unsigned int port);
424 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
425 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
426 			   void __iomem *mmio);
427 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
428 			unsigned int n_hc);
429 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
430 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
431 
432 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
433 			   unsigned int port);
434 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
435 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
436 			   void __iomem *mmio);
437 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
438 			unsigned int n_hc);
439 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
440 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
441 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
442 			     unsigned int port_no);
443 
444 static struct scsi_host_template mv5_sht = {
445 	.module			= THIS_MODULE,
446 	.name			= DRV_NAME,
447 	.ioctl			= ata_scsi_ioctl,
448 	.queuecommand		= ata_scsi_queuecmd,
449 	.can_queue		= ATA_DEF_QUEUE,
450 	.this_id		= ATA_SHT_THIS_ID,
451 	.sg_tablesize		= MV_MAX_SG_CT,
452 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
453 	.emulated		= ATA_SHT_EMULATED,
454 	.use_clustering		= 1,
455 	.proc_name		= DRV_NAME,
456 	.dma_boundary		= MV_DMA_BOUNDARY,
457 	.slave_configure	= ata_scsi_slave_config,
458 	.slave_destroy		= ata_scsi_slave_destroy,
459 	.bios_param		= ata_std_bios_param,
460 };
461 
462 static struct scsi_host_template mv6_sht = {
463 	.module			= THIS_MODULE,
464 	.name			= DRV_NAME,
465 	.ioctl			= ata_scsi_ioctl,
466 	.queuecommand		= ata_scsi_queuecmd,
467 	.can_queue		= ATA_DEF_QUEUE,
468 	.this_id		= ATA_SHT_THIS_ID,
469 	.sg_tablesize		= MV_MAX_SG_CT,
470 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
471 	.emulated		= ATA_SHT_EMULATED,
472 	.use_clustering		= 1,
473 	.proc_name		= DRV_NAME,
474 	.dma_boundary		= MV_DMA_BOUNDARY,
475 	.slave_configure	= ata_scsi_slave_config,
476 	.slave_destroy		= ata_scsi_slave_destroy,
477 	.bios_param		= ata_std_bios_param,
478 };
479 
480 static const struct ata_port_operations mv5_ops = {
481 	.port_disable		= ata_port_disable,
482 
483 	.tf_load		= ata_tf_load,
484 	.tf_read		= ata_tf_read,
485 	.check_status		= ata_check_status,
486 	.exec_command		= ata_exec_command,
487 	.dev_select		= ata_std_dev_select,
488 
489 	.cable_detect		= ata_cable_sata,
490 
491 	.qc_prep		= mv_qc_prep,
492 	.qc_issue		= mv_qc_issue,
493 	.data_xfer		= ata_data_xfer,
494 
495 	.irq_clear		= mv_irq_clear,
496 	.irq_on			= ata_irq_on,
497 	.irq_ack		= ata_irq_ack,
498 
499 	.error_handler		= mv_error_handler,
500 	.post_internal_cmd	= mv_post_int_cmd,
501 	.freeze			= mv_eh_freeze,
502 	.thaw			= mv_eh_thaw,
503 
504 	.scr_read		= mv5_scr_read,
505 	.scr_write		= mv5_scr_write,
506 
507 	.port_start		= mv_port_start,
508 	.port_stop		= mv_port_stop,
509 };
510 
511 static const struct ata_port_operations mv6_ops = {
512 	.port_disable		= ata_port_disable,
513 
514 	.tf_load		= ata_tf_load,
515 	.tf_read		= ata_tf_read,
516 	.check_status		= ata_check_status,
517 	.exec_command		= ata_exec_command,
518 	.dev_select		= ata_std_dev_select,
519 
520 	.cable_detect		= ata_cable_sata,
521 
522 	.qc_prep		= mv_qc_prep,
523 	.qc_issue		= mv_qc_issue,
524 	.data_xfer		= ata_data_xfer,
525 
526 	.irq_clear		= mv_irq_clear,
527 	.irq_on			= ata_irq_on,
528 	.irq_ack		= ata_irq_ack,
529 
530 	.error_handler		= mv_error_handler,
531 	.post_internal_cmd	= mv_post_int_cmd,
532 	.freeze			= mv_eh_freeze,
533 	.thaw			= mv_eh_thaw,
534 
535 	.scr_read		= mv_scr_read,
536 	.scr_write		= mv_scr_write,
537 
538 	.port_start		= mv_port_start,
539 	.port_stop		= mv_port_stop,
540 };
541 
542 static const struct ata_port_operations mv_iie_ops = {
543 	.port_disable		= ata_port_disable,
544 
545 	.tf_load		= ata_tf_load,
546 	.tf_read		= ata_tf_read,
547 	.check_status		= ata_check_status,
548 	.exec_command		= ata_exec_command,
549 	.dev_select		= ata_std_dev_select,
550 
551 	.cable_detect		= ata_cable_sata,
552 
553 	.qc_prep		= mv_qc_prep_iie,
554 	.qc_issue		= mv_qc_issue,
555 	.data_xfer		= ata_data_xfer,
556 
557 	.irq_clear		= mv_irq_clear,
558 	.irq_on			= ata_irq_on,
559 	.irq_ack		= ata_irq_ack,
560 
561 	.error_handler		= mv_error_handler,
562 	.post_internal_cmd	= mv_post_int_cmd,
563 	.freeze			= mv_eh_freeze,
564 	.thaw			= mv_eh_thaw,
565 
566 	.scr_read		= mv_scr_read,
567 	.scr_write		= mv_scr_write,
568 
569 	.port_start		= mv_port_start,
570 	.port_stop		= mv_port_stop,
571 };
572 
573 static const struct ata_port_info mv_port_info[] = {
574 	{  /* chip_504x */
575 		.flags		= MV_COMMON_FLAGS,
576 		.pio_mask	= 0x1f,	/* pio0-4 */
577 		.udma_mask	= ATA_UDMA6,
578 		.port_ops	= &mv5_ops,
579 	},
580 	{  /* chip_508x */
581 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
582 		.pio_mask	= 0x1f,	/* pio0-4 */
583 		.udma_mask	= ATA_UDMA6,
584 		.port_ops	= &mv5_ops,
585 	},
586 	{  /* chip_5080 */
587 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
588 		.pio_mask	= 0x1f,	/* pio0-4 */
589 		.udma_mask	= ATA_UDMA6,
590 		.port_ops	= &mv5_ops,
591 	},
592 	{  /* chip_604x */
593 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
594 		.pio_mask	= 0x1f,	/* pio0-4 */
595 		.udma_mask	= ATA_UDMA6,
596 		.port_ops	= &mv6_ops,
597 	},
598 	{  /* chip_608x */
599 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
600 				  MV_FLAG_DUAL_HC,
601 		.pio_mask	= 0x1f,	/* pio0-4 */
602 		.udma_mask	= ATA_UDMA6,
603 		.port_ops	= &mv6_ops,
604 	},
605 	{  /* chip_6042 */
606 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
607 		.pio_mask	= 0x1f,	/* pio0-4 */
608 		.udma_mask	= ATA_UDMA6,
609 		.port_ops	= &mv_iie_ops,
610 	},
611 	{  /* chip_7042 */
612 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
613 		.pio_mask	= 0x1f,	/* pio0-4 */
614 		.udma_mask	= ATA_UDMA6,
615 		.port_ops	= &mv_iie_ops,
616 	},
617 };
618 
619 static const struct pci_device_id mv_pci_tbl[] = {
620 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
621 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
622 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
623 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
624 
625 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
626 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
627 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
628 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
629 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
630 
631 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
632 
633 	/* Adaptec 1430SA */
634 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
635 
636 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
637 
638 	/* add Marvell 7042 support */
639 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
640 
641 	{ }			/* terminate list */
642 };
643 
644 static struct pci_driver mv_pci_driver = {
645 	.name			= DRV_NAME,
646 	.id_table		= mv_pci_tbl,
647 	.probe			= mv_init_one,
648 	.remove			= ata_pci_remove_one,
649 };
650 
651 static const struct mv_hw_ops mv5xxx_ops = {
652 	.phy_errata		= mv5_phy_errata,
653 	.enable_leds		= mv5_enable_leds,
654 	.read_preamp		= mv5_read_preamp,
655 	.reset_hc		= mv5_reset_hc,
656 	.reset_flash		= mv5_reset_flash,
657 	.reset_bus		= mv5_reset_bus,
658 };
659 
660 static const struct mv_hw_ops mv6xxx_ops = {
661 	.phy_errata		= mv6_phy_errata,
662 	.enable_leds		= mv6_enable_leds,
663 	.read_preamp		= mv6_read_preamp,
664 	.reset_hc		= mv6_reset_hc,
665 	.reset_flash		= mv6_reset_flash,
666 	.reset_bus		= mv_reset_pci_bus,
667 };
668 
669 /*
670  * module options
671  */
672 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
673 
674 
675 /* move to PCI layer or libata core? */
676 static int pci_go_64(struct pci_dev *pdev)
677 {
678 	int rc;
679 
680 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
681 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
682 		if (rc) {
683 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
684 			if (rc) {
685 				dev_printk(KERN_ERR, &pdev->dev,
686 					   "64-bit DMA enable failed\n");
687 				return rc;
688 			}
689 		}
690 	} else {
691 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
692 		if (rc) {
693 			dev_printk(KERN_ERR, &pdev->dev,
694 				   "32-bit DMA enable failed\n");
695 			return rc;
696 		}
697 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
698 		if (rc) {
699 			dev_printk(KERN_ERR, &pdev->dev,
700 				   "32-bit consistent DMA enable failed\n");
701 			return rc;
702 		}
703 	}
704 
705 	return rc;
706 }
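
/* Fallback order used above: try a 64-bit streaming DMA mask first; if the
 * platform accepts it but cannot honour a 64-bit consistent mask, drop the
 * consistent mask to 32 bits.  If even the 64-bit streaming mask is refused,
 * fall back to fully 32-bit DMA for both streaming and consistent mappings.
 */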
707 
708 /*
709  * Functions
710  */
711 
712 static inline void writelfl(unsigned long data, void __iomem *addr)
713 {
714 	writel(data, addr);
715 	(void) readl(addr);	/* flush to avoid PCI posted write */
716 }
717 
718 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
719 {
720 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
721 }
722 
723 static inline unsigned int mv_hc_from_port(unsigned int port)
724 {
725 	return port >> MV_PORT_HC_SHIFT;
726 }
727 
728 static inline unsigned int mv_hardport_from_port(unsigned int port)
729 {
730 	return port & MV_PORT_MASK;
731 }
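
/* Worked example for the two helpers above: with MV_PORT_HC_SHIFT == 2 and
 * MV_PORT_MASK == 3, global port 5 maps to HC 1 (5 >> 2) and hard port 1
 * (5 & 3), while global port 2 maps to HC 0, hard port 2.
 */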
732 
733 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
734 						 unsigned int port)
735 {
736 	return mv_hc_base(base, mv_hc_from_port(port));
737 }
738 
739 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
740 {
741 	return  mv_hc_base_from_port(base, port) +
742 		MV_SATAHC_ARBTR_REG_SZ +
743 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
744 }
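
/* Putting the pieces together: the per-port register window for global
 * port 5 starts at base + MV_SATAHC0_REG_BASE (0x20000)
 * + 1 * MV_SATAHC_REG_SZ (0x10000) + MV_SATAHC_ARBTR_REG_SZ (0x2000)
 * + 1 * MV_PORT_REG_SZ (0x2000), i.e. base + 0x34000.
 */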
745 
746 static inline void __iomem *mv_ap_base(struct ata_port *ap)
747 {
748 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
749 }
750 
751 static inline int mv_get_hc_count(unsigned long port_flags)
752 {
753 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
754 }
755 
756 static void mv_irq_clear(struct ata_port *ap)
757 {
758 }
759 
760 static void mv_set_edma_ptrs(void __iomem *port_mmio,
761 			     struct mv_host_priv *hpriv,
762 			     struct mv_port_priv *pp)
763 {
764 	u32 index;
765 
766 	/*
767 	 * initialize request queue
768 	 */
769 	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
770 
771 	WARN_ON(pp->crqb_dma & 0x3ff);
772 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
773 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
774 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
775 
776 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
777 		writelfl((pp->crqb_dma & 0xffffffff) | index,
778 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
779 	else
780 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
781 
782 	/*
783 	 * initialize response queue
784 	 */
785 	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
786 
787 	WARN_ON(pp->crpb_dma & 0xff);
788 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
789 
790 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
791 		writelfl((pp->crpb_dma & 0xffffffff) | index,
792 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
793 	else
794 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
795 
796 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
797 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
798 }
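
/* Note on the pointer programming above: the queue bases are 1KB (request)
 * and 256B (response) aligned, so the low bits of the same registers are
 * free to hold the index field (bits 9:5 for requests, 7:3 for responses).
 * The "(dma >> 16) >> 16" form extracts the high 32 bits without tripping a
 * shift-count warning when dma_addr_t is only 32 bits wide.
 */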
799 
800 /**
801  *      mv_start_dma - Enable eDMA engine
802  *      @base: port base address
803  *      @pp: port private data
804  *
805  *      Verify the local cache of the eDMA state is accurate with a
806  *      WARN_ON.
807  *
808  *      LOCKING:
809  *      Inherited from caller.
810  */
811 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
812 			 struct mv_port_priv *pp)
813 {
814 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
815 		/* clear EDMA event indicators, if any */
816 		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
817 
818 		mv_set_edma_ptrs(base, hpriv, pp);
819 
820 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
821 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
822 	}
823 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
824 }
825 
826 /**
827  *      __mv_stop_dma - Disable eDMA engine
828  *      @ap: ATA channel to manipulate
829  *
830  *      Verify the local cache of the eDMA state is accurate with a
831  *      WARN_ON.
832  *
833  *      LOCKING:
834  *      Inherited from caller.
835  */
836 static int __mv_stop_dma(struct ata_port *ap)
837 {
838 	void __iomem *port_mmio = mv_ap_base(ap);
839 	struct mv_port_priv *pp	= ap->private_data;
840 	u32 reg;
841 	int i, err = 0;
842 
843 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
844 		/* Disable EDMA if active.   The disable bit auto clears.
845 		 */
846 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
847 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
848 	} else {
849 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
850 	}
851 
852 	/* now properly wait for the eDMA to stop */
853 	for (i = 1000; i > 0; i--) {
854 		reg = readl(port_mmio + EDMA_CMD_OFS);
855 		if (!(reg & EDMA_EN))
856 			break;
857 
858 		udelay(100);
859 	}
860 
861 	if (reg & EDMA_EN) {
862 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
863 		err = -EIO;
864 	}
865 
866 	return err;
867 }
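
/* The polling loop above waits at most 1000 * 100us = 100ms for the EDMA
 * engine to acknowledge the disable request before declaring -EIO.
 */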
868 
869 static int mv_stop_dma(struct ata_port *ap)
870 {
871 	unsigned long flags;
872 	int rc;
873 
874 	spin_lock_irqsave(&ap->host->lock, flags);
875 	rc = __mv_stop_dma(ap);
876 	spin_unlock_irqrestore(&ap->host->lock, flags);
877 
878 	return rc;
879 }
880 
881 #ifdef ATA_DEBUG
882 static void mv_dump_mem(void __iomem *start, unsigned bytes)
883 {
884 	int b, w;
885 	for (b = 0; b < bytes; ) {
886 		DPRINTK("%p: ", start + b);
887 		for (w = 0; b < bytes && w < 4; w++) {
888 			printk("%08x ", readl(start + b));
889 			b += sizeof(u32);
890 		}
891 		printk("\n");
892 	}
893 }
894 #endif
895 
896 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
897 {
898 #ifdef ATA_DEBUG
899 	int b, w;
900 	u32 dw;
901 	for (b = 0; b < bytes; ) {
902 		DPRINTK("%02x: ", b);
903 		for (w = 0; b < bytes && w < 4; w++) {
904 			(void) pci_read_config_dword(pdev, b, &dw);
905 			printk("%08x ", dw);
906 			b += sizeof(u32);
907 		}
908 		printk("\n");
909 	}
910 #endif
911 }
912 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
913 			     struct pci_dev *pdev)
914 {
915 #ifdef ATA_DEBUG
916 	void __iomem *hc_base = mv_hc_base(mmio_base,
917 					   port >> MV_PORT_HC_SHIFT);
918 	void __iomem *port_base;
919 	int start_port, num_ports, p, start_hc, num_hcs, hc;
920 
921 	if (port < 0) {
922 		start_hc = start_port = 0;
923 		num_ports = 8;		/* should be benign for 4-port devs */
924 		num_hcs = 2;
925 	} else {
926 		start_hc = port >> MV_PORT_HC_SHIFT;
927 		start_port = port;
928 		num_ports = num_hcs = 1;
929 	}
930 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
931 		num_ports > 1 ? num_ports - 1 : start_port);
932 
933 	if (pdev != NULL) {
934 		DPRINTK("PCI config space regs:\n");
935 		mv_dump_pci_cfg(pdev, 0x68);
936 	}
937 	DPRINTK("PCI regs:\n");
938 	mv_dump_mem(mmio_base+0xc00, 0x3c);
939 	mv_dump_mem(mmio_base+0xd00, 0x34);
940 	mv_dump_mem(mmio_base+0xf00, 0x4);
941 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
942 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
943 		hc_base = mv_hc_base(mmio_base, hc);
944 		DPRINTK("HC regs (HC %i):\n", hc);
945 		mv_dump_mem(hc_base, 0x1c);
946 	}
947 	for (p = start_port; p < start_port + num_ports; p++) {
948 		port_base = mv_port_base(mmio_base, p);
949 		DPRINTK("EDMA regs (port %i):\n", p);
950 		mv_dump_mem(port_base, 0x54);
951 		DPRINTK("SATA regs (port %i):\n", p);
952 		mv_dump_mem(port_base+0x300, 0x60);
953 	}
954 #endif
955 }
956 
957 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
958 {
959 	unsigned int ofs;
960 
961 	switch (sc_reg_in) {
962 	case SCR_STATUS:
963 	case SCR_CONTROL:
964 	case SCR_ERROR:
965 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
966 		break;
967 	case SCR_ACTIVE:
968 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
969 		break;
970 	default:
971 		ofs = 0xffffffffU;
972 		break;
973 	}
974 	return ofs;
975 }
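
/* With the standard libata SCR numbering (SCR_STATUS == 0, SCR_ERROR == 1,
 * SCR_CONTROL == 2), the offsets computed above land on SStatus at 0x300,
 * SError at 0x304 and SControl at 0x308 within the port window; SActive
 * lives apart at 0x350.
 */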
976 
977 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
978 {
979 	unsigned int ofs = mv_scr_offset(sc_reg_in);
980 
981 	if (ofs != 0xffffffffU) {
982 		*val = readl(mv_ap_base(ap) + ofs);
983 		return 0;
984 	} else
985 		return -EINVAL;
986 }
987 
988 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
989 {
990 	unsigned int ofs = mv_scr_offset(sc_reg_in);
991 
992 	if (ofs != 0xffffffffU) {
993 		writelfl(val, mv_ap_base(ap) + ofs);
994 		return 0;
995 	} else
996 		return -EINVAL;
997 }
998 
999 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1000 			void __iomem *port_mmio)
1001 {
1002 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1003 
1004 	/* set up non-NCQ EDMA configuration */
1005 	cfg &= ~(1 << 9);	/* disable eQue */
1006 
1007 	if (IS_GEN_I(hpriv)) {
1008 		cfg &= ~0x1f;		/* clear queue depth */
1009 		cfg |= (1 << 8);	/* enab config burst size mask */
1010 	}
1011 
1012 	else if (IS_GEN_II(hpriv)) {
1013 		cfg &= ~0x1f;		/* clear queue depth */
1014 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1015 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1016 	}
1017 
1018 	else if (IS_GEN_IIE(hpriv)) {
1019 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1020 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1021 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
1022 		cfg |= (1 << 18);	/* enab early completion */
1023 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1024 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
1025 		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
1026 	}
1027 
1028 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1029 }
1030 
1031 /**
1032  *      mv_port_start - Port specific init/start routine.
1033  *      @ap: ATA channel to manipulate
1034  *
1035  *      Allocate and point to DMA memory, init port private memory,
1036  *      zero indices.
1037  *
1038  *      LOCKING:
1039  *      Inherited from caller.
1040  */
1041 static int mv_port_start(struct ata_port *ap)
1042 {
1043 	struct device *dev = ap->host->dev;
1044 	struct mv_host_priv *hpriv = ap->host->private_data;
1045 	struct mv_port_priv *pp;
1046 	void __iomem *port_mmio = mv_ap_base(ap);
1047 	void *mem;
1048 	dma_addr_t mem_dma;
1049 	unsigned long flags;
1050 	int rc;
1051 
1052 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1053 	if (!pp)
1054 		return -ENOMEM;
1055 
1056 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1057 				  GFP_KERNEL);
1058 	if (!mem)
1059 		return -ENOMEM;
1060 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1061 
1062 	rc = ata_pad_alloc(ap, dev);
1063 	if (rc)
1064 		return rc;
1065 
1066 	/* First item in chunk of DMA memory:
1067 	 * 32-slot command request table (CRQB), 32 bytes each in size
1068 	 */
1069 	pp->crqb = mem;
1070 	pp->crqb_dma = mem_dma;
1071 	mem += MV_CRQB_Q_SZ;
1072 	mem_dma += MV_CRQB_Q_SZ;
1073 
1074 	/* Second item:
1075 	 * 32-slot command response table (CRPB), 8 bytes each in size
1076 	 */
1077 	pp->crpb = mem;
1078 	pp->crpb_dma = mem_dma;
1079 	mem += MV_CRPB_Q_SZ;
1080 	mem_dma += MV_CRPB_Q_SZ;
1081 
1082 	/* Third item:
1083 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1084 	 */
1085 	pp->sg_tbl = mem;
1086 	pp->sg_tbl_dma = mem_dma;
1087 
1088 	spin_lock_irqsave(&ap->host->lock, flags);
1089 
1090 	mv_edma_cfg(ap, hpriv, port_mmio);
1091 
1092 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
1093 
1094 	spin_unlock_irqrestore(&ap->host->lock, flags);
1095 
1096 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
1097 	 * we'll be unable to send non-data, PIO, etc due to restricted access
1098 	 * to shadow regs.
1099 	 */
1100 	ap->private_data = pp;
1101 	return 0;
1102 }
1103 
1104 /**
1105  *      mv_port_stop - Port specific cleanup/stop routine.
1106  *      @ap: ATA channel to manipulate
1107  *
1108  *      Stop DMA, cleanup port memory.
1109  *
1110  *      LOCKING:
1111  *      This routine uses the host lock to protect the DMA stop.
1112  */
1113 static void mv_port_stop(struct ata_port *ap)
1114 {
1115 	mv_stop_dma(ap);
1116 }
1117 
1118 /**
1119  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1120  *      @qc: queued command whose SG list to source from
1121  *
1122  *      Populate the SG list and mark the last entry.
1123  *
1124  *      LOCKING:
1125  *      Inherited from caller.
1126  */
1127 static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
1128 {
1129 	struct mv_port_priv *pp = qc->ap->private_data;
1130 	unsigned int n_sg = 0;
1131 	struct scatterlist *sg;
1132 	struct mv_sg *mv_sg;
1133 
1134 	mv_sg = pp->sg_tbl;
1135 	ata_for_each_sg(sg, qc) {
1136 		dma_addr_t addr = sg_dma_address(sg);
1137 		u32 sg_len = sg_dma_len(sg);
1138 
1139 		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1140 		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1141 		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
1142 
1143 		if (ata_sg_is_last(sg, qc))
1144 			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1145 
1146 		mv_sg++;
1147 		n_sg++;
1148 	}
1149 
1150 	return n_sg;
1151 }
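
/* ePRD layout produced above: a 64-bit buffer address split across addr and
 * addr_hi, the transfer length in the low 16 bits of flags_size, and
 * EPRD_FLAG_END_OF_TBL (bit 31) set only on the final entry so the EDMA
 * engine knows where the table ends.
 */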
1152 
1153 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1154 {
1155 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1156 		(last ? CRQB_CMD_LAST : 0);
1157 	*cmdw = cpu_to_le16(tmp);
1158 }
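
/* Each packed CRQB command word holds the 8-bit register value in bits 7:0,
 * the ATA register address starting at bit 8 (CRQB_CMD_ADDR_SHIFT), the
 * CRQB_CMD_CS code in bits 12:11, and CRQB_CMD_LAST (bit 15) marking the
 * final word of the request.
 */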
1159 
1160 /**
1161  *      mv_qc_prep - Host specific command preparation.
1162  *      @qc: queued command to prepare
1163  *
1164  *      This routine simply redirects to the general purpose routine
1165  *      if command is not DMA.  Else, it handles prep of the CRQB
1166  *      (command request block), does some sanity checking, and calls
1167  *      the SG load routine.
1168  *
1169  *      LOCKING:
1170  *      Inherited from caller.
1171  */
1172 static void mv_qc_prep(struct ata_queued_cmd *qc)
1173 {
1174 	struct ata_port *ap = qc->ap;
1175 	struct mv_port_priv *pp = ap->private_data;
1176 	__le16 *cw;
1177 	struct ata_taskfile *tf;
1178 	u16 flags = 0;
1179 	unsigned in_index;
1180 
1181 	if (qc->tf.protocol != ATA_PROT_DMA)
1182 		return;
1183 
1184 	/* Fill in command request block
1185 	 */
1186 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1187 		flags |= CRQB_FLAG_READ;
1188 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1189 	flags |= qc->tag << CRQB_TAG_SHIFT;
1190 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/
1191 
1192 	/* get current queue index from software */
1193 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1194 
1195 	pp->crqb[in_index].sg_addr =
1196 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1197 	pp->crqb[in_index].sg_addr_hi =
1198 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1199 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1200 
1201 	cw = &pp->crqb[in_index].ata_cmd[0];
1202 	tf = &qc->tf;
1203 
1204 	/* Sadly, the CRQB cannot accommodate all registers--there are
1205 	 * only 11 command words...so we must pick and choose required
1206 	 * registers based on the command.  So, we drop feature and
1207 	 * hob_feature for [RW] DMA commands, but they are needed for
1208 	 * NCQ.  NCQ will drop hob_nsect.
1209 	 */
1210 	switch (tf->command) {
1211 	case ATA_CMD_READ:
1212 	case ATA_CMD_READ_EXT:
1213 	case ATA_CMD_WRITE:
1214 	case ATA_CMD_WRITE_EXT:
1215 	case ATA_CMD_WRITE_FUA_EXT:
1216 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1217 		break;
1218 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1219 	case ATA_CMD_FPDMA_READ:
1220 	case ATA_CMD_FPDMA_WRITE:
1221 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1222 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1223 		break;
1224 #endif				/* FIXME: remove this line when NCQ added */
1225 	default:
1226 		/* The only other commands EDMA supports in non-queued and
1227 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1228 		 * of which are defined/used by Linux.  If we get here, this
1229 		 * driver needs work.
1230 		 *
1231 		 * FIXME: modify libata to give qc_prep a return value and
1232 		 * return error here.
1233 		 */
1234 		BUG_ON(tf->command);
1235 		break;
1236 	}
1237 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1238 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1239 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1240 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1241 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1242 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1243 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1244 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1245 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1246 
1247 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1248 		return;
1249 	mv_fill_sg(qc);
1250 }
1251 
1252 /**
1253  *      mv_qc_prep_iie - Host specific command preparation.
1254  *      @qc: queued command to prepare
1255  *
1256  *      This routine simply redirects to the general purpose routine
1257  *      if command is not DMA.  Else, it handles prep of the CRQB
1258  *      (command request block), does some sanity checking, and calls
1259  *      the SG load routine.
1260  *
1261  *      LOCKING:
1262  *      Inherited from caller.
1263  */
1264 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1265 {
1266 	struct ata_port *ap = qc->ap;
1267 	struct mv_port_priv *pp = ap->private_data;
1268 	struct mv_crqb_iie *crqb;
1269 	struct ata_taskfile *tf;
1270 	unsigned in_index;
1271 	u32 flags = 0;
1272 
1273 	if (qc->tf.protocol != ATA_PROT_DMA)
1274 		return;
1275 
1276 	/* Fill in Gen IIE command request block
1277 	 */
1278 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1279 		flags |= CRQB_FLAG_READ;
1280 
1281 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1282 	flags |= qc->tag << CRQB_TAG_SHIFT;
1283 	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
1284 						   what we use as our tag */
1285 
1286 	/* get current queue index from software */
1287 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1288 
1289 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1290 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1291 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1292 	crqb->flags = cpu_to_le32(flags);
1293 
1294 	tf = &qc->tf;
1295 	crqb->ata_cmd[0] = cpu_to_le32(
1296 			(tf->command << 16) |
1297 			(tf->feature << 24)
1298 		);
1299 	crqb->ata_cmd[1] = cpu_to_le32(
1300 			(tf->lbal << 0) |
1301 			(tf->lbam << 8) |
1302 			(tf->lbah << 16) |
1303 			(tf->device << 24)
1304 		);
1305 	crqb->ata_cmd[2] = cpu_to_le32(
1306 			(tf->hob_lbal << 0) |
1307 			(tf->hob_lbam << 8) |
1308 			(tf->hob_lbah << 16) |
1309 			(tf->hob_feature << 24)
1310 		);
1311 	crqb->ata_cmd[3] = cpu_to_le32(
1312 			(tf->nsect << 0) |
1313 			(tf->hob_nsect << 8)
1314 		);
1315 
1316 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1317 		return;
1318 	mv_fill_sg(qc);
1319 }
1320 
1321 /**
1322  *      mv_qc_issue - Initiate a command to the host
1323  *      @qc: queued command to start
1324  *
1325  *      This routine simply redirects to the general purpose routine
1326  *      if command is not DMA.  Else, it sanity checks our local
1327  *      caches of the request producer/consumer indices then enables
1328  *      DMA and bumps the request producer index.
1329  *
1330  *      LOCKING:
1331  *      Inherited from caller.
1332  */
1333 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1334 {
1335 	struct ata_port *ap = qc->ap;
1336 	void __iomem *port_mmio = mv_ap_base(ap);
1337 	struct mv_port_priv *pp = ap->private_data;
1338 	struct mv_host_priv *hpriv = ap->host->private_data;
1339 	u32 in_index;
1340 
1341 	if (qc->tf.protocol != ATA_PROT_DMA) {
1342 		/* We're about to send a non-EDMA capable command to the
1343 		 * port.  Turn off EDMA so there won't be problems accessing
1344 		 * shadow block, etc registers.
1345 		 */
1346 		__mv_stop_dma(ap);
1347 		return ata_qc_issue_prot(qc);
1348 	}
1349 
1350 	mv_start_dma(port_mmio, hpriv, pp);
1351 
1352 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1353 
1354 	/* until we do queuing, the queue should be empty at this point */
1355 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1356 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1357 
1358 	pp->req_idx++;
1359 
1360 	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1361 
1362 	/* and write the request in pointer to kick the EDMA to life */
1363 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1364 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1365 
1366 	return 0;
1367 }
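
/* The final write above kicks the EDMA engine: the new producer index is
 * placed in bits 9:5 of the request-queue in-pointer register, merged with
 * the low bits of the queue base address that the same register also holds.
 */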
1368 
1369 /**
1370  *      mv_err_intr - Handle error interrupts on the port
1371  *      @ap: ATA channel to manipulate
1372  *      @reset_allowed: bool: 0 == don't trigger from reset here
1373  *
1374  *      In most cases, just clear the interrupt and move on.  However,
1375  *      some cases require an eDMA reset, which is done right before
1376  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1377  *      clear of pending errors in the SATA SERROR register.  Finally,
1378  *      if the port disabled DMA, update our cached copy to match.
1379  *
1380  *      LOCKING:
1381  *      Inherited from caller.
1382  */
1383 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1384 {
1385 	void __iomem *port_mmio = mv_ap_base(ap);
1386 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1387 	struct mv_port_priv *pp = ap->private_data;
1388 	struct mv_host_priv *hpriv = ap->host->private_data;
1389 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1390 	unsigned int action = 0, err_mask = 0;
1391 	struct ata_eh_info *ehi = &ap->eh_info;
1392 
1393 	ata_ehi_clear_desc(ehi);
1394 
1395 	if (!edma_enabled) {
1396 		/* just a guess: do we need to do this? should we
1397 		 * expand this, and do it in all cases?
1398 		 */
1399 		sata_scr_read(ap, SCR_ERROR, &serr);
1400 		sata_scr_write_flush(ap, SCR_ERROR, serr);
1401 	}
1402 
1403 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1404 
1405 	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1406 
1407 	/*
1408 	 * all generations share these EDMA error cause bits
1409 	 */
1410 
1411 	if (edma_err_cause & EDMA_ERR_DEV)
1412 		err_mask |= AC_ERR_DEV;
1413 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1414 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1415 			EDMA_ERR_INTRL_PAR)) {
1416 		err_mask |= AC_ERR_ATA_BUS;
1417 		action |= ATA_EH_HARDRESET;
1418 		ata_ehi_push_desc(ehi, "parity error");
1419 	}
1420 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1421 		ata_ehi_hotplugged(ehi);
1422 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1423 			"dev disconnect" : "dev connect");
1424 	}
1425 
1426 	if (IS_GEN_I(hpriv)) {
1427 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1428 
1429 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1430 			struct mv_port_priv *pp	= ap->private_data;
1431 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1432 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1433 		}
1434 	} else {
1435 		eh_freeze_mask = EDMA_EH_FREEZE;
1436 
1437 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1438 			struct mv_port_priv *pp	= ap->private_data;
1439 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1440 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1441 		}
1442 
1443 		if (edma_err_cause & EDMA_ERR_SERR) {
1444 			sata_scr_read(ap, SCR_ERROR, &serr);
1445 			sata_scr_write_flush(ap, SCR_ERROR, serr);
1446 			err_mask = AC_ERR_ATA_BUS;
1447 			action |= ATA_EH_HARDRESET;
1448 		}
1449 	}
1450 
1451 	/* Clear EDMA now that SERR cleanup done */
1452 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1453 
1454 	if (!err_mask) {
1455 		err_mask = AC_ERR_OTHER;
1456 		action |= ATA_EH_HARDRESET;
1457 	}
1458 
1459 	ehi->serror |= serr;
1460 	ehi->action |= action;
1461 
1462 	if (qc)
1463 		qc->err_mask |= err_mask;
1464 	else
1465 		ehi->err_mask |= err_mask;
1466 
1467 	if (edma_err_cause & eh_freeze_mask)
1468 		ata_port_freeze(ap);
1469 	else
1470 		ata_port_abort(ap);
1471 }
1472 
1473 static void mv_intr_pio(struct ata_port *ap)
1474 {
1475 	struct ata_queued_cmd *qc;
1476 	u8 ata_status;
1477 
1478 	/* ignore spurious intr if drive still BUSY */
1479 	ata_status = readb(ap->ioaddr.status_addr);
1480 	if (unlikely(ata_status & ATA_BUSY))
1481 		return;
1482 
1483 	/* get active ATA command */
1484 	qc = ata_qc_from_tag(ap, ap->active_tag);
1485 	if (unlikely(!qc))			/* no active tag */
1486 		return;
1487 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1488 		return;
1489 
1490 	/* and finally, complete the ATA command */
1491 	qc->err_mask |= ac_err_mask(ata_status);
1492 	ata_qc_complete(qc);
1493 }
1494 
1495 static void mv_intr_edma(struct ata_port *ap)
1496 {
1497 	void __iomem *port_mmio = mv_ap_base(ap);
1498 	struct mv_host_priv *hpriv = ap->host->private_data;
1499 	struct mv_port_priv *pp = ap->private_data;
1500 	struct ata_queued_cmd *qc;
1501 	u32 out_index, in_index;
1502 	bool work_done = false;
1503 
1504 	/* get h/w response queue pointer */
1505 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1506 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1507 
1508 	while (1) {
1509 		u16 status;
1510 		unsigned int tag;
1511 
1512 		/* get s/w response queue last-read pointer, and compare */
1513 		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1514 		if (in_index == out_index)
1515 			break;
1516 
1517 		/* 50xx: get active ATA command */
1518 		if (IS_GEN_I(hpriv))
1519 			tag = ap->active_tag;
1520 
1521 		/* Gen II/IIE: get active ATA command via tag, to enable
1522 		 * support for queueing.  this works transparently for
1523 		 * queued and non-queued modes.
1524 		 */
1525 		else if (IS_GEN_II(hpriv))
1526 			tag = (le16_to_cpu(pp->crpb[out_index].id)
1527 				>> CRPB_IOID_SHIFT_6) & 0x3f;
1528 
1529 		else /* IS_GEN_IIE */
1530 			tag = (le16_to_cpu(pp->crpb[out_index].id)
1531 				>> CRPB_IOID_SHIFT_7) & 0x3f;
1532 
1533 		qc = ata_qc_from_tag(ap, tag);
1534 
1535 		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1536 		 * bits (WARNING: might not necessarily be associated
1537 		 * with this command), which -should- be clear
1538 		 * if all is well
1539 		 */
1540 		status = le16_to_cpu(pp->crpb[out_index].flags);
1541 		if (unlikely(status & 0xff)) {
1542 			mv_err_intr(ap, qc);
1543 			return;
1544 		}
1545 
1546 		/* and finally, complete the ATA command */
1547 		if (qc) {
1548 			qc->err_mask |=
1549 				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1550 			ata_qc_complete(qc);
1551 		}
1552 
1553 		/* advance software response queue pointer, to
1554 		 * indicate (after the loop completes) to hardware
1555 		 * that we have consumed a response queue entry.
1556 		 */
1557 		work_done = true;
1558 		pp->resp_idx++;
1559 	}
1560 
1561 	if (work_done)
1562 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1563 			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1564 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1565 }
1566 
1567 /**
1568  *      mv_host_intr - Handle all interrupts on the given host controller
1569  *      @host: host specific structure
1570  *      @relevant: port error bits relevant to this host controller
1571  *      @hc: which host controller we're to look at
1572  *
1573  *      Read then write clear the HC interrupt status then walk each
1574  *      port connected to the HC and see if it needs servicing.  Port
1575  *      success ints are reported in the HC interrupt status reg, the
1576  *      port error ints are reported in the higher level main
1577  *      interrupt status register and thus are passed in via the
1578  *      'relevant' argument.
1579  *
1580  *      LOCKING:
1581  *      Inherited from caller.
1582  */
1583 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1584 {
1585 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1586 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1587 	u32 hc_irq_cause;
1588 	int port, port0;
1589 
1590 	if (hc == 0)
1591 		port0 = 0;
1592 	else
1593 		port0 = MV_PORTS_PER_HC;
1594 
1595 	/* we'll need the HC success int register in most cases */
1596 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1597 	if (!hc_irq_cause)
1598 		return;
1599 
1600 	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1601 
1602 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1603 		hc, relevant, hc_irq_cause);
1604 
1605 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1606 		struct ata_port *ap = host->ports[port];
1607 		struct mv_port_priv *pp = ap->private_data;
1608 		int have_err_bits, hard_port, shift;
1609 
1610 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1611 			continue;
1612 
1613 		shift = port << 1;		/* (port * 2) */
1614 		if (port >= MV_PORTS_PER_HC) {
1615 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1616 		}
1617 		have_err_bits = ((PORT0_ERR << shift) & relevant);
1618 
1619 		if (unlikely(have_err_bits)) {
1620 			struct ata_queued_cmd *qc;
1621 
1622 			qc = ata_qc_from_tag(ap, ap->active_tag);
1623 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1624 				continue;
1625 
1626 			mv_err_intr(ap, qc);
1627 			continue;
1628 		}
1629 
1630 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1631 
1632 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1633 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1634 				mv_intr_edma(ap);
1635 		} else {
1636 			if ((DEV_IRQ << hard_port) & hc_irq_cause)
1637 				mv_intr_pio(ap);
1638 		}
1639 	}
1640 	VPRINTK("EXIT\n");
1641 }
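
/* Bit layout assumed by the shift arithmetic above: in the main IRQ
 * cause/mask registers each port owns an error/done bit pair, so ports 0-3
 * occupy bits 0-7, bit 8 is PORTS_0_3_COAL_DONE (hence the shift++ skip),
 * ports 4-7 occupy bits 9-16 and bit 17 is PORTS_4_7_COAL_DONE.
 */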
1642 
1643 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1644 {
1645 	struct ata_port *ap;
1646 	struct ata_queued_cmd *qc;
1647 	struct ata_eh_info *ehi;
1648 	unsigned int i, err_mask, printed = 0;
1649 	u32 err_cause;
1650 
1651 	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1652 
1653 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1654 		   err_cause);
1655 
1656 	DPRINTK("All regs @ PCI error\n");
1657 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1658 
1659 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1660 
1661 	for (i = 0; i < host->n_ports; i++) {
1662 		ap = host->ports[i];
1663 		if (!ata_port_offline(ap)) {
1664 			ehi = &ap->eh_info;
1665 			ata_ehi_clear_desc(ehi);
1666 			if (!printed++)
1667 				ata_ehi_push_desc(ehi,
1668 					"PCI err cause 0x%08x", err_cause);
1669 			err_mask = AC_ERR_HOST_BUS;
1670 			ehi->action = ATA_EH_HARDRESET;
1671 			qc = ata_qc_from_tag(ap, ap->active_tag);
1672 			if (qc)
1673 				qc->err_mask |= err_mask;
1674 			else
1675 				ehi->err_mask |= err_mask;
1676 
1677 			ata_port_freeze(ap);
1678 		}
1679 	}
1680 }
1681 
1682 /**
1683  *      mv_interrupt - Main interrupt event handler
1684  *      @irq: unused
1685  *      @dev_instance: private data; in this case the host structure
1686  *
1687  *      Read the read only register to determine if any host
1688  *      controllers have pending interrupts.  If so, call lower level
1689  *      routine to handle.  Also check for PCI errors which are only
1690  *      reported here.
1691  *
1692  *      LOCKING:
1693  *      This routine holds the host lock while processing pending
1694  *      interrupts.
1695  */
1696 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1697 {
1698 	struct ata_host *host = dev_instance;
1699 	unsigned int hc, handled = 0, n_hcs;
1700 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1701 	u32 irq_stat;
1702 
1703 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1704 
1705 	/* check the cases where we either have nothing pending or have read
1706 	 * a bogus register value which can indicate HW removal or PCI fault
1707 	 */
1708 	if (!irq_stat || (0xffffffffU == irq_stat))
1709 		return IRQ_NONE;
1710 
1711 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1712 	spin_lock(&host->lock);
1713 
1714 	if (unlikely(irq_stat & PCI_ERR)) {
1715 		mv_pci_error(host, mmio);
1716 		handled = 1;
1717 		goto out_unlock;	/* skip all other HC irq handling */
1718 	}
1719 
1720 	for (hc = 0; hc < n_hcs; hc++) {
1721 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1722 		if (relevant) {
1723 			mv_host_intr(host, relevant, hc);
1724 			handled = 1;
1725 		}
1726 	}
1727 
1728 out_unlock:
1729 	spin_unlock(&host->lock);
1730 
1731 	return IRQ_RETVAL(handled);
1732 }
1733 
1734 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1735 {
1736 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1737 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1738 
1739 	return hc_mmio + ofs;
1740 }
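
/* On the 50xx (Gen-I) parts served by the mv5_* helpers, the per-port
 * PHY/SCR block sits inside the HC window at (hard port + 1) * 0x100,
 * i.e. 0x100 for hard port 0 through 0x400 for hard port 3.
 */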
1741 
1742 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1743 {
1744 	unsigned int ofs;
1745 
1746 	switch (sc_reg_in) {
1747 	case SCR_STATUS:
1748 	case SCR_ERROR:
1749 	case SCR_CONTROL:
1750 		ofs = sc_reg_in * sizeof(u32);
1751 		break;
1752 	default:
1753 		ofs = 0xffffffffU;
1754 		break;
1755 	}
1756 	return ofs;
1757 }
1758 
1759 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1760 {
1761 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1762 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1763 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1764 
1765 	if (ofs != 0xffffffffU) {
1766 		*val = readl(addr + ofs);
1767 		return 0;
1768 	} else
1769 		return -EINVAL;
1770 }
1771 
1772 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1773 {
1774 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1775 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1776 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1777 
1778 	if (ofs != 0xffffffffU) {
1779 		writelfl(val, addr + ofs);
1780 		return 0;
1781 	} else
1782 		return -EINVAL;
1783 }
1784 
1785 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1786 {
1787 	int early_5080;
1788 
1789 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1790 
1791 	if (!early_5080) {
1792 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1793 		tmp |= (1 << 0);
1794 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1795 	}
1796 
1797 	mv_reset_pci_bus(pdev, mmio);
1798 }
1799 
1800 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1801 {
1802 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1803 }
1804 
1805 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1806 			   void __iomem *mmio)
1807 {
1808 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1809 	u32 tmp;
1810 
1811 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1812 
1813 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1814 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1815 }
1816 
1817 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1818 {
1819 	u32 tmp;
1820 
1821 	writel(0, mmio + MV_GPIO_PORT_CTL);
1822 
1823 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1824 
1825 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1826 	tmp |= ~(1 << 0);
1827 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1828 }
1829 
1830 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1831 			   unsigned int port)
1832 {
1833 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1834 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1835 	u32 tmp;
1836 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1837 
1838 	if (fix_apm_sq) {
1839 		tmp = readl(phy_mmio + MV5_LT_MODE);
1840 		tmp |= (1 << 19);
1841 		writel(tmp, phy_mmio + MV5_LT_MODE);
1842 
1843 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1844 		tmp &= ~0x3;
1845 		tmp |= 0x1;
1846 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1847 	}
1848 
1849 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1850 	tmp &= ~mask;
1851 	tmp |= hpriv->signal[port].pre;
1852 	tmp |= hpriv->signal[port].amps;
1853 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1854 }
1855 
1856 
1857 #undef ZERO
1858 #define ZERO(reg) writel(0, port_mmio + (reg))
1859 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1860 			     unsigned int port)
1861 {
1862 	void __iomem *port_mmio = mv_port_base(mmio, port);
1863 
1864 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1865 
1866 	mv_channel_reset(hpriv, mmio, port);
1867 
1868 	ZERO(0x028);	/* command */
1869 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1870 	ZERO(0x004);	/* timer */
1871 	ZERO(0x008);	/* irq err cause */
1872 	ZERO(0x00c);	/* irq err mask */
1873 	ZERO(0x010);	/* rq bah */
1874 	ZERO(0x014);	/* rq inp */
1875 	ZERO(0x018);	/* rq outp */
1876 	ZERO(0x01c);	/* respq bah */
1877 	ZERO(0x024);	/* respq outp */
1878 	ZERO(0x020);	/* respq inp */
1879 	ZERO(0x02c);	/* test control */
1880 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1881 }
1882 #undef ZERO
1883 
1884 #define ZERO(reg) writel(0, hc_mmio + (reg))
1885 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1886 			unsigned int hc)
1887 {
1888 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1889 	u32 tmp;
1890 
1891 	ZERO(0x00c);
1892 	ZERO(0x010);
1893 	ZERO(0x014);
1894 	ZERO(0x018);
1895 
1896 	tmp = readl(hc_mmio + 0x20);
1897 	tmp &= 0x1c1c1c1c;
1898 	tmp |= 0x03030303;
1899 	writel(tmp, hc_mmio + 0x20);
1900 }
1901 #undef ZERO
1902 
1903 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 			unsigned int n_hc)
1905 {
1906 	unsigned int hc, port;
1907 
1908 	for (hc = 0; hc < n_hc; hc++) {
1909 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1910 			mv5_reset_hc_port(hpriv, mmio,
1911 					  (hc * MV_PORTS_PER_HC) + port);
1912 
1913 		mv5_reset_one_hc(hpriv, mmio, hc);
1914 	}
1915 
1916 	return 0;
1917 }
1918 
1919 #undef ZERO
1920 #define ZERO(reg) writel(0, mmio + (reg))
1921 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1922 {
1923 	u32 tmp;
1924 
1925 	tmp = readl(mmio + MV_PCI_MODE);
1926 	tmp &= 0xff00ffff;
1927 	writel(tmp, mmio + MV_PCI_MODE);
1928 
1929 	ZERO(MV_PCI_DISC_TIMER);
1930 	ZERO(MV_PCI_MSI_TRIGGER);
1931 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1932 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1933 	ZERO(MV_PCI_SERR_MASK);
1934 	ZERO(PCI_IRQ_CAUSE_OFS);
1935 	ZERO(PCI_IRQ_MASK_OFS);
1936 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1937 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1938 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1939 	ZERO(MV_PCI_ERR_COMMAND);
1940 }
1941 #undef ZERO
1942 
1943 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1944 {
1945 	u32 tmp;
1946 
1947 	mv5_reset_flash(hpriv, mmio);
1948 
1949 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1950 	tmp &= 0x3;
1951 	tmp |= (1 << 5) | (1 << 6);
1952 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1953 }
1954 
1955 /**
1956  *      mv6_reset_hc - Perform the 6xxx global soft reset
1957  *      @mmio: base address of the HBA
1958  *
1959  *      This routine only applies to 6xxx parts.
1960  *
1961  *      LOCKING:
1962  *      Inherited from caller.
1963  */
1964 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1965 			unsigned int n_hc)
1966 {
1967 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1968 	int i, rc = 0;
1969 	u32 t;
1970 
1971 	/* Following procedure defined in PCI "main command and status
1972 	 * register" table.
1973 	 */
1974 	t = readl(reg);
1975 	writel(t | STOP_PCI_MASTER, reg);
1976 
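	/* poll up to ~1ms (1000 x udelay(1)) for in-flight PCI master
	 * transactions to drain before asserting the global soft reset
	 */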
1977 	for (i = 0; i < 1000; i++) {
1978 		udelay(1);
1979 		t = readl(reg);
1980 		if (PCI_MASTER_EMPTY & t) {
1981 			break;
1982 		}
1983 	}
1984 	if (!(PCI_MASTER_EMPTY & t)) {
1985 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1986 		rc = 1;
1987 		goto done;
1988 	}
1989 
1990 	/* set reset */
1991 	i = 5;
1992 	do {
1993 		writel(t | GLOB_SFT_RST, reg);
1994 		t = readl(reg);
1995 		udelay(1);
1996 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
1997 
1998 	if (!(GLOB_SFT_RST & t)) {
1999 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2000 		rc = 1;
2001 		goto done;
2002 	}
2003 
2004 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2005 	i = 5;
2006 	do {
2007 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2008 		t = readl(reg);
2009 		udelay(1);
2010 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2011 
2012 	if (GLOB_SFT_RST & t) {
2013 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2014 		rc = 1;
2015 	}
2016 done:
2017 	return rc;
2018 }
2019 
2020 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2021 			   void __iomem *mmio)
2022 {
2023 	void __iomem *port_mmio;
2024 	u32 tmp;
2025 
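	/* if bit 0 of the reset-config register is clear, use fixed
	 * default amplitude/pre-emphasis values; otherwise sample the
	 * values currently programmed into PHY_MODE2
	 */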
2026 	tmp = readl(mmio + MV_RESET_CFG);
2027 	if ((tmp & (1 << 0)) == 0) {
2028 		hpriv->signal[idx].amps = 0x7 << 8;
2029 		hpriv->signal[idx].pre = 0x1 << 5;
2030 		return;
2031 	}
2032 
2033 	port_mmio = mv_port_base(mmio, idx);
2034 	tmp = readl(port_mmio + PHY_MODE2);
2035 
2036 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2037 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2038 }
2039 
2040 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2041 {
2042 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2043 }
2044 
2045 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2046 			   unsigned int port)
2047 {
2048 	void __iomem *port_mmio = mv_port_base(mmio, port);
2049 
2050 	u32 hp_flags = hpriv->hp_flags;
2051 	int fix_phy_mode2 =
2052 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2053 	int fix_phy_mode4 =
2054 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2055 	u32 m2, tmp;
2056 
2057 	if (fix_phy_mode2) {
2058 		m2 = readl(port_mmio + PHY_MODE2);
2059 		m2 &= ~(1 << 16);
2060 		m2 |= (1 << 31);
2061 		writel(m2, port_mmio + PHY_MODE2);
2062 
2063 		udelay(200);
2064 
2065 		m2 = readl(port_mmio + PHY_MODE2);
2066 		m2 &= ~((1 << 16) | (1 << 31));
2067 		writel(m2, port_mmio + PHY_MODE2);
2068 
2069 		udelay(200);
2070 	}
2071 
2072 	/* who knows what this magic does */
2073 	tmp = readl(port_mmio + PHY_MODE3);
2074 	tmp &= ~0x7F800000;
2075 	tmp |= 0x2A800000;
2076 	writel(tmp, port_mmio + PHY_MODE3);
2077 
2078 	if (fix_phy_mode4) {
2079 		u32 m4;
2080 
2081 		m4 = readl(port_mmio + PHY_MODE4);
2082 
2083 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2084 			tmp = readl(port_mmio + 0x310);
2085 
2086 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2087 
2088 		writel(m4, port_mmio + PHY_MODE4);
2089 
2090 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2091 			writel(tmp, port_mmio + 0x310);
2092 	}
2093 
2094 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2095 	m2 = readl(port_mmio + PHY_MODE2);
2096 
2097 	m2 &= ~MV_M2_PREAMP_MASK;
2098 	m2 |= hpriv->signal[port].amps;
2099 	m2 |= hpriv->signal[port].pre;
2100 	m2 &= ~(1 << 16);
2101 
2102 	/* according to mvSata 3.6.1, some IIE values are fixed */
2103 	if (IS_GEN_IIE(hpriv)) {
2104 		m2 &= ~0xC30FF01F;
2105 		m2 |= 0x0000900F;
2106 	}
2107 
2108 	writel(m2, port_mmio + PHY_MODE2);
2109 }
2110 
2111 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2112 			     unsigned int port_no)
2113 {
2114 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2115 
2116 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2117 
2118 	if (IS_GEN_II(hpriv)) {
2119 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2120 		ifctl |= (1 << 7);		/* enable gen2i speed */
2121 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2122 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2123 	}
2124 
2125 	udelay(25);		/* allow reset propagation */
2126 
2127 	/* Spec never mentions clearing the bit.  Marvell's driver does
2128 	 * clear the bit, however.
2129 	 */
2130 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2131 
2132 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2133 
2134 	if (IS_GEN_I(hpriv))
2135 		mdelay(1);
2136 }
2137 
2138 /**
2139  *      mv_phy_reset - Perform eDMA reset followed by COMRESET
2140  *      @ap: ATA channel to manipulate
2141  *
2142  *      Part of this is taken from __sata_phy_reset.  Note that it
2143  *      polls the link state with msleep(), so it can sleep.
2144  *
2145  *      LOCKING:
2146  *      Inherited from caller.  May sleep; only call from contexts
2147  *      that allow sleeping (it is used from the EH hardreset path).
2148  */
2149 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2150 			 unsigned long deadline)
2151 {
2152 	struct mv_port_priv *pp	= ap->private_data;
2153 	struct mv_host_priv *hpriv = ap->host->private_data;
2154 	void __iomem *port_mmio = mv_ap_base(ap);
2155 	int retry = 5;
2156 	u32 sstatus;
2157 
2158 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2159 
2160 #ifdef DEBUG
2161 	{
2162 		u32 sstatus, serror, scontrol;
2163 
2164 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2165 		mv_scr_read(ap, SCR_ERROR, &serror);
2166 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2167 		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2168 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2169 	}
2170 #endif
2171 
2172 	/* Issue COMRESET via SControl */
2173 comreset_retry:
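	/* SControl 0x301: DET=1 requests COMRESET, IPM=3 keeps
	 * partial/slumber power management disabled; writing 0x300
	 * afterwards releases DET so the link can renegotiate
	 */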
2174 	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2175 	msleep(1);
2176 
2177 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2178 	msleep(20);
2179 
2180 	do {
2181 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2182 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2183 			break;
2184 
2185 		msleep(1);
2186 	} while (time_before(jiffies, deadline));
2187 
2188 	/* work around errata */
2189 	if (IS_GEN_II(hpriv) &&
2190 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2191 	    (retry-- > 0))
2192 		goto comreset_retry;
2193 
2194 #ifdef DEBUG
2195 	{
2196 		u32 sstatus, serror, scontrol;
2197 
2198 		mv_scr_read(ap, SCR_STATUS, &sstatus);
2199 		mv_scr_read(ap, SCR_ERROR, &serror);
2200 		mv_scr_read(ap, SCR_CONTROL, &scontrol);
2201 		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2202 			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
2203 	}
2204 #endif
2205 
2206 	if (ata_port_offline(ap)) {
2207 		*class = ATA_DEV_NONE;
2208 		return;
2209 	}
2210 
2211 	/* even after SStatus reflects that the device is ready, it
2212 	 * seems to take a while for the link to be fully established
2213 	 * (and thus for Status to stop reading 0x80/0x7F), so we poll
2214 	 * a bit for that here.
2215 	 */
2216 	retry = 20;
2217 	while (1) {
2218 		u8 drv_stat = ata_check_status(ap);
2219 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2220 			break;
2221 		msleep(500);
2222 		if (retry-- <= 0)
2223 			break;
2224 		if (time_after(jiffies, deadline))
2225 			break;
2226 	}
2227 
2228 	/* FIXME: if we passed the deadline, the following
2229 	 * code probably produces an invalid result
2230 	 */
2231 
2232 	/* finally, read device signature from TF registers */
2233 	*class = ata_dev_try_classify(ap, 0, NULL);
2234 
2235 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2236 
2237 	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2238 
2239 	VPRINTK("EXIT\n");
2240 }
2241 
2242 static int mv_prereset(struct ata_port *ap, unsigned long deadline)
2243 {
2244 	struct mv_port_priv *pp	= ap->private_data;
2245 	struct ata_eh_context *ehc = &ap->eh_context;
2246 	int rc;
2247 
2248 	rc = mv_stop_dma(ap);
2249 	if (rc)
2250 		ehc->i.action |= ATA_EH_HARDRESET;
2251 
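	/* force a hardreset the first time EH runs on this port */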
2252 	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2253 		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2254 		ehc->i.action |= ATA_EH_HARDRESET;
2255 	}
2256 
2257 	/* if we're about to do hardreset, nothing more to do */
2258 	if (ehc->i.action & ATA_EH_HARDRESET)
2259 		return 0;
2260 
2261 	if (ata_port_online(ap))
2262 		rc = ata_wait_ready(ap, deadline);
2263 	else
2264 		rc = -ENODEV;
2265 
2266 	return rc;
2267 }
2268 
2269 static int mv_hardreset(struct ata_port *ap, unsigned int *class,
2270 			unsigned long deadline)
2271 {
2272 	struct mv_host_priv *hpriv = ap->host->private_data;
2273 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2274 
2275 	mv_stop_dma(ap);
2276 
2277 	mv_channel_reset(hpriv, mmio, ap->port_no);
2278 
2279 	mv_phy_reset(ap, class, deadline);
2280 
2281 	return 0;
2282 }
2283 
2284 static void mv_postreset(struct ata_port *ap, unsigned int *classes)
2285 {
2286 	u32 serr;
2287 
2288 	/* print link status */
2289 	sata_print_link_status(ap);
2290 
2291 	/* clear SError */
2292 	sata_scr_read(ap, SCR_ERROR, &serr);
2293 	sata_scr_write_flush(ap, SCR_ERROR, serr);
2294 
2295 	/* bail out if no device is present */
2296 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2297 		DPRINTK("EXIT, no device\n");
2298 		return;
2299 	}
2300 
2301 	/* set up device control */
2302 	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2303 }
2304 
2305 static void mv_error_handler(struct ata_port *ap)
2306 {
2307 	ata_do_eh(ap, mv_prereset, ata_std_softreset,
2308 		  mv_hardreset, mv_postreset);
2309 }
2310 
2311 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2312 {
2313 	mv_stop_dma(qc->ap);
2314 }
2315 
2316 static void mv_eh_freeze(struct ata_port *ap)
2317 {
2318 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2319 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2320 	u32 tmp, mask;
2321 	unsigned int shift;
2322 
2323 	/* FIXME: handle coalescing completion events properly */
2324 
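	/* each port has a two-bit (err, done) field in the main IRQ
	 * mask; ports behind HC1 are shifted up by one extra bit.
	 * e.g. port 5: hc = 1, shift = 5 * 2 + 1 = 11, mask = 0x3 << 11
	 */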
2325 	shift = ap->port_no * 2;
2326 	if (hc > 0)
2327 		shift++;
2328 
2329 	mask = 0x3 << shift;
2330 
2331 	/* disable assertion of portN err, done events */
2332 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2333 	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2334 }
2335 
2336 static void mv_eh_thaw(struct ata_port *ap)
2337 {
2338 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2339 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2340 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2341 	void __iomem *port_mmio = mv_ap_base(ap);
2342 	u32 tmp, mask, hc_irq_cause;
2343 	unsigned int shift, hc_port_no = ap->port_no;
2344 
2345 	/* FIXME: handle coalescing completion events properly */
2346 
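	/* same two-bit field per port as in mv_eh_freeze(); hc_port_no
	 * becomes the port's index (0-3) within its own host controller
	 */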
2347 	shift = ap->port_no * 2;
2348 	if (hc > 0) {
2349 		shift++;
2350 		hc_port_no -= 4;
2351 	}
2352 
2353 	mask = 0x3 << shift;
2354 
2355 	/* clear EDMA errors on this port */
2356 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2357 
2358 	/* clear pending irq events */
2359 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2360 	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2361 	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2362 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2363 
2364 	/* enable assertion of portN err, done events */
2365 	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2366 	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2367 }
2368 
2369 /**
2370  *      mv_port_init - Perform some early initialization on a single port.
2371  *      @port: libata data structure storing shadow register addresses
2372  *      @port_mmio: base address of the port
2373  *
2374  *      Initialize shadow register mmio addresses, clear outstanding
2375  *      interrupts on the port, and unmask interrupts for the future
2376  *      start of the port.
2377  *
2378  *      LOCKING:
2379  *      Inherited from caller.
2380  */
2381 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2382 {
2383 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2384 	unsigned serr_ofs;
2385 
2386 	/* PIO related setup: the taskfile shadow registers are exposed
2387 	 * as consecutive 32-bit slots starting at SHD_BLK_OFS */
2388 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2389 	port->error_addr =
2390 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2391 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2392 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2393 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2394 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2395 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2396 	port->status_addr =
2397 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2398 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2399 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2400 
2401 	/* unused: */
2402 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2403 
2404 	/* Clear any currently outstanding port interrupt conditions */
2405 	serr_ofs = mv_scr_offset(SCR_ERROR);
2406 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2407 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2408 
2409 	/* unmask all EDMA error interrupts */
2410 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2411 
2412 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2413 		readl(port_mmio + EDMA_CFG_OFS),
2414 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2415 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2416 }
2417 
2418 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2419 {
2420 	struct pci_dev *pdev = to_pci_dev(host->dev);
2421 	struct mv_host_priv *hpriv = host->private_data;
2422 	u32 hp_flags = hpriv->hp_flags;
2423 
2424 	switch (board_idx) {
2425 	case chip_5080:
2426 		hpriv->ops = &mv5xxx_ops;
2427 		hp_flags |= MV_HP_GEN_I;
2428 
2429 		switch (pdev->revision) {
2430 		case 0x1:
2431 			hp_flags |= MV_HP_ERRATA_50XXB0;
2432 			break;
2433 		case 0x3:
2434 			hp_flags |= MV_HP_ERRATA_50XXB2;
2435 			break;
2436 		default:
2437 			dev_printk(KERN_WARNING, &pdev->dev,
2438 			   "Applying 50XXB2 workarounds to unknown rev\n");
2439 			hp_flags |= MV_HP_ERRATA_50XXB2;
2440 			break;
2441 		}
2442 		break;
2443 
2444 	case chip_504x:
2445 	case chip_508x:
2446 		hpriv->ops = &mv5xxx_ops;
2447 		hp_flags |= MV_HP_GEN_I;
2448 
2449 		switch (pdev->revision) {
2450 		case 0x0:
2451 			hp_flags |= MV_HP_ERRATA_50XXB0;
2452 			break;
2453 		case 0x3:
2454 			hp_flags |= MV_HP_ERRATA_50XXB2;
2455 			break;
2456 		default:
2457 			dev_printk(KERN_WARNING, &pdev->dev,
2458 			   "Applying 50XXB2 workarounds to unknown rev\n");
2459 			hp_flags |= MV_HP_ERRATA_50XXB2;
2460 			break;
2461 		}
2462 		break;
2463 
2464 	case chip_604x:
2465 	case chip_608x:
2466 		hpriv->ops = &mv6xxx_ops;
2467 		hp_flags |= MV_HP_GEN_II;
2468 
2469 		switch (pdev->revision) {
2470 		case 0x7:
2471 			hp_flags |= MV_HP_ERRATA_60X1B2;
2472 			break;
2473 		case 0x9:
2474 			hp_flags |= MV_HP_ERRATA_60X1C0;
2475 			break;
2476 		default:
2477 			dev_printk(KERN_WARNING, &pdev->dev,
2478 				   "Applying 60X1B2 workarounds to unknown rev\n");
2479 			hp_flags |= MV_HP_ERRATA_60X1B2;
2480 			break;
2481 		}
2482 		break;
2483 
2484 	case chip_7042:
2485 	case chip_6042:
2486 		hpriv->ops = &mv6xxx_ops;
2487 		hp_flags |= MV_HP_GEN_IIE;
2488 
2489 		switch (pdev->revision) {
2490 		case 0x0:
2491 			hp_flags |= MV_HP_ERRATA_XX42A0;
2492 			break;
2493 		case 0x1:
2494 			hp_flags |= MV_HP_ERRATA_60X1C0;
2495 			break;
2496 		default:
2497 			dev_printk(KERN_WARNING, &pdev->dev,
2498 			   "Applying 60X1C0 workarounds to unknown rev\n");
2499 			hp_flags |= MV_HP_ERRATA_60X1C0;
2500 			break;
2501 		}
2502 		break;
2503 
2504 	default:
2505 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2506 		return 1;
2507 	}
2508 
2509 	hpriv->hp_flags = hp_flags;
2510 
2511 	return 0;
2512 }
2513 
2514 /**
2515  *      mv_init_host - Perform some early initialization of the host.
2516  *	@host: ATA host to initialize
2517  *      @board_idx: controller index
2518  *
2519  *      If possible, do an early global reset of the host.  Then do
2520  *      our port init and clear/unmask all/relevant host interrupts.
2521  *
2522  *      LOCKING:
2523  *      Inherited from caller.
2524  */
2525 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2526 {
2527 	int rc = 0, n_hc, port, hc;
2528 	struct pci_dev *pdev = to_pci_dev(host->dev);
2529 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2530 	struct mv_host_priv *hpriv = host->private_data;
2531 
2532 	/* global interrupt mask */
2533 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2534 
2535 	rc = mv_chip_id(host, board_idx);
2536 	if (rc)
2537 		goto done;
2538 
2539 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2540 
2541 	for (port = 0; port < host->n_ports; port++)
2542 		hpriv->ops->read_preamp(hpriv, port, mmio);
2543 
2544 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2545 	if (rc)
2546 		goto done;
2547 
2548 	hpriv->ops->reset_flash(hpriv, mmio);
2549 	hpriv->ops->reset_bus(pdev, mmio);
2550 	hpriv->ops->enable_leds(hpriv, mmio);
2551 
2552 	for (port = 0; port < host->n_ports; port++) {
2553 		if (IS_GEN_II(hpriv)) {
2554 			void __iomem *port_mmio = mv_port_base(mmio, port);
2555 
2556 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2557 			ifctl |= (1 << 7);		/* enable gen2i speed */
2558 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2559 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2560 		}
2561 
2562 		hpriv->ops->phy_errata(hpriv, mmio, port);
2563 	}
2564 
2565 	for (port = 0; port < host->n_ports; port++) {
2566 		void __iomem *port_mmio = mv_port_base(mmio, port);
2567 		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2568 	}
2569 
2570 	for (hc = 0; hc < n_hc; hc++) {
2571 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2572 
2573 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2574 			"(before clear)=0x%08x\n", hc,
2575 			readl(hc_mmio + HC_CFG_OFS),
2576 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2577 
2578 		/* Clear any currently outstanding hc interrupt conditions */
2579 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2580 	}
2581 
2582 	/* Clear any currently outstanding host interrupt conditions */
2583 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2584 
2585 	/* and unmask interrupt generation for host regs */
2586 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2587 
2588 	if (IS_GEN_I(hpriv))
2589 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2590 	else
2591 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2592 
2593 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2594 		"PCI int cause/mask=0x%08x/0x%08x\n",
2595 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2596 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2597 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2598 		readl(mmio + PCI_IRQ_MASK_OFS));
2599 
2600 done:
2601 	return rc;
2602 }
2603 
2604 /**
2605  *      mv_print_info - Dump key info to kernel log for perusal.
2606  *      @host: ATA host to print info about
2607  *
2608  *      FIXME: complete this.
2609  *
2610  *      LOCKING:
2611  *      Inherited from caller.
2612  */
2613 static void mv_print_info(struct ata_host *host)
2614 {
2615 	struct pci_dev *pdev = to_pci_dev(host->dev);
2616 	struct mv_host_priv *hpriv = host->private_data;
2617 	u8 scc;
2618 	const char *scc_s, *gen;
2619 
2620 	/* Read the PCI class code to report whether the controller is
2621 	 * operating in SCSI or RAID mode
2622 	 */
2623 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2624 	if (scc == 0)
2625 		scc_s = "SCSI";
2626 	else if (scc == 0x01)
2627 		scc_s = "RAID";
2628 	else
2629 		scc_s = "?";
2630 
2631 	if (IS_GEN_I(hpriv))
2632 		gen = "I";
2633 	else if (IS_GEN_II(hpriv))
2634 		gen = "II";
2635 	else if (IS_GEN_IIE(hpriv))
2636 		gen = "IIE";
2637 	else
2638 		gen = "?";
2639 
2640 	dev_printk(KERN_INFO, &pdev->dev,
2641 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2642 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2643 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2644 }
2645 
2646 /**
2647  *      mv_init_one - handle a positive probe of a Marvell host
2648  *      @pdev: PCI device found
2649  *      @ent: PCI device ID entry for the matched host
2650  *
2651  *      LOCKING:
2652  *      Inherited from caller.
2653  */
2654 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2655 {
2656 	static int printed_version;
2657 	unsigned int board_idx = (unsigned int)ent->driver_data;
2658 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2659 	struct ata_host *host;
2660 	struct mv_host_priv *hpriv;
2661 	int n_ports, rc;
2662 
2663 	if (!printed_version++)
2664 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2665 
2666 	/* allocate host */
2667 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2668 
2669 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2670 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2671 	if (!host || !hpriv)
2672 		return -ENOMEM;
2673 	host->private_data = hpriv;
2674 
2675 	/* acquire resources */
2676 	rc = pcim_enable_device(pdev);
2677 	if (rc)
2678 		return rc;
2679 
2680 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2681 	if (rc == -EBUSY)
2682 		pcim_pin_device(pdev);
2683 	if (rc)
2684 		return rc;
2685 	host->iomap = pcim_iomap_table(pdev);
2686 
2687 	rc = pci_go_64(pdev);
2688 	if (rc)
2689 		return rc;
2690 
2691 	/* initialize adapter */
2692 	rc = mv_init_host(host, board_idx);
2693 	if (rc)
2694 		return rc;
2695 
2696 	/* Enable interrupts; fall back to legacy INTx if MSI can't be used */
2697 	if (msi && pci_enable_msi(pdev))
2698 		pci_intx(pdev, 1);
2699 
2700 	mv_dump_pci_cfg(pdev, 0x68);
2701 	mv_print_info(host);
2702 
2703 	pci_set_master(pdev);
2704 	pci_try_set_mwi(pdev);
2705 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2706 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2707 }
2708 
2709 static int __init mv_init(void)
2710 {
2711 	return pci_register_driver(&mv_pci_driver);
2712 }
2713 
2714 static void __exit mv_exit(void)
2715 {
2716 	pci_unregister_driver(&mv_pci_driver);
2717 }
2718 
2719 MODULE_AUTHOR("Brett Russ");
2720 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2721 MODULE_LICENSE("GPL");
2722 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2723 MODULE_VERSION(DRV_VERSION);
2724 
2725 module_param(msi, int, 0444);
2726 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2727 
2728 module_init(mv_init);
2729 module_exit(mv_exit);
2730