xref: /openbmc/linux/drivers/ata/sata_mv.c (revision bf74b964)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2005: EMC Corporation, all rights reserved.
5  * Copyright 2005 Red Hat, Inc.  All rights reserved.
6  *
7  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; version 2 of the License.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 /*
25   sata_mv TODO list:
26 
27   1) Needs a full errata audit for all chipsets.  I implemented most
28   of the errata workarounds found in the Marvell vendor driver, but
29   I distinctly remember that a couple of workarounds (one related to PCI-X)
30   are still needed.
31 
32   2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
33   probing/error handling in general.  MUST HAVE.
34 
35   3) Add hotplug support (easy, once new-EH support appears)
36 
37   4) Add NCQ support (easy to intermediate, once new-EH support appears)
38 
39   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
40 
41   6) Add port multiplier support (intermediate)
42 
43   7) Test and verify 3.0 Gbps support
44 
45   8) Develop a low-power-consumption strategy, and implement it.
46 
47   9) [Experiment, low priority] See if ATAPI can be supported using
48   "unknown FIS" or "vendor-specific FIS" support, or something creative
49   like that.
50 
51   10) [Experiment, low priority] Investigate interrupt coalescing.
52   Quite often, especially with PCI Message Signalled Interrupts (MSI),
53   the overhead reduced by interrupt mitigation is not worth the
54   latency cost.
55 
56   11) [Experiment, Marvell value added] Is it possible to use target
57   mode to cross-connect two Linux boxes with Marvell cards?  If so,
58   creating LibATA target mode support would be very interesting.
59 
60   Target mode, for those without docs, is the ability to directly
61   connect two SATA controllers.
62 
63   12) Verify that 7042 is fully supported.  I only have a 6042.
64 
65 */
66 
67 
68 #include <linux/kernel.h>
69 #include <linux/module.h>
70 #include <linux/pci.h>
71 #include <linux/init.h>
72 #include <linux/blkdev.h>
73 #include <linux/delay.h>
74 #include <linux/interrupt.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <linux/libata.h>
80 
81 #define DRV_NAME	"sata_mv"
82 #define DRV_VERSION	"0.81"
83 
84 enum {
85 	/* BARs are enumerated in pci_resource_start() terms */
86 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
87 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
88 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
89 
90 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
91 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
92 
93 	MV_PCI_REG_BASE		= 0,
94 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
95 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
96 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
97 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
98 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
99 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
100 
101 	MV_SATAHC0_REG_BASE	= 0x20000,
102 	MV_FLASH_CTL		= 0x1046c,
103 	MV_GPIO_PORT_CTL	= 0x104f0,
104 	MV_RESET_CFG		= 0x180d8,
105 
106 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
107 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
108 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
109 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
110 
111 	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,
112 
113 	MV_MAX_Q_DEPTH		= 32,
114 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
115 
116 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
117 	 * CRPB needs alignment on a 256B boundary. Size == 256B
118 	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
119 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
120 	 */
121 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
122 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
123 	MV_MAX_SG_CT		= 176,
124 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
125 	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
126 
127 	MV_PORTS_PER_HC		= 4,
128 	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
129 	MV_PORT_HC_SHIFT	= 2,
130 	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
131 	MV_PORT_MASK		= 3,
132 
133 	/* Host Flags */
134 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
135 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
136 	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
138 				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
139 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
140 
141 	CRQB_FLAG_READ		= (1 << 0),
142 	CRQB_TAG_SHIFT		= 1,
143 	CRQB_CMD_ADDR_SHIFT	= 8,
144 	CRQB_CMD_CS		= (0x2 << 11),
145 	CRQB_CMD_LAST		= (1 << 15),
146 
147 	CRPB_FLAG_STATUS_SHIFT	= 8,
148 
149 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
150 
151 	/* PCI interface registers */
152 
153 	PCI_COMMAND_OFS		= 0xc00,
154 
155 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
156 	STOP_PCI_MASTER		= (1 << 2),
157 	PCI_MASTER_EMPTY	= (1 << 3),
158 	GLOB_SFT_RST		= (1 << 4),
159 
160 	MV_PCI_MODE		= 0xd00,
161 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
162 	MV_PCI_DISC_TIMER	= 0xd04,
163 	MV_PCI_MSI_TRIGGER	= 0xc38,
164 	MV_PCI_SERR_MASK	= 0xc28,
165 	MV_PCI_XBAR_TMOUT	= 0x1d04,
166 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
167 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
168 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
169 	MV_PCI_ERR_COMMAND	= 0x1d50,
170 
171 	PCI_IRQ_CAUSE_OFS		= 0x1d58,
172 	PCI_IRQ_MASK_OFS		= 0x1d5c,
173 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
174 
175 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
176 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
177 	PORT0_ERR		= (1 << 0),	/* shift by port # */
178 	PORT0_DONE		= (1 << 1),	/* shift by port # */
179 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
180 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
181 	PCI_ERR			= (1 << 18),
182 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
183 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
184 	PORTS_0_3_COAL_DONE	= (1 << 8),
185 	PORTS_4_7_COAL_DONE	= (1 << 17),
186 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
187 	GPIO_INT		= (1 << 22),
188 	SELF_INT		= (1 << 23),
189 	TWSI_INT		= (1 << 24),
190 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
191 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
192 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
193 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 				   HC_MAIN_RSVD),
195 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 				   HC_MAIN_RSVD_5),
197 
198 	/* SATAHC registers */
199 	HC_CFG_OFS		= 0,
200 
201 	HC_IRQ_CAUSE_OFS	= 0x14,
202 	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
203 	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
204 	DEV_IRQ			= (1 << 8),	/* shift by port # */
205 
206 	/* Shadow block registers */
207 	SHD_BLK_OFS		= 0x100,
208 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
209 
210 	/* SATA registers */
211 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
212 	SATA_ACTIVE_OFS		= 0x350,
213 	PHY_MODE3		= 0x310,
214 	PHY_MODE4		= 0x314,
215 	PHY_MODE2		= 0x330,
216 	MV5_PHY_MODE		= 0x74,
217 	MV5_LT_MODE		= 0x30,
218 	MV5_PHY_CTL		= 0x0C,
219 	SATA_INTERFACE_CTL	= 0x050,
220 
221 	MV_M2_PREAMP_MASK	= 0x7e0,
222 
223 	/* Port registers */
224 	EDMA_CFG_OFS		= 0,
225 	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
226 	EDMA_CFG_NCQ		= (1 << 5),
227 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
228 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
229 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
230 
231 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
232 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
233 	EDMA_ERR_D_PAR		= (1 << 0),
234 	EDMA_ERR_PRD_PAR	= (1 << 1),
235 	EDMA_ERR_DEV		= (1 << 2),
236 	EDMA_ERR_DEV_DCON	= (1 << 3),
237 	EDMA_ERR_DEV_CON	= (1 << 4),
238 	EDMA_ERR_SERR		= (1 << 5),
239 	EDMA_ERR_SELF_DIS	= (1 << 7),
240 	EDMA_ERR_BIST_ASYNC	= (1 << 8),
241 	EDMA_ERR_CRBQ_PAR	= (1 << 9),
242 	EDMA_ERR_CRPB_PAR	= (1 << 10),
243 	EDMA_ERR_INTRL_PAR	= (1 << 11),
244 	EDMA_ERR_IORDY		= (1 << 12),
245 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
246 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
247 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
248 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
249 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
250 	EDMA_ERR_TRANS_PROTO	= (1 << 31),
251 	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
252 				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
253 				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
254 				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
255 				   EDMA_ERR_LNK_DATA_RX |
256 				   EDMA_ERR_LNK_DATA_TX |
257 				   EDMA_ERR_TRANS_PROTO),
258 
259 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
260 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
261 
262 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
263 	EDMA_REQ_Q_PTR_SHIFT	= 5,
264 
265 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
266 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
267 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
268 	EDMA_RSP_Q_PTR_SHIFT	= 3,
269 
270 	EDMA_CMD_OFS		= 0x28,
271 	EDMA_EN			= (1 << 0),
272 	EDMA_DS			= (1 << 1),
273 	ATA_RST			= (1 << 2),
274 
275 	EDMA_IORDY_TMOUT	= 0x34,
276 	EDMA_ARB_CFG		= 0x38,
277 
278 	/* Host private flags (hp_flags) */
279 	MV_HP_FLAG_MSI		= (1 << 0),
280 	MV_HP_ERRATA_50XXB0	= (1 << 1),
281 	MV_HP_ERRATA_50XXB2	= (1 << 2),
282 	MV_HP_ERRATA_60X1B2	= (1 << 3),
283 	MV_HP_ERRATA_60X1C0	= (1 << 4),
284 	MV_HP_ERRATA_XX42A0	= (1 << 5),
285 	MV_HP_50XX		= (1 << 6),
286 	MV_HP_GEN_IIE		= (1 << 7),
287 
288 	/* Port private flags (pp_flags) */
289 	MV_PP_FLAG_EDMA_EN	= (1 << 0),
290 	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
291 };
292 
293 #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
294 #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
295 #define IS_GEN_I(hpriv) IS_50XX(hpriv)
296 #define IS_GEN_II(hpriv) IS_60XX(hpriv)
297 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
298 
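/* The BASE_LO masks below follow from the alignment rules noted earlier:
 * the CRQB ring is 1KB-aligned (low 10 address bits masked off, 0xfffffc00)
 * and the CRPB ring is 256B-aligned (0xffffff00).
 */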
299 enum {
300 	MV_DMA_BOUNDARY		= 0xffffffffU,
301 
302 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
303 
304 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
305 };
306 
307 enum chip_type {
308 	chip_504x,
309 	chip_508x,
310 	chip_5080,
311 	chip_604x,
312 	chip_608x,
313 	chip_6042,
314 	chip_7042,
315 };
316 
317 /* Command ReQuest Block: 32B */
318 struct mv_crqb {
319 	__le32			sg_addr;
320 	__le32			sg_addr_hi;
321 	__le16			ctrl_flags;
322 	__le16			ata_cmd[11];
323 };
324 
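/* Command ReQuest Block for Gen IIE (6042/7042): also 32B, but built from
 * packed 32-bit words instead of per-register shadow writes.
 */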
325 struct mv_crqb_iie {
326 	__le32			addr;
327 	__le32			addr_hi;
328 	__le32			flags;
329 	__le32			len;
330 	__le32			ata_cmd[4];
331 };
332 
333 /* Command ResPonse Block: 8B */
334 struct mv_crpb {
335 	__le16			id;
336 	__le16			flags;
337 	__le32			tmstmp;
338 };
339 
340 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
341 struct mv_sg {
342 	__le32			addr;
343 	__le32			flags_size;
344 	__le32			addr_hi;
345 	__le32			reserved;
346 };
347 
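/* Per-port DMA context: the CRQB and CRPB rings and the ePRD (SG) table,
 * plus their bus addresses.  All three are carved from a single coherent
 * allocation in mv_port_start().
 */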
348 struct mv_port_priv {
349 	struct mv_crqb		*crqb;
350 	dma_addr_t		crqb_dma;
351 	struct mv_crpb		*crpb;
352 	dma_addr_t		crpb_dma;
353 	struct mv_sg		*sg_tbl;
354 	dma_addr_t		sg_tbl_dma;
355 	u32			pp_flags;
356 };
357 
358 struct mv_port_signal {
359 	u32			amps;
360 	u32			pre;
361 };
362 
363 struct mv_host_priv;
364 struct mv_hw_ops {
365 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
366 			   unsigned int port);
367 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
368 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
369 			   void __iomem *mmio);
370 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
371 			unsigned int n_hc);
372 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
373 	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
374 };
375 
376 struct mv_host_priv {
377 	u32			hp_flags;
378 	struct mv_port_signal	signal[8];
379 	const struct mv_hw_ops	*ops;
380 };
381 
382 static void mv_irq_clear(struct ata_port *ap);
383 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
384 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
385 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
386 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
387 static void mv_phy_reset(struct ata_port *ap);
388 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
389 static int mv_port_start(struct ata_port *ap);
390 static void mv_port_stop(struct ata_port *ap);
391 static void mv_qc_prep(struct ata_queued_cmd *qc);
392 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
393 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
394 static void mv_eng_timeout(struct ata_port *ap);
395 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
396 
397 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
398 			   unsigned int port);
399 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
400 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
401 			   void __iomem *mmio);
402 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
403 			unsigned int n_hc);
404 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
405 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
406 
407 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
408 			   unsigned int port);
409 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
410 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
411 			   void __iomem *mmio);
412 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
413 			unsigned int n_hc);
414 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
415 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
416 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
417 			     unsigned int port_no);
418 static void mv_stop_and_reset(struct ata_port *ap);
419 
420 static struct scsi_host_template mv_sht = {
421 	.module			= THIS_MODULE,
422 	.name			= DRV_NAME,
423 	.ioctl			= ata_scsi_ioctl,
424 	.queuecommand		= ata_scsi_queuecmd,
425 	.can_queue		= MV_USE_Q_DEPTH,
426 	.this_id		= ATA_SHT_THIS_ID,
427 	.sg_tablesize		= MV_MAX_SG_CT,
428 	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
429 	.emulated		= ATA_SHT_EMULATED,
430 	.use_clustering		= 1,
431 	.proc_name		= DRV_NAME,
432 	.dma_boundary		= MV_DMA_BOUNDARY,
433 	.slave_configure	= ata_scsi_slave_config,
434 	.slave_destroy		= ata_scsi_slave_destroy,
435 	.bios_param		= ata_std_bios_param,
436 };
437 
438 static const struct ata_port_operations mv5_ops = {
439 	.port_disable		= ata_port_disable,
440 
441 	.tf_load		= ata_tf_load,
442 	.tf_read		= ata_tf_read,
443 	.check_status		= ata_check_status,
444 	.exec_command		= ata_exec_command,
445 	.dev_select		= ata_std_dev_select,
446 
447 	.phy_reset		= mv_phy_reset,
448 	.cable_detect		= ata_cable_sata,
449 
450 	.qc_prep		= mv_qc_prep,
451 	.qc_issue		= mv_qc_issue,
452 	.data_xfer		= ata_data_xfer,
453 
454 	.eng_timeout		= mv_eng_timeout,
455 
456 	.irq_clear		= mv_irq_clear,
457 	.irq_on			= ata_irq_on,
458 	.irq_ack		= ata_irq_ack,
459 
460 	.scr_read		= mv5_scr_read,
461 	.scr_write		= mv5_scr_write,
462 
463 	.port_start		= mv_port_start,
464 	.port_stop		= mv_port_stop,
465 };
466 
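/* Gen II (60xx): identical to mv5_ops except for the SCR accessors, since
 * these chips expose the SCR block within the per-port SATA registers
 * rather than via the Gen I PHY block.
 */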
467 static const struct ata_port_operations mv6_ops = {
468 	.port_disable		= ata_port_disable,
469 
470 	.tf_load		= ata_tf_load,
471 	.tf_read		= ata_tf_read,
472 	.check_status		= ata_check_status,
473 	.exec_command		= ata_exec_command,
474 	.dev_select		= ata_std_dev_select,
475 
476 	.phy_reset		= mv_phy_reset,
477 	.cable_detect		= ata_cable_sata,
478 
479 	.qc_prep		= mv_qc_prep,
480 	.qc_issue		= mv_qc_issue,
481 	.data_xfer		= ata_data_xfer,
482 
483 	.eng_timeout		= mv_eng_timeout,
484 
485 	.irq_clear		= mv_irq_clear,
486 	.irq_on			= ata_irq_on,
487 	.irq_ack		= ata_irq_ack,
488 
489 	.scr_read		= mv_scr_read,
490 	.scr_write		= mv_scr_write,
491 
492 	.port_start		= mv_port_start,
493 	.port_stop		= mv_port_stop,
494 };
495 
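/* Gen IIE (6042/7042): same as mv6_ops except that commands are prepared
 * in the IIE CRQB format (mv_qc_prep_iie).
 */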
496 static const struct ata_port_operations mv_iie_ops = {
497 	.port_disable		= ata_port_disable,
498 
499 	.tf_load		= ata_tf_load,
500 	.tf_read		= ata_tf_read,
501 	.check_status		= ata_check_status,
502 	.exec_command		= ata_exec_command,
503 	.dev_select		= ata_std_dev_select,
504 
505 	.phy_reset		= mv_phy_reset,
506 	.cable_detect		= ata_cable_sata,
507 
508 	.qc_prep		= mv_qc_prep_iie,
509 	.qc_issue		= mv_qc_issue,
510 	.data_xfer		= ata_data_xfer,
511 
512 	.eng_timeout		= mv_eng_timeout,
513 
514 	.irq_clear		= mv_irq_clear,
515 	.irq_on			= ata_irq_on,
516 	.irq_ack		= ata_irq_ack,
517 
518 	.scr_read		= mv_scr_read,
519 	.scr_write		= mv_scr_write,
520 
521 	.port_start		= mv_port_start,
522 	.port_stop		= mv_port_stop,
523 };
524 
525 static const struct ata_port_info mv_port_info[] = {
526 	{  /* chip_504x */
527 		.flags		= MV_COMMON_FLAGS,
528 		.pio_mask	= 0x1f,	/* pio0-4 */
529 		.udma_mask	= 0x7f,	/* udma0-6 */
530 		.port_ops	= &mv5_ops,
531 	},
532 	{  /* chip_508x */
533 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
534 		.pio_mask	= 0x1f,	/* pio0-4 */
535 		.udma_mask	= 0x7f,	/* udma0-6 */
536 		.port_ops	= &mv5_ops,
537 	},
538 	{  /* chip_5080 */
539 		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
540 		.pio_mask	= 0x1f,	/* pio0-4 */
541 		.udma_mask	= 0x7f,	/* udma0-6 */
542 		.port_ops	= &mv5_ops,
543 	},
544 	{  /* chip_604x */
545 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
546 		.pio_mask	= 0x1f,	/* pio0-4 */
547 		.udma_mask	= 0x7f,	/* udma0-6 */
548 		.port_ops	= &mv6_ops,
549 	},
550 	{  /* chip_608x */
551 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
552 				   MV_FLAG_DUAL_HC),
553 		.pio_mask	= 0x1f,	/* pio0-4 */
554 		.udma_mask	= 0x7f,	/* udma0-6 */
555 		.port_ops	= &mv6_ops,
556 	},
557 	{  /* chip_6042 */
558 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
559 		.pio_mask	= 0x1f,	/* pio0-4 */
560 		.udma_mask	= 0x7f,	/* udma0-6 */
561 		.port_ops	= &mv_iie_ops,
562 	},
563 	{  /* chip_7042 */
564 		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
565 		.pio_mask	= 0x1f,	/* pio0-4 */
566 		.udma_mask	= 0x7f,	/* udma0-6 */
567 		.port_ops	= &mv_iie_ops,
568 	},
569 };
570 
571 static const struct pci_device_id mv_pci_tbl[] = {
572 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
573 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
574 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
575 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
576 
577 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
578 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
579 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
580 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
581 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
582 
583 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
584 
585 	/* Adaptec 1430SA */
586 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
587 
588 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
589 
590 	/* add Marvell 7042 support */
591 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
592 
593 	{ }			/* terminate list */
594 };
595 
596 static struct pci_driver mv_pci_driver = {
597 	.name			= DRV_NAME,
598 	.id_table		= mv_pci_tbl,
599 	.probe			= mv_init_one,
600 	.remove			= ata_pci_remove_one,
601 };
602 
603 static const struct mv_hw_ops mv5xxx_ops = {
604 	.phy_errata		= mv5_phy_errata,
605 	.enable_leds		= mv5_enable_leds,
606 	.read_preamp		= mv5_read_preamp,
607 	.reset_hc		= mv5_reset_hc,
608 	.reset_flash		= mv5_reset_flash,
609 	.reset_bus		= mv5_reset_bus,
610 };
611 
612 static const struct mv_hw_ops mv6xxx_ops = {
613 	.phy_errata		= mv6_phy_errata,
614 	.enable_leds		= mv6_enable_leds,
615 	.read_preamp		= mv6_read_preamp,
616 	.reset_hc		= mv6_reset_hc,
617 	.reset_flash		= mv6_reset_flash,
618 	.reset_bus		= mv_reset_pci_bus,
619 };
620 
621 /*
622  * module options
623  */
624 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
625 
626 
627 /* move to PCI layer or libata core? */
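/* Try 64-bit DMA masks (streaming and coherent) first; if 64-bit
 * addressing is unavailable or the coherent mask cannot be met, fall back
 * to 32-bit masks and fail only if those cannot be set either.
 */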
628 static int pci_go_64(struct pci_dev *pdev)
629 {
630 	int rc;
631 
632 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
633 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
634 		if (rc) {
635 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
636 			if (rc) {
637 				dev_printk(KERN_ERR, &pdev->dev,
638 					   "64-bit DMA enable failed\n");
639 				return rc;
640 			}
641 		}
642 	} else {
643 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
644 		if (rc) {
645 			dev_printk(KERN_ERR, &pdev->dev,
646 				   "32-bit DMA enable failed\n");
647 			return rc;
648 		}
649 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
650 		if (rc) {
651 			dev_printk(KERN_ERR, &pdev->dev,
652 				   "32-bit consistent DMA enable failed\n");
653 			return rc;
654 		}
655 	}
656 
657 	return rc;
658 }
659 
660 /*
661  * Functions
662  */
663 
664 static inline void writelfl(unsigned long data, void __iomem *addr)
665 {
666 	writel(data, addr);
667 	(void) readl(addr);	/* flush to avoid PCI posted write */
668 }
669 
670 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
671 {
672 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
673 }
674 
675 static inline unsigned int mv_hc_from_port(unsigned int port)
676 {
677 	return port >> MV_PORT_HC_SHIFT;
678 }
679 
680 static inline unsigned int mv_hardport_from_port(unsigned int port)
681 {
682 	return port & MV_PORT_MASK;
683 }
684 
685 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
686 						 unsigned int port)
687 {
688 	return mv_hc_base(base, mv_hc_from_port(port));
689 }
690 
691 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
692 {
693 	return  mv_hc_base_from_port(base, port) +
694 		MV_SATAHC_ARBTR_REG_SZ +
695 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
696 }
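/* Example, derived from the constants above: port 5 sits on HC 1 as hard
 * port 1, so mv_port_base() returns
 * base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000 = base + 0x34000.
 */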
697 
698 static inline void __iomem *mv_ap_base(struct ata_port *ap)
699 {
700 	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
701 }
702 
703 static inline int mv_get_hc_count(unsigned long port_flags)
704 {
705 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
706 }
707 
708 static void mv_irq_clear(struct ata_port *ap)
709 {
710 }
711 
712 /**
713  *      mv_start_dma - Enable eDMA engine
714  *      @base: port base address
715  *      @pp: port private data
716  *
717  *      Verify the local cache of the eDMA state is accurate with a
718  *      WARN_ON.
719  *
720  *      LOCKING:
721  *      Inherited from caller.
722  */
723 static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
724 {
725 	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
726 		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
727 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
728 	}
729 	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
730 }
731 
732 /**
733  *      mv_stop_dma - Disable eDMA engine
734  *      @ap: ATA channel to manipulate
735  *
736  *      Verify the local cache of the eDMA state is accurate with a
737  *      WARN_ON.
738  *
739  *      LOCKING:
740  *      Inherited from caller.
741  */
742 static void mv_stop_dma(struct ata_port *ap)
743 {
744 	void __iomem *port_mmio = mv_ap_base(ap);
745 	struct mv_port_priv *pp	= ap->private_data;
746 	u32 reg;
747 	int i;
748 
749 	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
750 		/* Disable EDMA if active.   The disable bit auto clears.
751 		 */
752 		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
753 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
754 	} else {
755 		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
756 	}
757 
758 	/* now properly wait for the eDMA to stop */
759 	for (i = 1000; i > 0; i--) {
760 		reg = readl(port_mmio + EDMA_CMD_OFS);
761 		if (!(EDMA_EN & reg)) {
762 			break;
763 		}
764 		udelay(100);
765 	}
766 
767 	if (EDMA_EN & reg) {
768 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
769 		/* FIXME: Consider doing a reset here to recover */
770 	}
771 }
772 
773 #ifdef ATA_DEBUG
774 static void mv_dump_mem(void __iomem *start, unsigned bytes)
775 {
776 	int b, w;
777 	for (b = 0; b < bytes; ) {
778 		DPRINTK("%p: ", start + b);
779 		for (w = 0; b < bytes && w < 4; w++) {
780 			printk("%08x ",readl(start + b));
781 			b += sizeof(u32);
782 		}
783 		printk("\n");
784 	}
785 }
786 #endif
787 
788 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
789 {
790 #ifdef ATA_DEBUG
791 	int b, w;
792 	u32 dw;
793 	for (b = 0; b < bytes; ) {
794 		DPRINTK("%02x: ", b);
795 		for (w = 0; b < bytes && w < 4; w++) {
796 			(void) pci_read_config_dword(pdev,b,&dw);
797 			printk("%08x ",dw);
798 			b += sizeof(u32);
799 		}
800 		printk("\n");
801 	}
802 #endif
803 }
804 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
805 			     struct pci_dev *pdev)
806 {
807 #ifdef ATA_DEBUG
808 	void __iomem *hc_base = mv_hc_base(mmio_base,
809 					   port >> MV_PORT_HC_SHIFT);
810 	void __iomem *port_base;
811 	int start_port, num_ports, p, start_hc, num_hcs, hc;
812 
813 	if (0 > port) {
814 		start_hc = start_port = 0;
815 		num_ports = 8;		/* should be benign for 4-port devs */
816 		num_hcs = 2;
817 	} else {
818 		start_hc = port >> MV_PORT_HC_SHIFT;
819 		start_port = port;
820 		num_ports = num_hcs = 1;
821 	}
822 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
823 		num_ports > 1 ? num_ports - 1 : start_port);
824 
825 	if (NULL != pdev) {
826 		DPRINTK("PCI config space regs:\n");
827 		mv_dump_pci_cfg(pdev, 0x68);
828 	}
829 	DPRINTK("PCI regs:\n");
830 	mv_dump_mem(mmio_base+0xc00, 0x3c);
831 	mv_dump_mem(mmio_base+0xd00, 0x34);
832 	mv_dump_mem(mmio_base+0xf00, 0x4);
833 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
834 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
835 		hc_base = mv_hc_base(mmio_base, hc);
836 		DPRINTK("HC regs (HC %i):\n", hc);
837 		mv_dump_mem(hc_base, 0x1c);
838 	}
839 	for (p = start_port; p < start_port + num_ports; p++) {
840 		port_base = mv_port_base(mmio_base, p);
841 		DPRINTK("EDMA regs (port %i):\n",p);
842 		mv_dump_mem(port_base, 0x54);
843 		DPRINTK("SATA regs (port %i):\n",p);
844 		mv_dump_mem(port_base+0x300, 0x60);
845 	}
846 #endif
847 }
848 
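/* Map a libata SCR index onto this chip's per-port register offsets.
 * With the standard SCR numbering (SStatus=0, SError=1, SControl=2) this
 * yields 0x300, 0x304 and 0x308; SActive sits apart at 0x350.
 */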
849 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
850 {
851 	unsigned int ofs;
852 
853 	switch (sc_reg_in) {
854 	case SCR_STATUS:
855 	case SCR_CONTROL:
856 	case SCR_ERROR:
857 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
858 		break;
859 	case SCR_ACTIVE:
860 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
861 		break;
862 	default:
863 		ofs = 0xffffffffU;
864 		break;
865 	}
866 	return ofs;
867 }
868 
869 static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
870 {
871 	unsigned int ofs = mv_scr_offset(sc_reg_in);
872 
873 	if (0xffffffffU != ofs)
874 		return readl(mv_ap_base(ap) + ofs);
875 	else
876 		return (u32) ofs;
877 }
878 
879 static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
880 {
881 	unsigned int ofs = mv_scr_offset(sc_reg_in);
882 
883 	if (0xffffffffU != ofs)
884 		writelfl(val, mv_ap_base(ap) + ofs);
885 }
886 
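/* Program the basic (non-NCQ) EDMA configuration for a port.  Each chip
 * generation (Gen I, II, IIE) needs slightly different settings; queuing
 * and NCQ bits are left disabled since the driver does not use them yet.
 */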
887 static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
888 {
889 	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
890 
891 	/* set up non-NCQ EDMA configuration */
892 	cfg &= ~(1 << 9);	/* disable equeue */
893 
894 	if (IS_GEN_I(hpriv)) {
895 		cfg &= ~0x1f;		/* clear queue depth */
896 		cfg |= (1 << 8);	/* enab config burst size mask */
897 	}
898 
899 	else if (IS_GEN_II(hpriv)) {
900 		cfg &= ~0x1f;		/* clear queue depth */
901 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
902 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
903 	}
904 
905 	else if (IS_GEN_IIE(hpriv)) {
906 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
907 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
908 		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
909 		cfg |= (1 << 18);	/* enab early completion */
910 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
911 		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
912 		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
913 	}
914 
915 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
916 }
917 
918 /**
919  *      mv_port_start - Port specific init/start routine.
920  *      @ap: ATA channel to manipulate
921  *
922  *      Allocate and point to DMA memory, init port private memory,
923  *      zero indices.
924  *
925  *      LOCKING:
926  *      Inherited from caller.
927  */
928 static int mv_port_start(struct ata_port *ap)
929 {
930 	struct device *dev = ap->host->dev;
931 	struct mv_host_priv *hpriv = ap->host->private_data;
932 	struct mv_port_priv *pp;
933 	void __iomem *port_mmio = mv_ap_base(ap);
934 	void *mem;
935 	dma_addr_t mem_dma;
936 	int rc;
937 
938 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
939 	if (!pp)
940 		return -ENOMEM;
941 
942 	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
943 				  GFP_KERNEL);
944 	if (!mem)
945 		return -ENOMEM;
946 	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
947 
948 	rc = ata_pad_alloc(ap, dev);
949 	if (rc)
950 		return rc;
951 
952 	/* First item in chunk of DMA memory:
953 	 * 32-slot command request table (CRQB), 32 bytes each in size
954 	 */
955 	pp->crqb = mem;
956 	pp->crqb_dma = mem_dma;
957 	mem += MV_CRQB_Q_SZ;
958 	mem_dma += MV_CRQB_Q_SZ;
959 
960 	/* Second item:
961 	 * 32-slot command response table (CRPB), 8 bytes each in size
962 	 */
963 	pp->crpb = mem;
964 	pp->crpb_dma = mem_dma;
965 	mem += MV_CRPB_Q_SZ;
966 	mem_dma += MV_CRPB_Q_SZ;
967 
968 	/* Third item:
969 	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
970 	 */
971 	pp->sg_tbl = mem;
972 	pp->sg_tbl_dma = mem_dma;
973 
974 	mv_edma_cfg(hpriv, port_mmio);
975 
976 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
977 	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
978 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
979 
980 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
981 		writelfl(pp->crqb_dma & 0xffffffff,
982 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
983 	else
984 		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
985 
986 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
987 
988 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
989 		writelfl(pp->crpb_dma & 0xffffffff,
990 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
991 	else
992 		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
993 
994 	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
995 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
996 
997 	/* Don't turn on EDMA here...do it before DMA commands only.  Else
998 	 * we'll be unable to send non-data, PIO, etc due to restricted access
999 	 * to shadow regs.
1000 	 */
1001 	ap->private_data = pp;
1002 	return 0;
1003 }
1004 
1005 /**
1006  *      mv_port_stop - Port specific cleanup/stop routine.
1007  *      @ap: ATA channel to manipulate
1008  *
1009  *      Stop DMA, cleanup port memory.
1010  *
1011  *      LOCKING:
1012  *      This routine uses the host lock to protect the DMA stop.
1013  */
1014 static void mv_port_stop(struct ata_port *ap)
1015 {
1016 	unsigned long flags;
1017 
1018 	spin_lock_irqsave(&ap->host->lock, flags);
1019 	mv_stop_dma(ap);
1020 	spin_unlock_irqrestore(&ap->host->lock, flags);
1021 }
1022 
1023 /**
1024  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1025  *      @qc: queued command whose SG list to source from
1026  *
1027  *      Populate the SG list and mark the last entry.
1028  *
1029  *      LOCKING:
1030  *      Inherited from caller.
1031  */
1032 static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
1033 {
1034 	struct mv_port_priv *pp = qc->ap->private_data;
1035 	unsigned int n_sg = 0;
1036 	struct scatterlist *sg;
1037 	struct mv_sg *mv_sg;
1038 
1039 	mv_sg = pp->sg_tbl;
1040 	ata_for_each_sg(sg, qc) {
1041 		dma_addr_t addr = sg_dma_address(sg);
1042 		u32 sg_len = sg_dma_len(sg);
1043 
1044 		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1045 		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1046 		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
1047 
1048 		if (ata_sg_is_last(sg, qc))
1049 			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1050 
1051 		mv_sg++;
1052 		n_sg++;
1053 	}
1054 
1055 	return n_sg;
1056 }
1057 
1058 static inline unsigned mv_inc_q_index(unsigned index)
1059 {
1060 	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
1061 }
1062 
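/* Pack one 16-bit CRQB command word: register value in bits 7:0, shadow
 * register address starting at bit 8, the "command" marker bits, and the
 * CRQB_CMD_LAST flag on the final word of a request.
 */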
1063 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1064 {
1065 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1066 		(last ? CRQB_CMD_LAST : 0);
1067 	*cmdw = cpu_to_le16(tmp);
1068 }
1069 
1070 /**
1071  *      mv_qc_prep - Host specific command preparation.
1072  *      @qc: queued command to prepare
1073  *
1074  *      This routine simply redirects to the general purpose routine
1075  *      if command is not DMA.  Else, it handles prep of the CRQB
1076  *      (command request block), does some sanity checking, and calls
1077  *      the SG load routine.
1078  *
1079  *      LOCKING:
1080  *      Inherited from caller.
1081  */
1082 static void mv_qc_prep(struct ata_queued_cmd *qc)
1083 {
1084 	struct ata_port *ap = qc->ap;
1085 	struct mv_port_priv *pp = ap->private_data;
1086 	__le16 *cw;
1087 	struct ata_taskfile *tf;
1088 	u16 flags = 0;
1089 	unsigned in_index;
1090 
1091  	if (ATA_PROT_DMA != qc->tf.protocol)
1092 		return;
1093 
1094 	/* Fill in command request block
1095 	 */
1096 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1097 		flags |= CRQB_FLAG_READ;
1098 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1099 	flags |= qc->tag << CRQB_TAG_SHIFT;
1100 
1101 	/* get current queue index from hardware */
1102 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1103 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1104 
1105 	pp->crqb[in_index].sg_addr =
1106 		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1107 	pp->crqb[in_index].sg_addr_hi =
1108 		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1109 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1110 
1111 	cw = &pp->crqb[in_index].ata_cmd[0];
1112 	tf = &qc->tf;
1113 
1114 	/* Sadly, the CRQB cannot accommodate all registers--there are
1115 	 * only 11 bytes...so we must pick and choose required
1116 	 * registers based on the command.  So, we drop feature and
1117 	 * hob_feature for [RW] DMA commands, but they are needed for
1118 	 * NCQ.  NCQ will drop hob_nsect.
1119 	 */
1120 	switch (tf->command) {
1121 	case ATA_CMD_READ:
1122 	case ATA_CMD_READ_EXT:
1123 	case ATA_CMD_WRITE:
1124 	case ATA_CMD_WRITE_EXT:
1125 	case ATA_CMD_WRITE_FUA_EXT:
1126 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1127 		break;
1128 #ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
1129 	case ATA_CMD_FPDMA_READ:
1130 	case ATA_CMD_FPDMA_WRITE:
1131 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1132 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1133 		break;
1134 #endif				/* FIXME: remove this line when NCQ added */
1135 	default:
1136 		/* The only other commands EDMA supports in non-queued and
1137 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1138 		 * of which are defined/used by Linux.  If we get here, this
1139 		 * driver needs work.
1140 		 *
1141 		 * FIXME: modify libata to give qc_prep a return value and
1142 		 * return error here.
1143 		 */
1144 		BUG_ON(tf->command);
1145 		break;
1146 	}
1147 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1148 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1149 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1150 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1151 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1152 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1153 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1154 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1155 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1156 
1157 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1158 		return;
1159 	mv_fill_sg(qc);
1160 }
1161 
1162 /**
1163  *      mv_qc_prep_iie - Host specific command preparation.
1164  *      @qc: queued command to prepare
1165  *
1166  *      This routine simply redirects to the general purpose routine
1167  *      if command is not DMA.  Else, it handles prep of the CRQB
1168  *      (command request block), does some sanity checking, and calls
1169  *      the SG load routine.
1170  *
1171  *      LOCKING:
1172  *      Inherited from caller.
1173  */
1174 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1175 {
1176 	struct ata_port *ap = qc->ap;
1177 	struct mv_port_priv *pp = ap->private_data;
1178 	struct mv_crqb_iie *crqb;
1179 	struct ata_taskfile *tf;
1180 	unsigned in_index;
1181 	u32 flags = 0;
1182 
1183  	if (ATA_PROT_DMA != qc->tf.protocol)
1184 		return;
1185 
1186 	/* Fill in Gen IIE command request block
1187 	 */
1188 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1189 		flags |= CRQB_FLAG_READ;
1190 
1191 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1192 	flags |= qc->tag << CRQB_TAG_SHIFT;
1193 
1194 	/* get current queue index from hardware */
1195 	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
1196 			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1197 
1198 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1199 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1200 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1201 	crqb->flags = cpu_to_le32(flags);
1202 
1203 	tf = &qc->tf;
1204 	crqb->ata_cmd[0] = cpu_to_le32(
1205 			(tf->command << 16) |
1206 			(tf->feature << 24)
1207 		);
1208 	crqb->ata_cmd[1] = cpu_to_le32(
1209 			(tf->lbal << 0) |
1210 			(tf->lbam << 8) |
1211 			(tf->lbah << 16) |
1212 			(tf->device << 24)
1213 		);
1214 	crqb->ata_cmd[2] = cpu_to_le32(
1215 			(tf->hob_lbal << 0) |
1216 			(tf->hob_lbam << 8) |
1217 			(tf->hob_lbah << 16) |
1218 			(tf->hob_feature << 24)
1219 		);
1220 	crqb->ata_cmd[3] = cpu_to_le32(
1221 			(tf->nsect << 0) |
1222 			(tf->hob_nsect << 8)
1223 		);
1224 
1225 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1226 		return;
1227 	mv_fill_sg(qc);
1228 }
1229 
1230 /**
1231  *      mv_qc_issue - Initiate a command to the host
1232  *      @qc: queued command to start
1233  *
1234  *      This routine simply redirects to the general purpose routine
1235  *      if command is not DMA.  Else, it sanity checks our local
1236  *      caches of the request producer/consumer indices then enables
1237  *      DMA and bumps the request producer index.
1238  *
1239  *      LOCKING:
1240  *      Inherited from caller.
1241  */
1242 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1243 {
1244 	void __iomem *port_mmio = mv_ap_base(qc->ap);
1245 	struct mv_port_priv *pp = qc->ap->private_data;
1246 	unsigned in_index;
1247 	u32 in_ptr;
1248 
1249 	if (ATA_PROT_DMA != qc->tf.protocol) {
1250 		/* We're about to send a non-EDMA capable command to the
1251 		 * port.  Turn off EDMA so there won't be problems accessing
1252 		 * shadow block, etc registers.
1253 		 */
1254 		mv_stop_dma(qc->ap);
1255 		return ata_qc_issue_prot(qc);
1256 	}
1257 
1258 	in_ptr   = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1259 	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1260 
1261 	/* until we do queuing, the queue should be empty at this point */
1262 	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1263 		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1264 
1265 	in_index = mv_inc_q_index(in_index);	/* now incr producer index */
1266 
1267 	mv_start_dma(port_mmio, pp);
1268 
1269 	/* and write the request in pointer to kick the EDMA to life */
1270 	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
1271 	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
1272 	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1273 
1274 	return 0;
1275 }
1276 
1277 /**
1278  *      mv_get_crpb_status - get status from most recently completed cmd
1279  *      @ap: ATA channel to manipulate
1280  *
1281  *      This routine is for use when the port is in DMA mode, when it
1282  *      will be using the CRPB (command response block) method of
1283  *      returning command completion information.  We check indices
1284  *      are good, grab status, and bump the response consumer index to
1285  *      prove that we're up to date.
1286  *
1287  *      LOCKING:
1288  *      Inherited from caller.
1289  */
1290 static u8 mv_get_crpb_status(struct ata_port *ap)
1291 {
1292 	void __iomem *port_mmio = mv_ap_base(ap);
1293 	struct mv_port_priv *pp = ap->private_data;
1294 	unsigned out_index;
1295 	u32 out_ptr;
1296 	u8 ata_status;
1297 
1298 	out_ptr   = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1299 	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1300 
1301 	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
1302 					>> CRPB_FLAG_STATUS_SHIFT;
1303 
1304 	/* increment our consumer index... */
1305 	out_index = mv_inc_q_index(out_index);
1306 
1307 	/* and, until we do NCQ, there should only be 1 CRPB waiting */
1308 	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1309 		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1310 
1311 	/* write out our inc'd consumer index so EDMA knows we're caught up */
1312 	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
1313 	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
1314 	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1315 
1316 	/* Return ATA status register for completed CRPB */
1317 	return ata_status;
1318 }
1319 
1320 /**
1321  *      mv_err_intr - Handle error interrupts on the port
1322  *      @ap: ATA channel to manipulate
1323  *      @reset_allowed: bool: 0 == don't trigger from reset here
1324  *
1325  *      In most cases, just clear the interrupt and move on.  However,
1326  *      some cases require an eDMA reset, which is done right before
1327  *      the COMRESET in mv_phy_reset().  The SERR case requires a
1328  *      clear of pending errors in the SATA SERROR register.  Finally,
1329  *      if the port disabled DMA, update our cached copy to match.
1330  *
1331  *      LOCKING:
1332  *      Inherited from caller.
1333  */
1334 static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1335 {
1336 	void __iomem *port_mmio = mv_ap_base(ap);
1337 	u32 edma_err_cause, serr = 0;
1338 
1339 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1340 
1341 	if (EDMA_ERR_SERR & edma_err_cause) {
1342 		sata_scr_read(ap, SCR_ERROR, &serr);
1343 		sata_scr_write_flush(ap, SCR_ERROR, serr);
1344 	}
1345 	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
1346 		struct mv_port_priv *pp	= ap->private_data;
1347 		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1348 	}
1349 	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
1350 		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
1351 
1352 	/* Clear EDMA now that SERR cleanup done */
1353 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1354 
1355 	/* check for fatal here and recover if needed */
1356 	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
1357 		mv_stop_and_reset(ap);
1358 }
1359 
1360 /**
1361  *      mv_host_intr - Handle all interrupts on the given host controller
1362  *      @host: host specific structure
1363  *      @relevant: port error bits relevant to this host controller
1364  *      @hc: which host controller we're to look at
1365  *
1366  *      Read then write clear the HC interrupt status then walk each
1367  *      port connected to the HC and see if it needs servicing.  Port
1368  *      success ints are reported in the HC interrupt status reg, the
1369  *      port error ints are reported in the higher level main
1370  *      interrupt status register and thus are passed in via the
1371  *      'relevant' argument.
1372  *
1373  *      LOCKING:
1374  *      Inherited from caller.
1375  */
1376 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1377 {
1378 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1379 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1380 	struct ata_queued_cmd *qc;
1381 	u32 hc_irq_cause;
1382 	int shift, port, port0, hard_port, handled;
1383 	unsigned int err_mask;
1384 
1385 	if (hc == 0)
1386 		port0 = 0;
1387 	else
1388 		port0 = MV_PORTS_PER_HC;
1389 
1390 	/* we'll need the HC success int register in most cases */
1391 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1392 	if (hc_irq_cause)
1393 		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1394 
1395 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1396 		hc,relevant,hc_irq_cause);
1397 
1398 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1399 		u8 ata_status = 0;
1400 		struct ata_port *ap = host->ports[port];
1401 		struct mv_port_priv *pp = ap->private_data;
1402 
1403 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
1404 		handled = 0;	/* ensure ata_status is set if handled++ */
1405 
1406 		/* Note that DEV_IRQ might happen spuriously during EDMA,
1407 		 * and should be ignored in such cases.
1408 		 * The cause of this is still under investigation.
1409 		 */
1410 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1411 			/* EDMA: check for response queue interrupt */
1412 			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
1413 				ata_status = mv_get_crpb_status(ap);
1414 				handled = 1;
1415 			}
1416 		} else {
1417 			/* PIO: check for device (drive) interrupt */
1418 			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1419 				ata_status = readb(ap->ioaddr.status_addr);
1420 				handled = 1;
1421 				/* ignore spurious intr if drive still BUSY */
1422 				if (ata_status & ATA_BUSY) {
1423 					ata_status = 0;
1424 					handled = 0;
1425 				}
1426 			}
1427 		}
1428 
1429 		if (ap && (ap->flags & ATA_FLAG_DISABLED))
1430 			continue;
1431 
1432 		err_mask = ac_err_mask(ata_status);
1433 
1434 		shift = port << 1;		/* (port * 2) */
1435 		if (port >= MV_PORTS_PER_HC) {
1436 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1437 		}
1438 		if ((PORT0_ERR << shift) & relevant) {
1439 			mv_err_intr(ap, 1);
1440 			err_mask |= AC_ERR_OTHER;
1441 			handled = 1;
1442 		}
1443 
1444 		if (handled) {
1445 			qc = ata_qc_from_tag(ap, ap->active_tag);
1446 			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
1447 				VPRINTK("port %u IRQ found for qc, "
1448 					"ata_status 0x%x\n", port,ata_status);
1449 				/* mark qc status appropriately */
1450 				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
1451 					qc->err_mask |= err_mask;
1452 					ata_qc_complete(qc);
1453 				}
1454 			}
1455 		}
1456 	}
1457 	VPRINTK("EXIT\n");
1458 }
1459 
1460 /**
1461  *      mv_interrupt - Main interrupt handler
1462  *      @irq: unused
1463  *      @dev_instance: private data; in this case the host structure
1465  *
1466  *      Read the read only register to determine if any host
1467  *      controllers have pending interrupts.  If so, call lower level
1468  *      routine to handle.  Also check for PCI errors which are only
1469  *      reported here.
1470  *
1471  *      LOCKING:
1472  *      This routine holds the host lock while processing pending
1473  *      interrupts.
1474  */
1475 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1476 {
1477 	struct ata_host *host = dev_instance;
1478 	unsigned int hc, handled = 0, n_hcs;
1479 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1480 	struct mv_host_priv *hpriv;
1481 	u32 irq_stat;
1482 
1483 	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1484 
1485 	/* check the cases where we either have nothing pending or have read
1486 	 * a bogus register value which can indicate HW removal or PCI fault
1487 	 */
1488 	if (!irq_stat || (0xffffffffU == irq_stat))
1489 		return IRQ_NONE;
1490 
1491 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1492 	spin_lock(&host->lock);
1493 
1494 	for (hc = 0; hc < n_hcs; hc++) {
1495 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1496 		if (relevant) {
1497 			mv_host_intr(host, relevant, hc);
1498 			handled++;
1499 		}
1500 	}
1501 
1502 	hpriv = host->private_data;
1503 	if (IS_60XX(hpriv)) {
1504 		/* deal with the interrupt coalescing bits */
1505 		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
1506 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
1507 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
1508 			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
1509 		}
1510 	}
1511 
1512 	if (PCI_ERR & irq_stat) {
1513 		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
1514 		       readl(mmio + PCI_IRQ_CAUSE_OFS));
1515 
1516 		DPRINTK("All regs @ PCI error\n");
1517 		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1518 
1519 		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1520 		handled++;
1521 	}
1522 	spin_unlock(&host->lock);
1523 
1524 	return IRQ_RETVAL(handled);
1525 }
1526 
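/* On Gen I (50xx) parts the per-port PHY/SCR block does not live with the
 * port registers; it sits in the host controller's register space at
 * hc_base + (hardport + 1) * 0x100.
 */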
1527 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1528 {
1529 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1530 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1531 
1532 	return hc_mmio + ofs;
1533 }
1534 
1535 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1536 {
1537 	unsigned int ofs;
1538 
1539 	switch (sc_reg_in) {
1540 	case SCR_STATUS:
1541 	case SCR_ERROR:
1542 	case SCR_CONTROL:
1543 		ofs = sc_reg_in * sizeof(u32);
1544 		break;
1545 	default:
1546 		ofs = 0xffffffffU;
1547 		break;
1548 	}
1549 	return ofs;
1550 }
1551 
1552 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1553 {
1554 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1555 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1556 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1557 
1558 	if (ofs != 0xffffffffU)
1559 		return readl(addr + ofs);
1560 	else
1561 		return (u32) ofs;
1562 }
1563 
1564 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1565 {
1566 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1567 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1568 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1569 
1570 	if (ofs != 0xffffffffU)
1571 		writelfl(val, addr + ofs);
1572 }
1573 
1574 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1575 {
1576 	u8 rev_id;
1577 	int early_5080;
1578 
1579 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
1580 
1581 	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);
1582 
1583 	if (!early_5080) {
1584 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1585 		tmp |= (1 << 0);
1586 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1587 	}
1588 
1589 	mv_reset_pci_bus(pdev, mmio);
1590 }
1591 
1592 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1593 {
1594 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1595 }
1596 
1597 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1598 			   void __iomem *mmio)
1599 {
1600 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1601 	u32 tmp;
1602 
1603 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1604 
1605 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1606 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1607 }
1608 
1609 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1610 {
1611 	u32 tmp;
1612 
1613 	writel(0, mmio + MV_GPIO_PORT_CTL);
1614 
1615 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1616 
1617 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1618 	tmp |= ~(1 << 0);
1619 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1620 }
1621 
1622 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1623 			   unsigned int port)
1624 {
1625 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1626 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1627 	u32 tmp;
1628 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1629 
1630 	if (fix_apm_sq) {
1631 		tmp = readl(phy_mmio + MV5_LT_MODE);
1632 		tmp |= (1 << 19);
1633 		writel(tmp, phy_mmio + MV5_LT_MODE);
1634 
1635 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1636 		tmp &= ~0x3;
1637 		tmp |= 0x1;
1638 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1639 	}
1640 
1641 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1642 	tmp &= ~mask;
1643 	tmp |= hpriv->signal[port].pre;
1644 	tmp |= hpriv->signal[port].amps;
1645 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1646 }
1647 
1648 
1649 #undef ZERO
1650 #define ZERO(reg) writel(0, port_mmio + (reg))
1651 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1652 			     unsigned int port)
1653 {
1654 	void __iomem *port_mmio = mv_port_base(mmio, port);
1655 
1656 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1657 
1658 	mv_channel_reset(hpriv, mmio, port);
1659 
1660 	ZERO(0x028);	/* command */
1661 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1662 	ZERO(0x004);	/* timer */
1663 	ZERO(0x008);	/* irq err cause */
1664 	ZERO(0x00c);	/* irq err mask */
1665 	ZERO(0x010);	/* rq bah */
1666 	ZERO(0x014);	/* rq inp */
1667 	ZERO(0x018);	/* rq outp */
1668 	ZERO(0x01c);	/* respq bah */
1669 	ZERO(0x024);	/* respq outp */
1670 	ZERO(0x020);	/* respq inp */
1671 	ZERO(0x02c);	/* test control */
1672 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1673 }
1674 #undef ZERO
1675 
1676 #define ZERO(reg) writel(0, hc_mmio + (reg))
1677 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1678 			unsigned int hc)
1679 {
1680 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1681 	u32 tmp;
1682 
1683 	ZERO(0x00c);
1684 	ZERO(0x010);
1685 	ZERO(0x014);
1686 	ZERO(0x018);
1687 
1688 	tmp = readl(hc_mmio + 0x20);
1689 	tmp &= 0x1c1c1c1c;
1690 	tmp |= 0x03030303;
1691 	writel(tmp, hc_mmio + 0x20);
1692 }
1693 #undef ZERO
1694 
1695 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1696 			unsigned int n_hc)
1697 {
1698 	unsigned int hc, port;
1699 
1700 	for (hc = 0; hc < n_hc; hc++) {
1701 		for (port = 0; port < MV_PORTS_PER_HC; port++)
1702 			mv5_reset_hc_port(hpriv, mmio,
1703 					  (hc * MV_PORTS_PER_HC) + port);
1704 
1705 		mv5_reset_one_hc(hpriv, mmio, hc);
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 #undef ZERO
1712 #define ZERO(reg) writel(0, mmio + (reg))
1713 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1714 {
1715 	u32 tmp;
1716 
1717 	tmp = readl(mmio + MV_PCI_MODE);
1718 	tmp &= 0xff00ffff;
1719 	writel(tmp, mmio + MV_PCI_MODE);
1720 
1721 	ZERO(MV_PCI_DISC_TIMER);
1722 	ZERO(MV_PCI_MSI_TRIGGER);
1723 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1724 	ZERO(HC_MAIN_IRQ_MASK_OFS);
1725 	ZERO(MV_PCI_SERR_MASK);
1726 	ZERO(PCI_IRQ_CAUSE_OFS);
1727 	ZERO(PCI_IRQ_MASK_OFS);
1728 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
1729 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1730 	ZERO(MV_PCI_ERR_ATTRIBUTE);
1731 	ZERO(MV_PCI_ERR_COMMAND);
1732 }
1733 #undef ZERO
1734 
1735 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1736 {
1737 	u32 tmp;
1738 
1739 	mv5_reset_flash(hpriv, mmio);
1740 
1741 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
1742 	tmp &= 0x3;
1743 	tmp |= (1 << 5) | (1 << 6);
1744 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
1745 }
1746 
1747 /**
1748  *      mv6_reset_hc - Perform the 6xxx global soft reset
1749  *      @mmio: base address of the HBA
1750  *
1751  *      This routine only applies to 6xxx parts.
1752  *
1753  *      LOCKING:
1754  *      Inherited from caller.
1755  */
1756 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1757 			unsigned int n_hc)
1758 {
1759 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1760 	int i, rc = 0;
1761 	u32 t;
1762 
1763 	/* Following procedure defined in PCI "main command and status
1764 	 * register" table.
1765 	 */
1766 	t = readl(reg);
1767 	writel(t | STOP_PCI_MASTER, reg);
1768 
1769 	for (i = 0; i < 1000; i++) {
1770 		udelay(1);
1771 		t = readl(reg);
1772 		if (PCI_MASTER_EMPTY & t) {
1773 			break;
1774 		}
1775 	}
1776 	if (!(PCI_MASTER_EMPTY & t)) {
1777 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
1778 		rc = 1;
1779 		goto done;
1780 	}
1781 
1782 	/* set reset */
1783 	i = 5;
1784 	do {
1785 		writel(t | GLOB_SFT_RST, reg);
1786 		t = readl(reg);
1787 		udelay(1);
1788 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
1789 
1790 	if (!(GLOB_SFT_RST & t)) {
1791 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
1792 		rc = 1;
1793 		goto done;
1794 	}
1795 
1796 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
1797 	i = 5;
1798 	do {
1799 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
1800 		t = readl(reg);
1801 		udelay(1);
1802 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
1803 
1804 	if (GLOB_SFT_RST & t) {
1805 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
1806 		rc = 1;
1807 	}
1808 done:
1809 	return rc;
1810 }
1811 
1812 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
1813 			   void __iomem *mmio)
1814 {
1815 	void __iomem *port_mmio;
1816 	u32 tmp;
1817 
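	/* If reset-config bit 0 is clear, assume no per-port PHY settings were
	 * preserved and fall back to default amplitude/pre-emphasis values;
	 * otherwise read the current settings back from PHY_MODE2.
	 */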
1818 	tmp = readl(mmio + MV_RESET_CFG);
1819 	if ((tmp & (1 << 0)) == 0) {
1820 		hpriv->signal[idx].amps = 0x7 << 8;
1821 		hpriv->signal[idx].pre = 0x1 << 5;
1822 		return;
1823 	}
1824 
1825 	port_mmio = mv_port_base(mmio, idx);
1826 	tmp = readl(port_mmio + PHY_MODE2);
1827 
1828 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
1829 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
1830 }
1831 
1832 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1833 {
1834 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
1835 }
1836 
1837 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1838 			   unsigned int port)
1839 {
1840 	void __iomem *port_mmio = mv_port_base(mmio, port);
1841 
1842 	u32 hp_flags = hpriv->hp_flags;
1843 	int fix_phy_mode2 =
1844 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1845 	int fix_phy_mode4 =
1846 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
1847 	u32 m2, tmp;
1848 
1849 	if (fix_phy_mode2) {
1850 		m2 = readl(port_mmio + PHY_MODE2);
1851 		m2 &= ~(1 << 16);
1852 		m2 |= (1 << 31);
1853 		writel(m2, port_mmio + PHY_MODE2);
1854 
1855 		udelay(200);
1856 
1857 		m2 = readl(port_mmio + PHY_MODE2);
1858 		m2 &= ~((1 << 16) | (1 << 31));
1859 		writel(m2, port_mmio + PHY_MODE2);
1860 
1861 		udelay(200);
1862 	}
1863 
1864 	/* who knows what this magic does */
1865 	tmp = readl(port_mmio + PHY_MODE3);
1866 	tmp &= ~0x7F800000;
1867 	tmp |= 0x2A800000;
1868 	writel(tmp, port_mmio + PHY_MODE3);
1869 
1870 	if (fix_phy_mode4) {
1871 		u32 m4;
1872 
1873 		m4 = readl(port_mmio + PHY_MODE4);
1874 
1875 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1876 			tmp = readl(port_mmio + 0x310);
1877 
1878 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
1879 
1880 		writel(m4, port_mmio + PHY_MODE4);
1881 
1882 		if (hp_flags & MV_HP_ERRATA_60X1B2)
1883 			writel(tmp, port_mmio + 0x310);
1884 	}
1885 
1886 	/* Revert values of pre-emphasis and signal amps to the saved ones */
1887 	m2 = readl(port_mmio + PHY_MODE2);
1888 
1889 	m2 &= ~MV_M2_PREAMP_MASK;
1890 	m2 |= hpriv->signal[port].amps;
1891 	m2 |= hpriv->signal[port].pre;
1892 	m2 &= ~(1 << 16);
1893 
1894 	/* according to mvSata 3.6.1, some IIE values are fixed */
1895 	if (IS_GEN_IIE(hpriv)) {
1896 		m2 &= ~0xC30FF01F;
1897 		m2 |= 0x0000900F;
1898 	}
1899 
1900 	writel(m2, port_mmio + PHY_MODE2);
1901 }
1902 
1903 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 			     unsigned int port_no)
1905 {
1906 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
1907 
1908 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
1909 
1910 	if (IS_60XX(hpriv)) {
1911 		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
1912 		ifctl |= (1 << 7);		/* enable gen2i speed */
1913 		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
1914 		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
1915 	}
1916 
1917 	udelay(25);		/* allow reset propagation */
1918 
1919 	/* Spec never mentions clearing the bit.  Marvell's driver does
1920 	 * clear the bit, however.
1921 	 */
1922 	writelfl(0, port_mmio + EDMA_CMD_OFS);
1923 
1924 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
1925 
1926 	if (IS_50XX(hpriv))
1927 		mdelay(1);
1928 }
1929 
1930 static void mv_stop_and_reset(struct ata_port *ap)
1931 {
1932 	struct mv_host_priv *hpriv = ap->host->private_data;
1933 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1934 
1935 	mv_stop_dma(ap);
1936 
1937 	mv_channel_reset(hpriv, mmio, ap->port_no);
1938 
1939 	__mv_phy_reset(ap, 0);
1940 }
1941 
1942 static inline void __msleep(unsigned int msec, int can_sleep)
1943 {
1944 	if (can_sleep)
1945 		msleep(msec);
1946 	else
1947 		mdelay(msec);
1948 }
1949 
1950 /**
1951  *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @can_sleep: nonzero if the caller may sleep; when zero, delays are
 *      busy-waits (mdelay) so the routine remains interrupt-safe
1953  *
1954  *      Part of this is taken from __sata_phy_reset and modified to
1955  *      not sleep since this routine gets called from interrupt level.
1956  *
1957  *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level (with @can_sleep == 0), i.e. it does not sleep.
1960  */
1961 static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
1962 {
1963 	struct mv_port_priv *pp	= ap->private_data;
1964 	struct mv_host_priv *hpriv = ap->host->private_data;
1965 	void __iomem *port_mmio = mv_ap_base(ap);
1966 	struct ata_taskfile tf;
1967 	struct ata_device *dev = &ap->device[0];
1968 	unsigned long timeout;
1969 	int retry = 5;
1970 	u32 sstatus;
1971 
1972 	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
1973 
1974 	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1975 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
1976 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
1977 
1978 	/* Issue COMRESET via SControl */
1979 comreset_retry:
1980 	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1981 	__msleep(1, can_sleep);
1982 
1983 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1984 	__msleep(20, can_sleep);
1985 
1986 	timeout = jiffies + msecs_to_jiffies(200);
1987 	do {
1988 		sata_scr_read(ap, SCR_STATUS, &sstatus);
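		/* SStatus DET field: 0 = no device detected, 3 = device
		 * present and PHY communication established.
		 */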
1989 		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
1990 			break;
1991 
1992 		__msleep(1, can_sleep);
1993 	} while (time_before(jiffies, timeout));
1994 
	/* Work around errata: on 60XX parts, retry the COMRESET unless the
	 * link is either down (SStatus 0x0) or fully up at Gen1 (0x113) or
	 * Gen2 (0x123).
	 */
1996 	if (IS_60XX(hpriv) &&
1997 	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
1998 	    (retry-- > 0))
1999 		goto comreset_retry;
2000 
2001 	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2002 		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
2003 		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));
2004 
2005 	if (ata_port_online(ap)) {
2006 		ata_port_probe(ap);
2007 	} else {
2008 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2009 		ata_port_printk(ap, KERN_INFO,
2010 				"no device found (phy stat %08x)\n", sstatus);
2011 		ata_port_disable(ap);
2012 		return;
2013 	}
2014 
	/* Even after SStatus reflects that the device is ready, it seems to
	 * take a while for the link to be fully established (and thus for
	 * Status to no longer read 0x80/0x7F), so poll a bit for that here.
	 */
2020 	retry = 20;
2021 	while (1) {
2022 		u8 drv_stat = ata_check_status(ap);
2023 		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2024 			break;
2025 		__msleep(500, can_sleep);
2026 		if (retry-- <= 0)
2027 			break;
2028 	}
2029 
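	/* Read the device signature from the shadow taskfile registers so
	 * ata_dev_classify() can tell what kind of device (if any) responded.
	 */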
2030 	tf.lbah = readb(ap->ioaddr.lbah_addr);
2031 	tf.lbam = readb(ap->ioaddr.lbam_addr);
2032 	tf.lbal = readb(ap->ioaddr.lbal_addr);
2033 	tf.nsect = readb(ap->ioaddr.nsect_addr);
2034 
2035 	dev->class = ata_dev_classify(&tf);
2036 	if (!ata_dev_enabled(dev)) {
2037 		VPRINTK("Port disabled post-sig: No device present.\n");
2038 		ata_port_disable(ap);
2039 	}
2040 
2041 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2042 
2043 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2044 
2045 	VPRINTK("EXIT\n");
2046 }
2047 
2048 static void mv_phy_reset(struct ata_port *ap)
2049 {
2050 	__mv_phy_reset(ap, 1);
2051 }
2052 
2053 /**
2054  *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
2055  *      @ap: ATA channel to manipulate
2056  *
2057  *      Intent is to clear all pending error conditions, reset the
2058  *      chip/bus, fail the command, and move on.
2059  *
2060  *      LOCKING:
 *      This routine holds the host lock while handling the error and
 *      resetting the channel, then fails the timed-out command.
2062  */
2063 static void mv_eng_timeout(struct ata_port *ap)
2064 {
2065 	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2066 	struct ata_queued_cmd *qc;
2067 	unsigned long flags;
2068 
2069 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2070 	DPRINTK("All regs @ start of eng_timeout\n");
2071 	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
2072 
2073 	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2075 	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
2076 
2077 	spin_lock_irqsave(&ap->host->lock, flags);
2078 	mv_err_intr(ap, 0);
2079 	mv_stop_and_reset(ap);
2080 	spin_unlock_irqrestore(&ap->host->lock, flags);
2081 
2082 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
2083 	if (qc->flags & ATA_QCFLAG_ACTIVE) {
2084 		qc->err_mask |= AC_ERR_TIMEOUT;
2085 		ata_eh_qc_complete(qc);
2086 	}
2087 }
2088 
2089 /**
2090  *      mv_port_init - Perform some early initialization on a single port.
2091  *      @port: libata data structure storing shadow register addresses
2092  *      @port_mmio: base address of the port
2093  *
2094  *      Initialize shadow register mmio addresses, clear outstanding
2095  *      interrupts on the port, and unmask interrupts for the future
2096  *      start of the port.
2097  *
2098  *      LOCKING:
2099  *      Inherited from caller.
2100  */
2101 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2102 {
2103 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2104 	unsigned serr_ofs;
2105 
	/* PIO related setup: the shadow taskfile registers are laid out as
	 * consecutive u32 slots in the port's shadow block.
	 */
2108 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2109 	port->error_addr =
2110 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2111 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2112 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2113 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2114 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2115 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2116 	port->status_addr =
2117 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2118 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2119 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2120 
2121 	/* unused: */
2122 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2123 
2124 	/* Clear any currently outstanding port interrupt conditions */
2125 	serr_ofs = mv_scr_offset(SCR_ERROR);
2126 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2127 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2128 
2129 	/* unmask all EDMA error interrupts */
2130 	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2131 
2132 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2133 		readl(port_mmio + EDMA_CFG_OFS),
2134 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2135 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2136 }
2137 
2138 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2139 {
2140 	struct pci_dev *pdev = to_pci_dev(host->dev);
2141 	struct mv_host_priv *hpriv = host->private_data;
2142 	u8 rev_id;
2143 	u32 hp_flags = hpriv->hp_flags;
2144 
2145 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2146 
	switch (board_idx) {
2148 	case chip_5080:
2149 		hpriv->ops = &mv5xxx_ops;
2150 		hp_flags |= MV_HP_50XX;
2151 
2152 		switch (rev_id) {
2153 		case 0x1:
2154 			hp_flags |= MV_HP_ERRATA_50XXB0;
2155 			break;
2156 		case 0x3:
2157 			hp_flags |= MV_HP_ERRATA_50XXB2;
2158 			break;
2159 		default:
2160 			dev_printk(KERN_WARNING, &pdev->dev,
2161 			   "Applying 50XXB2 workarounds to unknown rev\n");
2162 			hp_flags |= MV_HP_ERRATA_50XXB2;
2163 			break;
2164 		}
2165 		break;
2166 
2167 	case chip_504x:
2168 	case chip_508x:
2169 		hpriv->ops = &mv5xxx_ops;
2170 		hp_flags |= MV_HP_50XX;
2171 
2172 		switch (rev_id) {
2173 		case 0x0:
2174 			hp_flags |= MV_HP_ERRATA_50XXB0;
2175 			break;
2176 		case 0x3:
2177 			hp_flags |= MV_HP_ERRATA_50XXB2;
2178 			break;
2179 		default:
2180 			dev_printk(KERN_WARNING, &pdev->dev,
2181 			   "Applying B2 workarounds to unknown rev\n");
2182 			hp_flags |= MV_HP_ERRATA_50XXB2;
2183 			break;
2184 		}
2185 		break;
2186 
2187 	case chip_604x:
2188 	case chip_608x:
2189 		hpriv->ops = &mv6xxx_ops;
2190 
2191 		switch (rev_id) {
2192 		case 0x7:
2193 			hp_flags |= MV_HP_ERRATA_60X1B2;
2194 			break;
2195 		case 0x9:
2196 			hp_flags |= MV_HP_ERRATA_60X1C0;
2197 			break;
2198 		default:
2199 			dev_printk(KERN_WARNING, &pdev->dev,
2200 				   "Applying B2 workarounds to unknown rev\n");
2201 			hp_flags |= MV_HP_ERRATA_60X1B2;
2202 			break;
2203 		}
2204 		break;
2205 
2206 	case chip_7042:
2207 	case chip_6042:
2208 		hpriv->ops = &mv6xxx_ops;
2209 
2210 		hp_flags |= MV_HP_GEN_IIE;
2211 
2212 		switch (rev_id) {
2213 		case 0x0:
2214 			hp_flags |= MV_HP_ERRATA_XX42A0;
2215 			break;
2216 		case 0x1:
2217 			hp_flags |= MV_HP_ERRATA_60X1C0;
2218 			break;
2219 		default:
2220 			dev_printk(KERN_WARNING, &pdev->dev,
2221 			   "Applying 60X1C0 workarounds to unknown rev\n");
2222 			hp_flags |= MV_HP_ERRATA_60X1C0;
2223 			break;
2224 		}
2225 		break;
2226 
2227 	default:
2228 		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2229 		return 1;
2230 	}
2231 
2232 	hpriv->hp_flags = hp_flags;
2233 
2234 	return 0;
2235 }
2236 
2237 /**
2238  *      mv_init_host - Perform some early initialization of the host.
2239  *	@host: ATA host to initialize
2240  *      @board_idx: controller index
2241  *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask the relevant host interrupts.
2244  *
2245  *      LOCKING:
2246  *      Inherited from caller.
2247  */
2248 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2249 {
2250 	int rc = 0, n_hc, port, hc;
2251 	struct pci_dev *pdev = to_pci_dev(host->dev);
2252 	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2253 	struct mv_host_priv *hpriv = host->private_data;
2254 
2255 	/* global interrupt mask */
2256 	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2257 
2258 	rc = mv_chip_id(host, board_idx);
2259 	if (rc)
2260 		goto done;
2261 
2262 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2263 
2264 	for (port = 0; port < host->n_ports; port++)
2265 		hpriv->ops->read_preamp(hpriv, port, mmio);
2266 
2267 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2268 	if (rc)
2269 		goto done;
2270 
2271 	hpriv->ops->reset_flash(hpriv, mmio);
2272 	hpriv->ops->reset_bus(pdev, mmio);
2273 	hpriv->ops->enable_leds(hpriv, mmio);
2274 
2275 	for (port = 0; port < host->n_ports; port++) {
2276 		if (IS_60XX(hpriv)) {
2277 			void __iomem *port_mmio = mv_port_base(mmio, port);
2278 
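			/* Same gen2i/interface-control setup that
			 * mv_channel_reset() applies to 60XX parts.
			 */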
2279 			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2280 			ifctl |= (1 << 7);		/* enable gen2i speed */
2281 			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2282 			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2283 		}
2284 
2285 		hpriv->ops->phy_errata(hpriv, mmio, port);
2286 	}
2287 
2288 	for (port = 0; port < host->n_ports; port++) {
2289 		void __iomem *port_mmio = mv_port_base(mmio, port);
2290 		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2291 	}
2292 
2293 	for (hc = 0; hc < n_hc; hc++) {
2294 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2295 
2296 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2297 			"(before clear)=0x%08x\n", hc,
2298 			readl(hc_mmio + HC_CFG_OFS),
2299 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2300 
2301 		/* Clear any currently outstanding hc interrupt conditions */
2302 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2303 	}
2304 
2305 	/* Clear any currently outstanding host interrupt conditions */
2306 	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2307 
2308 	/* and unmask interrupt generation for host regs */
2309 	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2310 
2311 	if (IS_50XX(hpriv))
2312 		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2313 	else
2314 		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2315 
2316 	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2317 		"PCI int cause/mask=0x%08x/0x%08x\n",
2318 		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2319 		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2320 		readl(mmio + PCI_IRQ_CAUSE_OFS),
2321 		readl(mmio + PCI_IRQ_MASK_OFS));
2322 
2323 done:
2324 	return rc;
2325 }
2326 
2327 /**
2328  *      mv_print_info - Dump key info to kernel log for perusal.
2329  *      @host: ATA host to print info about
2330  *
2331  *      FIXME: complete this.
2332  *
2333  *      LOCKING:
2334  *      Inherited from caller.
2335  */
2336 static void mv_print_info(struct ata_host *host)
2337 {
2338 	struct pci_dev *pdev = to_pci_dev(host->dev);
2339 	struct mv_host_priv *hpriv = host->private_data;
2340 	u8 rev_id, scc;
2341 	const char *scc_s;
2342 
	/* Use this to determine the HW stepping of the chip so we know
	 * which errata to work around.
	 */
2346 	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
2347 
2348 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2349 	if (scc == 0)
2350 		scc_s = "SCSI";
2351 	else if (scc == 0x01)
2352 		scc_s = "RAID";
2353 	else
2354 		scc_s = "unknown";
2355 
2356 	dev_printk(KERN_INFO, &pdev->dev,
2357 	       "%u slots %u ports %s mode IRQ via %s\n",
2358 	       (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2359 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2360 }
2361 
2362 /**
2363  *      mv_init_one - handle a positive probe of a Marvell host
2364  *      @pdev: PCI device found
2365  *      @ent: PCI device ID entry for the matched host
2366  *
2367  *      LOCKING:
2368  *      Inherited from caller.
2369  */
2370 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2371 {
	static int printed_version;
2373 	unsigned int board_idx = (unsigned int)ent->driver_data;
2374 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2375 	struct ata_host *host;
2376 	struct mv_host_priv *hpriv;
2377 	int n_ports, rc;
2378 
2379 	if (!printed_version++)
2380 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2381 
2382 	/* allocate host */
2383 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2384 
2385 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2386 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2387 	if (!host || !hpriv)
2388 		return -ENOMEM;
2389 	host->private_data = hpriv;
2390 
2391 	/* acquire resources */
2392 	rc = pcim_enable_device(pdev);
2393 	if (rc)
2394 		return rc;
2395 
2396 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2397 	if (rc == -EBUSY)
2398 		pcim_pin_device(pdev);
2399 	if (rc)
2400 		return rc;
2401 	host->iomap = pcim_iomap_table(pdev);
2402 
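	/* Configure the DMA mask via the driver's pci_go_64() helper
	 * (64-bit where supported, assumed to fall back to 32-bit).
	 */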
2403 	rc = pci_go_64(pdev);
2404 	if (rc)
2405 		return rc;
2406 
2407 	/* initialize adapter */
2408 	rc = mv_init_host(host, board_idx);
2409 	if (rc)
2410 		return rc;
2411 
	/* Enable interrupts: use MSI when requested and available, otherwise
	 * fall back to legacy INTx.
	 */
2413 	if (msi && pci_enable_msi(pdev))
2414 		pci_intx(pdev, 1);
2415 
2416 	mv_dump_pci_cfg(pdev, 0x68);
2417 	mv_print_info(host);
2418 
2419 	pci_set_master(pdev);
2420 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2421 				 &mv_sht);
2422 }
2423 
2424 static int __init mv_init(void)
2425 {
2426 	return pci_register_driver(&mv_pci_driver);
2427 }
2428 
2429 static void __exit mv_exit(void)
2430 {
2431 	pci_unregister_driver(&mv_pci_driver);
2432 }
2433 
2434 MODULE_AUTHOR("Brett Russ");
2435 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2436 MODULE_LICENSE("GPL");
2437 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2438 MODULE_VERSION(DRV_VERSION);
2439 
2440 module_param(msi, int, 0444);
2441 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2442 
2443 module_init(mv_init);
2444 module_exit(mv_exit);
2445