xref: /openbmc/linux/drivers/ata/sata_mv.c (revision a3718c1f230240361ed92d3e53342df0ff7efa8c)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2008: Marvell Corporation, all rights reserved.
5  * Copyright 2005: EMC Corporation, all rights reserved.
6  * Copyright 2005 Red Hat, Inc.  All rights reserved.
7  *
8  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; version 2 of the License.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  *
23  */
24 
25 /*
26   sata_mv TODO list:
27 
28   1) Needs a full errata audit for all chipsets.  I implemented most
29   of the errata workarounds found in the Marvell vendor driver, but
30   I distinctly remember that a couple of workarounds (one related to PCI-X)
31   are still needed.
32 
33   2) Improve/fix IRQ and error handling sequences.
34 
35   3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 
37   4) Think about TCQ support here, and for libata in general
38   with controllers that support it via host-queuing hardware
39   (a software-only implementation could be a nightmare).
40 
41   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 
43   6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
44 
45   7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
46 
47   8) Develop a low-power-consumption strategy, and implement it.
48 
49   9) [Experiment, low priority] See if ATAPI can be supported using
50   "unknown FIS" or "vendor-specific FIS" support, or something creative
51   like that.
52 
53   10) [Experiment, low priority] Investigate interrupt coalescing.
54   Quite often, especially with PCI Message Signalled Interrupts (MSI),
55   the overhead reduced by interrupt mitigation is not worth the
56   latency cost.
57 
58   11) [Experiment, Marvell value added] Is it possible to use target
59   mode to cross-connect two Linux boxes with Marvell cards?  If so,
60   creating LibATA target mode support would be very interesting.
61 
62   Target mode, for those without docs, is the ability to directly
63   connect two SATA controllers.
64 
65 */
66 
67 #include <linux/kernel.h>
68 #include <linux/module.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/blkdev.h>
72 #include <linux/delay.h>
73 #include <linux/interrupt.h>
74 #include <linux/dmapool.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <linux/platform_device.h>
78 #include <linux/ata_platform.h>
79 #include <linux/mbus.h>
80 #include <scsi/scsi_host.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_device.h>
83 #include <linux/libata.h>
84 
85 #define DRV_NAME	"sata_mv"
86 #define DRV_VERSION	"1.20"
87 
88 enum {
89 	/* BARs are enumerated in terms of pci_resource_start() */
90 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
91 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
92 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
93 
94 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
95 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
96 
97 	MV_PCI_REG_BASE		= 0,
98 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
99 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
100 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
101 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
102 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
103 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
104 
105 	MV_SATAHC0_REG_BASE	= 0x20000,
106 	MV_FLASH_CTL		= 0x1046c,
107 	MV_GPIO_PORT_CTL	= 0x104f0,
108 	MV_RESET_CFG		= 0x180d8,
109 
110 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
111 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
112 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
113 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
114 
115 	MV_MAX_Q_DEPTH		= 32,
116 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
117 
118 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 	 * CRPB needs alignment on a 256B boundary. Size == 256B
120 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 	 */
122 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
123 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
124 	MV_MAX_SG_CT		= 256,
125 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
126 
127 	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
128 	MV_PORT_HC_SHIFT	= 2,
129 	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
130 	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
131 	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
132 
133 	/* Host Flags */
134 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
135 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
136 	/* SoC integrated controllers, no PCI interface */
137 	MV_FLAG_SOC		= (1 << 28),
138 
139 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
140 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 				  ATA_FLAG_PIO_POLLING,
142 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
143 
144 	CRQB_FLAG_READ		= (1 << 0),
145 	CRQB_TAG_SHIFT		= 1,
146 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
147 	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
148 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
149 	CRQB_CMD_ADDR_SHIFT	= 8,
150 	CRQB_CMD_CS		= (0x2 << 11),
151 	CRQB_CMD_LAST		= (1 << 15),
152 
153 	CRPB_FLAG_STATUS_SHIFT	= 8,
154 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
155 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
156 
157 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
158 
159 	/* PCI interface registers */
160 
161 	PCI_COMMAND_OFS		= 0xc00,
162 
163 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
164 	STOP_PCI_MASTER		= (1 << 2),
165 	PCI_MASTER_EMPTY	= (1 << 3),
166 	GLOB_SFT_RST		= (1 << 4),
167 
168 	MV_PCI_MODE		= 0xd00,
169 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
170 	MV_PCI_DISC_TIMER	= 0xd04,
171 	MV_PCI_MSI_TRIGGER	= 0xc38,
172 	MV_PCI_SERR_MASK	= 0xc28,
173 	MV_PCI_XBAR_TMOUT	= 0x1d04,
174 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
175 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
176 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
177 	MV_PCI_ERR_COMMAND	= 0x1d50,
178 
179 	PCI_IRQ_CAUSE_OFS	= 0x1d58,
180 	PCI_IRQ_MASK_OFS	= 0x1d5c,
181 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
182 
183 	PCIE_IRQ_CAUSE_OFS	= 0x1900,
184 	PCIE_IRQ_MASK_OFS	= 0x1910,
185 	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
186 
187 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
188 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
189 	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
191 	ERR_IRQ			= (1 << 0),	/* shift by port # */
192 	DONE_IRQ		= (1 << 1),	/* shift by port # */
193 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
194 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
195 	PCI_ERR			= (1 << 18),
196 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
197 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
198 	PORTS_0_3_COAL_DONE	= (1 << 8),
199 	PORTS_4_7_COAL_DONE	= (1 << 17),
200 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
201 	GPIO_INT		= (1 << 22),
202 	SELF_INT		= (1 << 23),
203 	TWSI_INT		= (1 << 24),
204 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
205 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
206 	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
207 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
208 				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
209 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
210 				   HC_MAIN_RSVD),
211 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
212 				   HC_MAIN_RSVD_5),
213 	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
214 
215 	/* SATAHC registers */
216 	HC_CFG_OFS		= 0,
217 
218 	HC_IRQ_CAUSE_OFS	= 0x14,
219 	DMA_IRQ			= (1 << 0),	/* shift by port # */
220 	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
221 	DEV_IRQ			= (1 << 8),	/* shift by port # */
222 
223 	/* Shadow block registers */
224 	SHD_BLK_OFS		= 0x100,
225 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
226 
227 	/* SATA registers */
228 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
229 	SATA_ACTIVE_OFS		= 0x350,
230 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
231 
232 	LTMODE_OFS		= 0x30c,
233 	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
234 
235 	PHY_MODE3		= 0x310,
236 	PHY_MODE4		= 0x314,
237 	PHY_MODE2		= 0x330,
238 	SATA_IFCTL_OFS		= 0x344,
239 	SATA_IFSTAT_OFS		= 0x34c,
240 	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
241 
242 	FIS_CFG_OFS		= 0x360,
243 	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
244 
245 	MV5_PHY_MODE		= 0x74,
246 	MV5_LT_MODE		= 0x30,
247 	MV5_PHY_CTL		= 0x0C,
248 	SATA_INTERFACE_CFG	= 0x050,
249 
250 	MV_M2_PREAMP_MASK	= 0x7e0,
251 
252 	/* Port registers */
253 	EDMA_CFG_OFS		= 0,
254 	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
255 	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
256 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
257 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
258 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
259 	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
260 	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
261 
262 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
263 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
264 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
265 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
266 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
267 	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
268 	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
269 	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
270 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
271 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
272 	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
273 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
274 	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
275 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
276 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
277 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
278 
279 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
280 	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
281 	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
282 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
283 	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
284 
285 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
286 
287 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
288 	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
289 	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
290 	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
291 	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
292 	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
293 
294 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
295 
296 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
297 	EDMA_ERR_OVERRUN_5	= (1 << 5),
298 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
299 
300 	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
301 				  EDMA_ERR_LNK_CTRL_RX_1 |
302 				  EDMA_ERR_LNK_CTRL_RX_3 |
303 				  EDMA_ERR_LNK_CTRL_TX |
304 				 /* temporary, until we fix hotplug: */
305 				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
306 
307 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
308 				  EDMA_ERR_PRD_PAR |
309 				  EDMA_ERR_DEV_DCON |
310 				  EDMA_ERR_DEV_CON |
311 				  EDMA_ERR_SERR |
312 				  EDMA_ERR_SELF_DIS |
313 				  EDMA_ERR_CRQB_PAR |
314 				  EDMA_ERR_CRPB_PAR |
315 				  EDMA_ERR_INTRL_PAR |
316 				  EDMA_ERR_IORDY |
317 				  EDMA_ERR_LNK_CTRL_RX_2 |
318 				  EDMA_ERR_LNK_DATA_RX |
319 				  EDMA_ERR_LNK_DATA_TX |
320 				  EDMA_ERR_TRANS_PROTO,
321 
322 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
323 				  EDMA_ERR_PRD_PAR |
324 				  EDMA_ERR_DEV_DCON |
325 				  EDMA_ERR_DEV_CON |
326 				  EDMA_ERR_OVERRUN_5 |
327 				  EDMA_ERR_UNDERRUN_5 |
328 				  EDMA_ERR_SELF_DIS_5 |
329 				  EDMA_ERR_CRQB_PAR |
330 				  EDMA_ERR_CRPB_PAR |
331 				  EDMA_ERR_INTRL_PAR |
332 				  EDMA_ERR_IORDY,
333 
334 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
335 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
336 
337 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
338 	EDMA_REQ_Q_PTR_SHIFT	= 5,
339 
340 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
341 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
342 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
343 	EDMA_RSP_Q_PTR_SHIFT	= 3,
344 
345 	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
346 	EDMA_EN			= (1 << 0),	/* enable EDMA */
347 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
348 	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
349 
350 	EDMA_IORDY_TMOUT	= 0x34,
351 	EDMA_ARB_CFG		= 0x38,
352 
353 	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */
354 
355 	/* Host private flags (hp_flags) */
356 	MV_HP_FLAG_MSI		= (1 << 0),
357 	MV_HP_ERRATA_50XXB0	= (1 << 1),
358 	MV_HP_ERRATA_50XXB2	= (1 << 2),
359 	MV_HP_ERRATA_60X1B2	= (1 << 3),
360 	MV_HP_ERRATA_60X1C0	= (1 << 4),
361 	MV_HP_ERRATA_XX42A0	= (1 << 5),
362 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
363 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
364 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
365 	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
366 
367 	/* Port private flags (pp_flags) */
368 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
369 	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
370 };
371 
372 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
373 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
374 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
375 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
376 
377 #define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
378 #define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
379 
380 enum {
381 	/* DMA boundary 0xffff is required by the s/g splitting
382 	 * we need on /length/ in mv_fill_sg().
383 	 */
384 	MV_DMA_BOUNDARY		= 0xffffU,
385 
386 	/* mask of register bits containing lower 32 bits
387 	 * of EDMA request queue DMA address
388 	 */
389 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
390 
391 	/* ditto, for response queue */
392 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
393 };
394 
395 enum chip_type {
396 	chip_504x,
397 	chip_508x,
398 	chip_5080,
399 	chip_604x,
400 	chip_608x,
401 	chip_6042,
402 	chip_7042,
403 	chip_soc,
404 };
405 
406 /* Command ReQuest Block: 32B */
407 struct mv_crqb {
408 	__le32			sg_addr;
409 	__le32			sg_addr_hi;
410 	__le16			ctrl_flags;
411 	__le16			ata_cmd[11];
412 };
413 
414 struct mv_crqb_iie {
415 	__le32			addr;
416 	__le32			addr_hi;
417 	__le32			flags;
418 	__le32			len;
419 	__le32			ata_cmd[4];
420 };
421 
422 /* Command ResPonse Block: 8B */
423 struct mv_crpb {
424 	__le16			id;
425 	__le16			flags;
426 	__le32			tmstmp;
427 };
428 
429 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
430 struct mv_sg {
431 	__le32			addr;
432 	__le32			flags_size;
433 	__le32			addr_hi;
434 	__le32			reserved;
435 };
436 
437 struct mv_port_priv {
438 	struct mv_crqb		*crqb;
439 	dma_addr_t		crqb_dma;
440 	struct mv_crpb		*crpb;
441 	dma_addr_t		crpb_dma;
442 	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
443 	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
444 
445 	unsigned int		req_idx;
446 	unsigned int		resp_idx;
447 
448 	u32			pp_flags;
449 };
450 
451 struct mv_port_signal {
452 	u32			amps;
453 	u32			pre;
454 };
455 
456 struct mv_host_priv {
457 	u32			hp_flags;
458 	struct mv_port_signal	signal[8];
459 	const struct mv_hw_ops	*ops;
460 	int			n_ports;
461 	void __iomem		*base;
462 	void __iomem		*main_cause_reg_addr;
463 	void __iomem		*main_mask_reg_addr;
464 	u32			irq_cause_ofs;
465 	u32			irq_mask_ofs;
466 	u32			unmask_all_irqs;
467 	/*
468 	 * These consistent DMA memory pools give us guaranteed
469 	 * alignment for hardware-accessed data structures,
470 	 * and less memory waste in accomplishing the alignment.
471 	 */
472 	struct dma_pool		*crqb_pool;
473 	struct dma_pool		*crpb_pool;
474 	struct dma_pool		*sg_tbl_pool;
475 };
476 
477 struct mv_hw_ops {
478 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
479 			   unsigned int port);
480 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
481 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
482 			   void __iomem *mmio);
483 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
484 			unsigned int n_hc);
485 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
486 	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
487 };
488 
489 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
490 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
491 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
492 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
493 static int mv_port_start(struct ata_port *ap);
494 static void mv_port_stop(struct ata_port *ap);
495 static void mv_qc_prep(struct ata_queued_cmd *qc);
496 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
497 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
498 static int mv_hardreset(struct ata_link *link, unsigned int *class,
499 			unsigned long deadline);
500 static void mv_eh_freeze(struct ata_port *ap);
501 static void mv_eh_thaw(struct ata_port *ap);
502 static void mv6_dev_config(struct ata_device *dev);
503 
504 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
505 			   unsigned int port);
506 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
507 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
508 			   void __iomem *mmio);
509 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
510 			unsigned int n_hc);
511 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
512 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
513 
514 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
515 			   unsigned int port);
516 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
517 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
518 			   void __iomem *mmio);
519 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
520 			unsigned int n_hc);
521 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
522 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
523 				      void __iomem *mmio);
524 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
525 				      void __iomem *mmio);
526 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
527 				  void __iomem *mmio, unsigned int n_hc);
528 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
529 				      void __iomem *mmio);
530 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
531 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
532 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
533 			     unsigned int port_no);
534 static int mv_stop_edma(struct ata_port *ap);
535 static int mv_stop_edma_engine(void __iomem *port_mmio);
536 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
537 
538 static void mv_pmp_select(struct ata_port *ap, int pmp);
539 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
540 				unsigned long deadline);
541 static int  mv_softreset(struct ata_link *link, unsigned int *class,
542 				unsigned long deadline);
543 
544 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
545  * because we have to allow room for worst case splitting of
546  * PRDs for 64K boundaries in mv_fill_sg().
547  */
548 static struct scsi_host_template mv5_sht = {
549 	ATA_BASE_SHT(DRV_NAME),
550 	.sg_tablesize		= MV_MAX_SG_CT / 2,
551 	.dma_boundary		= MV_DMA_BOUNDARY,
552 };
553 
554 static struct scsi_host_template mv6_sht = {
555 	ATA_NCQ_SHT(DRV_NAME),
556 	.can_queue		= MV_MAX_Q_DEPTH - 1,
557 	.sg_tablesize		= MV_MAX_SG_CT / 2,
558 	.dma_boundary		= MV_DMA_BOUNDARY,
559 };
560 
561 static struct ata_port_operations mv5_ops = {
562 	.inherits		= &ata_sff_port_ops,
563 
564 	.qc_prep		= mv_qc_prep,
565 	.qc_issue		= mv_qc_issue,
566 
567 	.freeze			= mv_eh_freeze,
568 	.thaw			= mv_eh_thaw,
569 	.hardreset		= mv_hardreset,
570 	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
571 	.post_internal_cmd	= ATA_OP_NULL,
572 
573 	.scr_read		= mv5_scr_read,
574 	.scr_write		= mv5_scr_write,
575 
576 	.port_start		= mv_port_start,
577 	.port_stop		= mv_port_stop,
578 };
579 
580 static struct ata_port_operations mv6_ops = {
581 	.inherits		= &mv5_ops,
582 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
583 	.dev_config             = mv6_dev_config,
584 	.scr_read		= mv_scr_read,
585 	.scr_write		= mv_scr_write,
586 
587 	.pmp_hardreset		= mv_pmp_hardreset,
588 	.pmp_softreset		= mv_softreset,
589 	.softreset		= mv_softreset,
590 	.error_handler		= sata_pmp_error_handler,
591 };
592 
593 static struct ata_port_operations mv_iie_ops = {
594 	.inherits		= &mv6_ops,
595 	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
596 	.dev_config		= ATA_OP_NULL,
597 	.qc_prep		= mv_qc_prep_iie,
598 };
599 
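/*
 * One ata_port_info entry per chip_type; the board index chosen at
 * probe time selects which entry (and thus which port_ops) a host uses.
 */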
600 static const struct ata_port_info mv_port_info[] = {
601 	{  /* chip_504x */
602 		.flags		= MV_COMMON_FLAGS,
603 		.pio_mask	= 0x1f,	/* pio0-4 */
604 		.udma_mask	= ATA_UDMA6,
605 		.port_ops	= &mv5_ops,
606 	},
607 	{  /* chip_508x */
608 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
609 		.pio_mask	= 0x1f,	/* pio0-4 */
610 		.udma_mask	= ATA_UDMA6,
611 		.port_ops	= &mv5_ops,
612 	},
613 	{  /* chip_5080 */
614 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
615 		.pio_mask	= 0x1f,	/* pio0-4 */
616 		.udma_mask	= ATA_UDMA6,
617 		.port_ops	= &mv5_ops,
618 	},
619 	{  /* chip_604x */
620 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
621 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
622 				  ATA_FLAG_NCQ,
623 		.pio_mask	= 0x1f,	/* pio0-4 */
624 		.udma_mask	= ATA_UDMA6,
625 		.port_ops	= &mv6_ops,
626 	},
627 	{  /* chip_608x */
628 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
629 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
630 				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
631 		.pio_mask	= 0x1f,	/* pio0-4 */
632 		.udma_mask	= ATA_UDMA6,
633 		.port_ops	= &mv6_ops,
634 	},
635 	{  /* chip_6042 */
636 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
637 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
638 				  ATA_FLAG_NCQ,
639 		.pio_mask	= 0x1f,	/* pio0-4 */
640 		.udma_mask	= ATA_UDMA6,
641 		.port_ops	= &mv_iie_ops,
642 	},
643 	{  /* chip_7042 */
644 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
645 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
646 				  ATA_FLAG_NCQ,
647 		.pio_mask	= 0x1f,	/* pio0-4 */
648 		.udma_mask	= ATA_UDMA6,
649 		.port_ops	= &mv_iie_ops,
650 	},
651 	{  /* chip_soc */
652 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
653 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
654 				  ATA_FLAG_NCQ | MV_FLAG_SOC,
655 		.pio_mask	= 0x1f,	/* pio0-4 */
656 		.udma_mask	= ATA_UDMA6,
657 		.port_ops	= &mv_iie_ops,
658 	},
659 };
660 
661 static const struct pci_device_id mv_pci_tbl[] = {
662 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
663 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
664 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
665 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
666 	/* RocketRAID 1740/174x have different identifiers */
667 	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
668 	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
669 
670 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
671 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
672 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
673 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
674 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
675 
676 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
677 
678 	/* Adaptec 1430SA */
679 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
680 
681 	/* Marvell 7042 support */
682 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
683 
684 	/* Highpoint RocketRAID PCIe series */
685 	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
686 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
687 
688 	{ }			/* terminate list */
689 };
690 
691 static const struct mv_hw_ops mv5xxx_ops = {
692 	.phy_errata		= mv5_phy_errata,
693 	.enable_leds		= mv5_enable_leds,
694 	.read_preamp		= mv5_read_preamp,
695 	.reset_hc		= mv5_reset_hc,
696 	.reset_flash		= mv5_reset_flash,
697 	.reset_bus		= mv5_reset_bus,
698 };
699 
700 static const struct mv_hw_ops mv6xxx_ops = {
701 	.phy_errata		= mv6_phy_errata,
702 	.enable_leds		= mv6_enable_leds,
703 	.read_preamp		= mv6_read_preamp,
704 	.reset_hc		= mv6_reset_hc,
705 	.reset_flash		= mv6_reset_flash,
706 	.reset_bus		= mv_reset_pci_bus,
707 };
708 
709 static const struct mv_hw_ops mv_soc_ops = {
710 	.phy_errata		= mv6_phy_errata,
711 	.enable_leds		= mv_soc_enable_leds,
712 	.read_preamp		= mv_soc_read_preamp,
713 	.reset_hc		= mv_soc_reset_hc,
714 	.reset_flash		= mv_soc_reset_flash,
715 	.reset_bus		= mv_soc_reset_bus,
716 };
717 
718 /*
719  * Functions
720  */
721 
722 static inline void writelfl(unsigned long data, void __iomem *addr)
723 {
724 	writel(data, addr);
725 	(void) readl(addr);	/* flush to avoid PCI posted write */
726 }
727 
728 static inline unsigned int mv_hc_from_port(unsigned int port)
729 {
730 	return port >> MV_PORT_HC_SHIFT;
731 }
732 
733 static inline unsigned int mv_hardport_from_port(unsigned int port)
734 {
735 	return port & MV_PORT_MASK;
736 }
737 
738 /*
739  * Consolidate some rather tricky bit shift calculations.
740  * This is hot-path stuff, so not a function.
741  * Simple code, with two return values, so macro rather than inline.
742  *
743  * port is the sole input, in range 0..7.
744  * shift is one output, for use with the main_cause and main_mask registers.
745  * hardport is the other output, in range 0..3
746  *
747  * Note that port and hardport may be the same variable in some cases.
748  */
749 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
750 {								\
751 	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
752 	hardport = mv_hardport_from_port(port);			\
753 	shift   += hardport * 2;				\
754 }
755 
756 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
757 {
758 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
759 }
760 
761 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
762 						 unsigned int port)
763 {
764 	return mv_hc_base(base, mv_hc_from_port(port));
765 }
766 
767 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
768 {
769 	return  mv_hc_base_from_port(base, port) +
770 		MV_SATAHC_ARBTR_REG_SZ +
771 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
772 }
773 
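/*
 * Gen-I (50xx) per-port PHY registers live inside the parent HC's
 * register block, at offset (hardport + 1) * 0x100.
 */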
774 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
775 {
776 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
777 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
778 
779 	return hc_mmio + ofs;
780 }
781 
782 static inline void __iomem *mv_host_base(struct ata_host *host)
783 {
784 	struct mv_host_priv *hpriv = host->private_data;
785 	return hpriv->base;
786 }
787 
788 static inline void __iomem *mv_ap_base(struct ata_port *ap)
789 {
790 	return mv_port_base(mv_host_base(ap->host), ap->port_no);
791 }
792 
793 static inline int mv_get_hc_count(unsigned long port_flags)
794 {
795 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
796 }
797 
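/**
 *      mv_set_edma_ptrs - Program EDMA queue base addresses and indices
 *      @port_mmio: port registers base address
 *      @hpriv: host private data (consulted for errata flags)
 *      @pp: port private data holding queue DMA addresses and indices
 *
 *      Load the request/response queue base registers and bring the
 *      hardware in/out pointers in line with the driver's cached
 *      req_idx/resp_idx values.
 *
 *      LOCKING:
 *      Inherited from caller.
 */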
798 static void mv_set_edma_ptrs(void __iomem *port_mmio,
799 			     struct mv_host_priv *hpriv,
800 			     struct mv_port_priv *pp)
801 {
802 	u32 index;
803 
804 	/*
805 	 * initialize request queue
806 	 */
807 	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
808 	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
809 
810 	WARN_ON(pp->crqb_dma & 0x3ff);
811 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
812 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
813 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
814 
815 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
816 		writelfl((pp->crqb_dma & 0xffffffff) | index,
817 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
818 	else
819 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
820 
821 	/*
822 	 * initialize response queue
823 	 */
824 	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
825 	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
826 
827 	WARN_ON(pp->crpb_dma & 0xff);
828 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
829 
830 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
831 		writelfl((pp->crpb_dma & 0xffffffff) | index,
832 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
833 	else
834 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
835 
836 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
837 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
838 }
839 
840 /**
841  *      mv_start_dma - Enable eDMA engine
842  *      @ap: ATA channel; @port_mmio: port registers base address
843  *      @pp: port private data; @protocol: taskfile protocol in use
844  *
845  *      Verify the local cache of the eDMA state is accurate with a
846  *      WARN_ON.
847  *
848  *      LOCKING:
849  *      Inherited from caller.
850  */
851 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
852 			 struct mv_port_priv *pp, u8 protocol)
853 {
854 	int want_ncq = (protocol == ATA_PROT_NCQ);
855 
856 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
857 		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
858 		if (want_ncq != using_ncq)
859 			mv_stop_edma(ap);
860 	}
861 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
862 		struct mv_host_priv *hpriv = ap->host->private_data;
863 		int hardport = mv_hardport_from_port(ap->port_no);
864 		void __iomem *hc_mmio = mv_hc_base_from_port(
865 					mv_host_base(ap->host), hardport);
866 		u32 hc_irq_cause, ipending;
867 
868 		/* clear EDMA event indicators, if any */
869 		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
870 
871 		/* clear EDMA interrupt indicator, if any */
872 		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
873 		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
874 		if (hc_irq_cause & ipending) {
875 			writelfl(hc_irq_cause & ~ipending,
876 				 hc_mmio + HC_IRQ_CAUSE_OFS);
877 		}
878 
879 		mv_edma_cfg(ap, want_ncq);
880 
881 		/* clear FIS IRQ Cause */
882 		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
883 
884 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
885 
886 		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
887 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
888 	}
889 }
890 
891 /**
892  *      mv_stop_edma_engine - Disable eDMA engine
893  *      @port_mmio: io base address
894  *
895  *      LOCKING:
896  *      Inherited from caller.
897  */
898 static int mv_stop_edma_engine(void __iomem *port_mmio)
899 {
900 	int i;
901 
902 	/* Disable eDMA.  The disable bit auto clears. */
903 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
904 
905 	/* Wait for the chip to confirm eDMA is off. */
906 	for (i = 10000; i > 0; i--) {
907 		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
908 		if (!(reg & EDMA_EN))
909 			return 0;
910 		udelay(10);
911 	}
912 	return -EIO;
913 }
914 
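/**
 *      mv_stop_edma - Disable eDMA for a port, if currently enabled
 *      @ap: ATA channel to manipulate
 *
 *      Clears the cached MV_PP_FLAG_EDMA_EN flag and asks the engine
 *      to stop, reporting an error if the hardware fails to confirm.
 *
 *      LOCKING:
 *      Inherited from caller.
 */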
915 static int mv_stop_edma(struct ata_port *ap)
916 {
917 	void __iomem *port_mmio = mv_ap_base(ap);
918 	struct mv_port_priv *pp = ap->private_data;
919 
920 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
921 		return 0;
922 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
923 	if (mv_stop_edma_engine(port_mmio)) {
924 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
925 		return -EIO;
926 	}
927 	return 0;
928 }
929 
930 #ifdef ATA_DEBUG
931 static void mv_dump_mem(void __iomem *start, unsigned bytes)
932 {
933 	int b, w;
934 	for (b = 0; b < bytes; ) {
935 		DPRINTK("%p: ", start + b);
936 		for (w = 0; b < bytes && w < 4; w++) {
937 			printk("%08x ", readl(start + b));
938 			b += sizeof(u32);
939 		}
940 		printk("\n");
941 	}
942 }
943 #endif
944 
945 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
946 {
947 #ifdef ATA_DEBUG
948 	int b, w;
949 	u32 dw;
950 	for (b = 0; b < bytes; ) {
951 		DPRINTK("%02x: ", b);
952 		for (w = 0; b < bytes && w < 4; w++) {
953 			(void) pci_read_config_dword(pdev, b, &dw);
954 			printk("%08x ", dw);
955 			b += sizeof(u32);
956 		}
957 		printk("\n");
958 	}
959 #endif
960 }
961 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
962 			     struct pci_dev *pdev)
963 {
964 #ifdef ATA_DEBUG
965 	void __iomem *hc_base = mv_hc_base(mmio_base,
966 					   port >> MV_PORT_HC_SHIFT);
967 	void __iomem *port_base;
968 	int start_port, num_ports, p, start_hc, num_hcs, hc;
969 
970 	if (0 > port) {
971 		start_hc = start_port = 0;
972 		num_ports = 8;		/* should be benign for 4 port devs */
973 		num_hcs = 2;
974 	} else {
975 		start_hc = port >> MV_PORT_HC_SHIFT;
976 		start_port = port;
977 		num_ports = num_hcs = 1;
978 	}
979 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
980 		num_ports > 1 ? num_ports - 1 : start_port);
981 
982 	if (NULL != pdev) {
983 		DPRINTK("PCI config space regs:\n");
984 		mv_dump_pci_cfg(pdev, 0x68);
985 	}
986 	DPRINTK("PCI regs:\n");
987 	mv_dump_mem(mmio_base+0xc00, 0x3c);
988 	mv_dump_mem(mmio_base+0xd00, 0x34);
989 	mv_dump_mem(mmio_base+0xf00, 0x4);
990 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
991 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
992 		hc_base = mv_hc_base(mmio_base, hc);
993 		DPRINTK("HC regs (HC %i):\n", hc);
994 		mv_dump_mem(hc_base, 0x1c);
995 	}
996 	for (p = start_port; p < start_port + num_ports; p++) {
997 		port_base = mv_port_base(mmio_base, p);
998 		DPRINTK("EDMA regs (port %i):\n", p);
999 		mv_dump_mem(port_base, 0x54);
1000 		DPRINTK("SATA regs (port %i):\n", p);
1001 		mv_dump_mem(port_base+0x300, 0x60);
1002 	}
1003 #endif
1004 }
1005 
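/*
 * Translate an SCR register index (SCR_STATUS etc.) into its offset
 * within the port register space, or 0xffffffffU if unsupported.
 */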
1006 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1007 {
1008 	unsigned int ofs;
1009 
1010 	switch (sc_reg_in) {
1011 	case SCR_STATUS:
1012 	case SCR_CONTROL:
1013 	case SCR_ERROR:
1014 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1015 		break;
1016 	case SCR_ACTIVE:
1017 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
1018 		break;
1019 	default:
1020 		ofs = 0xffffffffU;
1021 		break;
1022 	}
1023 	return ofs;
1024 }
1025 
1026 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1027 {
1028 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1029 
1030 	if (ofs != 0xffffffffU) {
1031 		*val = readl(mv_ap_base(ap) + ofs);
1032 		return 0;
1033 	} else
1034 		return -EINVAL;
1035 }
1036 
1037 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1038 {
1039 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1040 
1041 	if (ofs != 0xffffffffU) {
1042 		writelfl(val, mv_ap_base(ap) + ofs);
1043 		return 0;
1044 	} else
1045 		return -EINVAL;
1046 }
1047 
1048 static void mv6_dev_config(struct ata_device *adev)
1049 {
1050 	/*
1051 	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1052 	 *
1053 	 * Gen-II does not support NCQ over a port multiplier
1054 	 *  (no FIS-based switching).
1055 	 *
1056 	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1057 	 * See mv_qc_prep() for more info.
1058 	 */
1059 	if (adev->flags & ATA_DFLAG_NCQ) {
1060 		if (sata_pmp_attached(adev->link->ap)) {
1061 			adev->flags &= ~ATA_DFLAG_NCQ;
1062 			ata_dev_printk(adev, KERN_INFO,
1063 				"NCQ disabled for command-based switching\n");
1064 		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
1065 			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
1066 			ata_dev_printk(adev, KERN_INFO,
1067 				"max_sectors limited to %u for NCQ\n",
1068 				adev->max_sectors);
1069 		}
1070 	}
1071 }
1072 
1073 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1074 {
1075 	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1076 	/*
1077 	 * Various bit settings required for operation
1078 	 * in FIS-based switching (fbs) mode on GenIIe:
1079 	 */
1080 	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
1081 	old_ltmode = readl(port_mmio + LTMODE_OFS);
1082 	if (enable_fbs) {
1083 		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
1084 		new_ltmode = old_ltmode |  LTMODE_BIT8;
1085 	} else { /* disable fbs */
1086 		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
1087 		new_ltmode = old_ltmode & ~LTMODE_BIT8;
1088 	}
1089 	if (new_fcfg != old_fcfg)
1090 		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1091 	if (new_ltmode != old_ltmode)
1092 		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1093 }
1094 
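/**
 *      mv_edma_cfg - Set up the EDMA configuration register for a port
 *      @ap: ATA channel to manipulate
 *      @want_ncq: nonzero to configure the engine for NCQ (FPDMA)
 *
 *      Build the EDMA_CFG value appropriate for the chip generation,
 *      enabling FIS-based switching on GenIIe when NCQ is used behind
 *      a port multiplier, and update the cached MV_PP_FLAG_NCQ_EN flag.
 *
 *      LOCKING:
 *      Inherited from caller.
 */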
1095 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1096 {
1097 	u32 cfg;
1098 	struct mv_port_priv *pp    = ap->private_data;
1099 	struct mv_host_priv *hpriv = ap->host->private_data;
1100 	void __iomem *port_mmio    = mv_ap_base(ap);
1101 
1102 	/* set up non-NCQ EDMA configuration */
1103 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1104 
1105 	if (IS_GEN_I(hpriv))
1106 		cfg |= (1 << 8);	/* enab config burst size mask */
1107 
1108 	else if (IS_GEN_II(hpriv))
1109 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1110 
1111 	else if (IS_GEN_IIE(hpriv)) {
1112 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1113 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1114 		cfg |= (1 << 18);	/* enab early completion */
1115 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1116 
1117 		if (want_ncq && sata_pmp_attached(ap)) {
1118 			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1119 			mv_config_fbs(port_mmio, 1);
1120 		} else {
1121 			mv_config_fbs(port_mmio, 0);
1122 		}
1123 	}
1124 
1125 	if (want_ncq) {
1126 		cfg |= EDMA_CFG_NCQ;
1127 		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1128 	} else
1129 		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1130 
1131 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1132 }
1133 
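/*
 * Release the CRQB/CRPB/sg_tbl allocations made from the host's DMA
 * pools; used by mv_port_stop() and by the mv_port_start() error path.
 */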
1134 static void mv_port_free_dma_mem(struct ata_port *ap)
1135 {
1136 	struct mv_host_priv *hpriv = ap->host->private_data;
1137 	struct mv_port_priv *pp = ap->private_data;
1138 	int tag;
1139 
1140 	if (pp->crqb) {
1141 		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1142 		pp->crqb = NULL;
1143 	}
1144 	if (pp->crpb) {
1145 		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1146 		pp->crpb = NULL;
1147 	}
1148 	/*
1149 	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1150 	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1151 	 */
1152 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1153 		if (pp->sg_tbl[tag]) {
1154 			if (tag == 0 || !IS_GEN_I(hpriv))
1155 				dma_pool_free(hpriv->sg_tbl_pool,
1156 					      pp->sg_tbl[tag],
1157 					      pp->sg_tbl_dma[tag]);
1158 			pp->sg_tbl[tag] = NULL;
1159 		}
1160 	}
1161 }
1162 
1163 /**
1164  *      mv_port_start - Port specific init/start routine.
1165  *      @ap: ATA channel to manipulate
1166  *
1167  *      Allocate and point to DMA memory, init port private memory,
1168  *      zero indices.
1169  *
1170  *      LOCKING:
1171  *      Inherited from caller.
1172  */
1173 static int mv_port_start(struct ata_port *ap)
1174 {
1175 	struct device *dev = ap->host->dev;
1176 	struct mv_host_priv *hpriv = ap->host->private_data;
1177 	struct mv_port_priv *pp;
1178 	int tag;
1179 
1180 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1181 	if (!pp)
1182 		return -ENOMEM;
1183 	ap->private_data = pp;
1184 
1185 	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1186 	if (!pp->crqb)
1187 		return -ENOMEM;
1188 	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1189 
1190 	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1191 	if (!pp->crpb)
1192 		goto out_port_free_dma_mem;
1193 	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1194 
1195 	/*
1196 	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1197 	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1198 	 */
1199 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1200 		if (tag == 0 || !IS_GEN_I(hpriv)) {
1201 			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1202 					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1203 			if (!pp->sg_tbl[tag])
1204 				goto out_port_free_dma_mem;
1205 		} else {
1206 			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1207 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1208 		}
1209 	}
1210 	return 0;
1211 
1212 out_port_free_dma_mem:
1213 	mv_port_free_dma_mem(ap);
1214 	return -ENOMEM;
1215 }
1216 
1217 /**
1218  *      mv_port_stop - Port specific cleanup/stop routine.
1219  *      @ap: ATA channel to manipulate
1220  *
1221  *      Stop DMA, cleanup port memory.
1222  *
1223  *      LOCKING:
1224  *      This routine uses the host lock to protect the DMA stop.
1225  */
1226 static void mv_port_stop(struct ata_port *ap)
1227 {
1228 	mv_stop_edma(ap);
1229 	mv_port_free_dma_mem(ap);
1230 }
1231 
1232 /**
1233  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1234  *      @qc: queued command whose SG list to source from
1235  *
1236  *      Populate the SG list and mark the last entry.
1237  *
1238  *      LOCKING:
1239  *      Inherited from caller.
1240  */
1241 static void mv_fill_sg(struct ata_queued_cmd *qc)
1242 {
1243 	struct mv_port_priv *pp = qc->ap->private_data;
1244 	struct scatterlist *sg;
1245 	struct mv_sg *mv_sg, *last_sg = NULL;
1246 	unsigned int si;
1247 
1248 	mv_sg = pp->sg_tbl[qc->tag];
1249 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1250 		dma_addr_t addr = sg_dma_address(sg);
1251 		u32 sg_len = sg_dma_len(sg);
1252 
1253 		while (sg_len) {
1254 			u32 offset = addr & 0xffff;
1255 			u32 len = sg_len;
1256 
1257 			if (offset + sg_len > 0x10000)
1258 				len = 0x10000 - offset;
1259 
1260 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1261 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1262 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1263 
1264 			sg_len -= len;
1265 			addr += len;
1266 
1267 			last_sg = mv_sg;
1268 			mv_sg++;
1269 		}
1270 	}
1271 
1272 	if (likely(last_sg))
1273 		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1274 }
1275 
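/*
 * Pack a single shadow-register write (data byte plus register address)
 * into one 16-bit CRQB command word; "last" marks the final word.
 */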
1276 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1277 {
1278 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1279 		(last ? CRQB_CMD_LAST : 0);
1280 	*cmdw = cpu_to_le16(tmp);
1281 }
1282 
1283 /**
1284  *      mv_qc_prep - Host specific command preparation.
1285  *      @qc: queued command to prepare
1286  *
1287  *      This routine simply redirects to the general purpose routine
1288  *      if command is not DMA.  Else, it handles prep of the CRQB
1289  *      (command request block), does some sanity checking, and calls
1290  *      the SG load routine.
1291  *
1292  *      LOCKING:
1293  *      Inherited from caller.
1294  */
1295 static void mv_qc_prep(struct ata_queued_cmd *qc)
1296 {
1297 	struct ata_port *ap = qc->ap;
1298 	struct mv_port_priv *pp = ap->private_data;
1299 	__le16 *cw;
1300 	struct ata_taskfile *tf;
1301 	u16 flags = 0;
1302 	unsigned in_index;
1303 
1304 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1305 	    (qc->tf.protocol != ATA_PROT_NCQ))
1306 		return;
1307 
1308 	/* Fill in command request block
1309 	 */
1310 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1311 		flags |= CRQB_FLAG_READ;
1312 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1313 	flags |= qc->tag << CRQB_TAG_SHIFT;
1314 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1315 
1316 	/* get current queue index from software */
1317 	in_index = pp->req_idx;
1318 
1319 	pp->crqb[in_index].sg_addr =
1320 		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1321 	pp->crqb[in_index].sg_addr_hi =
1322 		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1323 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1324 
1325 	cw = &pp->crqb[in_index].ata_cmd[0];
1326 	tf = &qc->tf;
1327 
1328 	/* Sadly, the CRQB cannot accommodate all registers--there are
1329 	 * only 11 command words...so we must pick and choose required
1330 	 * registers based on the command.  So, we drop feature and
1331 	 * hob_feature for [RW] DMA commands, but they are needed for
1332 	 * NCQ.  NCQ will drop hob_nsect.
1333 	 */
1334 	switch (tf->command) {
1335 	case ATA_CMD_READ:
1336 	case ATA_CMD_READ_EXT:
1337 	case ATA_CMD_WRITE:
1338 	case ATA_CMD_WRITE_EXT:
1339 	case ATA_CMD_WRITE_FUA_EXT:
1340 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1341 		break;
1342 	case ATA_CMD_FPDMA_READ:
1343 	case ATA_CMD_FPDMA_WRITE:
1344 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1345 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1346 		break;
1347 	default:
1348 		/* The only other commands EDMA supports in non-queued and
1349 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1350 		 * of which are defined/used by Linux.  If we get here, this
1351 		 * driver needs work.
1352 		 *
1353 		 * FIXME: modify libata to give qc_prep a return value and
1354 		 * return error here.
1355 		 */
1356 		BUG_ON(tf->command);
1357 		break;
1358 	}
1359 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1360 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1361 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1362 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1363 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1364 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1365 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1366 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1367 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1368 
1369 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1370 		return;
1371 	mv_fill_sg(qc);
1372 }
1373 
1374 /**
1375  *      mv_qc_prep_iie - Host specific command preparation.
1376  *      @qc: queued command to prepare
1377  *
1378  *      This routine simply redirects to the general purpose routine
1379  *      if command is not DMA.  Else, it handles prep of the CRQB
1380  *      (command request block), does some sanity checking, and calls
1381  *      the SG load routine.
1382  *
1383  *      LOCKING:
1384  *      Inherited from caller.
1385  */
1386 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1387 {
1388 	struct ata_port *ap = qc->ap;
1389 	struct mv_port_priv *pp = ap->private_data;
1390 	struct mv_crqb_iie *crqb;
1391 	struct ata_taskfile *tf;
1392 	unsigned in_index;
1393 	u32 flags = 0;
1394 
1395 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1396 	    (qc->tf.protocol != ATA_PROT_NCQ))
1397 		return;
1398 
1399 	/* Fill in Gen IIE command request block */
1400 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1401 		flags |= CRQB_FLAG_READ;
1402 
1403 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1404 	flags |= qc->tag << CRQB_TAG_SHIFT;
1405 	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1406 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1407 
1408 	/* get current queue index from software */
1409 	in_index = pp->req_idx;
1410 
1411 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1412 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1413 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1414 	crqb->flags = cpu_to_le32(flags);
1415 
1416 	tf = &qc->tf;
1417 	crqb->ata_cmd[0] = cpu_to_le32(
1418 			(tf->command << 16) |
1419 			(tf->feature << 24)
1420 		);
1421 	crqb->ata_cmd[1] = cpu_to_le32(
1422 			(tf->lbal << 0) |
1423 			(tf->lbam << 8) |
1424 			(tf->lbah << 16) |
1425 			(tf->device << 24)
1426 		);
1427 	crqb->ata_cmd[2] = cpu_to_le32(
1428 			(tf->hob_lbal << 0) |
1429 			(tf->hob_lbam << 8) |
1430 			(tf->hob_lbah << 16) |
1431 			(tf->hob_feature << 24)
1432 		);
1433 	crqb->ata_cmd[3] = cpu_to_le32(
1434 			(tf->nsect << 0) |
1435 			(tf->hob_nsect << 8)
1436 		);
1437 
1438 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1439 		return;
1440 	mv_fill_sg(qc);
1441 }
1442 
1443 /**
1444  *      mv_qc_issue - Initiate a command to the host
1445  *      @qc: queued command to start
1446  *
1447  *      This routine simply redirects to the general purpose routine
1448  *      if command is not DMA.  Else, it sanity checks our local
1449  *      caches of the request producer/consumer indices then enables
1450  *      DMA and bumps the request producer index.
1451  *
1452  *      LOCKING:
1453  *      Inherited from caller.
1454  */
1455 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1456 {
1457 	struct ata_port *ap = qc->ap;
1458 	void __iomem *port_mmio = mv_ap_base(ap);
1459 	struct mv_port_priv *pp = ap->private_data;
1460 	u32 in_index;
1461 
1462 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1463 	    (qc->tf.protocol != ATA_PROT_NCQ)) {
1464 		/*
1465 		 * We're about to send a non-EDMA capable command to the
1466 		 * port.  Turn off EDMA so there won't be problems accessing
1467 		 * shadow block, etc registers.
1468 		 */
1469 		mv_stop_edma(ap);
1470 		mv_pmp_select(ap, qc->dev->link->pmp);
1471 		return ata_sff_qc_issue(qc);
1472 	}
1473 
1474 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1475 
1476 	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1477 	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1478 
1479 	/* and write the request in pointer to kick the EDMA to life */
1480 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1481 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1482 
1483 	return 0;
1484 }
1485 
1486 /**
1487  *      mv_err_intr - Handle error interrupts on the port
1488  *      @ap: ATA channel to manipulate
1489  *      @qc: active queued command, or NULL if none
1490  *
1491  *      In most cases, just clear the interrupt and move on.  However,
1492  *      some cases require an eDMA reset, which also performs a COMRESET.
1493  *      The SERR case requires a clear of pending errors in the SATA
1494  *      SERROR register.  Finally, if the port disabled DMA,
1495  *      update our cached copy to match.
1496  *
1497  *      LOCKING:
1498  *      Inherited from caller.
1499  */
1500 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1501 {
1502 	void __iomem *port_mmio = mv_ap_base(ap);
1503 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1504 	struct mv_port_priv *pp = ap->private_data;
1505 	struct mv_host_priv *hpriv = ap->host->private_data;
1506 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1507 	unsigned int action = 0, err_mask = 0;
1508 	struct ata_eh_info *ehi = &ap->link.eh_info;
1509 
1510 	ata_ehi_clear_desc(ehi);
1511 
1512 	if (!edma_enabled) {
1513 		/* just a guess: do we need to do this? should we
1514 		 * expand this, and do it in all cases?
1515 		 */
1516 		sata_scr_read(&ap->link, SCR_ERROR, &serr);
1517 		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1518 	}
1519 
1520 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1521 
1522 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
1523 
1524 	/*
1525 	 * All generations share these EDMA error cause bits:
1526 	 */
1527 	if (edma_err_cause & EDMA_ERR_DEV)
1528 		err_mask |= AC_ERR_DEV;
1529 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1530 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1531 			EDMA_ERR_INTRL_PAR)) {
1532 		err_mask |= AC_ERR_ATA_BUS;
1533 		action |= ATA_EH_RESET;
1534 		ata_ehi_push_desc(ehi, "parity error");
1535 	}
1536 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1537 		ata_ehi_hotplugged(ehi);
1538 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1539 			"dev disconnect" : "dev connect");
1540 		action |= ATA_EH_RESET;
1541 	}
1542 
1543 	/*
1544 	 * Gen-I has a different SELF_DIS bit,
1545 	 * different FREEZE bits, and no SERR bit:
1546 	 */
1547 	if (IS_GEN_I(hpriv)) {
1548 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1549 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1550 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1551 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1552 		}
1553 	} else {
1554 		eh_freeze_mask = EDMA_EH_FREEZE;
1555 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1556 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1557 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1558 		}
1559 		if (edma_err_cause & EDMA_ERR_SERR) {
1560 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
1561 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1562 			err_mask = AC_ERR_ATA_BUS;
1563 			action |= ATA_EH_RESET;
1564 		}
1565 	}
1566 
1567 	/* Clear EDMA now that SERR cleanup done */
1568 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1569 
1570 	if (!err_mask) {
1571 		err_mask = AC_ERR_OTHER;
1572 		action |= ATA_EH_RESET;
1573 	}
1574 
1575 	ehi->serror |= serr;
1576 	ehi->action |= action;
1577 
1578 	if (qc)
1579 		qc->err_mask |= err_mask;
1580 	else
1581 		ehi->err_mask |= err_mask;
1582 
1583 	if (edma_err_cause & eh_freeze_mask)
1584 		ata_port_freeze(ap);
1585 	else
1586 		ata_port_abort(ap);
1587 }
1588 
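/*
 * Handle a device interrupt for a port operating in PIO (non-EDMA)
 * mode: read the ATA status and, unless the command is being polled,
 * complete the currently active command with that status.
 */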
1589 static void mv_intr_pio(struct ata_port *ap)
1590 {
1591 	struct ata_queued_cmd *qc;
1592 	u8 ata_status;
1593 
1594 	/* ignore spurious intr if drive still BUSY */
1595 	ata_status = readb(ap->ioaddr.status_addr);
1596 	if (unlikely(ata_status & ATA_BUSY))
1597 		return;
1598 
1599 	/* get active ATA command */
1600 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1601 	if (unlikely(!qc))			/* no active tag */
1602 		return;
1603 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1604 		return;
1605 
1606 	/* and finally, complete the ATA command */
1607 	qc->err_mask |= ac_err_mask(ata_status);
1608 	ata_qc_complete(qc);
1609 }
1610 
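/*
 * Complete the command described by one CRPB response entry, folding
 * the saved ATA status into its error mask.  Non-NCQ error cases are
 * left untouched here so that mv_err_intr() can handle them.
 */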
1611 static void mv_process_crpb_response(struct ata_port *ap,
1612 		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1613 {
1614 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1615 
1616 	if (qc) {
1617 		u8 ata_status;
1618 		u16 edma_status = le16_to_cpu(response->flags);
1619 		/*
1620 		 * edma_status from a response queue entry:
1621 		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1622 		 *   MSB is saved ATA status from command completion.
1623 		 */
1624 		if (!ncq_enabled) {
1625 			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1626 			if (err_cause) {
1627 				/*
1628 				 * Error will be seen/handled by mv_err_intr().
1629 				 * So do nothing at all here.
1630 				 */
1631 				return;
1632 			}
1633 		}
1634 		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1635 		qc->err_mask |= ac_err_mask(ata_status);
1636 		ata_qc_complete(qc);
1637 	} else {
1638 		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1639 				__func__, tag);
1640 	}
1641 }
1642 
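/*
 * Walk the EDMA response queue from the cached resp_idx up to the
 * hardware's in-pointer, completing each finished command, and then
 * write the new out-pointer back to the hardware.
 */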
1643 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
1644 {
1645 	void __iomem *port_mmio = mv_ap_base(ap);
1646 	struct mv_host_priv *hpriv = ap->host->private_data;
1647 	u32 in_index;
1648 	bool work_done = false;
1649 	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
1650 
1651 	/* Get the hardware queue position index */
1652 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1653 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1654 
1655 	/* Process new responses received since the last time we looked */
1656 	while (in_index != pp->resp_idx) {
1657 		unsigned int tag;
1658 		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
1659 
1660 		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1661 
1662 		if (IS_GEN_I(hpriv)) {
1663 			/* 50xx: no NCQ, only one command active at a time */
1664 			tag = ap->link.active_tag;
1665 		} else {
1666 			/* Gen II/IIE: get command tag from CRPB entry */
1667 			tag = le16_to_cpu(response->id) & 0x1f;
1668 		}
1669 		mv_process_crpb_response(ap, response, tag, ncq_enabled);
1670 		work_done = true;
1671 	}
1672 
1673 	/* Update the software queue position index in hardware */
1674 	if (work_done)
1675 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1676 			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
1677 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1678 }
1679 
1680 /**
1681  *      mv_host_intr - Handle all port interrupts on the given host
1682  *      @host: host specific structure
1683  *      @main_cause: value of the main interrupt cause register, holding
1684  *      the DONE_IRQ/ERR_IRQ bits for every port on this host
1685  *
1686  *      Walk each port on the host and see if it needs servicing, reading
1687  *      and write-clearing an HC's interrupt cause register the first time
1688  *      a port on that HC is serviced.  Port completion ints are reported
1689  *      in the HC interrupt cause reg; port error ints are reported in the
1690  *      higher level main interrupt cause register and thus arrive via the
1691  *      'main_cause' argument.
1692  *
1693  *      LOCKING:
1694  *      Inherited from caller.
1695  */
1696 static int mv_host_intr(struct ata_host *host, u32 main_cause)
1697 {
1698 	struct mv_host_priv *hpriv = host->private_data;
1699 	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
1700 	u32 hc_irq_cause = 0;
1701 	unsigned int handled = 0, port;
1702 
1703 	for (port = 0; port < hpriv->n_ports; port++) {
1704 		struct ata_port *ap = host->ports[port];
1705 		struct mv_port_priv *pp;
1706 		unsigned int shift, hardport, port_cause;
1707 		/*
1708 		 * When we move to the second hc, flag our cached
1709 		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
1710 		 */
1711 		if (port == MV_PORTS_PER_HC)
1712 			hc_mmio = NULL;
1713 		/*
1714 		 * Do nothing if port is not interrupting or is disabled:
1715 		 */
1716 		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1717 		port_cause = (main_cause >> shift) & (DONE_IRQ | ERR_IRQ);
1718 		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
1719 			continue;
1720 		/*
1721 		 * Each hc within the host has its own hc_irq_cause register.
1722 		 * We defer reading it until we know we need it, right now:
1723 		 *
1724 		 * FIXME later: we don't really need to read this register
1725 		 * (some logic changes required below if we go that way),
1726 		 * because it doesn't tell us anything new.  But we do need
1727 		 * to write to it, outside the top of this loop,
1728 		 * to reset the interrupt triggers for next time.
1729 		 */
1730 		if (!hc_mmio) {
1731 			hc_mmio = mv_hc_base_from_port(mmio, port);
1732 			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1733 			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1734 			handled = 1;
1735 		}
1736 
1737 		if (unlikely(port_cause & ERR_IRQ)) {
1738 			struct ata_queued_cmd *qc;
1739 
1740 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1741 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1742 				continue;
1743 
1744 			mv_err_intr(ap, qc);
1745 			continue;
1746 		}
1747 
1748 		pp = ap->private_data;
1749 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1750 			if ((DMA_IRQ << hardport) & hc_irq_cause)
1751 				mv_process_crpb_entries(ap, pp);
1752 		} else {
1753 			if ((DEV_IRQ << hardport) & hc_irq_cause)
1754 				mv_intr_pio(ap);
1755 		}
1756 	}
1757 	return handled;
1758 }
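
/*
 * Flow of mv_host_intr() above, for reference: each port owns a DONE_IRQ /
 * ERR_IRQ bit pair in the main cause register, located via
 * MV_PORT_TO_SHIFT_AND_HARDPORT().  The per-HC cause register is read and
 * write-cleared at most once per HC (cached in hc_mmio).  Error interrupts
 * are routed to mv_err_intr(), EDMA completions to mv_process_crpb_entries(),
 * and legacy PIO completions to mv_intr_pio().
 */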
1759 
1760 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
1761 {
1762 	struct mv_host_priv *hpriv = host->private_data;
1763 	struct ata_port *ap;
1764 	struct ata_queued_cmd *qc;
1765 	struct ata_eh_info *ehi;
1766 	unsigned int i, err_mask, printed = 0;
1767 	u32 err_cause;
1768 
1769 	err_cause = readl(mmio + hpriv->irq_cause_ofs);
1770 
1771 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1772 		   err_cause);
1773 
1774 	DPRINTK("All regs @ PCI error\n");
1775 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1776 
1777 	writelfl(0, mmio + hpriv->irq_cause_ofs);
1778 
1779 	for (i = 0; i < host->n_ports; i++) {
1780 		ap = host->ports[i];
1781 		if (!ata_link_offline(&ap->link)) {
1782 			ehi = &ap->link.eh_info;
1783 			ata_ehi_clear_desc(ehi);
1784 			if (!printed++)
1785 				ata_ehi_push_desc(ehi,
1786 					"PCI err cause 0x%08x", err_cause);
1787 			err_mask = AC_ERR_HOST_BUS;
1788 			ehi->action = ATA_EH_RESET;
1789 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1790 			if (qc)
1791 				qc->err_mask |= err_mask;
1792 			else
1793 				ehi->err_mask |= err_mask;
1794 
1795 			ata_port_freeze(ap);
1796 		}
1797 	}
1798 	return 1;	/* handled */
1799 }
1800 
1801 /**
1802  *      mv_interrupt - Main interrupt event handler
1803  *      @irq: unused
1804  *      @dev_instance: private data; in this case the host structure
1805  *
1806  *      Read the read only register to determine if any host
1807  *      controllers have pending interrupts.  If so, call lower level
1808  *      routine to handle.  Also check for PCI errors which are only
1809  *      reported here.
1810  *
1811  *      LOCKING:
1812  *      This routine holds the host lock while processing pending
1813  *      interrupts.
1814  */
1815 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1816 {
1817 	struct ata_host *host = dev_instance;
1818 	struct mv_host_priv *hpriv = host->private_data;
1819 	unsigned int handled = 0;
1820 	u32 main_cause, main_mask;
1821 
1822 	spin_lock(&host->lock);
1823 	main_cause = readl(hpriv->main_cause_reg_addr);
1824 	main_mask  = readl(hpriv->main_mask_reg_addr);
1825 	/*
1826 	 * Deal with cases where we either have nothing pending, or have read
1827 	 * a bogus register value which can indicate HW removal or PCI fault.
1828 	 */
1829 	if ((main_cause & main_mask) && (main_cause != 0xffffffffU)) {
1830 		if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host)))
1831 			handled = mv_pci_error(host, hpriv->base);
1832 		else
1833 			handled = mv_host_intr(host, main_cause);
1834 	}
1835 	spin_unlock(&host->lock);
1836 	return IRQ_RETVAL(handled);
1837 }
1838 
1839 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1840 {
1841 	unsigned int ofs;
1842 
1843 	switch (sc_reg_in) {
1844 	case SCR_STATUS:
1845 	case SCR_ERROR:
1846 	case SCR_CONTROL:
1847 		ofs = sc_reg_in * sizeof(u32);
1848 		break;
1849 	default:
1850 		ofs = 0xffffffffU;
1851 		break;
1852 	}
1853 	return ofs;
1854 }
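
/*
 * On the 50xx parts, SCR_STATUS, SCR_ERROR and SCR_CONTROL live as
 * consecutive 32-bit registers in the per-port PHY block, hence the simple
 * "index * sizeof(u32)" offset above.  Any other SCR index yields
 * 0xffffffff, which mv5_scr_read()/mv5_scr_write() below translate into
 * -EINVAL.
 */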
1855 
1856 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1857 {
1858 	struct mv_host_priv *hpriv = ap->host->private_data;
1859 	void __iomem *mmio = hpriv->base;
1860 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1861 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1862 
1863 	if (ofs != 0xffffffffU) {
1864 		*val = readl(addr + ofs);
1865 		return 0;
1866 	} else
1867 		return -EINVAL;
1868 }
1869 
1870 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1871 {
1872 	struct mv_host_priv *hpriv = ap->host->private_data;
1873 	void __iomem *mmio = hpriv->base;
1874 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1875 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1876 
1877 	if (ofs != 0xffffffffU) {
1878 		writelfl(val, addr + ofs);
1879 		return 0;
1880 	} else
1881 		return -EINVAL;
1882 }
1883 
1884 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1885 {
1886 	struct pci_dev *pdev = to_pci_dev(host->dev);
1887 	int early_5080;
1888 
1889 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1890 
1891 	if (!early_5080) {
1892 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1893 		tmp |= (1 << 0);
1894 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1895 	}
1896 
1897 	mv_reset_pci_bus(host, mmio);
1898 }
1899 
1900 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1901 {
1902 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1903 }
1904 
1905 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1906 			   void __iomem *mmio)
1907 {
1908 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1909 	u32 tmp;
1910 
1911 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1912 
1913 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1914 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1915 }
1916 
1917 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1918 {
1919 	u32 tmp;
1920 
1921 	writel(0, mmio + MV_GPIO_PORT_CTL);
1922 
1923 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1924 
1925 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1926 	tmp |= ~(1 << 0);
1927 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1928 }
1929 
1930 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1931 			   unsigned int port)
1932 {
1933 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1934 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1935 	u32 tmp;
1936 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1937 
1938 	if (fix_apm_sq) {
1939 		tmp = readl(phy_mmio + MV5_LT_MODE);
1940 		tmp |= (1 << 19);
1941 		writel(tmp, phy_mmio + MV5_LT_MODE);
1942 
1943 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1944 		tmp &= ~0x3;
1945 		tmp |= 0x1;
1946 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1947 	}
1948 
1949 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1950 	tmp &= ~mask;
1951 	tmp |= hpriv->signal[port].pre;
1952 	tmp |= hpriv->signal[port].amps;
1953 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1954 }
1955 
1956 
1957 #undef ZERO
1958 #define ZERO(reg) writel(0, port_mmio + (reg))
1959 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1960 			     unsigned int port)
1961 {
1962 	void __iomem *port_mmio = mv_port_base(mmio, port);
1963 
1964 	/*
1965 	 * The datasheet warns against setting ATA_RST when EDMA is active
1966 	 * (but doesn't say what the problem might be).  So we first try
1967 	 * to disable the EDMA engine before doing the ATA_RST operation.
1968 	 */
1969 	mv_reset_channel(hpriv, mmio, port);
1970 
1971 	ZERO(0x028);	/* command */
1972 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1973 	ZERO(0x004);	/* timer */
1974 	ZERO(0x008);	/* irq err cause */
1975 	ZERO(0x00c);	/* irq err mask */
1976 	ZERO(0x010);	/* rq bah */
1977 	ZERO(0x014);	/* rq inp */
1978 	ZERO(0x018);	/* rq outp */
1979 	ZERO(0x01c);	/* respq bah */
1980 	ZERO(0x024);	/* respq outp */
1981 	ZERO(0x020);	/* respq inp */
1982 	ZERO(0x02c);	/* test control */
1983 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1984 }
1985 #undef ZERO
1986 
1987 #define ZERO(reg) writel(0, hc_mmio + (reg))
1988 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1989 			unsigned int hc)
1990 {
1991 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1992 	u32 tmp;
1993 
1994 	ZERO(0x00c);
1995 	ZERO(0x010);
1996 	ZERO(0x014);
1997 	ZERO(0x018);
1998 
1999 	tmp = readl(hc_mmio + 0x20);
2000 	tmp &= 0x1c1c1c1c;
2001 	tmp |= 0x03030303;
2002 	writel(tmp, hc_mmio + 0x20);
2003 }
2004 #undef ZERO
2005 
2006 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2007 			unsigned int n_hc)
2008 {
2009 	unsigned int hc, port;
2010 
2011 	for (hc = 0; hc < n_hc; hc++) {
2012 		for (port = 0; port < MV_PORTS_PER_HC; port++)
2013 			mv5_reset_hc_port(hpriv, mmio,
2014 					  (hc * MV_PORTS_PER_HC) + port);
2015 
2016 		mv5_reset_one_hc(hpriv, mmio, hc);
2017 	}
2018 
2019 	return 0;
2020 }
2021 
2022 #undef ZERO
2023 #define ZERO(reg) writel(0, mmio + (reg))
2024 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2025 {
2026 	struct mv_host_priv *hpriv = host->private_data;
2027 	u32 tmp;
2028 
2029 	tmp = readl(mmio + MV_PCI_MODE);
2030 	tmp &= 0xff00ffff;
2031 	writel(tmp, mmio + MV_PCI_MODE);
2032 
2033 	ZERO(MV_PCI_DISC_TIMER);
2034 	ZERO(MV_PCI_MSI_TRIGGER);
2035 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2036 	ZERO(HC_MAIN_IRQ_MASK_OFS);
2037 	ZERO(MV_PCI_SERR_MASK);
2038 	ZERO(hpriv->irq_cause_ofs);
2039 	ZERO(hpriv->irq_mask_ofs);
2040 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
2041 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2042 	ZERO(MV_PCI_ERR_ATTRIBUTE);
2043 	ZERO(MV_PCI_ERR_COMMAND);
2044 }
2045 #undef ZERO
2046 
2047 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2048 {
2049 	u32 tmp;
2050 
2051 	mv5_reset_flash(hpriv, mmio);
2052 
2053 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
2054 	tmp &= 0x3;
2055 	tmp |= (1 << 5) | (1 << 6);
2056 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
2057 }
2058 
2059 /**
2060  *      mv6_reset_hc - Perform the 6xxx global soft reset
2061  *      @mmio: base address of the HBA
2062  *
2063  *      This routine only applies to 6xxx parts.
2064  *
2065  *      LOCKING:
2066  *      Inherited from caller.
2067  */
2068 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2069 			unsigned int n_hc)
2070 {
2071 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2072 	int i, rc = 0;
2073 	u32 t;
2074 
2075 	/* Following procedure defined in PCI "main command and status
2076 	 * register" table.
2077 	 */
2078 	t = readl(reg);
2079 	writel(t | STOP_PCI_MASTER, reg);
2080 
2081 	for (i = 0; i < 1000; i++) {
2082 		udelay(1);
2083 		t = readl(reg);
2084 		if (PCI_MASTER_EMPTY & t)
2085 			break;
2086 	}
2087 	if (!(PCI_MASTER_EMPTY & t)) {
2088 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2089 		rc = 1;
2090 		goto done;
2091 	}
2092 
2093 	/* set reset */
2094 	i = 5;
2095 	do {
2096 		writel(t | GLOB_SFT_RST, reg);
2097 		t = readl(reg);
2098 		udelay(1);
2099 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
2100 
2101 	if (!(GLOB_SFT_RST & t)) {
2102 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2103 		rc = 1;
2104 		goto done;
2105 	}
2106 
2107 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2108 	i = 5;
2109 	do {
2110 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2111 		t = readl(reg);
2112 		udelay(1);
2113 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2114 
2115 	if (GLOB_SFT_RST & t) {
2116 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2117 		rc = 1;
2118 	}
2119 	/*
2120 	 * Temporary: wait 3 seconds before port-probing can happen,
2121 	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2122 	 * This can go away once hotplug is fully/correctly implemented.
2123 	 */
2124 	if (rc == 0)
2125 		msleep(3000);
2126 done:
2127 	return rc;
2128 }
2129 
2130 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2131 			   void __iomem *mmio)
2132 {
2133 	void __iomem *port_mmio;
2134 	u32 tmp;
2135 
2136 	tmp = readl(mmio + MV_RESET_CFG);
2137 	if ((tmp & (1 << 0)) == 0) {
2138 		hpriv->signal[idx].amps = 0x7 << 8;
2139 		hpriv->signal[idx].pre = 0x1 << 5;
2140 		return;
2141 	}
2142 
2143 	port_mmio = mv_port_base(mmio, idx);
2144 	tmp = readl(port_mmio + PHY_MODE2);
2145 
2146 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2147 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2148 }
2149 
2150 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2151 {
2152 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2153 }
2154 
2155 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2156 			   unsigned int port)
2157 {
2158 	void __iomem *port_mmio = mv_port_base(mmio, port);
2159 
2160 	u32 hp_flags = hpriv->hp_flags;
2161 	int fix_phy_mode2 =
2162 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2163 	int fix_phy_mode4 =
2164 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2165 	u32 m2, tmp;
2166 
2167 	if (fix_phy_mode2) {
2168 		m2 = readl(port_mmio + PHY_MODE2);
2169 		m2 &= ~(1 << 16);
2170 		m2 |= (1 << 31);
2171 		writel(m2, port_mmio + PHY_MODE2);
2172 
2173 		udelay(200);
2174 
2175 		m2 = readl(port_mmio + PHY_MODE2);
2176 		m2 &= ~((1 << 16) | (1 << 31));
2177 		writel(m2, port_mmio + PHY_MODE2);
2178 
2179 		udelay(200);
2180 	}
2181 
2182 	/* who knows what this magic does */
2183 	tmp = readl(port_mmio + PHY_MODE3);
2184 	tmp &= ~0x7F800000;
2185 	tmp |= 0x2A800000;
2186 	writel(tmp, port_mmio + PHY_MODE3);
2187 
2188 	if (fix_phy_mode4) {
2189 		u32 m4;
2190 
2191 		m4 = readl(port_mmio + PHY_MODE4);
2192 
2193 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2194 			tmp = readl(port_mmio + PHY_MODE3);
2195 
2196 		/* workaround for errata FEr SATA#10 (part 1) */
2197 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2198 
2199 		writel(m4, port_mmio + PHY_MODE4);
2200 
2201 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2202 			writel(tmp, port_mmio + PHY_MODE3);
2203 	}
2204 
2205 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2206 	m2 = readl(port_mmio + PHY_MODE2);
2207 
2208 	m2 &= ~MV_M2_PREAMP_MASK;
2209 	m2 |= hpriv->signal[port].amps;
2210 	m2 |= hpriv->signal[port].pre;
2211 	m2 &= ~(1 << 16);
2212 
2213 	/* according to mvSata 3.6.1, some IIE values are fixed */
2214 	if (IS_GEN_IIE(hpriv)) {
2215 		m2 &= ~0xC30FF01F;
2216 		m2 |= 0x0000900F;
2217 	}
2218 
2219 	writel(m2, port_mmio + PHY_MODE2);
2220 }
2221 
2222 /* TODO: use the generic LED interface to configure the SATA Presence */
2223 /* & Activity LEDs on the board */
2224 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2225 				      void __iomem *mmio)
2226 {
2227 	return;
2228 }
2229 
2230 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2231 			   void __iomem *mmio)
2232 {
2233 	void __iomem *port_mmio;
2234 	u32 tmp;
2235 
2236 	port_mmio = mv_port_base(mmio, idx);
2237 	tmp = readl(port_mmio + PHY_MODE2);
2238 
2239 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2240 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2241 }
2242 
2243 #undef ZERO
2244 #define ZERO(reg) writel(0, port_mmio + (reg))
2245 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2246 					void __iomem *mmio, unsigned int port)
2247 {
2248 	void __iomem *port_mmio = mv_port_base(mmio, port);
2249 
2250 	/*
2251 	 * The datasheet warns against setting ATA_RST when EDMA is active
2252 	 * (but doesn't say what the problem might be).  So we first try
2253 	 * to disable the EDMA engine before doing the ATA_RST operation.
2254 	 */
2255 	mv_reset_channel(hpriv, mmio, port);
2256 
2257 	ZERO(0x028);		/* command */
2258 	writel(0x101f, port_mmio + EDMA_CFG_OFS);
2259 	ZERO(0x004);		/* timer */
2260 	ZERO(0x008);		/* irq err cause */
2261 	ZERO(0x00c);		/* irq err mask */
2262 	ZERO(0x010);		/* rq bah */
2263 	ZERO(0x014);		/* rq inp */
2264 	ZERO(0x018);		/* rq outp */
2265 	ZERO(0x01c);		/* respq bah */
2266 	ZERO(0x024);		/* respq outp */
2267 	ZERO(0x020);		/* respq inp */
2268 	ZERO(0x02c);		/* test control */
2269 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2270 }
2271 
2272 #undef ZERO
2273 
2274 #define ZERO(reg) writel(0, hc_mmio + (reg))
2275 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2276 				       void __iomem *mmio)
2277 {
2278 	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2279 
2280 	ZERO(0x00c);
2281 	ZERO(0x010);
2282 	ZERO(0x014);
2283 
2284 }
2285 
2286 #undef ZERO
2287 
2288 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2289 				  void __iomem *mmio, unsigned int n_hc)
2290 {
2291 	unsigned int port;
2292 
2293 	for (port = 0; port < hpriv->n_ports; port++)
2294 		mv_soc_reset_hc_port(hpriv, mmio, port);
2295 
2296 	mv_soc_reset_one_hc(hpriv, mmio);
2297 
2298 	return 0;
2299 }
2300 
2301 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2302 				      void __iomem *mmio)
2303 {
2304 	return;
2305 }
2306 
2307 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2308 {
2309 	return;
2310 }
2311 
2312 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2313 {
2314 	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2315 
2316 	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
2317 	if (want_gen2i)
2318 		ifctl |= (1 << 7);		/* enable gen2i speed */
2319 	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2320 }
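
/*
 * In mv_setup_ifctl() above, bit 7 of SATA_INTERFACE_CFG selects Gen2i
 * (3.0 Gb/s) signalling; the remaining bits written come from the chip spec,
 * per the comment in the function.  mv_reset_channel() requests Gen2i on
 * every non-Gen-I channel reset, while mv_hardreset() clears it to force a
 * 1.5 Gb/s fallback when link bring-up keeps failing.
 */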
2321 
2322 /*
2323  * Caller must ensure that EDMA is not active,
2324  * by first doing mv_stop_edma() where needed.
2325  */
2326 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2327 			     unsigned int port_no)
2328 {
2329 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2330 
2331 	mv_stop_edma_engine(port_mmio);
2332 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2333 
2334 	if (!IS_GEN_I(hpriv)) {
2335 		/* Enable 3.0gb/s link speed */
2336 		mv_setup_ifctl(port_mmio, 1);
2337 	}
2338 	/*
2339 	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2340 	 * link, and physical layers.  It resets all SATA interface registers
2341 	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2342 	 */
2343 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2344 	udelay(25);	/* allow reset propagation */
2345 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2346 
2347 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2348 
2349 	if (IS_GEN_I(hpriv))
2350 		mdelay(1);
2351 }
2352 
2353 static void mv_pmp_select(struct ata_port *ap, int pmp)
2354 {
2355 	if (sata_pmp_supported(ap)) {
2356 		void __iomem *port_mmio = mv_ap_base(ap);
2357 		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2358 		int old = reg & 0xf;
2359 
2360 		if (old != pmp) {
2361 			reg = (reg & ~0xf) | pmp;
2362 			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2363 		}
2364 	}
2365 }
2366 
2367 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2368 				unsigned long deadline)
2369 {
2370 	mv_pmp_select(link->ap, sata_srst_pmp(link));
2371 	return sata_std_hardreset(link, class, deadline);
2372 }
2373 
2374 static int mv_softreset(struct ata_link *link, unsigned int *class,
2375 				unsigned long deadline)
2376 {
2377 	mv_pmp_select(link->ap, sata_srst_pmp(link));
2378 	return ata_sff_softreset(link, class, deadline);
2379 }
2380 
2381 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2382 			unsigned long deadline)
2383 {
2384 	struct ata_port *ap = link->ap;
2385 	struct mv_host_priv *hpriv = ap->host->private_data;
2386 	struct mv_port_priv *pp = ap->private_data;
2387 	void __iomem *mmio = hpriv->base;
2388 	int rc, attempts = 0, extra = 0;
2389 	u32 sstatus;
2390 	bool online;
2391 
2392 	mv_reset_channel(hpriv, mmio, ap->port_no);
2393 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2394 
2395 	/* Workaround for errata FEr SATA#10 (part 2) */
2396 	do {
2397 		const unsigned long *timing =
2398 				sata_ehc_deb_timing(&link->eh_context);
2399 
2400 		rc = sata_link_hardreset(link, timing, deadline + extra,
2401 					 &online, NULL);
2402 		if (rc)
2403 			return rc;
2404 		sata_scr_read(link, SCR_STATUS, &sstatus);
2405 		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2406 			/* Force 1.5gb/s link speed and try again */
2407 			mv_setup_ifctl(mv_ap_base(ap), 0);
2408 			if (time_after(jiffies + HZ, deadline))
2409 				extra = HZ; /* only extend it once, max */
2410 		}
2411 	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
2412 
2413 	return rc;
2414 }
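
/*
 * The retry loop in mv_hardreset() above decodes SStatus using the standard
 * SATA field layout (DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8).
 * It keeps resetting until the link is either absent (0x0) or fully
 * established at Gen1 (0x113) or Gen2 (0x123).  A value of 0x121 means a
 * device was detected but communication never came up at Gen2 speed, so on
 * non-Gen-I chips the interface is dropped to 1.5 Gb/s after five attempts
 * before retrying.
 */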
2415 
2416 static void mv_eh_freeze(struct ata_port *ap)
2417 {
2418 	struct mv_host_priv *hpriv = ap->host->private_data;
2419 	unsigned int shift, hardport, port = ap->port_no;
2420 	u32 main_mask;
2421 
2422 	/* FIXME: handle coalescing completion events properly */
2423 
2424 	mv_stop_edma(ap);
2425 	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2426 
2427 	/* disable assertion of portN err, done events */
2428 	main_mask = readl(hpriv->main_mask_reg_addr);
2429 	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2430 	writelfl(main_mask, hpriv->main_mask_reg_addr);
2431 }
2432 
2433 static void mv_eh_thaw(struct ata_port *ap)
2434 {
2435 	struct mv_host_priv *hpriv = ap->host->private_data;
2436 	unsigned int shift, hardport, port = ap->port_no;
2437 	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2438 	void __iomem *port_mmio = mv_ap_base(ap);
2439 	u32 main_mask, hc_irq_cause;
2440 
2441 	/* FIXME: handle coalescing completion events properly */
2442 
2443 	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2444 
2445 	/* clear EDMA errors on this port */
2446 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2447 
2448 	/* clear pending irq events */
2449 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2450 	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2451 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2452 
2453 	/* enable assertion of portN err, done events */
2454 	main_mask = readl(hpriv->main_mask_reg_addr);
2455 	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2456 	writelfl(main_mask, hpriv->main_mask_reg_addr);
2457 }
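
/*
 * mv_eh_freeze()/mv_eh_thaw() above work purely on interrupt masking:
 * freeze stops EDMA and clears the port's DONE_IRQ/ERR_IRQ bits in the main
 * mask register; thaw first clears any stale EDMA error and HC interrupt
 * cause bits for the port and then sets those same mask bits again so the
 * port can interrupt once more.
 */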
2458 
2459 /**
2460  *      mv_port_init - Perform some early initialization on a single port.
2461  *      @port: libata data structure storing shadow register addresses
2462  *      @port_mmio: base address of the port
2463  *
2464  *      Initialize shadow register mmio addresses, clear outstanding
2465  *      interrupts on the port, and unmask interrupts for the future
2466  *      start of the port.
2467  *
2468  *      LOCKING:
2469  *      Inherited from caller.
2470  */
2471 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2472 {
2473 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2474 	unsigned serr_ofs;
2475 
2476 	/* PIO related setup
2477 	 */
2478 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2479 	port->error_addr =
2480 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2481 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2482 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2483 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2484 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2485 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2486 	port->status_addr =
2487 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2488 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2489 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2490 
2491 	/* unused: */
2492 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2493 
2494 	/* Clear any currently outstanding port interrupt conditions */
2495 	serr_ofs = mv_scr_offset(SCR_ERROR);
2496 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2497 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2498 
2499 	/* unmask all non-transient EDMA error interrupts */
2500 	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2501 
2502 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2503 		readl(port_mmio + EDMA_CFG_OFS),
2504 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2505 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2506 }
2507 
2508 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2509 {
2510 	struct pci_dev *pdev = to_pci_dev(host->dev);
2511 	struct mv_host_priv *hpriv = host->private_data;
2512 	u32 hp_flags = hpriv->hp_flags;
2513 
2514 	switch (board_idx) {
2515 	case chip_5080:
2516 		hpriv->ops = &mv5xxx_ops;
2517 		hp_flags |= MV_HP_GEN_I;
2518 
2519 		switch (pdev->revision) {
2520 		case 0x1:
2521 			hp_flags |= MV_HP_ERRATA_50XXB0;
2522 			break;
2523 		case 0x3:
2524 			hp_flags |= MV_HP_ERRATA_50XXB2;
2525 			break;
2526 		default:
2527 			dev_printk(KERN_WARNING, &pdev->dev,
2528 			   "Applying 50XXB2 workarounds to unknown rev\n");
2529 			hp_flags |= MV_HP_ERRATA_50XXB2;
2530 			break;
2531 		}
2532 		break;
2533 
2534 	case chip_504x:
2535 	case chip_508x:
2536 		hpriv->ops = &mv5xxx_ops;
2537 		hp_flags |= MV_HP_GEN_I;
2538 
2539 		switch (pdev->revision) {
2540 		case 0x0:
2541 			hp_flags |= MV_HP_ERRATA_50XXB0;
2542 			break;
2543 		case 0x3:
2544 			hp_flags |= MV_HP_ERRATA_50XXB2;
2545 			break;
2546 		default:
2547 			dev_printk(KERN_WARNING, &pdev->dev,
2548 			   "Applying B2 workarounds to unknown rev\n");
2549 			hp_flags |= MV_HP_ERRATA_50XXB2;
2550 			break;
2551 		}
2552 		break;
2553 
2554 	case chip_604x:
2555 	case chip_608x:
2556 		hpriv->ops = &mv6xxx_ops;
2557 		hp_flags |= MV_HP_GEN_II;
2558 
2559 		switch (pdev->revision) {
2560 		case 0x7:
2561 			hp_flags |= MV_HP_ERRATA_60X1B2;
2562 			break;
2563 		case 0x9:
2564 			hp_flags |= MV_HP_ERRATA_60X1C0;
2565 			break;
2566 		default:
2567 			dev_printk(KERN_WARNING, &pdev->dev,
2568 				   "Applying B2 workarounds to unknown rev\n");
2569 			hp_flags |= MV_HP_ERRATA_60X1B2;
2570 			break;
2571 		}
2572 		break;
2573 
2574 	case chip_7042:
2575 		hp_flags |= MV_HP_PCIE;
2576 		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2577 		    (pdev->device == 0x2300 || pdev->device == 0x2310))
2578 		{
2579 			/*
2580 			 * Highpoint RocketRAID PCIe 23xx series cards:
2581 			 *
2582 			 * Unconfigured drives are treated as "Legacy"
2583 			 * by the BIOS, and it overwrites sector 8 with
2584 			 * a "Lgcy" metadata block prior to Linux boot.
2585 			 *
2586 			 * Configured drives (RAID or JBOD) leave sector 8
2587 			 * alone, but instead overwrite a high numbered
2588 			 * sector for the RAID metadata.  This sector can
2589 			 * be determined exactly, by truncating the physical
2590 			 * drive capacity to a nice even GB value.
2591 			 *
2592 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2593 			 *
2594 			 * Warn the user, lest they think we're just buggy.
2595 			 */
2596 			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2597 				" BIOS CORRUPTS DATA on all attached drives,"
2598 				" regardless of if/how they are configured."
2599 				" BEWARE!\n");
2600 			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2601 				" use sectors 8-9 on \"Legacy\" drives,"
2602 				" and avoid the final two gigabytes on"
2603 				" all RocketRAID BIOS initialized drives.\n");
2604 		}
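		/* chip_7042 falls through here: apart from the MV_HP_PCIE
		 * flag and the HighPoint warning above, it is handled
		 * exactly like chip_6042 (Gen IIE).
		 */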
2605 	case chip_6042:
2606 		hpriv->ops = &mv6xxx_ops;
2607 		hp_flags |= MV_HP_GEN_IIE;
2608 
2609 		switch (pdev->revision) {
2610 		case 0x0:
2611 			hp_flags |= MV_HP_ERRATA_XX42A0;
2612 			break;
2613 		case 0x1:
2614 			hp_flags |= MV_HP_ERRATA_60X1C0;
2615 			break;
2616 		default:
2617 			dev_printk(KERN_WARNING, &pdev->dev,
2618 			   "Applying 60X1C0 workarounds to unknown rev\n");
2619 			hp_flags |= MV_HP_ERRATA_60X1C0;
2620 			break;
2621 		}
2622 		break;
2623 	case chip_soc:
2624 		hpriv->ops = &mv_soc_ops;
2625 		hp_flags |= MV_HP_ERRATA_60X1C0;
2626 		break;
2627 
2628 	default:
2629 		dev_printk(KERN_ERR, host->dev,
2630 			   "BUG: invalid board index %u\n", board_idx);
2631 		return 1;
2632 	}
2633 
2634 	hpriv->hp_flags = hp_flags;
2635 	if (hp_flags & MV_HP_PCIE) {
2636 		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
2637 		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
2638 		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
2639 	} else {
2640 		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
2641 		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
2642 		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 /**
2649  *      mv_init_host - Perform some early initialization of the host.
2650  *	@host: ATA host to initialize
2651  *      @board_idx: controller index
2652  *
2653  *      If possible, do an early global reset of the host.  Then do
2654  *      our port init and clear/unmask all/relevant host interrupts.
2655  *
2656  *      LOCKING:
2657  *      Inherited from caller.
2658  */
2659 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2660 {
2661 	int rc = 0, n_hc, port, hc;
2662 	struct mv_host_priv *hpriv = host->private_data;
2663 	void __iomem *mmio = hpriv->base;
2664 
2665 	rc = mv_chip_id(host, board_idx);
2666 	if (rc)
2667 		goto done;
2668 
2669 	if (HAS_PCI(host)) {
2670 		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
2671 		hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
2672 	} else {
2673 		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
2674 		hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
2675 	}
2676 
2677 	/* global interrupt mask: 0 == mask everything */
2678 	writel(0, hpriv->main_mask_reg_addr);
2679 
2680 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2681 
2682 	for (port = 0; port < host->n_ports; port++)
2683 		hpriv->ops->read_preamp(hpriv, port, mmio);
2684 
2685 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2686 	if (rc)
2687 		goto done;
2688 
2689 	hpriv->ops->reset_flash(hpriv, mmio);
2690 	hpriv->ops->reset_bus(host, mmio);
2691 	hpriv->ops->enable_leds(hpriv, mmio);
2692 
2693 	for (port = 0; port < host->n_ports; port++) {
2694 		struct ata_port *ap = host->ports[port];
2695 		void __iomem *port_mmio = mv_port_base(mmio, port);
2696 
2697 		mv_port_init(&ap->ioaddr, port_mmio);
2698 
2699 #ifdef CONFIG_PCI
2700 		if (HAS_PCI(host)) {
2701 			unsigned int offset = port_mmio - mmio;
2702 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2703 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2704 		}
2705 #endif
2706 	}
2707 
2708 	for (hc = 0; hc < n_hc; hc++) {
2709 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2710 
2711 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2712 			"(before clear)=0x%08x\n", hc,
2713 			readl(hc_mmio + HC_CFG_OFS),
2714 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2715 
2716 		/* Clear any currently outstanding hc interrupt conditions */
2717 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2718 	}
2719 
2720 	if (HAS_PCI(host)) {
2721 		/* Clear any currently outstanding host interrupt conditions */
2722 		writelfl(0, mmio + hpriv->irq_cause_ofs);
2723 
2724 		/* and unmask interrupt generation for host regs */
2725 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2726 		if (IS_GEN_I(hpriv))
2727 			writelfl(~HC_MAIN_MASKED_IRQS_5,
2728 				 hpriv->main_mask_reg_addr);
2729 		else
2730 			writelfl(~HC_MAIN_MASKED_IRQS,
2731 				 hpriv->main_mask_reg_addr);
2732 
2733 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2734 			"PCI int cause/mask=0x%08x/0x%08x\n",
2735 			readl(hpriv->main_cause_reg_addr),
2736 			readl(hpriv->main_mask_reg_addr),
2737 			readl(mmio + hpriv->irq_cause_ofs),
2738 			readl(mmio + hpriv->irq_mask_ofs));
2739 	} else {
2740 		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2741 			 hpriv->main_mask_reg_addr);
2742 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2743 			readl(hpriv->main_cause_reg_addr),
2744 			readl(hpriv->main_mask_reg_addr));
2745 	}
2746 done:
2747 	return rc;
2748 }
2749 
2750 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2751 {
2752 	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2753 							     MV_CRQB_Q_SZ, 0);
2754 	if (!hpriv->crqb_pool)
2755 		return -ENOMEM;
2756 
2757 	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2758 							     MV_CRPB_Q_SZ, 0);
2759 	if (!hpriv->crpb_pool)
2760 		return -ENOMEM;
2761 
2762 	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2763 							     MV_SG_TBL_SZ, 0);
2764 	if (!hpriv->sg_tbl_pool)
2765 		return -ENOMEM;
2766 
2767 	return 0;
2768 }
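
/*
 * Each DMA pool above is created with its alignment equal to its element
 * size (MV_CRQB_Q_SZ, MV_CRPB_Q_SZ, MV_SG_TBL_SZ), so every command request
 * queue, response queue and SG table comes back naturally aligned to its own
 * size, presumably to satisfy the alignment that the EDMA queue base-address
 * registers expect.
 */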
2769 
2770 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2771 				 struct mbus_dram_target_info *dram)
2772 {
2773 	int i;
2774 
2775 	for (i = 0; i < 4; i++) {
2776 		writel(0, hpriv->base + WINDOW_CTRL(i));
2777 		writel(0, hpriv->base + WINDOW_BASE(i));
2778 	}
2779 
2780 	for (i = 0; i < dram->num_cs; i++) {
2781 		struct mbus_dram_window *cs = dram->cs + i;
2782 
2783 		writel(((cs->size - 1) & 0xffff0000) |
2784 			(cs->mbus_attr << 8) |
2785 			(dram->mbus_dram_target_id << 4) | 1,
2786 			hpriv->base + WINDOW_CTRL(i));
2787 		writel(cs->base, hpriv->base + WINDOW_BASE(i));
2788 	}
2789 }
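
/*
 * Layout of the window control value programmed above, as encoded by the
 * code: bits 31:16 hold the size mask (size - 1, 64 KB granularity),
 * bits 15:8 the mbus attribute, bits 7:4 the DRAM target id, and bit 0
 * enables the window.  One window is programmed per DRAM chip-select, after
 * all four windows are first disabled.
 */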
2790 
2791 /**
2792  *      mv_platform_probe - handle a positive probe of an SoC Marvell
2793  *      host
2794  *      @pdev: platform device found
2795  *
2796  *      LOCKING:
2797  *      Inherited from caller.
2798  */
2799 static int mv_platform_probe(struct platform_device *pdev)
2800 {
2801 	static int printed_version;
2802 	const struct mv_sata_platform_data *mv_platform_data;
2803 	const struct ata_port_info *ppi[] =
2804 	    { &mv_port_info[chip_soc], NULL };
2805 	struct ata_host *host;
2806 	struct mv_host_priv *hpriv;
2807 	struct resource *res;
2808 	int n_ports, rc;
2809 
2810 	if (!printed_version++)
2811 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2812 
2813 	/*
2814 	 * Simple resource validation ..
2815 	 */
2816 	if (unlikely(pdev->num_resources != 2)) {
2817 		dev_err(&pdev->dev, "invalid number of resources\n");
2818 		return -EINVAL;
2819 	}
2820 
2821 	/*
2822 	 * Get the register base first
2823 	 */
2824 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2825 	if (res == NULL)
2826 		return -EINVAL;
2827 
2828 	/* allocate host */
2829 	mv_platform_data = pdev->dev.platform_data;
2830 	n_ports = mv_platform_data->n_ports;
2831 
2832 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2833 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2834 
2835 	if (!host || !hpriv)
2836 		return -ENOMEM;
2837 	host->private_data = hpriv;
2838 	hpriv->n_ports = n_ports;
2839 
2840 	host->iomap = NULL;
2841 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
2842 				   res->end - res->start + 1);
	if (!hpriv->base)
		return -ENOMEM;
2843 	hpriv->base -= MV_SATAHC0_REG_BASE;
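	/*
	 * The subtraction above presumably reflects that the platform
	 * resource points at the SATAHC0 register block rather than the
	 * chip's SATA unit base; biasing the mapping back by
	 * MV_SATAHC0_REG_BASE lets the rest of the driver compute register
	 * offsets the same way it does for PCI chips.
	 */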
2844 
2845 	/*
2846 	 * (Re-)program MBUS remapping windows if we are asked to.
2847 	 */
2848 	if (mv_platform_data->dram != NULL)
2849 		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2850 
2851 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
2852 	if (rc)
2853 		return rc;
2854 
2855 	/* initialize adapter */
2856 	rc = mv_init_host(host, chip_soc);
2857 	if (rc)
2858 		return rc;
2859 
2860 	dev_printk(KERN_INFO, &pdev->dev,
2861 		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2862 		   host->n_ports);
2863 
2864 	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2865 				 IRQF_SHARED, &mv6_sht);
2866 }
2867 
2868 /*
2869  *
2870  *      mv_platform_remove - unplug a platform interface
2871  *      @pdev: platform device
2872  *
2873  *      A platform bus SATA device has been unplugged. Perform the needed
2874  *      cleanup. Also called on module unload for any active devices.
2875  */
2876 static int __devexit mv_platform_remove(struct platform_device *pdev)
2877 {
2878 	struct device *dev = &pdev->dev;
2879 	struct ata_host *host = dev_get_drvdata(dev);
2880 
2881 	ata_host_detach(host);
2882 	return 0;
2883 }
2884 
2885 static struct platform_driver mv_platform_driver = {
2886 	.probe			= mv_platform_probe,
2887 	.remove			= __devexit_p(mv_platform_remove),
2888 	.driver			= {
2889 				   .name = DRV_NAME,
2890 				   .owner = THIS_MODULE,
2891 				  },
2892 };
2893 
2894 
2895 #ifdef CONFIG_PCI
2896 static int mv_pci_init_one(struct pci_dev *pdev,
2897 			   const struct pci_device_id *ent);
2898 
2899 
2900 static struct pci_driver mv_pci_driver = {
2901 	.name			= DRV_NAME,
2902 	.id_table		= mv_pci_tbl,
2903 	.probe			= mv_pci_init_one,
2904 	.remove			= ata_pci_remove_one,
2905 };
2906 
2907 /*
2908  * module options
2909  */
2910 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2911 
2912 
2913 /* move to PCI layer or libata core? */
2914 static int pci_go_64(struct pci_dev *pdev)
2915 {
2916 	int rc;
2917 
2918 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2919 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2920 		if (rc) {
2921 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2922 			if (rc) {
2923 				dev_printk(KERN_ERR, &pdev->dev,
2924 					   "64-bit DMA enable failed\n");
2925 				return rc;
2926 			}
2927 		}
2928 	} else {
2929 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2930 		if (rc) {
2931 			dev_printk(KERN_ERR, &pdev->dev,
2932 				   "32-bit DMA enable failed\n");
2933 			return rc;
2934 		}
2935 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2936 		if (rc) {
2937 			dev_printk(KERN_ERR, &pdev->dev,
2938 				   "32-bit consistent DMA enable failed\n");
2939 			return rc;
2940 		}
2941 	}
2942 
2943 	return rc;
2944 }
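
/*
 * pci_go_64() above tries the widest DMA configuration first: a 64-bit
 * streaming mask, with the consistent (coherent) mask dropped back to
 * 32 bits if the 64-bit setting is refused.  If even the streaming 64-bit
 * mask is unavailable, both masks are set to 32 bits, and any failure there
 * is treated as fatal for the probe.
 */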
2945 
2946 /**
2947  *      mv_print_info - Dump key info to kernel log for perusal.
2948  *      @host: ATA host to print info about
2949  *
2950  *      FIXME: complete this.
2951  *
2952  *      LOCKING:
2953  *      Inherited from caller.
2954  */
2955 static void mv_print_info(struct ata_host *host)
2956 {
2957 	struct pci_dev *pdev = to_pci_dev(host->dev);
2958 	struct mv_host_priv *hpriv = host->private_data;
2959 	u8 scc;
2960 	const char *scc_s, *gen;
2961 
2962 	/* Read the PCI device class byte so we can report whether the
2963 	 * chip presents itself in SCSI or RAID mode.
2964 	 */
2965 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2966 	if (scc == 0)
2967 		scc_s = "SCSI";
2968 	else if (scc == 0x01)
2969 		scc_s = "RAID";
2970 	else
2971 		scc_s = "?";
2972 
2973 	if (IS_GEN_I(hpriv))
2974 		gen = "I";
2975 	else if (IS_GEN_II(hpriv))
2976 		gen = "II";
2977 	else if (IS_GEN_IIE(hpriv))
2978 		gen = "IIE";
2979 	else
2980 		gen = "?";
2981 
2982 	dev_printk(KERN_INFO, &pdev->dev,
2983 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2984 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2985 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2986 }
2987 
2988 /**
2989  *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
2990  *      @pdev: PCI device found
2991  *      @ent: PCI device ID entry for the matched host
2992  *
2993  *      LOCKING:
2994  *      Inherited from caller.
2995  */
2996 static int mv_pci_init_one(struct pci_dev *pdev,
2997 			   const struct pci_device_id *ent)
2998 {
2999 	static int printed_version;
3000 	unsigned int board_idx = (unsigned int)ent->driver_data;
3001 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3002 	struct ata_host *host;
3003 	struct mv_host_priv *hpriv;
3004 	int n_ports, rc;
3005 
3006 	if (!printed_version++)
3007 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3008 
3009 	/* allocate host */
3010 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3011 
3012 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3013 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3014 	if (!host || !hpriv)
3015 		return -ENOMEM;
3016 	host->private_data = hpriv;
3017 	hpriv->n_ports = n_ports;
3018 
3019 	/* acquire resources */
3020 	rc = pcim_enable_device(pdev);
3021 	if (rc)
3022 		return rc;
3023 
3024 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3025 	if (rc == -EBUSY)
3026 		pcim_pin_device(pdev);
3027 	if (rc)
3028 		return rc;
3029 	host->iomap = pcim_iomap_table(pdev);
3030 	hpriv->base = host->iomap[MV_PRIMARY_BAR];
3031 
3032 	rc = pci_go_64(pdev);
3033 	if (rc)
3034 		return rc;
3035 
3036 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
3037 	if (rc)
3038 		return rc;
3039 
3040 	/* initialize adapter */
3041 	rc = mv_init_host(host, board_idx);
3042 	if (rc)
3043 		return rc;
3044 
3045 	/* Enable interrupts */
3046 	if (msi && pci_enable_msi(pdev))
3047 		pci_intx(pdev, 1);
3048 
3049 	mv_dump_pci_cfg(pdev, 0x68);
3050 	mv_print_info(host);
3051 
3052 	pci_set_master(pdev);
3053 	pci_try_set_mwi(pdev);
3054 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3055 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3056 }
3057 #endif
3058 
3059 static int mv_platform_probe(struct platform_device *pdev);
3060 static int __devexit mv_platform_remove(struct platform_device *pdev);
3061 
3062 static int __init mv_init(void)
3063 {
3064 	int rc = -ENODEV;
3065 #ifdef CONFIG_PCI
3066 	rc = pci_register_driver(&mv_pci_driver);
3067 	if (rc < 0)
3068 		return rc;
3069 #endif
3070 	rc = platform_driver_register(&mv_platform_driver);
3071 
3072 #ifdef CONFIG_PCI
3073 	if (rc < 0)
3074 		pci_unregister_driver(&mv_pci_driver);
3075 #endif
3076 	return rc;
3077 }
3078 
3079 static void __exit mv_exit(void)
3080 {
3081 #ifdef CONFIG_PCI
3082 	pci_unregister_driver(&mv_pci_driver);
3083 #endif
3084 	platform_driver_unregister(&mv_platform_driver);
3085 }
3086 
3087 MODULE_AUTHOR("Brett Russ");
3088 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3089 MODULE_LICENSE("GPL");
3090 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3091 MODULE_VERSION(DRV_VERSION);
3092 MODULE_ALIAS("platform:" DRV_NAME);
3093 
3094 #ifdef CONFIG_PCI
3095 module_param(msi, int, 0444);
3096 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3097 #endif
3098 
3099 module_init(mv_init);
3100 module_exit(mv_exit);
3101