xref: /openbmc/linux/drivers/ata/sata_mv.c (revision fcfb1f77cea81f74d865b4d33f2e452ffa1973e8)
1 /*
2  * sata_mv.c - Marvell SATA support
3  *
4  * Copyright 2008: Marvell Corporation, all rights reserved.
5  * Copyright 2005: EMC Corporation, all rights reserved.
6  * Copyright 2005 Red Hat, Inc.  All rights reserved.
7  *
8  * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; version 2 of the License.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  *
23  */
24 
25 /*
26   sata_mv TODO list:
27 
28   1) Needs a full errata audit for all chipsets.  I implemented most
29   of the errata workarounds found in the Marvell vendor driver, but
30   I distinctly remember that a couple of workarounds (one related
31   to PCI-X) are still needed.
32 
33   2) Improve/fix IRQ and error handling sequences.
34 
35   3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 
37   4) Think about TCQ support here, and for libata in general
38   with controllers that support it via host-queuing hardware
39   (a software-only implementation could be a nightmare).
40 
41   5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 
43   6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
44 
45   7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
46 
47   8) Develop a low-power-consumption strategy, and implement it.
48 
49   9) [Experiment, low priority] See if ATAPI can be supported using
50   "unknown FIS" or "vendor-specific FIS" support, or something creative
51   like that.
52 
53   10) [Experiment, low priority] Investigate interrupt coalescing.
54   Quite often, especially with PCI Message Signalled Interrupts (MSI),
55   the overhead reduced by interrupt mitigation is not worth the
56   latency cost.
57 
58   11) [Experiment, Marvell value added] Is it possible to use target
59   mode to cross-connect two Linux boxes with Marvell cards?  If so,
60   creating LibATA target mode support would be very interesting.
61 
62   Target mode, for those without docs, is the ability to directly
63   connect two SATA controllers.
64 
65 */
66 
67 #include <linux/kernel.h>
68 #include <linux/module.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/blkdev.h>
72 #include <linux/delay.h>
73 #include <linux/interrupt.h>
74 #include <linux/dmapool.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <linux/platform_device.h>
78 #include <linux/ata_platform.h>
79 #include <linux/mbus.h>
80 #include <scsi/scsi_host.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_device.h>
83 #include <linux/libata.h>
84 
85 #define DRV_NAME	"sata_mv"
86 #define DRV_VERSION	"1.20"
87 
88 enum {
89 	/* BARs are enumerated in pci_resource_start() terms */
90 	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
91 	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
92 	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
93 
94 	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
95 	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
96 
97 	MV_PCI_REG_BASE		= 0,
98 	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
99 	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
100 	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
101 	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
102 	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
103 	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),
104 
105 	MV_SATAHC0_REG_BASE	= 0x20000,
106 	MV_FLASH_CTL		= 0x1046c,
107 	MV_GPIO_PORT_CTL	= 0x104f0,
108 	MV_RESET_CFG		= 0x180d8,
109 
110 	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
111 	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
112 	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
113 	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
114 
115 	MV_MAX_Q_DEPTH		= 32,
116 	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
117 
118 	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 	 * CRPB needs alignment on a 256B boundary. Size == 256B
120 	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 	 */
122 	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
123 	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
124 	MV_MAX_SG_CT		= 256,
125 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
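	/*
	 * Worked out from the constants above: 32 CRQBs * 32 B == 1 KB per
	 * request queue, 32 CRPBs * 8 B == 256 B per response queue, and
	 * 256 ePRDs * 16 B == 4 KB per SG table (one table per NCQ tag on
	 * Gen II/IIE; Gen I shares a single table).
	 */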
126 
127 	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
128 	MV_PORT_HC_SHIFT	= 2,
129 	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
130 	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
131 	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
132 
133 	/* Host Flags */
134 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
135 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
136 	/* SoC integrated controllers, no PCI interface */
137 	MV_FLAG_SOC		= (1 << 28),
138 
139 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
140 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 				  ATA_FLAG_PIO_POLLING,
142 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
143 
144 	CRQB_FLAG_READ		= (1 << 0),
145 	CRQB_TAG_SHIFT		= 1,
146 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
147 	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
148 	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
149 	CRQB_CMD_ADDR_SHIFT	= 8,
150 	CRQB_CMD_CS		= (0x2 << 11),
151 	CRQB_CMD_LAST		= (1 << 15),
152 
153 	CRPB_FLAG_STATUS_SHIFT	= 8,
154 	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
155 	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
156 
157 	EPRD_FLAG_END_OF_TBL	= (1 << 31),
158 
159 	/* PCI interface registers */
160 
161 	PCI_COMMAND_OFS		= 0xc00,
162 
163 	PCI_MAIN_CMD_STS_OFS	= 0xd30,
164 	STOP_PCI_MASTER		= (1 << 2),
165 	PCI_MASTER_EMPTY	= (1 << 3),
166 	GLOB_SFT_RST		= (1 << 4),
167 
168 	MV_PCI_MODE		= 0xd00,
169 	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
170 	MV_PCI_DISC_TIMER	= 0xd04,
171 	MV_PCI_MSI_TRIGGER	= 0xc38,
172 	MV_PCI_SERR_MASK	= 0xc28,
173 	MV_PCI_XBAR_TMOUT	= 0x1d04,
174 	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
175 	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
176 	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
177 	MV_PCI_ERR_COMMAND	= 0x1d50,
178 
179 	PCI_IRQ_CAUSE_OFS	= 0x1d58,
180 	PCI_IRQ_MASK_OFS	= 0x1d5c,
181 	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
182 
183 	PCIE_IRQ_CAUSE_OFS	= 0x1900,
184 	PCIE_IRQ_MASK_OFS	= 0x1910,
185 	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
186 
187 	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
188 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
189 	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
191 	ERR_IRQ			= (1 << 0),	/* shift by port # */
192 	DONE_IRQ		= (1 << 1),	/* shift by port # */
193 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
194 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
195 	PCI_ERR			= (1 << 18),
196 	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
197 	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
198 	PORTS_0_3_COAL_DONE	= (1 << 8),
199 	PORTS_4_7_COAL_DONE	= (1 << 17),
200 	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
201 	GPIO_INT		= (1 << 22),
202 	SELF_INT		= (1 << 23),
203 	TWSI_INT		= (1 << 24),
204 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
205 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
206 	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
207 	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
208 				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
209 				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
210 				   HC_MAIN_RSVD),
211 	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
212 				   HC_MAIN_RSVD_5),
213 	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
214 
215 	/* SATAHC registers */
216 	HC_CFG_OFS		= 0,
217 
218 	HC_IRQ_CAUSE_OFS	= 0x14,
219 	DMA_IRQ			= (1 << 0),	/* shift by port # */
220 	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
221 	DEV_IRQ			= (1 << 8),	/* shift by port # */
222 
223 	/* Shadow block registers */
224 	SHD_BLK_OFS		= 0x100,
225 	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
226 
227 	/* SATA registers */
228 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
229 	SATA_ACTIVE_OFS		= 0x350,
230 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
231 
232 	LTMODE_OFS		= 0x30c,
233 	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
234 
235 	PHY_MODE3		= 0x310,
236 	PHY_MODE4		= 0x314,
237 	PHY_MODE2		= 0x330,
238 	SATA_IFCTL_OFS		= 0x344,
239 	SATA_IFSTAT_OFS		= 0x34c,
240 	VENDOR_UNIQUE_FIS_OFS	= 0x35c,
241 
242 	FIS_CFG_OFS		= 0x360,
243 	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
244 
245 	MV5_PHY_MODE		= 0x74,
246 	MV5_LT_MODE		= 0x30,
247 	MV5_PHY_CTL		= 0x0C,
248 	SATA_INTERFACE_CFG	= 0x050,
249 
250 	MV_M2_PREAMP_MASK	= 0x7e0,
251 
252 	/* Port registers */
253 	EDMA_CFG_OFS		= 0,
254 	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
255 	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
256 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
257 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
258 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
259 	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
260 	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
261 
262 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
263 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
264 	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
265 	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
266 	EDMA_ERR_DEV		= (1 << 2),	/* device error */
267 	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
268 	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
269 	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
270 	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
271 	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
272 	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
273 	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
274 	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
275 	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
276 	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
277 	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
278 
279 	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
280 	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
281 	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
282 	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
283 	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
284 
285 	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
286 
287 	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
288 	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
289 	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
290 	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
291 	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
292 	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
293 
294 	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
295 
296 	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
297 	EDMA_ERR_OVERRUN_5	= (1 << 5),
298 	EDMA_ERR_UNDERRUN_5	= (1 << 6),
299 
300 	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
301 				  EDMA_ERR_LNK_CTRL_RX_1 |
302 				  EDMA_ERR_LNK_CTRL_RX_3 |
303 				  EDMA_ERR_LNK_CTRL_TX |
304 				 /* temporary, until we fix hotplug: */
305 				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
306 
307 	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
308 				  EDMA_ERR_PRD_PAR |
309 				  EDMA_ERR_DEV_DCON |
310 				  EDMA_ERR_DEV_CON |
311 				  EDMA_ERR_SERR |
312 				  EDMA_ERR_SELF_DIS |
313 				  EDMA_ERR_CRQB_PAR |
314 				  EDMA_ERR_CRPB_PAR |
315 				  EDMA_ERR_INTRL_PAR |
316 				  EDMA_ERR_IORDY |
317 				  EDMA_ERR_LNK_CTRL_RX_2 |
318 				  EDMA_ERR_LNK_DATA_RX |
319 				  EDMA_ERR_LNK_DATA_TX |
320 				  EDMA_ERR_TRANS_PROTO,
321 
322 	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
323 				  EDMA_ERR_PRD_PAR |
324 				  EDMA_ERR_DEV_DCON |
325 				  EDMA_ERR_DEV_CON |
326 				  EDMA_ERR_OVERRUN_5 |
327 				  EDMA_ERR_UNDERRUN_5 |
328 				  EDMA_ERR_SELF_DIS_5 |
329 				  EDMA_ERR_CRQB_PAR |
330 				  EDMA_ERR_CRPB_PAR |
331 				  EDMA_ERR_INTRL_PAR |
332 				  EDMA_ERR_IORDY,
333 
334 	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
335 	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
336 
337 	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
338 	EDMA_REQ_Q_PTR_SHIFT	= 5,
339 
340 	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
341 	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
342 	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
343 	EDMA_RSP_Q_PTR_SHIFT	= 3,
344 
345 	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
346 	EDMA_EN			= (1 << 0),	/* enable EDMA */
347 	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
348 	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
349 
350 	EDMA_IORDY_TMOUT	= 0x34,
351 	EDMA_ARB_CFG		= 0x38,
352 
353 	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */
354 
355 	/* Host private flags (hp_flags) */
356 	MV_HP_FLAG_MSI		= (1 << 0),
357 	MV_HP_ERRATA_50XXB0	= (1 << 1),
358 	MV_HP_ERRATA_50XXB2	= (1 << 2),
359 	MV_HP_ERRATA_60X1B2	= (1 << 3),
360 	MV_HP_ERRATA_60X1C0	= (1 << 4),
361 	MV_HP_ERRATA_XX42A0	= (1 << 5),
362 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
363 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
364 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
365 	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
366 
367 	/* Port private flags (pp_flags) */
368 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
369 	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
370 };
371 
372 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
373 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
374 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
375 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
376 
377 #define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
378 #define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
379 
380 enum {
381 	/* DMA boundary 0xffff is required by the s/g splitting
382 	 * we need on /length/ in mv_fill_sg().
383 	 */
384 	MV_DMA_BOUNDARY		= 0xffffU,
385 
386 	/* mask of register bits containing lower 32 bits
387 	 * of EDMA request queue DMA address
388 	 */
389 	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
390 
391 	/* ditto, for response queue */
392 	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
393 };
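/*
 * Illustrative example of the s/g splitting mentioned above: a segment
 * with DMA address 0x1ff00 and length 0x300 is emitted by mv_fill_sg()
 * as two ePRDs, one of 0x100 bytes (up to the 64K boundary at 0x20000)
 * and one of 0x200 bytes starting at 0x20000.
 */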
394 
395 enum chip_type {
396 	chip_504x,
397 	chip_508x,
398 	chip_5080,
399 	chip_604x,
400 	chip_608x,
401 	chip_6042,
402 	chip_7042,
403 	chip_soc,
404 };
405 
406 /* Command ReQuest Block: 32B */
407 struct mv_crqb {
408 	__le32			sg_addr;
409 	__le32			sg_addr_hi;
410 	__le16			ctrl_flags;
411 	__le16			ata_cmd[11];
412 };
413 
414 struct mv_crqb_iie {
415 	__le32			addr;
416 	__le32			addr_hi;
417 	__le32			flags;
418 	__le32			len;
419 	__le32			ata_cmd[4];
420 };
421 
422 /* Command ResPonse Block: 8B */
423 struct mv_crpb {
424 	__le16			id;
425 	__le16			flags;
426 	__le32			tmstmp;
427 };
428 
429 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
430 struct mv_sg {
431 	__le32			addr;
432 	__le32			flags_size;
433 	__le32			addr_hi;
434 	__le32			reserved;
435 };
436 
437 struct mv_port_priv {
438 	struct mv_crqb		*crqb;
439 	dma_addr_t		crqb_dma;
440 	struct mv_crpb		*crpb;
441 	dma_addr_t		crpb_dma;
442 	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
443 	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
444 
445 	unsigned int		req_idx;
446 	unsigned int		resp_idx;
447 
448 	u32			pp_flags;
449 };
450 
451 struct mv_port_signal {
452 	u32			amps;
453 	u32			pre;
454 };
455 
456 struct mv_host_priv {
457 	u32			hp_flags;
458 	struct mv_port_signal	signal[8];
459 	const struct mv_hw_ops	*ops;
460 	int			n_ports;
461 	void __iomem		*base;
462 	void __iomem		*main_cause_reg_addr;
463 	void __iomem		*main_mask_reg_addr;
464 	u32			irq_cause_ofs;
465 	u32			irq_mask_ofs;
466 	u32			unmask_all_irqs;
467 	/*
468 	 * These consistent DMA memory pools give us guaranteed
469 	 * alignment for hardware-accessed data structures,
470 	 * and less memory waste in accomplishing the alignment.
471 	 */
472 	struct dma_pool		*crqb_pool;
473 	struct dma_pool		*crpb_pool;
474 	struct dma_pool		*sg_tbl_pool;
475 };
476 
477 struct mv_hw_ops {
478 	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
479 			   unsigned int port);
480 	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
481 	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
482 			   void __iomem *mmio);
483 	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
484 			unsigned int n_hc);
485 	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
486 	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
487 };
488 
489 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
490 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
491 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
492 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
493 static int mv_port_start(struct ata_port *ap);
494 static void mv_port_stop(struct ata_port *ap);
495 static void mv_qc_prep(struct ata_queued_cmd *qc);
496 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
497 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
498 static int mv_hardreset(struct ata_link *link, unsigned int *class,
499 			unsigned long deadline);
500 static void mv_eh_freeze(struct ata_port *ap);
501 static void mv_eh_thaw(struct ata_port *ap);
502 static void mv6_dev_config(struct ata_device *dev);
503 
504 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
505 			   unsigned int port);
506 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
507 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
508 			   void __iomem *mmio);
509 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
510 			unsigned int n_hc);
511 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
512 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
513 
514 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
515 			   unsigned int port);
516 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
517 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
518 			   void __iomem *mmio);
519 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
520 			unsigned int n_hc);
521 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
522 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
523 				      void __iomem *mmio);
524 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
525 				      void __iomem *mmio);
526 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
527 				  void __iomem *mmio, unsigned int n_hc);
528 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
529 				      void __iomem *mmio);
530 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
531 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
532 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
533 			     unsigned int port_no);
534 static int mv_stop_edma(struct ata_port *ap);
535 static int mv_stop_edma_engine(void __iomem *port_mmio);
536 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
537 
538 static void mv_pmp_select(struct ata_port *ap, int pmp);
539 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
540 				unsigned long deadline);
541 static int  mv_softreset(struct ata_link *link, unsigned int *class,
542 				unsigned long deadline);
543 
544 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
545  * because we have to allow room for worst case splitting of
546  * PRDs for 64K boundaries in mv_fill_sg().
547  */
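/*
 * For example, with MV_MAX_SG_CT == 256 we expose 128 entries: even if
 * every segment crosses a 64K boundary and is split in two by
 * mv_fill_sg(), the resulting (up to) 256 ePRDs still fit in the table.
 */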
548 static struct scsi_host_template mv5_sht = {
549 	ATA_BASE_SHT(DRV_NAME),
550 	.sg_tablesize		= MV_MAX_SG_CT / 2,
551 	.dma_boundary		= MV_DMA_BOUNDARY,
552 };
553 
554 static struct scsi_host_template mv6_sht = {
555 	ATA_NCQ_SHT(DRV_NAME),
556 	.can_queue		= MV_MAX_Q_DEPTH - 1,
557 	.sg_tablesize		= MV_MAX_SG_CT / 2,
558 	.dma_boundary		= MV_DMA_BOUNDARY,
559 };
560 
561 static struct ata_port_operations mv5_ops = {
562 	.inherits		= &ata_sff_port_ops,
563 
564 	.qc_prep		= mv_qc_prep,
565 	.qc_issue		= mv_qc_issue,
566 
567 	.freeze			= mv_eh_freeze,
568 	.thaw			= mv_eh_thaw,
569 	.hardreset		= mv_hardreset,
570 	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
571 	.post_internal_cmd	= ATA_OP_NULL,
572 
573 	.scr_read		= mv5_scr_read,
574 	.scr_write		= mv5_scr_write,
575 
576 	.port_start		= mv_port_start,
577 	.port_stop		= mv_port_stop,
578 };
579 
580 static struct ata_port_operations mv6_ops = {
581 	.inherits		= &mv5_ops,
582 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
583 	.dev_config             = mv6_dev_config,
584 	.scr_read		= mv_scr_read,
585 	.scr_write		= mv_scr_write,
586 
587 	.pmp_hardreset		= mv_pmp_hardreset,
588 	.pmp_softreset		= mv_softreset,
589 	.softreset		= mv_softreset,
590 	.error_handler		= sata_pmp_error_handler,
591 };
592 
593 static struct ata_port_operations mv_iie_ops = {
594 	.inherits		= &mv6_ops,
595 	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
596 	.dev_config		= ATA_OP_NULL,
597 	.qc_prep		= mv_qc_prep_iie,
598 };
599 
600 static const struct ata_port_info mv_port_info[] = {
601 	{  /* chip_504x */
602 		.flags		= MV_COMMON_FLAGS,
603 		.pio_mask	= 0x1f,	/* pio0-4 */
604 		.udma_mask	= ATA_UDMA6,
605 		.port_ops	= &mv5_ops,
606 	},
607 	{  /* chip_508x */
608 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
609 		.pio_mask	= 0x1f,	/* pio0-4 */
610 		.udma_mask	= ATA_UDMA6,
611 		.port_ops	= &mv5_ops,
612 	},
613 	{  /* chip_5080 */
614 		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
615 		.pio_mask	= 0x1f,	/* pio0-4 */
616 		.udma_mask	= ATA_UDMA6,
617 		.port_ops	= &mv5_ops,
618 	},
619 	{  /* chip_604x */
620 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
621 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
622 				  ATA_FLAG_NCQ,
623 		.pio_mask	= 0x1f,	/* pio0-4 */
624 		.udma_mask	= ATA_UDMA6,
625 		.port_ops	= &mv6_ops,
626 	},
627 	{  /* chip_608x */
628 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
629 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
630 				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
631 		.pio_mask	= 0x1f,	/* pio0-4 */
632 		.udma_mask	= ATA_UDMA6,
633 		.port_ops	= &mv6_ops,
634 	},
635 	{  /* chip_6042 */
636 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
637 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
638 				  ATA_FLAG_NCQ,
639 		.pio_mask	= 0x1f,	/* pio0-4 */
640 		.udma_mask	= ATA_UDMA6,
641 		.port_ops	= &mv_iie_ops,
642 	},
643 	{  /* chip_7042 */
644 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
645 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
646 				  ATA_FLAG_NCQ,
647 		.pio_mask	= 0x1f,	/* pio0-4 */
648 		.udma_mask	= ATA_UDMA6,
649 		.port_ops	= &mv_iie_ops,
650 	},
651 	{  /* chip_soc */
652 		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
653 				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
654 				  ATA_FLAG_NCQ | MV_FLAG_SOC,
655 		.pio_mask	= 0x1f,	/* pio0-4 */
656 		.udma_mask	= ATA_UDMA6,
657 		.port_ops	= &mv_iie_ops,
658 	},
659 };
660 
661 static const struct pci_device_id mv_pci_tbl[] = {
662 	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
663 	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
664 	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
665 	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
666 	/* RocketRAID 1740/174x have different identifiers */
667 	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
668 	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
669 
670 	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
671 	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
672 	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
673 	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
674 	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
675 
676 	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
677 
678 	/* Adaptec 1430SA */
679 	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
680 
681 	/* Marvell 7042 support */
682 	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
683 
684 	/* Highpoint RocketRAID PCIe series */
685 	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
686 	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
687 
688 	{ }			/* terminate list */
689 };
690 
691 static const struct mv_hw_ops mv5xxx_ops = {
692 	.phy_errata		= mv5_phy_errata,
693 	.enable_leds		= mv5_enable_leds,
694 	.read_preamp		= mv5_read_preamp,
695 	.reset_hc		= mv5_reset_hc,
696 	.reset_flash		= mv5_reset_flash,
697 	.reset_bus		= mv5_reset_bus,
698 };
699 
700 static const struct mv_hw_ops mv6xxx_ops = {
701 	.phy_errata		= mv6_phy_errata,
702 	.enable_leds		= mv6_enable_leds,
703 	.read_preamp		= mv6_read_preamp,
704 	.reset_hc		= mv6_reset_hc,
705 	.reset_flash		= mv6_reset_flash,
706 	.reset_bus		= mv_reset_pci_bus,
707 };
708 
709 static const struct mv_hw_ops mv_soc_ops = {
710 	.phy_errata		= mv6_phy_errata,
711 	.enable_leds		= mv_soc_enable_leds,
712 	.read_preamp		= mv_soc_read_preamp,
713 	.reset_hc		= mv_soc_reset_hc,
714 	.reset_flash		= mv_soc_reset_flash,
715 	.reset_bus		= mv_soc_reset_bus,
716 };
717 
718 /*
719  * Functions
720  */
721 
722 static inline void writelfl(unsigned long data, void __iomem *addr)
723 {
724 	writel(data, addr);
725 	(void) readl(addr);	/* flush to avoid PCI posted write */
726 }
727 
728 static inline unsigned int mv_hc_from_port(unsigned int port)
729 {
730 	return port >> MV_PORT_HC_SHIFT;
731 }
732 
733 static inline unsigned int mv_hardport_from_port(unsigned int port)
734 {
735 	return port & MV_PORT_MASK;
736 }
737 
738 /*
739  * Consolidate some rather tricky bit shift calculations.
740  * This is hot-path stuff, so not a function.
741  * Simple code, with two return values, so macro rather than inline.
742  *
743  * port is the sole input, in range 0..7.
744  * shift is one output, for use with the main_cause and main_mask registers.
745  * hardport is the other output, in range 0..3
746  *
747  * Note that port and hardport may be the same variable in some cases.
748  */
749 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
750 {								\
751 	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
752 	hardport = mv_hardport_from_port(port);			\
753 	shift   += hardport * 2;				\
754 }
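/*
 * Worked example: for port == 5, mv_hc_from_port(5) == 1, so shift
 * starts at 1 * HC_SHIFT == 9; hardport == (5 & MV_PORT_MASK) == 1,
 * giving shift == 9 + 2 == 11.  (ERR_IRQ << 11) and (DONE_IRQ << 11)
 * are then port 5's error/done bits in the main cause/mask registers.
 */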
755 
756 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
757 {
758 	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
759 }
760 
761 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
762 						 unsigned int port)
763 {
764 	return mv_hc_base(base, mv_hc_from_port(port));
765 }
766 
767 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
768 {
769 	return  mv_hc_base_from_port(base, port) +
770 		MV_SATAHC_ARBTR_REG_SZ +
771 		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
772 }
773 
774 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
775 {
776 	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
777 	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
778 
779 	return hc_mmio + ofs;
780 }
781 
782 static inline void __iomem *mv_host_base(struct ata_host *host)
783 {
784 	struct mv_host_priv *hpriv = host->private_data;
785 	return hpriv->base;
786 }
787 
788 static inline void __iomem *mv_ap_base(struct ata_port *ap)
789 {
790 	return mv_port_base(mv_host_base(ap->host), ap->port_no);
791 }
792 
793 static inline int mv_get_hc_count(unsigned long port_flags)
794 {
795 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
796 }
797 
798 static void mv_set_edma_ptrs(void __iomem *port_mmio,
799 			     struct mv_host_priv *hpriv,
800 			     struct mv_port_priv *pp)
801 {
802 	u32 index;
803 
804 	/*
805 	 * initialize request queue
806 	 */
807 	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
808 	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
809 
810 	WARN_ON(pp->crqb_dma & 0x3ff);
811 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
812 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
813 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
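	/*
	 * For example, with req_idx == 2 the index field above is
	 * 2 << EDMA_REQ_Q_PTR_SHIFT == 0x40, so IN_PTR ends up holding the
	 * request queue base (low bits) together with the producer index.
	 */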
814 
815 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
816 		writelfl((pp->crqb_dma & 0xffffffff) | index,
817 			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
818 	else
819 		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
820 
821 	/*
822 	 * initialize response queue
823 	 */
824 	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
825 	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
826 
827 	WARN_ON(pp->crpb_dma & 0xff);
828 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
829 
830 	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
831 		writelfl((pp->crpb_dma & 0xffffffff) | index,
832 			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
833 	else
834 		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
835 
836 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
837 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
838 }
839 
840 /**
841  *      mv_start_dma - Enable eDMA engine
842  *      @ap: ATA channel to manipulate
843  *      @port_mmio: port base address
844  *      @pp: port private data
845  *
846  *      Stops and restarts eDMA as needed so its mode matches @protocol.
847  *
848  *      LOCKING:
849  *      Inherited from caller.
850  */
851 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
852 			 struct mv_port_priv *pp, u8 protocol)
853 {
854 	int want_ncq = (protocol == ATA_PROT_NCQ);
855 
856 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
857 		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
858 		if (want_ncq != using_ncq)
859 			mv_stop_edma(ap);
860 	}
861 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
862 		struct mv_host_priv *hpriv = ap->host->private_data;
863 		int hardport = mv_hardport_from_port(ap->port_no);
864 		void __iomem *hc_mmio = mv_hc_base_from_port(
865 					mv_host_base(ap->host), hardport);
866 		u32 hc_irq_cause, ipending;
867 
868 		/* clear EDMA event indicators, if any */
869 		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
870 
871 		/* clear EDMA interrupt indicator, if any */
872 		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
873 		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
874 		if (hc_irq_cause & ipending) {
875 			writelfl(hc_irq_cause & ~ipending,
876 				 hc_mmio + HC_IRQ_CAUSE_OFS);
877 		}
878 
879 		mv_edma_cfg(ap, want_ncq);
880 
881 		/* clear FIS IRQ Cause */
882 		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
883 
884 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
885 
886 		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
887 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
888 	}
889 }
890 
891 /**
892  *      mv_stop_edma_engine - Disable eDMA engine
893  *      @port_mmio: io base address
894  *
895  *      LOCKING:
896  *      Inherited from caller.
897  */
898 static int mv_stop_edma_engine(void __iomem *port_mmio)
899 {
900 	int i;
901 
902 	/* Disable eDMA.  The disable bit auto clears. */
903 	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
904 
905 	/* Wait for the chip to confirm eDMA is off. */
906 	for (i = 10000; i > 0; i--) {
907 		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
908 		if (!(reg & EDMA_EN))
909 			return 0;
910 		udelay(10);
911 	}
912 	return -EIO;
913 }
914 
915 static int mv_stop_edma(struct ata_port *ap)
916 {
917 	void __iomem *port_mmio = mv_ap_base(ap);
918 	struct mv_port_priv *pp = ap->private_data;
919 
920 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
921 		return 0;
922 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
923 	if (mv_stop_edma_engine(port_mmio)) {
924 		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
925 		return -EIO;
926 	}
927 	return 0;
928 }
929 
930 #ifdef ATA_DEBUG
931 static void mv_dump_mem(void __iomem *start, unsigned bytes)
932 {
933 	int b, w;
934 	for (b = 0; b < bytes; ) {
935 		DPRINTK("%p: ", start + b);
936 		for (w = 0; b < bytes && w < 4; w++) {
937 			printk("%08x ", readl(start + b));
938 			b += sizeof(u32);
939 		}
940 		printk("\n");
941 	}
942 }
943 #endif
944 
945 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
946 {
947 #ifdef ATA_DEBUG
948 	int b, w;
949 	u32 dw;
950 	for (b = 0; b < bytes; ) {
951 		DPRINTK("%02x: ", b);
952 		for (w = 0; b < bytes && w < 4; w++) {
953 			(void) pci_read_config_dword(pdev, b, &dw);
954 			printk("%08x ", dw);
955 			b += sizeof(u32);
956 		}
957 		printk("\n");
958 	}
959 #endif
960 }
961 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
962 			     struct pci_dev *pdev)
963 {
964 #ifdef ATA_DEBUG
965 	void __iomem *hc_base = mv_hc_base(mmio_base,
966 					   port >> MV_PORT_HC_SHIFT);
967 	void __iomem *port_base;
968 	int start_port, num_ports, p, start_hc, num_hcs, hc;
969 
970 	if (0 > port) {
971 		start_hc = start_port = 0;
972 		num_ports = 8;		/* should be benign for 4 port devs */
973 		num_hcs = 2;
974 	} else {
975 		start_hc = port >> MV_PORT_HC_SHIFT;
976 		start_port = port;
977 		num_ports = num_hcs = 1;
978 	}
979 	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
980 		num_ports > 1 ? num_ports - 1 : start_port);
981 
982 	if (NULL != pdev) {
983 		DPRINTK("PCI config space regs:\n");
984 		mv_dump_pci_cfg(pdev, 0x68);
985 	}
986 	DPRINTK("PCI regs:\n");
987 	mv_dump_mem(mmio_base+0xc00, 0x3c);
988 	mv_dump_mem(mmio_base+0xd00, 0x34);
989 	mv_dump_mem(mmio_base+0xf00, 0x4);
990 	mv_dump_mem(mmio_base+0x1d00, 0x6c);
991 	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
992 		hc_base = mv_hc_base(mmio_base, hc);
993 		DPRINTK("HC regs (HC %i):\n", hc);
994 		mv_dump_mem(hc_base, 0x1c);
995 	}
996 	for (p = start_port; p < start_port + num_ports; p++) {
997 		port_base = mv_port_base(mmio_base, p);
998 		DPRINTK("EDMA regs (port %i):\n", p);
999 		mv_dump_mem(port_base, 0x54);
1000 		DPRINTK("SATA regs (port %i):\n", p);
1001 		mv_dump_mem(port_base+0x300, 0x60);
1002 	}
1003 #endif
1004 }
1005 
1006 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1007 {
1008 	unsigned int ofs;
1009 
1010 	switch (sc_reg_in) {
1011 	case SCR_STATUS:
1012 	case SCR_CONTROL:
1013 	case SCR_ERROR:
1014 		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1015 		break;
1016 	case SCR_ACTIVE:
1017 		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
1018 		break;
1019 	default:
1020 		ofs = 0xffffffffU;
1021 		break;
1022 	}
1023 	return ofs;
1024 }
1025 
1026 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1027 {
1028 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1029 
1030 	if (ofs != 0xffffffffU) {
1031 		*val = readl(mv_ap_base(ap) + ofs);
1032 		return 0;
1033 	} else
1034 		return -EINVAL;
1035 }
1036 
1037 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1038 {
1039 	unsigned int ofs = mv_scr_offset(sc_reg_in);
1040 
1041 	if (ofs != 0xffffffffU) {
1042 		writelfl(val, mv_ap_base(ap) + ofs);
1043 		return 0;
1044 	} else
1045 		return -EINVAL;
1046 }
1047 
1048 static void mv6_dev_config(struct ata_device *adev)
1049 {
1050 	/*
1051 	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1052 	 *
1053 	 * Gen-II does not support NCQ over a port multiplier
1054 	 *  (no FIS-based switching).
1055 	 *
1056 	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1057 	 * See mv_qc_prep() for more info.
1058 	 */
1059 	if (adev->flags & ATA_DFLAG_NCQ) {
1060 		if (sata_pmp_attached(adev->link->ap)) {
1061 			adev->flags &= ~ATA_DFLAG_NCQ;
1062 			ata_dev_printk(adev, KERN_INFO,
1063 				"NCQ disabled for command-based switching\n");
1064 		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
1065 			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
1066 			ata_dev_printk(adev, KERN_INFO,
1067 				"max_sectors limited to %u for NCQ\n",
1068 				adev->max_sectors);
1069 		}
1070 	}
1071 }
1072 
1073 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1074 {
1075 	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1076 	/*
1077 	 * Various bit settings required for operation
1078 	 * in FIS-based switching (fbs) mode on GenIIe:
1079 	 */
1080 	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
1081 	old_ltmode = readl(port_mmio + LTMODE_OFS);
1082 	if (enable_fbs) {
1083 		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
1084 		new_ltmode = old_ltmode |  LTMODE_BIT8;
1085 	} else { /* disable fbs */
1086 		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
1087 		new_ltmode = old_ltmode & ~LTMODE_BIT8;
1088 	}
1089 	if (new_fcfg != old_fcfg)
1090 		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1091 	if (new_ltmode != old_ltmode)
1092 		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1093 }
1094 
1095 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1096 {
1097 	u32 cfg;
1098 	struct mv_port_priv *pp    = ap->private_data;
1099 	struct mv_host_priv *hpriv = ap->host->private_data;
1100 	void __iomem *port_mmio    = mv_ap_base(ap);
1101 
1102 	/* set up non-NCQ EDMA configuration */
1103 	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1104 
1105 	if (IS_GEN_I(hpriv))
1106 		cfg |= (1 << 8);	/* enab config burst size mask */
1107 
1108 	else if (IS_GEN_II(hpriv))
1109 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1110 
1111 	else if (IS_GEN_IIE(hpriv)) {
1112 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1113 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
1114 		cfg |= (1 << 18);	/* enab early completion */
1115 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
1116 
1117 		if (want_ncq && sata_pmp_attached(ap)) {
1118 			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1119 			mv_config_fbs(port_mmio, 1);
1120 		} else {
1121 			mv_config_fbs(port_mmio, 0);
1122 		}
1123 	}
1124 
1125 	if (want_ncq) {
1126 		cfg |= EDMA_CFG_NCQ;
1127 		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1128 	} else
1129 		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1130 
1131 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1132 }
1133 
1134 static void mv_port_free_dma_mem(struct ata_port *ap)
1135 {
1136 	struct mv_host_priv *hpriv = ap->host->private_data;
1137 	struct mv_port_priv *pp = ap->private_data;
1138 	int tag;
1139 
1140 	if (pp->crqb) {
1141 		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1142 		pp->crqb = NULL;
1143 	}
1144 	if (pp->crpb) {
1145 		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1146 		pp->crpb = NULL;
1147 	}
1148 	/*
1149 	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1150 	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1151 	 */
1152 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1153 		if (pp->sg_tbl[tag]) {
1154 			if (tag == 0 || !IS_GEN_I(hpriv))
1155 				dma_pool_free(hpriv->sg_tbl_pool,
1156 					      pp->sg_tbl[tag],
1157 					      pp->sg_tbl_dma[tag]);
1158 			pp->sg_tbl[tag] = NULL;
1159 		}
1160 	}
1161 }
1162 
1163 /**
1164  *      mv_port_start - Port specific init/start routine.
1165  *      @ap: ATA channel to manipulate
1166  *
1167  *      Allocate and point to DMA memory, init port private memory,
1168  *      zero indices.
1169  *
1170  *      LOCKING:
1171  *      Inherited from caller.
1172  */
1173 static int mv_port_start(struct ata_port *ap)
1174 {
1175 	struct device *dev = ap->host->dev;
1176 	struct mv_host_priv *hpriv = ap->host->private_data;
1177 	struct mv_port_priv *pp;
1178 	int tag;
1179 
1180 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1181 	if (!pp)
1182 		return -ENOMEM;
1183 	ap->private_data = pp;
1184 
1185 	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1186 	if (!pp->crqb)
1187 		return -ENOMEM;
1188 	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1189 
1190 	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1191 	if (!pp->crpb)
1192 		goto out_port_free_dma_mem;
1193 	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1194 
1195 	/*
1196 	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1197 	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1198 	 */
1199 	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1200 		if (tag == 0 || !IS_GEN_I(hpriv)) {
1201 			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1202 					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1203 			if (!pp->sg_tbl[tag])
1204 				goto out_port_free_dma_mem;
1205 		} else {
1206 			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1207 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1208 		}
1209 	}
1210 	return 0;
1211 
1212 out_port_free_dma_mem:
1213 	mv_port_free_dma_mem(ap);
1214 	return -ENOMEM;
1215 }
1216 
1217 /**
1218  *      mv_port_stop - Port specific cleanup/stop routine.
1219  *      @ap: ATA channel to manipulate
1220  *
1221  *      Stop DMA, cleanup port memory.
1222  *
1223  *      LOCKING:
1224  *      This routine uses the host lock to protect the DMA stop.
1225  */
1226 static void mv_port_stop(struct ata_port *ap)
1227 {
1228 	mv_stop_edma(ap);
1229 	mv_port_free_dma_mem(ap);
1230 }
1231 
1232 /**
1233  *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1234  *      @qc: queued command whose SG list to source from
1235  *
1236  *      Populate the SG list and mark the last entry.
1237  *
1238  *      LOCKING:
1239  *      Inherited from caller.
1240  */
1241 static void mv_fill_sg(struct ata_queued_cmd *qc)
1242 {
1243 	struct mv_port_priv *pp = qc->ap->private_data;
1244 	struct scatterlist *sg;
1245 	struct mv_sg *mv_sg, *last_sg = NULL;
1246 	unsigned int si;
1247 
1248 	mv_sg = pp->sg_tbl[qc->tag];
1249 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1250 		dma_addr_t addr = sg_dma_address(sg);
1251 		u32 sg_len = sg_dma_len(sg);
1252 
1253 		while (sg_len) {
1254 			u32 offset = addr & 0xffff;
1255 			u32 len = sg_len;
1256 
1257 			if ((offset + sg_len > 0x10000))
1258 				len = 0x10000 - offset;
1259 
1260 			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1261 			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1262 			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1263 
1264 			sg_len -= len;
1265 			addr += len;
1266 
1267 			last_sg = mv_sg;
1268 			mv_sg++;
1269 		}
1270 	}
1271 
1272 	if (likely(last_sg))
1273 		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1274 }
1275 
1276 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1277 {
1278 	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1279 		(last ? CRQB_CMD_LAST : 0);
1280 	*cmdw = cpu_to_le16(tmp);
1281 }
1282 
1283 /**
1284  *      mv_qc_prep - Host specific command preparation.
1285  *      @qc: queued command to prepare
1286  *
1287  *      This routine simply redirects to the general purpose routine
1288  *      if command is not DMA.  Else, it handles prep of the CRQB
1289  *      (command request block), does some sanity checking, and calls
1290  *      the SG load routine.
1291  *
1292  *      LOCKING:
1293  *      Inherited from caller.
1294  */
1295 static void mv_qc_prep(struct ata_queued_cmd *qc)
1296 {
1297 	struct ata_port *ap = qc->ap;
1298 	struct mv_port_priv *pp = ap->private_data;
1299 	__le16 *cw;
1300 	struct ata_taskfile *tf;
1301 	u16 flags = 0;
1302 	unsigned in_index;
1303 
1304 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1305 	    (qc->tf.protocol != ATA_PROT_NCQ))
1306 		return;
1307 
1308 	/* Fill in command request block
1309 	 */
1310 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1311 		flags |= CRQB_FLAG_READ;
1312 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1313 	flags |= qc->tag << CRQB_TAG_SHIFT;
1314 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1315 
1316 	/* get current queue index from software */
1317 	in_index = pp->req_idx;
1318 
1319 	pp->crqb[in_index].sg_addr =
1320 		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1321 	pp->crqb[in_index].sg_addr_hi =
1322 		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1323 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1324 
1325 	cw = &pp->crqb[in_index].ata_cmd[0];
1326 	tf = &qc->tf;
1327 
1328 	/* Sadly, the CRQB cannot accommodate all registers--there is
1329 	 * room for only 11 of them...so we must pick and choose required
1330 	 * registers based on the command.  So, we drop feature and
1331 	 * hob_feature for [RW] DMA commands, but they are needed for
1332 	 * NCQ.  NCQ will drop hob_nsect.
1333 	 */
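	/* For example, LBA48 READ/WRITE DMA EXT needs hob_nsect (the high
	 * byte of the sector count), whereas FPDMA (NCQ) instead needs
	 * feature/hob_feature, which carry the NCQ sector count; the
	 * registers packed below, after the switch, are common to both.
	 */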
1334 	switch (tf->command) {
1335 	case ATA_CMD_READ:
1336 	case ATA_CMD_READ_EXT:
1337 	case ATA_CMD_WRITE:
1338 	case ATA_CMD_WRITE_EXT:
1339 	case ATA_CMD_WRITE_FUA_EXT:
1340 		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1341 		break;
1342 	case ATA_CMD_FPDMA_READ:
1343 	case ATA_CMD_FPDMA_WRITE:
1344 		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1345 		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1346 		break;
1347 	default:
1348 		/* The only other commands EDMA supports in non-queued and
1349 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1350 		 * of which are defined/used by Linux.  If we get here, this
1351 		 * driver needs work.
1352 		 *
1353 		 * FIXME: modify libata to give qc_prep a return value and
1354 		 * return error here.
1355 		 */
1356 		BUG_ON(tf->command);
1357 		break;
1358 	}
1359 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1360 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1361 	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1362 	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1363 	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1364 	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1365 	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1366 	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1367 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
1368 
1369 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1370 		return;
1371 	mv_fill_sg(qc);
1372 }
1373 
1374 /**
1375  *      mv_qc_prep_iie - Host specific command preparation.
1376  *      @qc: queued command to prepare
1377  *
1378  *      This routine simply redirects to the general purpose routine
1379  *      if command is not DMA.  Else, it handles prep of the CRQB
1380  *      (command request block), does some sanity checking, and calls
1381  *      the SG load routine.
1382  *
1383  *      LOCKING:
1384  *      Inherited from caller.
1385  */
1386 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1387 {
1388 	struct ata_port *ap = qc->ap;
1389 	struct mv_port_priv *pp = ap->private_data;
1390 	struct mv_crqb_iie *crqb;
1391 	struct ata_taskfile *tf;
1392 	unsigned in_index;
1393 	u32 flags = 0;
1394 
1395 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1396 	    (qc->tf.protocol != ATA_PROT_NCQ))
1397 		return;
1398 
1399 	/* Fill in Gen IIE command request block */
1400 	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1401 		flags |= CRQB_FLAG_READ;
1402 
1403 	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1404 	flags |= qc->tag << CRQB_TAG_SHIFT;
1405 	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1406 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1407 
1408 	/* get current queue index from software */
1409 	in_index = pp->req_idx;
1410 
1411 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1412 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1413 	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1414 	crqb->flags = cpu_to_le32(flags);
1415 
1416 	tf = &qc->tf;
1417 	crqb->ata_cmd[0] = cpu_to_le32(
1418 			(tf->command << 16) |
1419 			(tf->feature << 24)
1420 		);
1421 	crqb->ata_cmd[1] = cpu_to_le32(
1422 			(tf->lbal << 0) |
1423 			(tf->lbam << 8) |
1424 			(tf->lbah << 16) |
1425 			(tf->device << 24)
1426 		);
1427 	crqb->ata_cmd[2] = cpu_to_le32(
1428 			(tf->hob_lbal << 0) |
1429 			(tf->hob_lbam << 8) |
1430 			(tf->hob_lbah << 16) |
1431 			(tf->hob_feature << 24)
1432 		);
1433 	crqb->ata_cmd[3] = cpu_to_le32(
1434 			(tf->nsect << 0) |
1435 			(tf->hob_nsect << 8)
1436 		);
1437 
1438 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1439 		return;
1440 	mv_fill_sg(qc);
1441 }
1442 
1443 /**
1444  *      mv_qc_issue - Initiate a command to the host
1445  *      @qc: queued command to start
1446  *
1447  *      This routine simply redirects to the general purpose routine
1448  *      if command is not DMA.  Else, it sanity checks our local
1449  *      caches of the request producer/consumer indices then enables
1450  *      DMA and bumps the request producer index.
1451  *
1452  *      LOCKING:
1453  *      Inherited from caller.
1454  */
1455 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1456 {
1457 	struct ata_port *ap = qc->ap;
1458 	void __iomem *port_mmio = mv_ap_base(ap);
1459 	struct mv_port_priv *pp = ap->private_data;
1460 	u32 in_index;
1461 
1462 	if ((qc->tf.protocol != ATA_PROT_DMA) &&
1463 	    (qc->tf.protocol != ATA_PROT_NCQ)) {
1464 		/*
1465 		 * We're about to send a non-EDMA capable command to the
1466 		 * port.  Turn off EDMA so there won't be problems accessing
1467 		 * the shadow block and other registers.
1468 		 */
1469 		mv_stop_edma(ap);
1470 		mv_pmp_select(ap, qc->dev->link->pmp);
1471 		return ata_sff_qc_issue(qc);
1472 	}
1473 
1474 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1475 
1476 	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1477 	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
1478 
1479 	/* and write the request in pointer to kick the EDMA to life */
1480 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1481 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1482 
1483 	return 0;
1484 }
1485 
1486 /**
1487  *      mv_err_intr - Handle error interrupts on the port
1488  *      @ap: ATA channel to manipulate
1489  *      @qc: affected queued command, or NULL if none is active
1490  *
1491  *      In most cases, just clear the interrupt and move on.  However,
1492  *      some cases require an eDMA reset, which also performs a COMRESET.
1493  *      The SERR case requires a clear of pending errors in the SATA
1494  *      SERROR register.  Finally, if the port disabled DMA,
1495  *      update our cached copy to match.
1496  *
1497  *      LOCKING:
1498  *      Inherited from caller.
1499  */
1500 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1501 {
1502 	void __iomem *port_mmio = mv_ap_base(ap);
1503 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
1504 	struct mv_port_priv *pp = ap->private_data;
1505 	struct mv_host_priv *hpriv = ap->host->private_data;
1506 	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1507 	unsigned int action = 0, err_mask = 0;
1508 	struct ata_eh_info *ehi = &ap->link.eh_info;
1509 
1510 	ata_ehi_clear_desc(ehi);
1511 
1512 	if (!edma_enabled) {
1513 		/* just a guess: do we need to do this? should we
1514 		 * expand this, and do it in all cases?
1515 		 */
1516 		sata_scr_read(&ap->link, SCR_ERROR, &serr);
1517 		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1518 	}
1519 
1520 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1521 
1522 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
1523 
1524 	/*
1525 	 * All generations share these EDMA error cause bits:
1526 	 */
1527 	if (edma_err_cause & EDMA_ERR_DEV)
1528 		err_mask |= AC_ERR_DEV;
1529 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1530 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1531 			EDMA_ERR_INTRL_PAR)) {
1532 		err_mask |= AC_ERR_ATA_BUS;
1533 		action |= ATA_EH_RESET;
1534 		ata_ehi_push_desc(ehi, "parity error");
1535 	}
1536 	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1537 		ata_ehi_hotplugged(ehi);
1538 		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1539 			"dev disconnect" : "dev connect");
1540 		action |= ATA_EH_RESET;
1541 	}
1542 
1543 	/*
1544 	 * Gen-I has a different SELF_DIS bit,
1545 	 * different FREEZE bits, and no SERR bit:
1546 	 */
1547 	if (IS_GEN_I(hpriv)) {
1548 		eh_freeze_mask = EDMA_EH_FREEZE_5;
1549 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1550 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1551 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1552 		}
1553 	} else {
1554 		eh_freeze_mask = EDMA_EH_FREEZE;
1555 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1556 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1557 			ata_ehi_push_desc(ehi, "EDMA self-disable");
1558 		}
1559 		if (edma_err_cause & EDMA_ERR_SERR) {
1560 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
1561 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1562 			err_mask = AC_ERR_ATA_BUS;
1563 			action |= ATA_EH_RESET;
1564 		}
1565 	}
1566 
1567 	/* Clear EDMA now that SERR cleanup done */
1568 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1569 
1570 	if (!err_mask) {
1571 		err_mask = AC_ERR_OTHER;
1572 		action |= ATA_EH_RESET;
1573 	}
1574 
1575 	ehi->serror |= serr;
1576 	ehi->action |= action;
1577 
1578 	if (qc)
1579 		qc->err_mask |= err_mask;
1580 	else
1581 		ehi->err_mask |= err_mask;
1582 
1583 	if (edma_err_cause & eh_freeze_mask)
1584 		ata_port_freeze(ap);
1585 	else
1586 		ata_port_abort(ap);
1587 }
1588 
1589 static void mv_intr_pio(struct ata_port *ap)
1590 {
1591 	struct ata_queued_cmd *qc;
1592 	u8 ata_status;
1593 
1594 	/* ignore spurious intr if drive still BUSY */
1595 	ata_status = readb(ap->ioaddr.status_addr);
1596 	if (unlikely(ata_status & ATA_BUSY))
1597 		return;
1598 
1599 	/* get active ATA command */
1600 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1601 	if (unlikely(!qc))			/* no active tag */
1602 		return;
1603 	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1604 		return;
1605 
1606 	/* and finally, complete the ATA command */
1607 	qc->err_mask |= ac_err_mask(ata_status);
1608 	ata_qc_complete(qc);
1609 }
1610 
1611 static void mv_process_crpb_response(struct ata_port *ap,
1612 		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1613 {
1614 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1615 
1616 	if (qc) {
1617 		u8 ata_status;
1618 		u16 edma_status = le16_to_cpu(response->flags);
1619 		/*
1620 		 * edma_status from a response queue entry:
1621 		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
1622 		 *   MSB is saved ATA status from command completion.
1623 		 */
1624 		if (!ncq_enabled) {
1625 			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
1626 			if (err_cause) {
1627 				/*
1628 				 * Error will be seen/handled by mv_err_intr().
1629 				 * So do nothing at all here.
1630 				 */
1631 				return;
1632 			}
1633 		}
1634 		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
1635 		qc->err_mask |= ac_err_mask(ata_status);
1636 		ata_qc_complete(qc);
1637 	} else {
1638 		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
1639 				__func__, tag);
1640 	}
1641 }
1642 
1643 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
1644 {
1645 	void __iomem *port_mmio = mv_ap_base(ap);
1646 	struct mv_host_priv *hpriv = ap->host->private_data;
1647 	u32 in_index;
1648 	bool work_done = false;
1649 	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
1650 
1651 	/* Get the hardware queue position index */
1652 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1653 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1654 
1655 	/* Process new responses since the last time we looked */
1656 	while (in_index != pp->resp_idx) {
1657 		unsigned int tag;
1658 		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
1659 
1660 		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
1661 
1662 		if (IS_GEN_I(hpriv)) {
1663 			/* 50xx: no NCQ, only one command active at a time */
1664 			tag = ap->link.active_tag;
1665 		} else {
1666 			/* Gen II/IIE: get command tag from CRPB entry */
1667 			tag = le16_to_cpu(response->id) & 0x1f;
1668 		}
1669 		mv_process_crpb_response(ap, response, tag, ncq_enabled);
1670 		work_done = true;
1671 	}
1672 
1673 	/* Update the software queue position index in hardware */
1674 	if (work_done)
1675 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1676 			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
1677 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1678 }
1679 
1680 /**
1681  *      mv_host_intr - Handle all interrupts on the given host controller
1682  *      @host: host specific structure
1683  *      @relevant: port error bits relevant to this host controller
1684  *      @hc: which host controller we're to look at
1685  *
1686  *      Read then write clear the HC interrupt status then walk each
1687  *      port connected to the HC and see if it needs servicing.  Port
1688  *      success ints are reported in the HC interrupt status reg, the
1689  *      port error ints are reported in the higher level main
1690  *      interrupt status register and thus are passed in via the
1691  *      'relevant' argument.
1692  *
1693  *      LOCKING:
1694  *      Inherited from caller.
1695  */
1696 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1697 {
1698 	struct mv_host_priv *hpriv = host->private_data;
1699 	void __iomem *mmio = hpriv->base;
1700 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1701 	u32 hc_irq_cause;
1702 	int port, port0, last_port;
1703 
1704 	if (hc == 0)
1705 		port0 = 0;
1706 	else
1707 		port0 = MV_PORTS_PER_HC;
1708 
1709 	if (HAS_PCI(host))
1710 		last_port = port0 + MV_PORTS_PER_HC;
1711 	else
1712 		last_port = port0 + hpriv->n_ports;
1713 	/* we'll need the HC success int register in most cases */
1714 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1715 	if (!hc_irq_cause)
1716 		return;
1717 
1718 	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1719 
1720 	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1721 		hc, relevant, hc_irq_cause);
1722 
1723 	for (port = port0; port < last_port; port++) {
1724 		struct ata_port *ap = host->ports[port];
1725 		struct mv_port_priv *pp;
1726 		int have_err_bits, hardport, shift;
1727 
1728 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1729 			continue;
1730 
1731 		pp = ap->private_data;
1732 
1733 		shift = port << 1;		/* (port * 2) */
1734 		if (port >= MV_PORTS_PER_HC)
1735 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
1736 
1737 		have_err_bits = ((ERR_IRQ << shift) & relevant);
1738 
1739 		if (unlikely(have_err_bits)) {
1740 			struct ata_queued_cmd *qc;
1741 
1742 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1743 			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1744 				continue;
1745 
1746 			mv_err_intr(ap, qc);
1747 			continue;
1748 		}
1749 
1750 		hardport = mv_hardport_from_port(port); /* range 0..3 */
1751 
1752 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1753 			if ((DMA_IRQ << hardport) & hc_irq_cause)
1754 				mv_process_crpb_entries(ap, pp);
1755 		} else {
1756 			if ((DEV_IRQ << hardport) & hc_irq_cause)
1757 				mv_intr_pio(ap);
1758 		}
1759 	}
1760 	VPRINTK("EXIT\n");
1761 }
1762 
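/**
 *      mv_pci_error - Handle a PCI/PCIe error interrupt
 *      @host: host to inspect
 *      @mmio: base address of the HBA
 *
 *      Log and clear the PCI error cause register, then mark every
 *      online port for reset and freeze it so that libata error
 *      handling takes over.
 *
 *      LOCKING:
 *      Inherited from caller.
 */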
1763 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1764 {
1765 	struct mv_host_priv *hpriv = host->private_data;
1766 	struct ata_port *ap;
1767 	struct ata_queued_cmd *qc;
1768 	struct ata_eh_info *ehi;
1769 	unsigned int i, err_mask, printed = 0;
1770 	u32 err_cause;
1771 
1772 	err_cause = readl(mmio + hpriv->irq_cause_ofs);
1773 
1774 	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1775 		   err_cause);
1776 
1777 	DPRINTK("All regs @ PCI error\n");
1778 	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1779 
1780 	writelfl(0, mmio + hpriv->irq_cause_ofs);
1781 
1782 	for (i = 0; i < host->n_ports; i++) {
1783 		ap = host->ports[i];
1784 		if (!ata_link_offline(&ap->link)) {
1785 			ehi = &ap->link.eh_info;
1786 			ata_ehi_clear_desc(ehi);
1787 			if (!printed++)
1788 				ata_ehi_push_desc(ehi,
1789 					"PCI err cause 0x%08x", err_cause);
1790 			err_mask = AC_ERR_HOST_BUS;
1791 			ehi->action = ATA_EH_RESET;
1792 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1793 			if (qc)
1794 				qc->err_mask |= err_mask;
1795 			else
1796 				ehi->err_mask |= err_mask;
1797 
1798 			ata_port_freeze(ap);
1799 		}
1800 	}
1801 }
1802 
1803 /**
1804  *      mv_interrupt - Main interrupt event handler
1805  *      @irq: unused
1806  *      @dev_instance: private data; in this case the host structure
1807  *
1808  *      Read the read only register to determine if any host
1809  *      controllers have pending interrupts.  If so, call lower level
1810  *      routine to handle.  Also check for PCI errors which are only
1811  *      reported here.
1812  *
1813  *      LOCKING:
1814  *      This routine holds the host lock while processing pending
1815  *      interrupts.
1816  */
1817 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1818 {
1819 	struct ata_host *host = dev_instance;
1820 	struct mv_host_priv *hpriv = host->private_data;
1821 	unsigned int hc, handled = 0, n_hcs;
1822 	void __iomem *mmio = hpriv->base;
1823 	u32 main_cause, main_mask;
1824 
1825 	spin_lock(&host->lock);
1826 	main_cause = readl(hpriv->main_cause_reg_addr);
1827 	main_mask  = readl(hpriv->main_mask_reg_addr);
1828 	/*
1829 	 * Deal with cases where we either have nothing pending, or have read
1830 	 * a bogus register value which can indicate HW removal or PCI fault.
1831 	 */
1832 	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
1833 		goto out_unlock;
1834 
1835 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
1836 
1837 	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
1838 		mv_pci_error(host, mmio);
1839 		handled = 1;
1840 		goto out_unlock;	/* skip all other HC irq handling */
1841 	}
1842 
1843 	for (hc = 0; hc < n_hcs; hc++) {
1844 		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1845 		if (relevant) {
1846 			mv_host_intr(host, relevant, hc);
1847 			handled = 1;
1848 		}
1849 	}
1850 
1851 out_unlock:
1852 	spin_unlock(&host->lock);
1853 	return IRQ_RETVAL(handled);
1854 }
1855 
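/*
 * SCR (SStatus/SError/SControl) access for Gen-I (50xx) chips:
 * these parts expose the SCR block at mv5_phy_base() rather than at
 * the per-port offsets returned by mv_scr_offset(), hence the
 * dedicated offset helper and read/write routines below.
 */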
1856 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1857 {
1858 	unsigned int ofs;
1859 
1860 	switch (sc_reg_in) {
1861 	case SCR_STATUS:
1862 	case SCR_ERROR:
1863 	case SCR_CONTROL:
1864 		ofs = sc_reg_in * sizeof(u32);
1865 		break;
1866 	default:
1867 		ofs = 0xffffffffU;
1868 		break;
1869 	}
1870 	return ofs;
1871 }
1872 
1873 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1874 {
1875 	struct mv_host_priv *hpriv = ap->host->private_data;
1876 	void __iomem *mmio = hpriv->base;
1877 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1878 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1879 
1880 	if (ofs != 0xffffffffU) {
1881 		*val = readl(addr + ofs);
1882 		return 0;
1883 	} else
1884 		return -EINVAL;
1885 }
1886 
1887 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1888 {
1889 	struct mv_host_priv *hpriv = ap->host->private_data;
1890 	void __iomem *mmio = hpriv->base;
1891 	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1892 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
1893 
1894 	if (ofs != 0xffffffffU) {
1895 		writelfl(val, addr + ofs);
1896 		return 0;
1897 	} else
1898 		return -EINVAL;
1899 }
1900 
1901 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1902 {
1903 	struct pci_dev *pdev = to_pci_dev(host->dev);
1904 	int early_5080;
1905 
1906 	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1907 
1908 	if (!early_5080) {
1909 		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1910 		tmp |= (1 << 0);
1911 		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1912 	}
1913 
1914 	mv_reset_pci_bus(host, mmio);
1915 }
1916 
1917 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1918 {
1919 	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1920 }
1921 
1922 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1923 			   void __iomem *mmio)
1924 {
1925 	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1926 	u32 tmp;
1927 
1928 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1929 
1930 	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
1931 	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
1932 }
1933 
1934 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1935 {
1936 	u32 tmp;
1937 
1938 	writel(0, mmio + MV_GPIO_PORT_CTL);
1939 
1940 	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1941 
1942 	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1943 	tmp |= ~(1 << 0);
1944 	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1945 }
1946 
1947 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 			   unsigned int port)
1949 {
1950 	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1951 	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1952 	u32 tmp;
1953 	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1954 
1955 	if (fix_apm_sq) {
1956 		tmp = readl(phy_mmio + MV5_LT_MODE);
1957 		tmp |= (1 << 19);
1958 		writel(tmp, phy_mmio + MV5_LT_MODE);
1959 
1960 		tmp = readl(phy_mmio + MV5_PHY_CTL);
1961 		tmp &= ~0x3;
1962 		tmp |= 0x1;
1963 		writel(tmp, phy_mmio + MV5_PHY_CTL);
1964 	}
1965 
1966 	tmp = readl(phy_mmio + MV5_PHY_MODE);
1967 	tmp &= ~mask;
1968 	tmp |= hpriv->signal[port].pre;
1969 	tmp |= hpriv->signal[port].amps;
1970 	writel(tmp, phy_mmio + MV5_PHY_MODE);
1971 }
1972 
1973 
1974 #undef ZERO
1975 #define ZERO(reg) writel(0, port_mmio + (reg))
1976 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1977 			     unsigned int port)
1978 {
1979 	void __iomem *port_mmio = mv_port_base(mmio, port);
1980 
1981 	/*
1982 	 * The datasheet warns against setting ATA_RST when EDMA is active
1983 	 * (but doesn't say what the problem might be).  So we first try
1984 	 * to disable the EDMA engine before doing the ATA_RST operation.
1985 	 */
1986 	mv_reset_channel(hpriv, mmio, port);
1987 
1988 	ZERO(0x028);	/* command */
1989 	writel(0x11f, port_mmio + EDMA_CFG_OFS);
1990 	ZERO(0x004);	/* timer */
1991 	ZERO(0x008);	/* irq err cause */
1992 	ZERO(0x00c);	/* irq err mask */
1993 	ZERO(0x010);	/* rq bah */
1994 	ZERO(0x014);	/* rq inp */
1995 	ZERO(0x018);	/* rq outp */
1996 	ZERO(0x01c);	/* respq bah */
1997 	ZERO(0x024);	/* respq outp */
1998 	ZERO(0x020);	/* respq inp */
1999 	ZERO(0x02c);	/* test control */
2000 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2001 }
2002 #undef ZERO
2003 
2004 #define ZERO(reg) writel(0, hc_mmio + (reg))
2005 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2006 			unsigned int hc)
2007 {
2008 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2009 	u32 tmp;
2010 
2011 	ZERO(0x00c);
2012 	ZERO(0x010);
2013 	ZERO(0x014);
2014 	ZERO(0x018);
2015 
2016 	tmp = readl(hc_mmio + 0x20);
2017 	tmp &= 0x1c1c1c1c;
2018 	tmp |= 0x03030303;
2019 	writel(tmp, hc_mmio + 0x20);
2020 }
2021 #undef ZERO
2022 
2023 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2024 			unsigned int n_hc)
2025 {
2026 	unsigned int hc, port;
2027 
2028 	for (hc = 0; hc < n_hc; hc++) {
2029 		for (port = 0; port < MV_PORTS_PER_HC; port++)
2030 			mv5_reset_hc_port(hpriv, mmio,
2031 					  (hc * MV_PORTS_PER_HC) + port);
2032 
2033 		mv5_reset_one_hc(hpriv, mmio, hc);
2034 	}
2035 
2036 	return 0;
2037 }
2038 
2039 #undef ZERO
2040 #define ZERO(reg) writel(0, mmio + (reg))
2041 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2042 {
2043 	struct mv_host_priv *hpriv = host->private_data;
2044 	u32 tmp;
2045 
2046 	tmp = readl(mmio + MV_PCI_MODE);
2047 	tmp &= 0xff00ffff;
2048 	writel(tmp, mmio + MV_PCI_MODE);
2049 
2050 	ZERO(MV_PCI_DISC_TIMER);
2051 	ZERO(MV_PCI_MSI_TRIGGER);
2052 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2053 	ZERO(HC_MAIN_IRQ_MASK_OFS);
2054 	ZERO(MV_PCI_SERR_MASK);
2055 	ZERO(hpriv->irq_cause_ofs);
2056 	ZERO(hpriv->irq_mask_ofs);
2057 	ZERO(MV_PCI_ERR_LOW_ADDRESS);
2058 	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2059 	ZERO(MV_PCI_ERR_ATTRIBUTE);
2060 	ZERO(MV_PCI_ERR_COMMAND);
2061 }
2062 #undef ZERO
2063 
2064 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2065 {
2066 	u32 tmp;
2067 
2068 	mv5_reset_flash(hpriv, mmio);
2069 
2070 	tmp = readl(mmio + MV_GPIO_PORT_CTL);
2071 	tmp &= 0x3;
2072 	tmp |= (1 << 5) | (1 << 6);
2073 	writel(tmp, mmio + MV_GPIO_PORT_CTL);
2074 }
2075 
2076 /**
2077  *      mv6_reset_hc - Perform the 6xxx global soft reset
2078  *      @hpriv: host private data (not used by this routine)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers on this HBA (not used by this routine)
2079  *
2080  *      This routine only applies to 6xxx parts.
2081  *
2082  *      LOCKING:
2083  *      Inherited from caller.
2084  */
2085 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2086 			unsigned int n_hc)
2087 {
2088 	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2089 	int i, rc = 0;
2090 	u32 t;
2091 
2092 	/* Follow the procedure defined in the PCI "main command and status
2093 	 * register" table.
2094 	 */
2095 	t = readl(reg);
2096 	writel(t | STOP_PCI_MASTER, reg);
2097 
2098 	for (i = 0; i < 1000; i++) {
2099 		udelay(1);
2100 		t = readl(reg);
2101 		if (PCI_MASTER_EMPTY & t)
2102 			break;
2103 	}
2104 	if (!(PCI_MASTER_EMPTY & t)) {
2105 		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2106 		rc = 1;
2107 		goto done;
2108 	}
2109 
2110 	/* set reset */
2111 	i = 5;
2112 	do {
2113 		writel(t | GLOB_SFT_RST, reg);
2114 		t = readl(reg);
2115 		udelay(1);
2116 	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
2117 
2118 	if (!(GLOB_SFT_RST & t)) {
2119 		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2120 		rc = 1;
2121 		goto done;
2122 	}
2123 
2124 	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
2125 	i = 5;
2126 	do {
2127 		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2128 		t = readl(reg);
2129 		udelay(1);
2130 	} while ((GLOB_SFT_RST & t) && (i-- > 0));
2131 
2132 	if (GLOB_SFT_RST & t) {
2133 		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2134 		rc = 1;
2135 	}
2136 	/*
2137 	 * Temporary: wait 3 seconds before port-probing can happen,
2138 	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2139 	 * This can go away once hotplug is fully/correctly implemented.
2140 	 */
2141 	if (rc == 0)
2142 		msleep(3000);
2143 done:
2144 	return rc;
2145 }
2146 
2147 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2148 			   void __iomem *mmio)
2149 {
2150 	void __iomem *port_mmio;
2151 	u32 tmp;
2152 
2153 	tmp = readl(mmio + MV_RESET_CFG);
2154 	if ((tmp & (1 << 0)) == 0) {
2155 		hpriv->signal[idx].amps = 0x7 << 8;
2156 		hpriv->signal[idx].pre = 0x1 << 5;
2157 		return;
2158 	}
2159 
2160 	port_mmio = mv_port_base(mmio, idx);
2161 	tmp = readl(port_mmio + PHY_MODE2);
2162 
2163 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2164 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2165 }
2166 
2167 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2168 {
2169 	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2170 }
2171 
2172 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2173 			   unsigned int port)
2174 {
2175 	void __iomem *port_mmio = mv_port_base(mmio, port);
2176 
2177 	u32 hp_flags = hpriv->hp_flags;
2178 	int fix_phy_mode2 =
2179 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2180 	int fix_phy_mode4 =
2181 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2182 	u32 m2, tmp;
2183 
2184 	if (fix_phy_mode2) {
2185 		m2 = readl(port_mmio + PHY_MODE2);
2186 		m2 &= ~(1 << 16);
2187 		m2 |= (1 << 31);
2188 		writel(m2, port_mmio + PHY_MODE2);
2189 
2190 		udelay(200);
2191 
2192 		m2 = readl(port_mmio + PHY_MODE2);
2193 		m2 &= ~((1 << 16) | (1 << 31));
2194 		writel(m2, port_mmio + PHY_MODE2);
2195 
2196 		udelay(200);
2197 	}
2198 
2199 	/* who knows what this magic does */
2200 	tmp = readl(port_mmio + PHY_MODE3);
2201 	tmp &= ~0x7F800000;
2202 	tmp |= 0x2A800000;
2203 	writel(tmp, port_mmio + PHY_MODE3);
2204 
2205 	if (fix_phy_mode4) {
2206 		u32 m4;
2207 
2208 		m4 = readl(port_mmio + PHY_MODE4);
2209 
2210 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2211 			tmp = readl(port_mmio + PHY_MODE3);
2212 
2213 		/* workaround for errata FEr SATA#10 (part 1) */
2214 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
2215 
2216 		writel(m4, port_mmio + PHY_MODE4);
2217 
2218 		if (hp_flags & MV_HP_ERRATA_60X1B2)
2219 			writel(tmp, port_mmio + PHY_MODE3);
2220 	}
2221 
2222 	/* Revert values of pre-emphasis and signal amps to the saved ones */
2223 	m2 = readl(port_mmio + PHY_MODE2);
2224 
2225 	m2 &= ~MV_M2_PREAMP_MASK;
2226 	m2 |= hpriv->signal[port].amps;
2227 	m2 |= hpriv->signal[port].pre;
2228 	m2 &= ~(1 << 16);
2229 
2230 	/* according to mvSata 3.6.1, some IIE values are fixed */
2231 	if (IS_GEN_IIE(hpriv)) {
2232 		m2 &= ~0xC30FF01F;
2233 		m2 |= 0x0000900F;
2234 	}
2235 
2236 	writel(m2, port_mmio + PHY_MODE2);
2237 }
2238 
2239 /* TODO: use the generic LED interface to configure the SATA Presence */
2240 /* & Activity LEDs on the board */
2241 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2242 				      void __iomem *mmio)
2243 {
2244 	return;
2245 }
2246 
2247 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2248 			   void __iomem *mmio)
2249 {
2250 	void __iomem *port_mmio;
2251 	u32 tmp;
2252 
2253 	port_mmio = mv_port_base(mmio, idx);
2254 	tmp = readl(port_mmio + PHY_MODE2);
2255 
2256 	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
2257 	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
2258 }
2259 
2260 #undef ZERO
2261 #define ZERO(reg) writel(0, port_mmio + (reg))
2262 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2263 					void __iomem *mmio, unsigned int port)
2264 {
2265 	void __iomem *port_mmio = mv_port_base(mmio, port);
2266 
2267 	/*
2268 	 * The datasheet warns against setting ATA_RST when EDMA is active
2269 	 * (but doesn't say what the problem might be).  So we first try
2270 	 * to disable the EDMA engine before doing the ATA_RST operation.
2271 	 */
2272 	mv_reset_channel(hpriv, mmio, port);
2273 
2274 	ZERO(0x028);		/* command */
2275 	writel(0x101f, port_mmio + EDMA_CFG_OFS);
2276 	ZERO(0x004);		/* timer */
2277 	ZERO(0x008);		/* irq err cause */
2278 	ZERO(0x00c);		/* irq err mask */
2279 	ZERO(0x010);		/* rq bah */
2280 	ZERO(0x014);		/* rq inp */
2281 	ZERO(0x018);		/* rq outp */
2282 	ZERO(0x01c);		/* respq bah */
2283 	ZERO(0x024);		/* respq outp */
2284 	ZERO(0x020);		/* respq inp */
2285 	ZERO(0x02c);		/* test control */
2286 	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2287 }
2288 
2289 #undef ZERO
2290 
2291 #define ZERO(reg) writel(0, hc_mmio + (reg))
2292 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2293 				       void __iomem *mmio)
2294 {
2295 	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2296 
2297 	ZERO(0x00c);
2298 	ZERO(0x010);
2299 	ZERO(0x014);
2300 
2301 }
2302 
2303 #undef ZERO
2304 
2305 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2306 				  void __iomem *mmio, unsigned int n_hc)
2307 {
2308 	unsigned int port;
2309 
2310 	for (port = 0; port < hpriv->n_ports; port++)
2311 		mv_soc_reset_hc_port(hpriv, mmio, port);
2312 
2313 	mv_soc_reset_one_hc(hpriv, mmio);
2314 
2315 	return 0;
2316 }
2317 
2318 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2319 				      void __iomem *mmio)
2320 {
2321 	return;
2322 }
2323 
2324 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2325 {
2326 	return;
2327 }
2328 
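/*
 * mv_setup_ifctl - program the SATA_INTERFACE_CFG register for a port.
 * When want_gen2i is set, bit 7 is set to allow Gen2i (3.0 Gb/s)
 * operation; otherwise the link is limited to 1.5 Gb/s.
 */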
2329 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2330 {
2331 	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2332 
2333 	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
2334 	if (want_gen2i)
2335 		ifctl |= (1 << 7);		/* enable gen2i speed */
2336 	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2337 }
2338 
2339 /*
2340  * Caller must ensure that EDMA is not active,
2341  * by first doing mv_stop_edma() where needed.
2342  */
2343 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2344 			     unsigned int port_no)
2345 {
2346 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2347 
2348 	mv_stop_edma_engine(port_mmio);
2349 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2350 
2351 	if (!IS_GEN_I(hpriv)) {
2352 		/* Enable 3.0gb/s link speed */
2353 		mv_setup_ifctl(port_mmio, 1);
2354 	}
2355 	/*
2356 	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2357 	 * link, and physical layers.  It resets all SATA interface registers
2358 	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2359 	 */
2360 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2361 	udelay(25);	/* allow reset propagation */
2362 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2363 
2364 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2365 
2366 	if (IS_GEN_I(hpriv))
2367 		mdelay(1);
2368 }
2369 
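/*
 * mv_pmp_select - route subsequent commands to port-multiplier port @pmp
 * by updating the PMP field (low four bits) of SATA_IFCTL_OFS.
 * This is a no-op on ports without port-multiplier support.
 */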
2370 static void mv_pmp_select(struct ata_port *ap, int pmp)
2371 {
2372 	if (sata_pmp_supported(ap)) {
2373 		void __iomem *port_mmio = mv_ap_base(ap);
2374 		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2375 		int old = reg & 0xf;
2376 
2377 		if (old != pmp) {
2378 			reg = (reg & ~0xf) | pmp;
2379 			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2380 		}
2381 	}
2382 }
2383 
2384 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2385 				unsigned long deadline)
2386 {
2387 	mv_pmp_select(link->ap, sata_srst_pmp(link));
2388 	return sata_std_hardreset(link, class, deadline);
2389 }
2390 
2391 static int mv_softreset(struct ata_link *link, unsigned int *class,
2392 				unsigned long deadline)
2393 {
2394 	mv_pmp_select(link->ap, sata_srst_pmp(link));
2395 	return ata_sff_softreset(link, class, deadline);
2396 }
2397 
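/*
 * mv_hardreset - per-channel hard reset.
 *
 * Reset the EDMA channel, then perform a standard SATA hardreset.
 * As the second half of the FEr SATA#10 workaround, Gen-II/IIE parts
 * retry with the link forced to 1.5 Gb/s if it repeatedly comes up in
 * an unusable state (SStatus == 0x121).
 */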
2398 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2399 			unsigned long deadline)
2400 {
2401 	struct ata_port *ap = link->ap;
2402 	struct mv_host_priv *hpriv = ap->host->private_data;
2403 	struct mv_port_priv *pp = ap->private_data;
2404 	void __iomem *mmio = hpriv->base;
2405 	int rc, attempts = 0, extra = 0;
2406 	u32 sstatus;
2407 	bool online;
2408 
2409 	mv_reset_channel(hpriv, mmio, ap->port_no);
2410 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2411 
2412 	/* Workaround for errata FEr SATA#10 (part 2) */
2413 	do {
2414 		const unsigned long *timing =
2415 				sata_ehc_deb_timing(&link->eh_context);
2416 
2417 		rc = sata_link_hardreset(link, timing, deadline + extra,
2418 					 &online, NULL);
2419 		if (rc)
2420 			return rc;
2421 		sata_scr_read(link, SCR_STATUS, &sstatus);
2422 		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2423 			/* Force 1.5gb/s link speed and try again */
2424 			mv_setup_ifctl(mv_ap_base(ap), 0);
2425 			if (time_after(jiffies + HZ, deadline))
2426 				extra = HZ; /* only extend it once, max */
2427 		}
2428 	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
2429 
2430 	return rc;
2431 }
2432 
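/*
 * mv_eh_freeze - libata ->freeze() hook: stop EDMA on this port and mask
 * its "done" and "error" bits in the main interrupt mask register.
 */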
2433 static void mv_eh_freeze(struct ata_port *ap)
2434 {
2435 	struct mv_host_priv *hpriv = ap->host->private_data;
2436 	unsigned int shift, hardport, port = ap->port_no;
2437 	u32 main_mask;
2438 
2439 	/* FIXME: handle coalescing completion events properly */
2440 
2441 	mv_stop_edma(ap);
2442 	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2443 
2444 	/* disable assertion of portN err, done events */
2445 	main_mask = readl(hpriv->main_mask_reg_addr);
2446 	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2447 	writelfl(main_mask, hpriv->main_mask_reg_addr);
2448 }
2449 
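/*
 * mv_eh_thaw - libata ->thaw() hook: clear any stale EDMA error and HC
 * interrupt causes for this port, then re-enable its "done" and "error"
 * bits in the main interrupt mask register.
 */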
2450 static void mv_eh_thaw(struct ata_port *ap)
2451 {
2452 	struct mv_host_priv *hpriv = ap->host->private_data;
2453 	unsigned int shift, hardport, port = ap->port_no;
2454 	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
2455 	void __iomem *port_mmio = mv_ap_base(ap);
2456 	u32 main_mask, hc_irq_cause;
2457 
2458 	/* FIXME: handle coalescing completion events properly */
2459 
2460 	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2461 
2462 	/* clear EDMA errors on this port */
2463 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2464 
2465 	/* clear pending irq events */
2466 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2467 	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
2468 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2469 
2470 	/* enable assertion of portN err, done events */
2471 	main_mask = readl(hpriv->main_mask_reg_addr);
2472 	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
2473 	writelfl(main_mask, hpriv->main_mask_reg_addr);
2474 }
2475 
2476 /**
2477  *      mv_port_init - Perform some early initialization on a single port.
2478  *      @port: libata data structure storing shadow register addresses
2479  *      @port_mmio: base address of the port
2480  *
2481  *      Initialize shadow register mmio addresses, clear outstanding
2482  *      interrupts on the port, and unmask interrupts for the future
2483  *      start of the port.
2484  *
2485  *      LOCKING:
2486  *      Inherited from caller.
2487  */
2488 static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
2489 {
2490 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2491 	unsigned serr_ofs;
2492 
2493 	/* PIO related setup
2494 	 */
2495 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2496 	port->error_addr =
2497 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2498 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2499 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2500 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2501 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2502 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2503 	port->status_addr =
2504 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2505 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2506 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2507 
2508 	/* unused: */
2509 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2510 
2511 	/* Clear any currently outstanding port interrupt conditions */
2512 	serr_ofs = mv_scr_offset(SCR_ERROR);
2513 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2514 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2515 
2516 	/* unmask all non-transient EDMA error interrupts */
2517 	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2518 
2519 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2520 		readl(port_mmio + EDMA_CFG_OFS),
2521 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2522 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2523 }
2524 
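/*
 * mv_chip_id - identify the controller generation from the board index
 * and PCI revision, select the matching mv_hw_ops, record the errata
 * flags that apply, and choose PCI vs PCIe interrupt register offsets.
 */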
2525 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2526 {
2527 	struct pci_dev *pdev = to_pci_dev(host->dev);
2528 	struct mv_host_priv *hpriv = host->private_data;
2529 	u32 hp_flags = hpriv->hp_flags;
2530 
2531 	switch (board_idx) {
2532 	case chip_5080:
2533 		hpriv->ops = &mv5xxx_ops;
2534 		hp_flags |= MV_HP_GEN_I;
2535 
2536 		switch (pdev->revision) {
2537 		case 0x1:
2538 			hp_flags |= MV_HP_ERRATA_50XXB0;
2539 			break;
2540 		case 0x3:
2541 			hp_flags |= MV_HP_ERRATA_50XXB2;
2542 			break;
2543 		default:
2544 			dev_printk(KERN_WARNING, &pdev->dev,
2545 			   "Applying 50XXB2 workarounds to unknown rev\n");
2546 			hp_flags |= MV_HP_ERRATA_50XXB2;
2547 			break;
2548 		}
2549 		break;
2550 
2551 	case chip_504x:
2552 	case chip_508x:
2553 		hpriv->ops = &mv5xxx_ops;
2554 		hp_flags |= MV_HP_GEN_I;
2555 
2556 		switch (pdev->revision) {
2557 		case 0x0:
2558 			hp_flags |= MV_HP_ERRATA_50XXB0;
2559 			break;
2560 		case 0x3:
2561 			hp_flags |= MV_HP_ERRATA_50XXB2;
2562 			break;
2563 		default:
2564 			dev_printk(KERN_WARNING, &pdev->dev,
2565 			   "Applying B2 workarounds to unknown rev\n");
2566 			hp_flags |= MV_HP_ERRATA_50XXB2;
2567 			break;
2568 		}
2569 		break;
2570 
2571 	case chip_604x:
2572 	case chip_608x:
2573 		hpriv->ops = &mv6xxx_ops;
2574 		hp_flags |= MV_HP_GEN_II;
2575 
2576 		switch (pdev->revision) {
2577 		case 0x7:
2578 			hp_flags |= MV_HP_ERRATA_60X1B2;
2579 			break;
2580 		case 0x9:
2581 			hp_flags |= MV_HP_ERRATA_60X1C0;
2582 			break;
2583 		default:
2584 			dev_printk(KERN_WARNING, &pdev->dev,
2585 				   "Applying B2 workarounds to unknown rev\n");
2586 			hp_flags |= MV_HP_ERRATA_60X1B2;
2587 			break;
2588 		}
2589 		break;
2590 
2591 	case chip_7042:
2592 		hp_flags |= MV_HP_PCIE;
2593 		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2594 		    (pdev->device == 0x2300 || pdev->device == 0x2310))
2595 		{
2596 			/*
2597 			 * Highpoint RocketRAID PCIe 23xx series cards:
2598 			 *
2599 			 * Unconfigured drives are treated as "Legacy"
2600 			 * by the BIOS, and it overwrites sector 8 with
2601 			 * a "Lgcy" metadata block prior to Linux boot.
2602 			 *
2603 			 * Configured drives (RAID or JBOD) leave sector 8
2604 			 * alone, but instead overwrite a high numbered
2605 			 * sector for the RAID metadata.  This sector can
2606 			 * be determined exactly, by truncating the physical
2607 			 * drive capacity to a nice even GB value.
2608 			 *
2609 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2610 			 *
2611 			 * Warn the user, lest they think we're just buggy.
2612 			 */
2613 			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2614 				" BIOS CORRUPTS DATA on all attached drives,"
2615 				" regardless of if/how they are configured."
2616 				" BEWARE!\n");
2617 			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2618 				" use sectors 8-9 on \"Legacy\" drives,"
2619 				" and avoid the final two gigabytes on"
2620 				" all RocketRAID BIOS initialized drives.\n");
2621 		}
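		/* drop through: chip_7042 otherwise uses the same setup as chip_6042 */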
2622 	case chip_6042:
2623 		hpriv->ops = &mv6xxx_ops;
2624 		hp_flags |= MV_HP_GEN_IIE;
2625 
2626 		switch (pdev->revision) {
2627 		case 0x0:
2628 			hp_flags |= MV_HP_ERRATA_XX42A0;
2629 			break;
2630 		case 0x1:
2631 			hp_flags |= MV_HP_ERRATA_60X1C0;
2632 			break;
2633 		default:
2634 			dev_printk(KERN_WARNING, &pdev->dev,
2635 			   "Applying 60X1C0 workarounds to unknown rev\n");
2636 			hp_flags |= MV_HP_ERRATA_60X1C0;
2637 			break;
2638 		}
2639 		break;
2640 	case chip_soc:
2641 		hpriv->ops = &mv_soc_ops;
2642 		hp_flags |= MV_HP_ERRATA_60X1C0;
2643 		break;
2644 
2645 	default:
2646 		dev_printk(KERN_ERR, host->dev,
2647 			   "BUG: invalid board index %u\n", board_idx);
2648 		return 1;
2649 	}
2650 
2651 	hpriv->hp_flags = hp_flags;
2652 	if (hp_flags & MV_HP_PCIE) {
2653 		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
2654 		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
2655 		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
2656 	} else {
2657 		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
2658 		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
2659 		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
2660 	}
2661 
2662 	return 0;
2663 }
2664 
2665 /**
2666  *      mv_init_host - Perform some early initialization of the host.
2667  *      @host: ATA host to initialize
2668  *      @board_idx: controller index
2669  *
2670  *      If possible, do an early global reset of the host.  Then do
2671  *      our port init and clear/unmask all/relevant host interrupts.
2672  *
2673  *      LOCKING:
2674  *      Inherited from caller.
2675  */
2676 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2677 {
2678 	int rc = 0, n_hc, port, hc;
2679 	struct mv_host_priv *hpriv = host->private_data;
2680 	void __iomem *mmio = hpriv->base;
2681 
2682 	rc = mv_chip_id(host, board_idx);
2683 	if (rc)
2684 		goto done;
2685 
2686 	if (HAS_PCI(host)) {
2687 		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
2688 		hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
2689 	} else {
2690 		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
2691 		hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
2692 	}
2693 
2694 	/* global interrupt mask: 0 == mask everything */
2695 	writel(0, hpriv->main_mask_reg_addr);
2696 
2697 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2698 
2699 	for (port = 0; port < host->n_ports; port++)
2700 		hpriv->ops->read_preamp(hpriv, port, mmio);
2701 
2702 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2703 	if (rc)
2704 		goto done;
2705 
2706 	hpriv->ops->reset_flash(hpriv, mmio);
2707 	hpriv->ops->reset_bus(host, mmio);
2708 	hpriv->ops->enable_leds(hpriv, mmio);
2709 
2710 	for (port = 0; port < host->n_ports; port++) {
2711 		struct ata_port *ap = host->ports[port];
2712 		void __iomem *port_mmio = mv_port_base(mmio, port);
2713 
2714 		mv_port_init(&ap->ioaddr, port_mmio);
2715 
2716 #ifdef CONFIG_PCI
2717 		if (HAS_PCI(host)) {
2718 			unsigned int offset = port_mmio - mmio;
2719 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2720 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2721 		}
2722 #endif
2723 	}
2724 
2725 	for (hc = 0; hc < n_hc; hc++) {
2726 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2727 
2728 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2729 			"(before clear)=0x%08x\n", hc,
2730 			readl(hc_mmio + HC_CFG_OFS),
2731 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2732 
2733 		/* Clear any currently outstanding hc interrupt conditions */
2734 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2735 	}
2736 
2737 	if (HAS_PCI(host)) {
2738 		/* Clear any currently outstanding host interrupt conditions */
2739 		writelfl(0, mmio + hpriv->irq_cause_ofs);
2740 
2741 		/* and unmask interrupt generation for host regs */
2742 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2743 		if (IS_GEN_I(hpriv))
2744 			writelfl(~HC_MAIN_MASKED_IRQS_5,
2745 				 hpriv->main_mask_reg_addr);
2746 		else
2747 			writelfl(~HC_MAIN_MASKED_IRQS,
2748 				 hpriv->main_mask_reg_addr);
2749 
2750 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2751 			"PCI int cause/mask=0x%08x/0x%08x\n",
2752 			readl(hpriv->main_cause_reg_addr),
2753 			readl(hpriv->main_mask_reg_addr),
2754 			readl(mmio + hpriv->irq_cause_ofs),
2755 			readl(mmio + hpriv->irq_mask_ofs));
2756 	} else {
2757 		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2758 			 hpriv->main_mask_reg_addr);
2759 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2760 			readl(hpriv->main_cause_reg_addr),
2761 			readl(hpriv->main_mask_reg_addr));
2762 	}
2763 done:
2764 	return rc;
2765 }
2766 
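/*
 * mv_create_dma_pools - create managed DMA pools for the command request
 * (CRQB) queues, response (CRPB) queues, and scatter/gather tables.
 * Using dmam_pool_create() means the pools are released automatically
 * when the device is unbound.
 */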
2767 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2768 {
2769 	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2770 							     MV_CRQB_Q_SZ, 0);
2771 	if (!hpriv->crqb_pool)
2772 		return -ENOMEM;
2773 
2774 	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2775 							     MV_CRPB_Q_SZ, 0);
2776 	if (!hpriv->crpb_pool)
2777 		return -ENOMEM;
2778 
2779 	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2780 							     MV_SG_TBL_SZ, 0);
2781 	if (!hpriv->sg_tbl_pool)
2782 		return -ENOMEM;
2783 
2784 	return 0;
2785 }
2786 
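/*
 * mv_conf_mbus_windows - program the controller's four MBUS address
 * decoding windows from the platform's DRAM chip-select layout, so that
 * the SATA DMA engines can reach system memory on SoC variants.
 */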
2787 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2788 				 struct mbus_dram_target_info *dram)
2789 {
2790 	int i;
2791 
2792 	for (i = 0; i < 4; i++) {
2793 		writel(0, hpriv->base + WINDOW_CTRL(i));
2794 		writel(0, hpriv->base + WINDOW_BASE(i));
2795 	}
2796 
2797 	for (i = 0; i < dram->num_cs; i++) {
2798 		struct mbus_dram_window *cs = dram->cs + i;
2799 
2800 		writel(((cs->size - 1) & 0xffff0000) |
2801 			(cs->mbus_attr << 8) |
2802 			(dram->mbus_dram_target_id << 4) | 1,
2803 			hpriv->base + WINDOW_CTRL(i));
2804 		writel(cs->base, hpriv->base + WINDOW_BASE(i));
2805 	}
2806 }
2807 
2808 /**
2809  *      mv_platform_probe - handle a positive probe of an SoC Marvell
2810  *      host
2811  *      @pdev: platform device found
2812  *
2813  *      LOCKING:
2814  *      Inherited from caller.
2815  */
2816 static int mv_platform_probe(struct platform_device *pdev)
2817 {
2818 	static int printed_version;
2819 	const struct mv_sata_platform_data *mv_platform_data;
2820 	const struct ata_port_info *ppi[] =
2821 	    { &mv_port_info[chip_soc], NULL };
2822 	struct ata_host *host;
2823 	struct mv_host_priv *hpriv;
2824 	struct resource *res;
2825 	int n_ports, rc;
2826 
2827 	if (!printed_version++)
2828 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2829 
2830 	/*
2831 	 * Simple resource validation ..
2832 	 */
2833 	if (unlikely(pdev->num_resources != 2)) {
2834 		dev_err(&pdev->dev, "invalid number of resources\n");
2835 		return -EINVAL;
2836 	}
2837 
2838 	/*
2839 	 * Get the register base first
2840 	 */
2841 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2842 	if (res == NULL)
2843 		return -EINVAL;
2844 
2845 	/* allocate host */
2846 	mv_platform_data = pdev->dev.platform_data;
2847 	n_ports = mv_platform_data->n_ports;
2848 
2849 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2850 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2851 
2852 	if (!host || !hpriv)
2853 		return -ENOMEM;
2854 	host->private_data = hpriv;
2855 	hpriv->n_ports = n_ports;
2856 
2857 	host->iomap = NULL;
2858 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
2859 				   res->end - res->start + 1);
2860 	hpriv->base -= MV_SATAHC0_REG_BASE;
2861 
2862 	/*
2863 	 * (Re-)program MBUS remapping windows if we are asked to.
2864 	 */
2865 	if (mv_platform_data->dram != NULL)
2866 		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2867 
2868 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
2869 	if (rc)
2870 		return rc;
2871 
2872 	/* initialize adapter */
2873 	rc = mv_init_host(host, chip_soc);
2874 	if (rc)
2875 		return rc;
2876 
2877 	dev_printk(KERN_INFO, &pdev->dev,
2878 		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2879 		   host->n_ports);
2880 
2881 	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2882 				 IRQF_SHARED, &mv6_sht);
2883 }
2884 
2885 /**
2887  *      mv_platform_remove    -       unplug a platform interface
2888  *      @pdev: platform device
2889  *
2890  *      A platform bus SATA device has been unplugged. Perform the needed
2891  *      cleanup. Also called on module unload for any active devices.
2892  */
2893 static int __devexit mv_platform_remove(struct platform_device *pdev)
2894 {
2895 	struct device *dev = &pdev->dev;
2896 	struct ata_host *host = dev_get_drvdata(dev);
2897 
2898 	ata_host_detach(host);
2899 	return 0;
2900 }
2901 
2902 static struct platform_driver mv_platform_driver = {
2903 	.probe			= mv_platform_probe,
2904 	.remove			= __devexit_p(mv_platform_remove),
2905 	.driver			= {
2906 				   .name = DRV_NAME,
2907 				   .owner = THIS_MODULE,
2908 				  },
2909 };
2910 
2911 
2912 #ifdef CONFIG_PCI
2913 static int mv_pci_init_one(struct pci_dev *pdev,
2914 			   const struct pci_device_id *ent);
2915 
2916 
2917 static struct pci_driver mv_pci_driver = {
2918 	.name			= DRV_NAME,
2919 	.id_table		= mv_pci_tbl,
2920 	.probe			= mv_pci_init_one,
2921 	.remove			= ata_pci_remove_one,
2922 };
2923 
2924 /*
2925  * module options
2926  */
2927 static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2928 
2929 
2930 /* move to PCI layer or libata core? */
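/*
 * pci_go_64 - try to enable 64-bit streaming and consistent DMA masks,
 * falling back to 32-bit masks when 64-bit DMA is unavailable.
 */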
2931 static int pci_go_64(struct pci_dev *pdev)
2932 {
2933 	int rc;
2934 
2935 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2936 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2937 		if (rc) {
2938 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2939 			if (rc) {
2940 				dev_printk(KERN_ERR, &pdev->dev,
2941 					   "64-bit DMA enable failed\n");
2942 				return rc;
2943 			}
2944 		}
2945 	} else {
2946 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2947 		if (rc) {
2948 			dev_printk(KERN_ERR, &pdev->dev,
2949 				   "32-bit DMA enable failed\n");
2950 			return rc;
2951 		}
2952 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2953 		if (rc) {
2954 			dev_printk(KERN_ERR, &pdev->dev,
2955 				   "32-bit consistent DMA enable failed\n");
2956 			return rc;
2957 		}
2958 	}
2959 
2960 	return rc;
2961 }
2962 
2963 /**
2964  *      mv_print_info - Dump key info to kernel log for perusal.
2965  *      @host: ATA host to print info about
2966  *
2967  *      FIXME: complete this.
2968  *
2969  *      LOCKING:
2970  *      Inherited from caller.
2971  */
2972 static void mv_print_info(struct ata_host *host)
2973 {
2974 	struct pci_dev *pdev = to_pci_dev(host->dev);
2975 	struct mv_host_priv *hpriv = host->private_data;
2976 	u8 scc;
2977 	const char *scc_s, *gen;
2978 
2979 	/* Use this to determine the HW stepping of the chip so we know
2980 	 * what errata to workaround
2981 	 */
2982 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2983 	if (scc == 0)
2984 		scc_s = "SCSI";
2985 	else if (scc == 0x01)
2986 		scc_s = "RAID";
2987 	else
2988 		scc_s = "?";
2989 
2990 	if (IS_GEN_I(hpriv))
2991 		gen = "I";
2992 	else if (IS_GEN_II(hpriv))
2993 		gen = "II";
2994 	else if (IS_GEN_IIE(hpriv))
2995 		gen = "IIE";
2996 	else
2997 		gen = "?";
2998 
2999 	dev_printk(KERN_INFO, &pdev->dev,
3000 	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3001 	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3002 	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3003 }
3004 
3005 /**
3006  *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
3007  *      @pdev: PCI device found
3008  *      @ent: PCI device ID entry for the matched host
3009  *
3010  *      LOCKING:
3011  *      Inherited from caller.
3012  */
3013 static int mv_pci_init_one(struct pci_dev *pdev,
3014 			   const struct pci_device_id *ent)
3015 {
3016 	static int printed_version;
3017 	unsigned int board_idx = (unsigned int)ent->driver_data;
3018 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3019 	struct ata_host *host;
3020 	struct mv_host_priv *hpriv;
3021 	int n_ports, rc;
3022 
3023 	if (!printed_version++)
3024 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3025 
3026 	/* allocate host */
3027 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3028 
3029 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3030 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3031 	if (!host || !hpriv)
3032 		return -ENOMEM;
3033 	host->private_data = hpriv;
3034 	hpriv->n_ports = n_ports;
3035 
3036 	/* acquire resources */
3037 	rc = pcim_enable_device(pdev);
3038 	if (rc)
3039 		return rc;
3040 
3041 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3042 	if (rc == -EBUSY)
3043 		pcim_pin_device(pdev);
3044 	if (rc)
3045 		return rc;
3046 	host->iomap = pcim_iomap_table(pdev);
3047 	hpriv->base = host->iomap[MV_PRIMARY_BAR];
3048 
3049 	rc = pci_go_64(pdev);
3050 	if (rc)
3051 		return rc;
3052 
3053 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
3054 	if (rc)
3055 		return rc;
3056 
3057 	/* initialize adapter */
3058 	rc = mv_init_host(host, board_idx);
3059 	if (rc)
3060 		return rc;
3061 
3062 	/* Enable interrupts */
3063 	if (msi && pci_enable_msi(pdev))
3064 		pci_intx(pdev, 1);
3065 
3066 	mv_dump_pci_cfg(pdev, 0x68);
3067 	mv_print_info(host);
3068 
3069 	pci_set_master(pdev);
3070 	pci_try_set_mwi(pdev);
3071 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3072 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3073 }
3074 #endif
3075 
3076 static int mv_platform_probe(struct platform_device *pdev);
3077 static int __devexit mv_platform_remove(struct platform_device *pdev);
3078 
3079 static int __init mv_init(void)
3080 {
3081 	int rc = -ENODEV;
3082 #ifdef CONFIG_PCI
3083 	rc = pci_register_driver(&mv_pci_driver);
3084 	if (rc < 0)
3085 		return rc;
3086 #endif
3087 	rc = platform_driver_register(&mv_platform_driver);
3088 
3089 #ifdef CONFIG_PCI
3090 	if (rc < 0)
3091 		pci_unregister_driver(&mv_pci_driver);
3092 #endif
3093 	return rc;
3094 }
3095 
3096 static void __exit mv_exit(void)
3097 {
3098 #ifdef CONFIG_PCI
3099 	pci_unregister_driver(&mv_pci_driver);
3100 #endif
3101 	platform_driver_unregister(&mv_platform_driver);
3102 }
3103 
3104 MODULE_AUTHOR("Brett Russ");
3105 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3106 MODULE_LICENSE("GPL");
3107 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3108 MODULE_VERSION(DRV_VERSION);
3109 MODULE_ALIAS("platform:" DRV_NAME);
3110 
3111 #ifdef CONFIG_PCI
3112 module_param(msi, int, 0444);
3113 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3114 #endif
3115 
3116 module_init(mv_init);
3117 module_exit(mv_exit);
3118