1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/driver-api/libata.rst
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
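
/*
 * A boundary mask of 0xffffffff means an ADMA S/G segment only may not
 * cross a 4 GiB boundary, in contrast to the usual 64 KiB ATA_DMA_BOUNDARY
 * that applies when the legacy BMDMA engine is used (see
 * nv_adma_slave_config()).
 */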
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= ATA_PIO4,
61 	NV_MWDMA_MASK			= ATA_MWDMA2,
62 	NV_UDMA_MASK			= ATA_UDMA6,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
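
	/*
	 * For illustration, the constants above work out as follows:
	 *
	 *   NV_ADMA_SGTBL_LEN        = (1024 - 128) / 16 = 56 entries
	 *   NV_ADMA_SGTBL_SZ         = 56 * 16           = 896 bytes
	 *   NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896)  = 32768 bytes/port
	 *
	 * i.e. each command tag gets a 128-byte CPB plus an 896-byte external
	 * APRD table, and NV_ADMA_SGTBL_TOTAL_LEN adds the 5 APRDs embedded
	 * in the CPB itself, for 61 usable S/G entries per command.
	 */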
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
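
/*
 * These bits occupy the upper byte of each 16-bit CPB taskfile word:
 * CMDEND/WNB/IGN are control flags in bits 15..13, and CS1n/DA2..DA0 are
 * the PATA register-select lines in bits 12..8, with the data value in
 * the low byte.  nv_adma_tf_to_cpb() builds each word along the lines of:
 *
 *	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
 */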
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments, the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is the length of the taskfile in 64-bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
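
/*
 * Note: the fields above add up to exactly NV_ADMA_CPB_SZ (128) bytes:
 * 8 bytes of flag/tag/index fields, a 24-byte taskfile, five 16-byte
 * inline APRDs (matching NV_ADMA_APRD_SZ), an 8-byte next_aprd pointer
 * and 8 bytes of padding.
 */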
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
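
/*
 * The defer queue is a simple circular FIFO; ATA_MAX_QUEUE is a power of
 * two, so indices are masked rather than wrapped explicitly.  A sketch of
 * the push side, as done in nv_swncq_qc_to_dq() below:
 *
 *	dq->defer_bits |= 1 << tag;
 *	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = tag;
 */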
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_bmdma_prd *prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue of deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
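
/*
 * In the ADMA general control word, port 0's interrupt bit is bit 19 and
 * port 1's is bit 31 (19 + 12).  nv_adma_interrupt() uses this to skip
 * ports with nothing to do, roughly:
 *
 *	gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
 *	if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && !notifier_error)
 *		continue;	// nothing pending on this port
 */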
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM_SLEEP
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static int nv_hardreset(struct ata_link *link, unsigned int *class,
309 			unsigned long deadline);
310 static void nv_nf2_freeze(struct ata_port *ap);
311 static void nv_nf2_thaw(struct ata_port *ap);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	MCP5x,
356 	SWNCQ,
357 };
358 
359 static const struct pci_device_id nv_pci_tbl[] = {
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
374 
375 	{ } /* terminate list */
376 };
377 
378 static struct pci_driver nv_pci_driver = {
379 	.name			= DRV_NAME,
380 	.id_table		= nv_pci_tbl,
381 	.probe			= nv_init_one,
382 #ifdef CONFIG_PM_SLEEP
383 	.suspend		= ata_pci_device_suspend,
384 	.resume			= nv_pci_device_resume,
385 #endif
386 	.remove			= ata_pci_remove_one,
387 };
388 
389 static struct scsi_host_template nv_sht = {
390 	ATA_BMDMA_SHT(DRV_NAME),
391 };
392 
393 static struct scsi_host_template nv_adma_sht = {
394 	ATA_NCQ_SHT(DRV_NAME),
395 	.can_queue		= NV_ADMA_MAX_CPBS,
396 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
397 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
398 	.slave_configure	= nv_adma_slave_config,
399 };
400 
401 static struct scsi_host_template nv_swncq_sht = {
402 	ATA_NCQ_SHT(DRV_NAME),
403 	.can_queue		= ATA_MAX_QUEUE - 1,
404 	.sg_tablesize		= LIBATA_MAX_PRD,
405 	.dma_boundary		= ATA_DMA_BOUNDARY,
406 	.slave_configure	= nv_swncq_slave_config,
407 };
408 
409 /*
410  * NV SATA controllers have various problems with the hardreset
411  * protocol depending on the specific controller and device.
412  *
413  * GENERIC:
414  *
415  *  bko11195 reports that the link doesn't come online after hardreset
416  *  on generic nv's, and there have been several other similar reports
417  *  on linux-ide.
418  *
419  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
420  *  softreset.
421  *
422  * NF2/3:
423  *
424  *  bko3352 reports nf2/3 controllers can't determine device signature
425  *  reliably after hardreset.  The following thread reports detection
426  *  failure on cold boot with the standard debouncing timing.
427  *
428  *  http://thread.gmane.org/gmane.linux.ide/34098
429  *
430  *  bko12176 reports that hardreset fails to bring up the link during
431  *  boot on nf2.
432  *
433  * CK804:
434  *
435  *  For initial probing after boot and hot plugging, hardreset mostly
436  *  works fine on CK804 but, curiously, reprobing the initial port by
437  *  rescanning or rmmod/insmod fails to acquire the initial D2H Reg
438  *  FIS in a somewhat nondeterministic way.
439  *
440  * SWNCQ:
441  *
442  *  bko12351 reports that when SWNCQ is enabled, hardreset is needed
443  *  for hotplug to work, yet hardreset can't report a proper
444  *  signature, which suggests that mcp5x is closer to nf2 as far as
445  *  reset quirkiness is concerned.
446  *
447  *  bko12703 reports that boot probing fails for an Intel SSD with
448  *  hardreset.  The link fails to come online.  Softreset works fine.
449  *
450  * The failures are varied, but the following patterns seem to hold for
451  * all flavors.
452  *
453  * - Softreset during boot always works.
454  *
455  * - Hardreset during boot sometimes fails to bring up the link on
456  *   certain combinations, and device signature acquisition is
457  *   unreliable.
458  *
459  * - Hardreset is often necessary after hotplug.
460  *
461  * So, preferring softreset for boot probing and error handling (as
462  * hardreset might bring down the link) while using hardreset for
463  * post-boot probing should work around the above issues in most
464  * cases.  nv_hardreset() below kicks in only for post-boot probing
465  * and is used for all variants.
466  */
467 static struct ata_port_operations nv_generic_ops = {
468 	.inherits		= &ata_bmdma_port_ops,
469 	.lost_interrupt		= ATA_OP_NULL,
470 	.scr_read		= nv_scr_read,
471 	.scr_write		= nv_scr_write,
472 	.hardreset		= nv_hardreset,
473 };
474 
475 static struct ata_port_operations nv_nf2_ops = {
476 	.inherits		= &nv_generic_ops,
477 	.freeze			= nv_nf2_freeze,
478 	.thaw			= nv_nf2_thaw,
479 };
480 
481 static struct ata_port_operations nv_ck804_ops = {
482 	.inherits		= &nv_generic_ops,
483 	.freeze			= nv_ck804_freeze,
484 	.thaw			= nv_ck804_thaw,
485 	.host_stop		= nv_ck804_host_stop,
486 };
487 
488 static struct ata_port_operations nv_adma_ops = {
489 	.inherits		= &nv_ck804_ops,
490 
491 	.check_atapi_dma	= nv_adma_check_atapi_dma,
492 	.sff_tf_read		= nv_adma_tf_read,
493 	.qc_defer		= ata_std_qc_defer,
494 	.qc_prep		= nv_adma_qc_prep,
495 	.qc_issue		= nv_adma_qc_issue,
496 	.sff_irq_clear		= nv_adma_irq_clear,
497 
498 	.freeze			= nv_adma_freeze,
499 	.thaw			= nv_adma_thaw,
500 	.error_handler		= nv_adma_error_handler,
501 	.post_internal_cmd	= nv_adma_post_internal_cmd,
502 
503 	.port_start		= nv_adma_port_start,
504 	.port_stop		= nv_adma_port_stop,
505 #ifdef CONFIG_PM
506 	.port_suspend		= nv_adma_port_suspend,
507 	.port_resume		= nv_adma_port_resume,
508 #endif
509 	.host_stop		= nv_adma_host_stop,
510 };
511 
512 static struct ata_port_operations nv_swncq_ops = {
513 	.inherits		= &nv_generic_ops,
514 
515 	.qc_defer		= ata_std_qc_defer,
516 	.qc_prep		= nv_swncq_qc_prep,
517 	.qc_issue		= nv_swncq_qc_issue,
518 
519 	.freeze			= nv_mcp55_freeze,
520 	.thaw			= nv_mcp55_thaw,
521 	.error_handler		= nv_swncq_error_handler,
522 
523 #ifdef CONFIG_PM
524 	.port_suspend		= nv_swncq_port_suspend,
525 	.port_resume		= nv_swncq_port_resume,
526 #endif
527 	.port_start		= nv_swncq_port_start,
528 };
529 
530 struct nv_pi_priv {
531 	irq_handler_t			irq_handler;
532 	struct scsi_host_template	*sht;
533 };
534 
535 #define NV_PI_PRIV(_irq_handler, _sht) \
536 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
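
/*
 * NV_PI_PRIV() takes the address of an anonymous compound literal; at
 * file scope such literals have static storage duration, so each
 * nv_port_info[] entry below can simply carry a pointer like
 * NV_PI_PRIV(nv_generic_interrupt, &nv_sht) in its private_data.
 */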
537 
538 static const struct ata_port_info nv_port_info[] = {
539 	/* generic */
540 	{
541 		.flags		= ATA_FLAG_SATA,
542 		.pio_mask	= NV_PIO_MASK,
543 		.mwdma_mask	= NV_MWDMA_MASK,
544 		.udma_mask	= NV_UDMA_MASK,
545 		.port_ops	= &nv_generic_ops,
546 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
547 	},
548 	/* nforce2/3 */
549 	{
550 		.flags		= ATA_FLAG_SATA,
551 		.pio_mask	= NV_PIO_MASK,
552 		.mwdma_mask	= NV_MWDMA_MASK,
553 		.udma_mask	= NV_UDMA_MASK,
554 		.port_ops	= &nv_nf2_ops,
555 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
556 	},
557 	/* ck804 */
558 	{
559 		.flags		= ATA_FLAG_SATA,
560 		.pio_mask	= NV_PIO_MASK,
561 		.mwdma_mask	= NV_MWDMA_MASK,
562 		.udma_mask	= NV_UDMA_MASK,
563 		.port_ops	= &nv_ck804_ops,
564 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
565 	},
566 	/* ADMA */
567 	{
568 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
569 		.pio_mask	= NV_PIO_MASK,
570 		.mwdma_mask	= NV_MWDMA_MASK,
571 		.udma_mask	= NV_UDMA_MASK,
572 		.port_ops	= &nv_adma_ops,
573 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
574 	},
575 	/* MCP5x */
576 	{
577 		.flags		= ATA_FLAG_SATA,
578 		.pio_mask	= NV_PIO_MASK,
579 		.mwdma_mask	= NV_MWDMA_MASK,
580 		.udma_mask	= NV_UDMA_MASK,
581 		.port_ops	= &nv_generic_ops,
582 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
583 	},
584 	/* SWNCQ */
585 	{
586 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
587 		.pio_mask	= NV_PIO_MASK,
588 		.mwdma_mask	= NV_MWDMA_MASK,
589 		.udma_mask	= NV_UDMA_MASK,
590 		.port_ops	= &nv_swncq_ops,
591 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
592 	},
593 };
594 
595 MODULE_AUTHOR("NVIDIA");
596 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
597 MODULE_LICENSE("GPL");
598 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
599 MODULE_VERSION(DRV_VERSION);
600 
601 static bool adma_enabled;
602 static bool swncq_enabled = true;
603 static bool msi_enabled;
604 
605 static void nv_adma_register_mode(struct ata_port *ap)
606 {
607 	struct nv_adma_port_priv *pp = ap->private_data;
608 	void __iomem *mmio = pp->ctl_block;
609 	u16 tmp, status;
610 	int count = 0;
611 
612 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
613 		return;
614 
615 	status = readw(mmio + NV_ADMA_STAT);
616 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
617 		ndelay(50);
618 		status = readw(mmio + NV_ADMA_STAT);
619 		count++;
620 	}
621 	if (count == 20)
622 		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
623 			      status);
624 
625 	tmp = readw(mmio + NV_ADMA_CTL);
626 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
627 
628 	count = 0;
629 	status = readw(mmio + NV_ADMA_STAT);
630 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
631 		ndelay(50);
632 		status = readw(mmio + NV_ADMA_STAT);
633 		count++;
634 	}
635 	if (count == 20)
636 		ata_port_warn(ap,
637 			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
638 			      status);
639 
640 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
641 }
642 
643 static void nv_adma_mode(struct ata_port *ap)
644 {
645 	struct nv_adma_port_priv *pp = ap->private_data;
646 	void __iomem *mmio = pp->ctl_block;
647 	u16 tmp, status;
648 	int count = 0;
649 
650 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
651 		return;
652 
653 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
654 
655 	tmp = readw(mmio + NV_ADMA_CTL);
656 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
657 
658 	status = readw(mmio + NV_ADMA_STAT);
659 	while (((status & NV_ADMA_STAT_LEGACY) ||
660 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
661 		ndelay(50);
662 		status = readw(mmio + NV_ADMA_STAT);
663 		count++;
664 	}
665 	if (count == 20)
666 		ata_port_warn(ap,
667 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
668 			status);
669 
670 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
671 }
672 
673 static int nv_adma_slave_config(struct scsi_device *sdev)
674 {
675 	struct ata_port *ap = ata_shost_to_port(sdev->host);
676 	struct nv_adma_port_priv *pp = ap->private_data;
677 	struct nv_adma_port_priv *port0, *port1;
678 	struct scsi_device *sdev0, *sdev1;
679 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
680 	unsigned long segment_boundary, flags;
681 	unsigned short sg_tablesize;
682 	int rc;
683 	int adma_enable;
684 	u32 current_reg, new_reg, config_mask;
685 
686 	rc = ata_scsi_slave_config(sdev);
687 
688 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
689 		/* Not a proper libata device, ignore */
690 		return rc;
691 
692 	spin_lock_irqsave(ap->lock, flags);
693 
694 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
695 		/*
696 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
697 		 * Therefore ATAPI commands are sent through the legacy interface.
698 		 * However, the legacy interface only supports 32-bit DMA.
699 		 * Restrict DMA parameters as required by the legacy interface
700 		 * when an ATAPI device is connected.
701 		 */
702 		segment_boundary = ATA_DMA_BOUNDARY;
703 		/* Subtract 1 since an extra entry may be needed for padding, see
704 		   libata-scsi.c */
705 		sg_tablesize = LIBATA_MAX_PRD - 1;
706 
707 		/* Since the legacy DMA engine is in use, we need to disable ADMA
708 		   on the port. */
709 		adma_enable = 0;
710 		nv_adma_register_mode(ap);
711 	} else {
712 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
713 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
714 		adma_enable = 1;
715 	}
716 
717 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
718 
719 	if (ap->port_no == 1)
720 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
721 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
722 	else
723 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
724 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
725 
726 	if (adma_enable) {
727 		new_reg = current_reg | config_mask;
728 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
729 	} else {
730 		new_reg = current_reg & ~config_mask;
731 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
732 	}
733 
734 	if (current_reg != new_reg)
735 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
736 
737 	port0 = ap->host->ports[0]->private_data;
738 	port1 = ap->host->ports[1]->private_data;
739 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
740 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
741 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
742 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
743 		/*
744 		 * We have to set the DMA mask to 32-bit if either port is in
745 		 * ATAPI mode, since they are on the same PCI device which is
746 		 * used for DMA mapping.  If either SCSI device is not allocated
747 		 * yet, it's OK since that port will discover its correct
748 		 * setting when it does get allocated.
749 		 */
750 		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
751 	} else {
752 		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
753 	}
754 
755 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
756 	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
757 	ata_port_info(ap,
758 		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
759 		      (unsigned long long)*ap->host->dev->dma_mask,
760 		      segment_boundary, sg_tablesize);
761 
762 	spin_unlock_irqrestore(ap->lock, flags);
763 
764 	return rc;
765 }
766 
767 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
768 {
769 	struct nv_adma_port_priv *pp = qc->ap->private_data;
770 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
771 }
772 
773 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
774 {
775 	/* Other than when internal or pass-through commands are executed,
776 	   the only time this function will be called in ADMA mode will be
777 	   if a command fails. In the failure case we don't care about going
778 	   into register mode with ADMA commands pending, as the commands will
779 	   all shortly be aborted anyway. We assume that NCQ commands are not
780 	   issued via passthrough, which is the only way that switching into
781 	   register mode could abort outstanding commands. */
782 	nv_adma_register_mode(ap);
783 
784 	ata_sff_tf_read(ap, tf);
785 }
786 
787 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
788 {
789 	unsigned int idx = 0;
790 
791 	if (tf->flags & ATA_TFLAG_ISADDR) {
792 		if (tf->flags & ATA_TFLAG_LBA48) {
793 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
794 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
795 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
796 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
797 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
798 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
799 		} else
800 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
801 
802 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
803 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
804 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
805 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
806 	}
807 
808 	if (tf->flags & ATA_TFLAG_DEVICE)
809 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
810 
811 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
812 
813 	while (idx < 12)
814 		cpb[idx++] = cpu_to_le16(IGN);
815 
816 	return idx;
817 }
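
/*
 * Illustrative result of the helper above: for a 28-bit command with
 * ATA_TFLAG_ISADDR and ATA_TFLAG_DEVICE set, the CPB taskfile comes out
 * as seven words,
 *
 *	(ATA_REG_ERR    << 8) | tf->feature | WNB,
 *	(ATA_REG_NSECT  << 8) | tf->nsect,
 *	(ATA_REG_LBAL   << 8) | tf->lbal,
 *	(ATA_REG_LBAM   << 8) | tf->lbam,
 *	(ATA_REG_LBAH   << 8) | tf->lbah,
 *	(ATA_REG_DEVICE << 8) | tf->device,
 *	(ATA_REG_CMD    << 8) | tf->command | CMDEND,
 *
 * padded with IGN entries out to the full 12 slots.
 */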
818 
819 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
820 {
821 	struct nv_adma_port_priv *pp = ap->private_data;
822 	u8 flags = pp->cpb[cpb_num].resp_flags;
823 
824 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
825 
826 	if (unlikely((force_err ||
827 		     flags & (NV_CPB_RESP_ATA_ERR |
828 			      NV_CPB_RESP_CMD_ERR |
829 			      NV_CPB_RESP_CPB_ERR)))) {
830 		struct ata_eh_info *ehi = &ap->link.eh_info;
831 		int freeze = 0;
832 
833 		ata_ehi_clear_desc(ehi);
834 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
835 		if (flags & NV_CPB_RESP_ATA_ERR) {
836 			ata_ehi_push_desc(ehi, "ATA error");
837 			ehi->err_mask |= AC_ERR_DEV;
838 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
839 			ata_ehi_push_desc(ehi, "CMD error");
840 			ehi->err_mask |= AC_ERR_DEV;
841 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
842 			ata_ehi_push_desc(ehi, "CPB error");
843 			ehi->err_mask |= AC_ERR_SYSTEM;
844 			freeze = 1;
845 		} else {
846 			/* notifier error, but no error in CPB flags? */
847 			ata_ehi_push_desc(ehi, "unknown");
848 			ehi->err_mask |= AC_ERR_OTHER;
849 			freeze = 1;
850 		}
851 		/* Kill all commands. EH will determine what actually failed. */
852 		if (freeze)
853 			ata_port_freeze(ap);
854 		else
855 			ata_port_abort(ap);
856 		return -1;
857 	}
858 
859 	if (likely(flags & NV_CPB_RESP_DONE))
860 		return 1;
861 	return 0;
862 }
863 
864 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
865 {
866 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
867 
868 	/* freeze if hotplugged */
869 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
870 		ata_port_freeze(ap);
871 		return 1;
872 	}
873 
874 	/* bail out if not our interrupt */
875 	if (!(irq_stat & NV_INT_DEV))
876 		return 0;
877 
878 	/* DEV interrupt w/ no active qc? */
879 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
880 		ata_sff_check_status(ap);
881 		return 1;
882 	}
883 
884 	/* handle interrupt */
885 	return ata_bmdma_port_intr(ap, qc);
886 }
887 
888 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
889 {
890 	struct ata_host *host = dev_instance;
891 	int i, handled = 0;
892 	u32 notifier_clears[2];
893 
894 	spin_lock(&host->lock);
895 
896 	for (i = 0; i < host->n_ports; i++) {
897 		struct ata_port *ap = host->ports[i];
898 		struct nv_adma_port_priv *pp = ap->private_data;
899 		void __iomem *mmio = pp->ctl_block;
900 		u16 status;
901 		u32 gen_ctl;
902 		u32 notifier, notifier_error;
903 
904 		notifier_clears[i] = 0;
905 
906 		/* if ADMA is disabled, use standard ata interrupt handler */
907 		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
908 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
909 				>> (NV_INT_PORT_SHIFT * i);
910 			handled += nv_host_intr(ap, irq_stat);
911 			continue;
912 		}
913 
914 		/* if in ATA register mode, check for standard interrupts */
915 		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
916 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
917 				>> (NV_INT_PORT_SHIFT * i);
918 			if (ata_tag_valid(ap->link.active_tag))
919 				/* NV_INT_DEV indication seems unreliable at
920 				   times, at least in ADMA mode. Force it on
921 				   whenever a command is active, to prevent
922 				   losing interrupts. */
923 				irq_stat |= NV_INT_DEV;
924 			handled += nv_host_intr(ap, irq_stat);
925 		}
926 
927 		notifier = readl(mmio + NV_ADMA_NOTIFIER);
928 		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
929 		notifier_clears[i] = notifier | notifier_error;
930 
931 		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
932 
933 		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
934 		    !notifier_error)
935 			/* Nothing to do */
936 			continue;
937 
938 		status = readw(mmio + NV_ADMA_STAT);
939 
940 		/*
941 		 * Clear status. Ensure the controller sees the
942 		 * clearing before we start looking at any of the CPB
943 		 * statuses, so that any CPB completions after this
944 		 * point in the handler will raise another interrupt.
945 		 */
946 		writew(status, mmio + NV_ADMA_STAT);
947 		readw(mmio + NV_ADMA_STAT); /* flush posted write */
948 		rmb();
949 
950 		handled++; /* irq handled if we got here */
951 
952 		/* freeze if hotplugged or controller error */
953 		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
954 				       NV_ADMA_STAT_HOTUNPLUG |
955 				       NV_ADMA_STAT_TIMEOUT |
956 				       NV_ADMA_STAT_SERROR))) {
957 			struct ata_eh_info *ehi = &ap->link.eh_info;
958 
959 			ata_ehi_clear_desc(ehi);
960 			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
961 			if (status & NV_ADMA_STAT_TIMEOUT) {
962 				ehi->err_mask |= AC_ERR_SYSTEM;
963 				ata_ehi_push_desc(ehi, "timeout");
964 			} else if (status & NV_ADMA_STAT_HOTPLUG) {
965 				ata_ehi_hotplugged(ehi);
966 				ata_ehi_push_desc(ehi, "hotplug");
967 			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
968 				ata_ehi_hotplugged(ehi);
969 				ata_ehi_push_desc(ehi, "hot unplug");
970 			} else if (status & NV_ADMA_STAT_SERROR) {
971 				/* let EH analyze SError and figure out cause */
972 				ata_ehi_push_desc(ehi, "SError");
973 			} else
974 				ata_ehi_push_desc(ehi, "unknown");
975 			ata_port_freeze(ap);
976 			continue;
977 		}
978 
979 		if (status & (NV_ADMA_STAT_DONE |
980 			      NV_ADMA_STAT_CPBERR |
981 			      NV_ADMA_STAT_CMD_COMPLETE)) {
982 			u32 check_commands = notifier_clears[i];
983 			u32 done_mask = 0;
984 			int pos, rc;
985 
986 			if (status & NV_ADMA_STAT_CPBERR) {
987 				/* check all active commands */
988 				if (ata_tag_valid(ap->link.active_tag))
989 					check_commands = 1 <<
990 						ap->link.active_tag;
991 				else
992 					check_commands = ap->link.sactive;
993 			}
994 
995 			/* check CPBs for completed commands */
996 			while ((pos = ffs(check_commands))) {
997 				pos--;
998 				rc = nv_adma_check_cpb(ap, pos,
999 						notifier_error & (1 << pos));
1000 				if (rc > 0)
1001 					done_mask |= 1 << pos;
1002 				else if (unlikely(rc < 0))
1003 					check_commands = 0;
1004 				check_commands &= ~(1 << pos);
1005 			}
1006 			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1007 		}
1008 	}
1009 
1010 	if (notifier_clears[0] || notifier_clears[1]) {
1011 		/* Note: Both notifier clear registers must be written
1012 		   if either is set, even if one is zero, according to NVIDIA. */
1013 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1014 		writel(notifier_clears[0], pp->notifier_clear_block);
1015 		pp = host->ports[1]->private_data;
1016 		writel(notifier_clears[1], pp->notifier_clear_block);
1017 	}
1018 
1019 	spin_unlock(&host->lock);
1020 
1021 	return IRQ_RETVAL(handled);
1022 }
1023 
1024 static void nv_adma_freeze(struct ata_port *ap)
1025 {
1026 	struct nv_adma_port_priv *pp = ap->private_data;
1027 	void __iomem *mmio = pp->ctl_block;
1028 	u16 tmp;
1029 
1030 	nv_ck804_freeze(ap);
1031 
1032 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1033 		return;
1034 
1035 	/* clear any outstanding CK804 notifications */
1036 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1037 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1038 
1039 	/* Disable interrupt */
1040 	tmp = readw(mmio + NV_ADMA_CTL);
1041 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1042 		mmio + NV_ADMA_CTL);
1043 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1044 }
1045 
1046 static void nv_adma_thaw(struct ata_port *ap)
1047 {
1048 	struct nv_adma_port_priv *pp = ap->private_data;
1049 	void __iomem *mmio = pp->ctl_block;
1050 	u16 tmp;
1051 
1052 	nv_ck804_thaw(ap);
1053 
1054 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1055 		return;
1056 
1057 	/* Enable interrupt */
1058 	tmp = readw(mmio + NV_ADMA_CTL);
1059 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1060 		mmio + NV_ADMA_CTL);
1061 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1062 }
1063 
1064 static void nv_adma_irq_clear(struct ata_port *ap)
1065 {
1066 	struct nv_adma_port_priv *pp = ap->private_data;
1067 	void __iomem *mmio = pp->ctl_block;
1068 	u32 notifier_clears[2];
1069 
1070 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1071 		ata_bmdma_irq_clear(ap);
1072 		return;
1073 	}
1074 
1075 	/* clear any outstanding CK804 notifications */
1076 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1077 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1078 
1079 	/* clear ADMA status */
1080 	writew(0xffff, mmio + NV_ADMA_STAT);
1081 
1082 	/* clear notifiers - note both ports need to be written with
1083 	   something even though we are only clearing on one */
1084 	if (ap->port_no == 0) {
1085 		notifier_clears[0] = 0xFFFFFFFF;
1086 		notifier_clears[1] = 0;
1087 	} else {
1088 		notifier_clears[0] = 0;
1089 		notifier_clears[1] = 0xFFFFFFFF;
1090 	}
1091 	pp = ap->host->ports[0]->private_data;
1092 	writel(notifier_clears[0], pp->notifier_clear_block);
1093 	pp = ap->host->ports[1]->private_data;
1094 	writel(notifier_clears[1], pp->notifier_clear_block);
1095 }
1096 
1097 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1098 {
1099 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1100 
1101 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1102 		ata_bmdma_post_internal_cmd(qc);
1103 }
1104 
1105 static int nv_adma_port_start(struct ata_port *ap)
1106 {
1107 	struct device *dev = ap->host->dev;
1108 	struct nv_adma_port_priv *pp;
1109 	int rc;
1110 	void *mem;
1111 	dma_addr_t mem_dma;
1112 	void __iomem *mmio;
1113 	struct pci_dev *pdev = to_pci_dev(dev);
1114 	u16 tmp;
1115 
1116 	VPRINTK("ENTER\n");
1117 
1118 	/*
1119 	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1120 	 * pad buffers.
1121 	 */
1122 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1123 	if (rc)
1124 		return rc;
1125 
1126 	/* we might fall back to bmdma, allocate bmdma resources */
1127 	rc = ata_bmdma_port_start(ap);
1128 	if (rc)
1129 		return rc;
1130 
1131 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1132 	if (!pp)
1133 		return -ENOMEM;
1134 
1135 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1136 	       ap->port_no * NV_ADMA_PORT_SIZE;
1137 	pp->ctl_block = mmio;
1138 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1139 	pp->notifier_clear_block = pp->gen_block +
1140 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1141 
1142 	/*
1143 	 * Now that the legacy PRD and padding buffer are allocated we can
1144 	 * try to raise the DMA mask to allocate the CPB/APRD table.
1145 	 */
1146 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1147 	if (rc) {
1148 		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1149 		if (rc)
1150 			return rc;
1151 	}
1152 	pp->adma_dma_mask = *dev->dma_mask;
1153 
1154 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1155 				  &mem_dma, GFP_KERNEL);
1156 	if (!mem)
1157 		return -ENOMEM;
1158 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1159 
1160 	/*
1161 	 * First item in chunk of DMA memory:
1162 	 * 128-byte command parameter block (CPB)
1163 	 * one for each command tag
1164 	 */
1165 	pp->cpb     = mem;
1166 	pp->cpb_dma = mem_dma;
1167 
1168 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1169 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1170 
1171 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1172 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1173 
1174 	/*
1175 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1176 	 * Second item: a block of NV_ADMA_SGTBL_LEN s/g entries per command tag
1177 	pp->aprd = mem;
1178 	pp->aprd_dma = mem_dma;
1179 
1180 	ap->private_data = pp;
1181 
1182 	/* clear any outstanding interrupt conditions */
1183 	writew(0xffff, mmio + NV_ADMA_STAT);
1184 
1185 	/* initialize port variables */
1186 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1187 
1188 	/* clear CPB fetch count */
1189 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1190 
1191 	/* clear GO for register mode, enable interrupt */
1192 	tmp = readw(mmio + NV_ADMA_CTL);
1193 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1194 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1195 
1196 	tmp = readw(mmio + NV_ADMA_CTL);
1197 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1198 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1199 	udelay(1);
1200 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1201 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1202 
1203 	return 0;
1204 }
1205 
1206 static void nv_adma_port_stop(struct ata_port *ap)
1207 {
1208 	struct nv_adma_port_priv *pp = ap->private_data;
1209 	void __iomem *mmio = pp->ctl_block;
1210 
1211 	VPRINTK("ENTER\n");
1212 	writew(0, mmio + NV_ADMA_CTL);
1213 }
1214 
1215 #ifdef CONFIG_PM
1216 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1217 {
1218 	struct nv_adma_port_priv *pp = ap->private_data;
1219 	void __iomem *mmio = pp->ctl_block;
1220 
1221 	/* Go to register mode - clears GO */
1222 	nv_adma_register_mode(ap);
1223 
1224 	/* clear CPB fetch count */
1225 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1226 
1227 	/* disable interrupt, shut down port */
1228 	writew(0, mmio + NV_ADMA_CTL);
1229 
1230 	return 0;
1231 }
1232 
1233 static int nv_adma_port_resume(struct ata_port *ap)
1234 {
1235 	struct nv_adma_port_priv *pp = ap->private_data;
1236 	void __iomem *mmio = pp->ctl_block;
1237 	u16 tmp;
1238 
1239 	/* set CPB block location */
1240 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1241 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1242 
1243 	/* clear any outstanding interrupt conditions */
1244 	writew(0xffff, mmio + NV_ADMA_STAT);
1245 
1246 	/* initialize port variables */
1247 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1248 
1249 	/* clear CPB fetch count */
1250 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1251 
1252 	/* clear GO for register mode, enable interrupt */
1253 	tmp = readw(mmio + NV_ADMA_CTL);
1254 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1255 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1256 
1257 	tmp = readw(mmio + NV_ADMA_CTL);
1258 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1259 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1260 	udelay(1);
1261 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1262 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1263 
1264 	return 0;
1265 }
1266 #endif
1267 
1268 static void nv_adma_setup_port(struct ata_port *ap)
1269 {
1270 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1271 	struct ata_ioports *ioport = &ap->ioaddr;
1272 
1273 	VPRINTK("ENTER\n");
1274 
1275 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1276 
1277 	ioport->cmd_addr	= mmio;
1278 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1279 	ioport->error_addr	=
1280 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1281 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1282 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1283 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1284 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1285 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1286 	ioport->status_addr	=
1287 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1288 	ioport->altstatus_addr	=
1289 	ioport->ctl_addr	= mmio + 0x20;
1290 }
1291 
1292 static int nv_adma_host_init(struct ata_host *host)
1293 {
1294 	struct pci_dev *pdev = to_pci_dev(host->dev);
1295 	unsigned int i;
1296 	u32 tmp32;
1297 
1298 	VPRINTK("ENTER\n");
1299 
1300 	/* enable ADMA on the ports */
1301 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1302 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1303 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1304 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1305 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1306 
1307 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1308 
1309 	for (i = 0; i < host->n_ports; i++)
1310 		nv_adma_setup_port(host->ports[i]);
1311 
1312 	return 0;
1313 }
1314 
1315 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1316 			      struct scatterlist *sg,
1317 			      int idx,
1318 			      struct nv_adma_prd *aprd)
1319 {
1320 	u8 flags = 0;
1321 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1322 		flags |= NV_APRD_WRITE;
1323 	if (idx == qc->n_elem - 1)
1324 		flags |= NV_APRD_END;
1325 	else if (idx != 4)
1326 		flags |= NV_APRD_CONT;
1327 
1328 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1329 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1330 	aprd->flags = flags;
1331 	aprd->packet_len = 0;
1332 }
1333 
1334 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1335 {
1336 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1337 	struct nv_adma_prd *aprd;
1338 	struct scatterlist *sg;
1339 	unsigned int si;
1340 
1341 	VPRINTK("ENTER\n");
1342 
1343 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1344 		aprd = (si < 5) ? &cpb->aprd[si] :
1345 			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1346 		nv_adma_fill_aprd(qc, sg, si, aprd);
1347 	}
1348 	if (si > 5)
1349 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1350 	else
1351 		cpb->next_aprd = cpu_to_le64(0);
1352 }
1353 
1354 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1355 {
1356 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1357 
1358 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1359 	   or interrupt-driven no-data commands. */
1360 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1361 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1362 		return 1;
1363 
1364 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1365 	   (qc->tf.protocol == ATA_PROT_NODATA))
1366 		return 0;
1367 
1368 	return 1;
1369 }
1370 
1371 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1372 {
1373 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1374 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1375 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1376 		       NV_CPB_CTL_IEN;
1377 
1378 	if (nv_adma_use_reg_mode(qc)) {
1379 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1380 			(qc->flags & ATA_QCFLAG_DMAMAP));
1381 		nv_adma_register_mode(qc->ap);
1382 		ata_bmdma_qc_prep(qc);
1383 		return;
1384 	}
1385 
1386 	cpb->resp_flags = NV_CPB_RESP_DONE;
1387 	wmb();
1388 	cpb->ctl_flags = 0;
1389 	wmb();
1390 
1391 	cpb->len		= 3;
1392 	cpb->tag		= qc->hw_tag;
1393 	cpb->next_cpb_idx	= 0;
1394 
1395 	/* turn on NCQ flags for NCQ commands */
1396 	if (qc->tf.protocol == ATA_PROT_NCQ)
1397 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1398 
1399 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1400 
1401 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1402 
1403 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1404 		nv_adma_fill_sg(qc, cpb);
1405 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1406 	} else
1407 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1408 
1409 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1410 	   until we are finished filling in all of the contents */
1411 	wmb();
1412 	cpb->ctl_flags = ctl_flags;
1413 	wmb();
1414 	cpb->resp_flags = 0;
1415 }
1416 
1417 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1418 {
1419 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1420 	void __iomem *mmio = pp->ctl_block;
1421 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1422 
1423 	VPRINTK("ENTER\n");
1424 
1425 	/* We can't handle result taskfile with NCQ commands, since
1426 	   retrieving the taskfile switches us out of ADMA mode and would abort
1427 	   existing commands. */
1428 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1429 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1430 		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1431 		return AC_ERR_SYSTEM;
1432 	}
1433 
1434 	if (nv_adma_use_reg_mode(qc)) {
1435 		/* use ATA register mode */
1436 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1437 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1438 			(qc->flags & ATA_QCFLAG_DMAMAP));
1439 		nv_adma_register_mode(qc->ap);
1440 		return ata_bmdma_qc_issue(qc);
1441 	} else
1442 		nv_adma_mode(qc->ap);
1443 
1444 	/* write append register, command tag in lower 8 bits
1445 	   and (number of cpbs to append -1) in top 8 bits */
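
	/*
	 * This driver only ever appends one CPB at a time, so the top byte
	 * is left at zero and only the tag is written below.  Appending a
	 * batch of n consecutive CPBs would presumably look like the
	 * following (an untested sketch based on the comment above):
	 *
	 *	writew(((n - 1) << 8) | first_tag, mmio + NV_ADMA_APPEND);
	 */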
1446 	wmb();
1447 
1448 	if (curr_ncq != pp->last_issue_ncq) {
1449 		/* Seems to need some delay before switching between NCQ and
1450 		   non-NCQ commands, else we get command timeouts and such. */
1451 		udelay(20);
1452 		pp->last_issue_ncq = curr_ncq;
1453 	}
1454 
1455 	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1456 
1457 	DPRINTK("Issued tag %u\n", qc->hw_tag);
1458 
1459 	return 0;
1460 }
1461 
1462 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1463 {
1464 	struct ata_host *host = dev_instance;
1465 	unsigned int i;
1466 	unsigned int handled = 0;
1467 	unsigned long flags;
1468 
1469 	spin_lock_irqsave(&host->lock, flags);
1470 
1471 	for (i = 0; i < host->n_ports; i++) {
1472 		struct ata_port *ap = host->ports[i];
1473 		struct ata_queued_cmd *qc;
1474 
1475 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1476 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1477 			handled += ata_bmdma_port_intr(ap, qc);
1478 		} else {
1479 			/*
1480 			 * No request pending?  Clear interrupt status
1481 			 * anyway, in case there's one pending.
1482 			 */
1483 			ap->ops->sff_check_status(ap);
1484 		}
1485 	}
1486 
1487 	spin_unlock_irqrestore(&host->lock, flags);
1488 
1489 	return IRQ_RETVAL(handled);
1490 }
1491 
1492 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1493 {
1494 	int i, handled = 0;
1495 
1496 	for (i = 0; i < host->n_ports; i++) {
1497 		handled += nv_host_intr(host->ports[i], irq_stat);
1498 		irq_stat >>= NV_INT_PORT_SHIFT;
1499 	}
1500 
1501 	return IRQ_RETVAL(handled);
1502 }
1503 
1504 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1505 {
1506 	struct ata_host *host = dev_instance;
1507 	u8 irq_stat;
1508 	irqreturn_t ret;
1509 
1510 	spin_lock(&host->lock);
1511 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1512 	ret = nv_do_interrupt(host, irq_stat);
1513 	spin_unlock(&host->lock);
1514 
1515 	return ret;
1516 }
1517 
1518 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1519 {
1520 	struct ata_host *host = dev_instance;
1521 	u8 irq_stat;
1522 	irqreturn_t ret;
1523 
1524 	spin_lock(&host->lock);
1525 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1526 	ret = nv_do_interrupt(host, irq_stat);
1527 	spin_unlock(&host->lock);
1528 
1529 	return ret;
1530 }
1531 
1532 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1533 {
1534 	if (sc_reg > SCR_CONTROL)
1535 		return -EINVAL;
1536 
1537 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1538 	return 0;
1539 }
1540 
1541 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1542 {
1543 	if (sc_reg > SCR_CONTROL)
1544 		return -EINVAL;
1545 
1546 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1547 	return 0;
1548 }
1549 
1550 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1551 			unsigned long deadline)
1552 {
1553 	struct ata_eh_context *ehc = &link->eh_context;
1554 
1555 	/* Do hardreset iff it's post-boot probing; please read the
1556 	 * comment above port ops for details.
1557 	 */
1558 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1559 	    !ata_dev_enabled(link->device))
1560 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1561 				    NULL, NULL);
1562 	else {
1563 		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1564 		int rc;
1565 
1566 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1567 			ata_link_info(link,
1568 				      "nv: skipping hardreset on occupied port\n");
1569 
1570 		/* make sure the link is online */
1571 		rc = sata_link_resume(link, timing, deadline);
1572 		/* whine about phy resume failure but proceed */
1573 		if (rc && rc != -EOPNOTSUPP)
1574 			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1575 				      rc);
1576 	}
1577 
1578 	/* device signature acquisition is unreliable */
1579 	return -EAGAIN;
1580 }
1581 
1582 static void nv_nf2_freeze(struct ata_port *ap)
1583 {
1584 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1585 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1586 	u8 mask;
1587 
1588 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1589 	mask &= ~(NV_INT_ALL << shift);
1590 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1591 }
1592 
1593 static void nv_nf2_thaw(struct ata_port *ap)
1594 {
1595 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1596 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1597 	u8 mask;
1598 
1599 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1600 
1601 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1602 	mask |= (NV_INT_MASK << shift);
1603 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1604 }
1605 
1606 static void nv_ck804_freeze(struct ata_port *ap)
1607 {
1608 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1609 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1610 	u8 mask;
1611 
1612 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1613 	mask &= ~(NV_INT_ALL << shift);
1614 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1615 }
1616 
1617 static void nv_ck804_thaw(struct ata_port *ap)
1618 {
1619 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1620 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1621 	u8 mask;
1622 
1623 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1624 
1625 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1626 	mask |= (NV_INT_MASK << shift);
1627 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1628 }
1629 
1630 static void nv_mcp55_freeze(struct ata_port *ap)
1631 {
1632 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1633 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1634 	u32 mask;
1635 
1636 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1637 
1638 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1639 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1640 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1641 }
1642 
1643 static void nv_mcp55_thaw(struct ata_port *ap)
1644 {
1645 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1646 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1647 	u32 mask;
1648 
1649 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1650 
1651 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1652 	mask |= (NV_INT_MASK_MCP55 << shift);
1653 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1654 }
1655 
1656 static void nv_adma_error_handler(struct ata_port *ap)
1657 {
1658 	struct nv_adma_port_priv *pp = ap->private_data;
1659 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1660 		void __iomem *mmio = pp->ctl_block;
1661 		int i;
1662 		u16 tmp;
1663 
1664 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1665 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1666 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1667 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1668 			u32 status = readw(mmio + NV_ADMA_STAT);
1669 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1670 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1671 
1672 			ata_port_err(ap,
1673 				"EH in ADMA mode, notifier 0x%X "
1674 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1675 				"next cpb count 0x%X next cpb idx 0x%x\n",
1676 				notifier, notifier_error, gen_ctl, status,
1677 				cpb_count, next_cpb_idx);
1678 
1679 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1680 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1681 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1682 				    ap->link.sactive & (1 << i))
1683 					ata_port_err(ap,
1684 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1685 						i, cpb->ctl_flags, cpb->resp_flags);
1686 			}
1687 		}
1688 
1689 		/* Push us back into port register mode for error handling. */
1690 		nv_adma_register_mode(ap);
1691 
1692 		/* Mark all of the CPBs as invalid to prevent them from
1693 		   being executed */
1694 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1695 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1696 
1697 		/* clear CPB fetch count */
1698 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1699 
1700 		/* Reset channel */
1701 		tmp = readw(mmio + NV_ADMA_CTL);
1702 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1703 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1704 		udelay(1);
1705 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1706 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1707 	}
1708 
1709 	ata_bmdma_error_handler(ap);
1710 }
1711 
1712 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1713 {
1714 	struct nv_swncq_port_priv *pp = ap->private_data;
1715 	struct defer_queue *dq = &pp->defer_queue;
1716 
1717 	/* queue is full */
1718 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1719 	dq->defer_bits |= (1 << qc->hw_tag);
1720 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1721 }
1722 
1723 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1724 {
1725 	struct nv_swncq_port_priv *pp = ap->private_data;
1726 	struct defer_queue *dq = &pp->defer_queue;
1727 	unsigned int tag;
1728 
1729 	if (dq->head == dq->tail)	/* null queue */
1730 		return NULL;
1731 
1732 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1733 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1734 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1735 	dq->defer_bits &= ~(1 << tag);
1736 
1737 	return ata_qc_from_tag(ap, tag);
1738 }
1739 
1740 static void nv_swncq_fis_reinit(struct ata_port *ap)
1741 {
1742 	struct nv_swncq_port_priv *pp = ap->private_data;
1743 
1744 	pp->dhfis_bits = 0;
1745 	pp->dmafis_bits = 0;
1746 	pp->sdbfis_bits = 0;
1747 	pp->ncq_flags = 0;
1748 }
1749 
1750 static void nv_swncq_pp_reinit(struct ata_port *ap)
1751 {
1752 	struct nv_swncq_port_priv *pp = ap->private_data;
1753 	struct defer_queue *dq = &pp->defer_queue;
1754 
1755 	dq->head = 0;
1756 	dq->tail = 0;
1757 	dq->defer_bits = 0;
1758 	pp->qc_active = 0;
1759 	pp->last_issue_tag = ATA_TAG_POISON;
1760 	nv_swncq_fis_reinit(ap);
1761 }
1762 
1763 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1764 {
1765 	struct nv_swncq_port_priv *pp = ap->private_data;
1766 
1767 	writew(fis, pp->irq_block);
1768 }
1769 
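/*
 * ata_bmdma_stop() only looks at qc->ap, so a dummy queued command on the
 * stack is enough to stop the BMDMA engine for this port.
 */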
1770 static void __ata_bmdma_stop(struct ata_port *ap)
1771 {
1772 	struct ata_queued_cmd qc;
1773 
1774 	qc.ap = ap;
1775 	ata_bmdma_stop(&qc);
1776 }
1777 
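/*
 * Dump the SWNCQ bookkeeping state for debugging, then quiesce the port:
 * reset the per-port state, ack the SFF interrupt, stop the BMDMA engine
 * and clear all SWNCQ interrupt status bits.
 */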
1778 static void nv_swncq_ncq_stop(struct ata_port *ap)
1779 {
1780 	struct nv_swncq_port_priv *pp = ap->private_data;
1781 	unsigned int i;
1782 	u32 sactive;
1783 	u32 done_mask;
1784 
1785 	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1786 		     ap->qc_active, ap->link.sactive);
1787 	ata_port_err(ap,
1788 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1789 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1790 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1791 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1792 
1793 	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1794 		     ap->ops->sff_check_status(ap),
1795 		     ioread8(ap->ioaddr.error_addr));
1796 
1797 	sactive = readl(pp->sactive_block);
1798 	done_mask = pp->qc_active ^ sactive;
1799 
1800 	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1801 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1802 		u8 err = 0;
1803 		if (pp->qc_active & (1 << i))
1804 			err = 0;
1805 		else if (done_mask & (1 << i))
1806 			err = 1;
1807 		else
1808 			continue;
1809 
1810 		ata_port_err(ap,
1811 			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1812 			     (pp->dhfis_bits >> i) & 0x1,
1813 			     (pp->dmafis_bits >> i) & 0x1,
1814 			     (pp->sdbfis_bits >> i) & 0x1,
1815 			     (sactive >> i) & 0x1,
1816 			     (err ? "error! tag doesn't exist" : " "));
1817 	}
1818 
1819 	nv_swncq_pp_reinit(ap);
1820 	ap->ops->sff_irq_clear(ap);
1821 	__ata_bmdma_stop(ap);
1822 	nv_swncq_irq_clear(ap, 0xffff);
1823 }
1824 
1825 static void nv_swncq_error_handler(struct ata_port *ap)
1826 {
1827 	struct ata_eh_context *ehc = &ap->link.eh_context;
1828 
1829 	if (ap->link.sactive) {
1830 		nv_swncq_ncq_stop(ap);
1831 		ehc->i.action |= ATA_EH_RESET;
1832 	}
1833 
1834 	ata_bmdma_error_handler(ap);
1835 }
1836 
1837 #ifdef CONFIG_PM
1838 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1839 {
1840 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1841 	u32 tmp;
1842 
1843 	/* clear irq */
1844 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1845 
1846 	/* disable irq */
1847 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1848 
1849 	/* disable swncq */
1850 	tmp = readl(mmio + NV_CTL_MCP55);
1851 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1852 	writel(tmp, mmio + NV_CTL_MCP55);
1853 
1854 	return 0;
1855 }
1856 
1857 static int nv_swncq_port_resume(struct ata_port *ap)
1858 {
1859 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1860 	u32 tmp;
1861 
1862 	/* clear irq */
1863 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1864 
1865 	/* enable irq */
1866 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1867 
1868 	/* enable swncq */
1869 	tmp = readl(mmio + NV_CTL_MCP55);
1870 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1871 
1872 	return 0;
1873 }
1874 #endif
1875 
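/*
 * One-time controller setup for SWNCQ: tweak PCI config register 0x7f,
 * enable SWNCQ on both ports, unmask the interrupt sources and clear any
 * pending port interrupts.
 */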
1876 static void nv_swncq_host_init(struct ata_host *host)
1877 {
1878 	u32 tmp;
1879 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1880 	struct pci_dev *pdev = to_pci_dev(host->dev);
1881 	u8 regval;
1882 
1883 	/* disable ECO 398 */
1884 	pci_read_config_byte(pdev, 0x7f, &regval);
1885 	regval &= ~(1 << 7);
1886 	pci_write_config_byte(pdev, 0x7f, regval);
1887 
1888 	/* enable swncq */
1889 	tmp = readl(mmio + NV_CTL_MCP55);
1890 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1891 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1892 
1893 	/* enable irq intr */
1894 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1895 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1896 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1897 
1898 	/*  clear port irq */
1899 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1900 }
1901 
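/*
 * Per-device configuration hook: Maxtor drives behind MCP51, or behind
 * MCP55 revisions <= 0xa2, get their queue depth forced to 1, which
 * effectively disables SWNCQ for them.
 */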
1902 static int nv_swncq_slave_config(struct scsi_device *sdev)
1903 {
1904 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1905 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1906 	struct ata_device *dev;
1907 	int rc;
1908 	u8 rev;
1909 	u8 check_maxtor = 0;
1910 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1911 
1912 	rc = ata_scsi_slave_config(sdev);
1913 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1914 		/* Not a proper libata device, ignore */
1915 		return rc;
1916 
1917 	dev = &ap->link.device[sdev->id];
1918 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1919 		return rc;
1920 
1921 	/* if MCP51 and Maxtor, then disable ncq */
1922 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1923 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1924 		check_maxtor = 1;
1925 
1926 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1927 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1928 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1929 		pci_read_config_byte(pdev, 0x8, &rev);
1930 		if (rev <= 0xa2)
1931 			check_maxtor = 1;
1932 	}
1933 
1934 	if (!check_maxtor)
1935 		return rc;
1936 
1937 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1938 
1939 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1940 		ata_scsi_change_queue_depth(sdev, 1);
1941 		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1942 			       sdev->queue_depth);
1943 	}
1944 
1945 	return rc;
1946 }
1947 
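/*
 * Per-port setup for SWNCQ: allocate one PRD table per possible NCQ tag
 * and record the per-port SActive, interrupt status and tag register
 * locations.
 */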
1948 static int nv_swncq_port_start(struct ata_port *ap)
1949 {
1950 	struct device *dev = ap->host->dev;
1951 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1952 	struct nv_swncq_port_priv *pp;
1953 	int rc;
1954 
1955 	/* we might fall back to bmdma, so allocate bmdma resources */
1956 	rc = ata_bmdma_port_start(ap);
1957 	if (rc)
1958 		return rc;
1959 
1960 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1961 	if (!pp)
1962 		return -ENOMEM;
1963 
1964 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1965 				      &pp->prd_dma, GFP_KERNEL);
1966 	if (!pp->prd)
1967 		return -ENOMEM;
1968 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1969 
1970 	ap->private_data = pp;
1971 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1972 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1973 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1974 
1975 	return 0;
1976 }
1977 
1978 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1979 {
1980 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1981 		ata_bmdma_qc_prep(qc);
1982 		return;
1983 	}
1984 
1985 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1986 		return;
1987 
1988 	nv_swncq_fill_sg(qc);
1989 }
1990 
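/*
 * Build the PRD table for @qc in the slot reserved for its hardware tag,
 * splitting any segment that would cross a 64K boundary as BMDMA requires.
 */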
1991 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1992 {
1993 	struct ata_port *ap = qc->ap;
1994 	struct scatterlist *sg;
1995 	struct nv_swncq_port_priv *pp = ap->private_data;
1996 	struct ata_bmdma_prd *prd;
1997 	unsigned int si, idx;
1998 
1999 	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
2000 
2001 	idx = 0;
2002 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2003 		u32 addr, offset;
2004 		u32 sg_len, len;
2005 
2006 		addr = (u32)sg_dma_address(sg);
2007 		sg_len = sg_dma_len(sg);
2008 
2009 		while (sg_len) {
2010 			offset = addr & 0xffff;
2011 			len = sg_len;
2012 			if ((offset + sg_len) > 0x10000)
2013 				len = 0x10000 - offset;
2014 
2015 			prd[idx].addr = cpu_to_le32(addr);
2016 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2017 
2018 			idx++;
2019 			sg_len -= len;
2020 			addr += len;
2021 		}
2022 	}
2023 
2024 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2025 }
2026 
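/*
 * Issue one NCQ command: set its bit in the SActive shadow register,
 * update the per-tag bookkeeping and send the taskfile to the drive.
 */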
2027 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2028 					  struct ata_queued_cmd *qc)
2029 {
2030 	struct nv_swncq_port_priv *pp = ap->private_data;
2031 
2032 	if (qc == NULL)
2033 		return 0;
2034 
2035 	DPRINTK("Enter\n");
2036 
2037 	writel((1 << qc->hw_tag), pp->sactive_block);
2038 	pp->last_issue_tag = qc->hw_tag;
2039 	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2040 	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2041 	pp->qc_active |= (0x1 << qc->hw_tag);
2042 
2043 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2044 	ap->ops->sff_exec_command(ap, &qc->tf);
2045 
2046 	DPRINTK("Issued tag %u\n", qc->hw_tag);
2047 
2048 	return 0;
2049 }
2050 
2051 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2052 {
2053 	struct ata_port *ap = qc->ap;
2054 	struct nv_swncq_port_priv *pp = ap->private_data;
2055 
2056 	if (qc->tf.protocol != ATA_PROT_NCQ)
2057 		return ata_bmdma_qc_issue(qc);
2058 
2059 	DPRINTK("Enter\n");
2060 
2061 	if (!pp->qc_active)
2062 		nv_swncq_issue_atacmd(ap, qc);
2063 	else
2064 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2065 
2066 	return 0;
2067 }
2068 
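/*
 * A hotplug event was signalled: record it in the EH info, note the SError
 * value and freeze the port so error handling can deal with it.
 */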
2069 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2070 {
2071 	u32 serror;
2072 	struct ata_eh_info *ehi = &ap->link.eh_info;
2073 
2074 	ata_ehi_clear_desc(ehi);
2075 
2076 	/* AHCI needs SError cleared; otherwise, it might lock up */
2077 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2078 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2079 
2080 	/* analyze @fis */
2081 	if (fis & NV_SWNCQ_IRQ_ADDED)
2082 		ata_ehi_push_desc(ehi, "hot plug");
2083 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2084 		ata_ehi_push_desc(ehi, "hot unplug");
2085 
2086 	ata_ehi_hotplugged(ehi);
2087 
2088 	/* okay, let's hand over to EH */
2089 	ehi->serror |= serror;
2090 
2091 	ata_port_freeze(ap);
2092 }
2093 
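/*
 * Handle a Set Device Bits FIS: complete the commands whose SActive bits
 * the drive has cleared, then decide whether the last issued command must
 * be reissued or a deferred command can be started.
 */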
2094 static int nv_swncq_sdbfis(struct ata_port *ap)
2095 {
2096 	struct ata_queued_cmd *qc;
2097 	struct nv_swncq_port_priv *pp = ap->private_data;
2098 	struct ata_eh_info *ehi = &ap->link.eh_info;
2099 	u32 sactive;
2100 	u32 done_mask;
2101 	u8 host_stat;
2102 	u8 lack_dhfis = 0;
2103 
2104 	host_stat = ap->ops->bmdma_status(ap);
2105 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2106 		/* error when transferring data to/from memory */
2107 		ata_ehi_clear_desc(ehi);
2108 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2109 		ehi->err_mask |= AC_ERR_HOST_BUS;
2110 		ehi->action |= ATA_EH_RESET;
2111 		return -EINVAL;
2112 	}
2113 
2114 	ap->ops->sff_irq_clear(ap);
2115 	__ata_bmdma_stop(ap);
2116 
2117 	sactive = readl(pp->sactive_block);
2118 	done_mask = pp->qc_active ^ sactive;
2119 
2120 	pp->qc_active &= ~done_mask;
2121 	pp->dhfis_bits &= ~done_mask;
2122 	pp->dmafis_bits &= ~done_mask;
2123 	pp->sdbfis_bits |= done_mask;
2124 	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2125 
2126 	if (!ap->qc_active) {
2127 		DPRINTK("over\n");
2128 		nv_swncq_pp_reinit(ap);
2129 		return 0;
2130 	}
2131 
2132 	if (pp->qc_active & pp->dhfis_bits)
2133 		return 0;
2134 
2135 	if ((pp->ncq_flags & ncq_saw_backout) ||
2136 	    (pp->qc_active ^ pp->dhfis_bits))
2137 		/* if the controller failed to get a Device-to-Host Register
2138 		 * FIS for a command, the driver needs to reissue it.
2139 		 */
2140 		lack_dhfis = 1;
2141 
2142 	DPRINTK("id 0x%x QC: qc_active 0x%llx, "
2143 		"SWNCQ:qc_active 0x%X defer_bits %X "
2144 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2145 		ap->print_id, ap->qc_active, pp->qc_active,
2146 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2147 		pp->dmafis_bits, pp->last_issue_tag);
2148 
2149 	nv_swncq_fis_reinit(ap);
2150 
2151 	if (lack_dhfis) {
2152 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2153 		nv_swncq_issue_atacmd(ap, qc);
2154 		return 0;
2155 	}
2156 
2157 	if (pp->defer_queue.defer_bits) {
2158 		/* issue a command from the deferral queue */
2159 		qc = nv_swncq_qc_from_dq(ap);
2160 		WARN_ON(qc == NULL);
2161 		nv_swncq_issue_atacmd(ap, qc);
2162 	}
2163 
2164 	return 0;
2165 }
2166 
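/*
 * Fetch the tag from the per-port tag register (bits 2-6); used when a DMA
 * Setup FIS is received to find out which command the transfer is for.
 */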
2167 static inline u32 nv_swncq_tag(struct ata_port *ap)
2168 {
2169 	struct nv_swncq_port_priv *pp = ap->private_data;
2170 	u32 tag;
2171 
2172 	tag = readb(pp->tag_block) >> 2;
2173 	return (tag & 0x1f);
2174 }
2175 
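/*
 * A DMA Setup FIS was received: point the BMDMA engine at the PRD table
 * of the selected command, set the transfer direction and start the DMA.
 */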
2176 static void nv_swncq_dmafis(struct ata_port *ap)
2177 {
2178 	struct ata_queued_cmd *qc;
2179 	unsigned int rw;
2180 	u8 dmactl;
2181 	u32 tag;
2182 	struct nv_swncq_port_priv *pp = ap->private_data;
2183 
2184 	__ata_bmdma_stop(ap);
2185 	tag = nv_swncq_tag(ap);
2186 
2187 	DPRINTK("dma setup tag 0x%x\n", tag);
2188 	qc = ata_qc_from_tag(ap, tag);
2189 
2190 	if (unlikely(!qc))
2191 		return;
2192 
2193 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2194 
2195 	/* load PRD table addr. */
2196 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2197 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2198 
2199 	/* specify data direction, triple-check start bit is clear */
2200 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2201 	dmactl &= ~ATA_DMA_WR;
2202 	if (!rw)
2203 		dmactl |= ATA_DMA_WR;
2204 
2205 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2206 }
2207 
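/*
 * Per-port SWNCQ interrupt handling: ack the FIS status bits, deal with
 * hotplug and device errors, then act on SDB, Device-to-Host Register and
 * DMA Setup FIS notifications.
 */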
2208 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2209 {
2210 	struct nv_swncq_port_priv *pp = ap->private_data;
2211 	struct ata_queued_cmd *qc;
2212 	struct ata_eh_info *ehi = &ap->link.eh_info;
2213 	u32 serror;
2214 	u8 ata_stat;
2215 
2216 	ata_stat = ap->ops->sff_check_status(ap);
2217 	nv_swncq_irq_clear(ap, fis);
2218 	if (!fis)
2219 		return;
2220 
2221 	if (ap->pflags & ATA_PFLAG_FROZEN)
2222 		return;
2223 
2224 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2225 		nv_swncq_hotplug(ap, fis);
2226 		return;
2227 	}
2228 
2229 	if (!pp->qc_active)
2230 		return;
2231 
2232 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2233 		return;
2234 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2235 
2236 	if (ata_stat & ATA_ERR) {
2237 		ata_ehi_clear_desc(ehi);
2238 		ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2239 		ehi->err_mask |= AC_ERR_DEV;
2240 		ehi->serror |= serror;
2241 		ehi->action |= ATA_EH_RESET;
2242 		ata_port_freeze(ap);
2243 		return;
2244 	}
2245 
2246 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2247 		/* On a backout IRQ, the driver must issue
2248 		 * the command again some time later.
2249 		 */
2250 		pp->ncq_flags |= ncq_saw_backout;
2251 	}
2252 
2253 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2254 		pp->ncq_flags |= ncq_saw_sdb;
2255 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2256 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2257 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2258 			pp->dmafis_bits, readl(pp->sactive_block));
2259 		if (nv_swncq_sdbfis(ap) < 0)
2260 			goto irq_error;
2261 	}
2262 
2263 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2264 		/* The interrupt indicates the new command
2265 		 * was transmitted correctly to the drive.
2266 		 */
2267 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2268 		pp->ncq_flags |= ncq_saw_d2h;
2269 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2270 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2271 			ehi->err_mask |= AC_ERR_HSM;
2272 			ehi->action |= ATA_EH_RESET;
2273 			goto irq_error;
2274 		}
2275 
2276 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2277 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2278 			ata_stat = ap->ops->sff_check_status(ap);
2279 			if (ata_stat & ATA_BUSY)
2280 				goto irq_exit;
2281 
2282 			if (pp->defer_queue.defer_bits) {
2283 				DPRINTK("send next command\n");
2284 				qc = nv_swncq_qc_from_dq(ap);
2285 				nv_swncq_issue_atacmd(ap, qc);
2286 			}
2287 		}
2288 	}
2289 
2290 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2291 		/* program the dma controller with appropriate PRD buffers
2292 		 * and start the DMA transfer for the requested command.
2293 		 */
2294 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2295 		pp->ncq_flags |= ncq_saw_dmas;
2296 		nv_swncq_dmafis(ap);
2297 	}
2298 
2299 irq_exit:
2300 	return;
2301 irq_error:
2302 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2303 	ata_port_freeze(ap);
2304 	return;
2305 }
2306 
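/*
 * Top-level interrupt handler used in SWNCQ mode.  The MCP55 status
 * register packs both ports' bits (NV_INT_PORT_SHIFT_MCP55 apart); ports
 * with NCQ commands outstanding take the SWNCQ path, idle ports fall back
 * to the generic nv_host_intr() handling.
 */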
2307 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2308 {
2309 	struct ata_host *host = dev_instance;
2310 	unsigned int i;
2311 	unsigned int handled = 0;
2312 	unsigned long flags;
2313 	u32 irq_stat;
2314 
2315 	spin_lock_irqsave(&host->lock, flags);
2316 
2317 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2318 
2319 	for (i = 0; i < host->n_ports; i++) {
2320 		struct ata_port *ap = host->ports[i];
2321 
2322 		if (ap->link.sactive) {
2323 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2324 			handled = 1;
2325 		} else {
2326 			if (irq_stat)	/* preserve hotplug bits */
2327 				nv_swncq_irq_clear(ap, 0xfff0);
2328 
2329 			handled += nv_host_intr(ap, (u8)irq_stat);
2330 		}
2331 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2332 	}
2333 
2334 	spin_unlock_irqrestore(&host->lock, flags);
2335 
2336 	return IRQ_RETVAL(handled);
2337 }
2338 
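/*
 * PCI probe: skip IDE-only functions (they expose fewer than six BARs),
 * choose the programming interface (legacy, ADMA or SWNCQ) from the chip
 * type and module parameters, map the control BAR and activate the host.
 */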
2339 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2340 {
2341 	const struct ata_port_info *ppi[] = { NULL, NULL };
2342 	struct nv_pi_priv *ipriv;
2343 	struct ata_host *host;
2344 	struct nv_host_priv *hpriv;
2345 	int rc;
2346 	u32 bar;
2347 	void __iomem *base;
2348 	unsigned long type = ent->driver_data;
2349 
2350 	/* Make sure this is a SATA controller by counting the number of BARs
2351 	 * (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2352 	 * it's an IDE controller and we ignore it. */
2353 	for (bar = 0; bar < 6; bar++)
2354 		if (pci_resource_start(pdev, bar) == 0)
2355 			return -ENODEV;
2356 
2357 	ata_print_version_once(&pdev->dev, DRV_VERSION);
2358 
2359 	rc = pcim_enable_device(pdev);
2360 	if (rc)
2361 		return rc;
2362 
2363 	/* determine type and allocate host */
2364 	if (type == CK804 && adma_enabled) {
2365 		dev_notice(&pdev->dev, "Using ADMA mode\n");
2366 		type = ADMA;
2367 	} else if (type == MCP5x && swncq_enabled) {
2368 		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2369 		type = SWNCQ;
2370 	}
2371 
2372 	ppi[0] = &nv_port_info[type];
2373 	ipriv = ppi[0]->private_data;
2374 	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2375 	if (rc)
2376 		return rc;
2377 
2378 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2379 	if (!hpriv)
2380 		return -ENOMEM;
2381 	hpriv->type = type;
2382 	host->private_data = hpriv;
2383 
2384 	/* request and iomap NV_MMIO_BAR */
2385 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2386 	if (rc)
2387 		return rc;
2388 
2389 	/* configure SCR access */
2390 	base = host->iomap[NV_MMIO_BAR];
2391 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2392 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2393 
2394 	/* enable SATA space for CK804 */
2395 	if (type >= CK804) {
2396 		u8 regval;
2397 
2398 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2399 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2400 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2401 	}
2402 
2403 	/* init ADMA */
2404 	if (type == ADMA) {
2405 		rc = nv_adma_host_init(host);
2406 		if (rc)
2407 			return rc;
2408 	} else if (type == SWNCQ)
2409 		nv_swncq_host_init(host);
2410 
2411 	if (msi_enabled) {
2412 		dev_notice(&pdev->dev, "Using MSI\n");
2413 		pci_enable_msi(pdev);
2414 	}
2415 
2416 	pci_set_master(pdev);
2417 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2418 }
2419 
2420 #ifdef CONFIG_PM_SLEEP
2421 static int nv_pci_device_resume(struct pci_dev *pdev)
2422 {
2423 	struct ata_host *host = pci_get_drvdata(pdev);
2424 	struct nv_host_priv *hpriv = host->private_data;
2425 	int rc;
2426 
2427 	rc = ata_pci_device_do_resume(pdev);
2428 	if (rc)
2429 		return rc;
2430 
2431 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2432 		if (hpriv->type >= CK804) {
2433 			u8 regval;
2434 
2435 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2436 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2437 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2438 		}
2439 		if (hpriv->type == ADMA) {
2440 			u32 tmp32;
2441 			struct nv_adma_port_priv *pp;
2442 			/* enable/disable ADMA on the ports appropriately */
2443 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2444 
2445 			pp = host->ports[0]->private_data;
2446 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2447 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2448 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2449 			else
2450 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2451 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2452 			pp = host->ports[1]->private_data;
2453 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2454 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2455 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2456 			else
2457 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2458 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2459 
2460 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2461 		}
2462 	}
2463 
2464 	ata_host_resume(host);
2465 
2466 	return 0;
2467 }
2468 #endif
2469 
2470 static void nv_ck804_host_stop(struct ata_host *host)
2471 {
2472 	struct pci_dev *pdev = to_pci_dev(host->dev);
2473 	u8 regval;
2474 
2475 	/* disable SATA space for CK804 */
2476 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2477 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2478 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2479 }
2480 
2481 static void nv_adma_host_stop(struct ata_host *host)
2482 {
2483 	struct pci_dev *pdev = to_pci_dev(host->dev);
2484 	u32 tmp32;
2485 
2486 	/* disable ADMA on the ports */
2487 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2488 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2489 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2490 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2491 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2492 
2493 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2494 
2495 	nv_ck804_host_stop(host);
2496 }
2497 
2498 module_pci_driver(nv_pci_driver);
2499 
2500 module_param_named(adma, adma_enabled, bool, 0444);
2501 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2502 module_param_named(swncq, swncq_enabled, bool, 0444);
2503 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2504 module_param_named(msi, msi_enabled, bool, 0444);
2505 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2506