// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ enable bits */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};

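/*
 * Worked example of the DMA buffer layout implied by the constants
 * above (all values in bytes):
 *
 *   NV_ADMA_SGTBL_LEN        = (1024 - 128) / 16 = 56 APRDs per tag
 *   NV_ADMA_SGTBL_SZ         = 56 * 16           = 896
 *   NV_ADMA_SGTBL_TOTAL_LEN  = 56 + 5            = 61 (five more APRDs
 *                              live inside the CPB itself, see below)
 *   NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 896)  = 32768
 *
 * nv_adma_port_start() carves this 32K area into all 32 CPBs first
 * (32 * 128 = 4K), followed by one 56-entry APRD table per tag.
 */
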
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};

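/*
 * Minimal sketch of how a scatter/gather entry index maps onto the CPB
 * layout above; illustrative only, this helper is not used by the
 * driver (which open-codes the same rule in nv_adma_fill_sg()): the
 * first five APRDs are embedded in the CPB, further entries spill into
 * the per-tag external table pointed to by next_aprd.
 */
static inline struct nv_adma_prd *nv_adma_aprd_slot(struct nv_adma_cpb *cpb,
						    struct nv_adma_prd *ext_tbl,
						    unsigned int si)
{
	return (si < 5) ? &cpb->aprd[si] : &ext_tbl[si - 5];
}
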
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
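
/*
 * The defer queue is a ring buffer: head and tail are free-running
 * counters masked with (ATA_MAX_QUEUE - 1) on access (ATA_MAX_QUEUE is
 * a power of two), so the fill level is simply tail - head.  A sketch,
 * not used by the driver:
 */
static inline unsigned int nv_swncq_dq_count(const struct defer_queue *dq)
{
	return dq->tail - dq->head;
}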

enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* FIFO circular queue to store deferred commands */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
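
/*
 * For example, NV_ADMA_CHECK_INTR(gen_ctl, 0) tests bit 19 of the ADMA
 * general control/status word and NV_ADMA_CHECK_INTR(gen_ctl, 1) tests
 * bit 31 (19 + 12 * port number).
 */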

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};

/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 *  bko11195 reports that the link doesn't come online after hardreset
 *  on generic nv's and there have been several other similar reports
 *  on linux-ide.
 *
 *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
 *  softreset.
 *
 * NF2/3:
 *
 *  bko3352 reports nf2/3 controllers can't determine device signature
 *  reliably after hardreset.  The following thread reports detection
 *  failure on cold boot with the standard debouncing timing.
 *
 *  http://thread.gmane.org/gmane.linux.ide/34098
 *
 *  bko12176 reports that hardreset fails to bring up the link during
 *  boot on nf2.
 *
 * CK804:
 *
 *  For initial probing after boot and hot plugging, hardreset mostly
 *  works fine on CK804 but curiously, reprobing on the initial port
 *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 *  FIS in a somewhat nondeterministic way.
 *
 * SWNCQ:
 *
 *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 *  hardreset should be used and hardreset can't report the proper
 *  signature, which suggests that mcp5x is closer to nf2 as far as
 *  reset quirkiness is concerned.
 *
 *  bko12703 reports that boot probing fails for Intel SSD with
 *  hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
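
/*
 * NV_PI_PRIV() points .private_data at an anonymous file-scope compound
 * literal; e.g. NV_PI_PRIV(nv_generic_interrupt, &nv_sht) expands to
 *
 *	&(struct nv_pi_priv){ .irq_handler = nv_generic_interrupt,
 *			      .sht = &nv_sht }
 *
 * so each nv_port_info[] entry below gets its own static nv_pi_priv.
 */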

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
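
/*
 * Summary of the mode-switch handshake implemented by the two helpers
 * above: to drop back to register mode, wait for NV_ADMA_STAT_IDLE,
 * clear NV_ADMA_CTL_GO, then wait for NV_ADMA_STAT_LEGACY; to enter
 * ADMA mode, set GO and wait for LEGACY to clear with IDLE set.  Each
 * poll retries 20 times with ndelay(50) and only warns on timeout.
 */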

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping.  If either SCSI device is not allocated
		 * yet, it's OK since that port will discover its correct
		 * setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
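
/*
 * Worked example of the encoding above: a 28-bit LBA command with
 * ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE set ends up in cpb->tf[] as
 *
 *	tf[0] = (ATA_REG_ERR    << 8) | feature | WNB
 *	tf[1] = (ATA_REG_NSECT  << 8) | nsect
 *	tf[2] = (ATA_REG_LBAL   << 8) | lbal
 *	tf[3] = (ATA_REG_LBAM   << 8) | lbam
 *	tf[4] = (ATA_REG_LBAH   << 8) | lbah
 *	tf[5] = (ATA_REG_DEVICE << 8) | device
 *	tf[6] = (ATA_REG_CMD    << 8) | command | CMDEND
 *	tf[7..11] = IGN
 *
 * i.e. each halfword carries a shadow register index in its high byte,
 * the value in its low byte, and control bits from enum nv_adma_regbits.
 */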

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/* NV_INT_DEV indication seems unreliable
				   at times, at least in ADMA mode. Force
				   it on always when a command is active,
				   to prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
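			/*
			 * Illustrative example: with ap->qc_active ==
			 * 0x0b and done_mask == 0x02, the call above
			 * passes 0x09 (the new active mask) and libata
			 * completes the command with tag 1.
			 */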
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * pad buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fall back to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * try to raise the DMA mask to allocate the CPB/APRD table.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			return rc;
	}
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

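	/*
	 * Program the CPB base address.  Note the (>> 16) >> 16 idiom
	 * below instead of >> 32: dma_addr_t may be only 32 bits wide,
	 * and shifting a 32-bit value by 32 is undefined in C.
	 */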
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

1353 {
1354 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1355 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1356 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1357 		       NV_CPB_CTL_IEN;
1358 
1359 	if (nv_adma_use_reg_mode(qc)) {
1360 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1361 			(qc->flags & ATA_QCFLAG_DMAMAP));
1362 		nv_adma_register_mode(qc->ap);
1363 		ata_bmdma_qc_prep(qc);
1364 		return;
1365 	}
1366 
1367 	cpb->resp_flags = NV_CPB_RESP_DONE;
1368 	wmb();
1369 	cpb->ctl_flags = 0;
1370 	wmb();
1371 
1372 	cpb->len		= 3;
1373 	cpb->tag		= qc->hw_tag;
1374 	cpb->next_cpb_idx	= 0;
1375 
1376 	/* turn on NCQ flags for NCQ commands */
1377 	if (qc->tf.protocol == ATA_PROT_NCQ)
1378 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1379 
1380 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1381 
1382 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1383 
1384 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1385 		nv_adma_fill_sg(qc, cpb);
1386 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1387 	} else
1388 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1389 
1390 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1391 	   until we are finished filling in all of the contents */
1392 	wmb();
1393 	cpb->ctl_flags = ctl_flags;
1394 	wmb();
1395 	cpb->resp_flags = 0;
1396 }
1397 
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		handled += nv_host_intr(host->ports[i], irq_stat);
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

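/*
 * The SCRs are 32-bit registers laid out consecutively from scr_addr,
 * hence the sc_reg * 4 scaling above: SStatus at +0x00, SError at
 * +0x04, SControl at +0x08.  SCR_ACTIVE and beyond are rejected as
 * unsupported here.
 */
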
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}

static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* warn if the queue is already full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}

static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* queue is empty */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}

1721 static void nv_swncq_fis_reinit(struct ata_port *ap)
1722 {
1723 	struct nv_swncq_port_priv *pp = ap->private_data;
1724 
1725 	pp->dhfis_bits = 0;
1726 	pp->dmafis_bits = 0;
1727 	pp->sdbfis_bits = 0;
1728 	pp->ncq_flags = 0;
1729 }
1730 
1731 static void nv_swncq_pp_reinit(struct ata_port *ap)
1732 {
1733 	struct nv_swncq_port_priv *pp = ap->private_data;
1734 	struct defer_queue *dq = &pp->defer_queue;
1735 
1736 	dq->head = 0;
1737 	dq->tail = 0;
1738 	dq->defer_bits = 0;
1739 	pp->qc_active = 0;
1740 	pp->last_issue_tag = ATA_TAG_POISON;
1741 	nv_swncq_fis_reinit(ap);
1742 }
1743 
1744 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1745 {
1746 	struct nv_swncq_port_priv *pp = ap->private_data;
1747 
1748 	writew(fis, pp->irq_block);
1749 }
1750 
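/*
 * ata_bmdma_stop() takes a qc but only dereferences qc->ap, so wrap it
 * with a dummy command to stop the engine outside of command context.
 */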
1751 static void __ata_bmdma_stop(struct ata_port *ap)
1752 {
1753 	struct ata_queued_cmd qc;
1754 
1755 	qc.ap = ap;
1756 	ata_bmdma_stop(&qc);
1757 }
1758 
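/*
 * Dump the SWNCQ bookkeeping and controller state for debugging, then
 * quiesce the port: reinitialize the private state, clear interrupts
 * and stop the BMDMA engine.
 */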
1759 static void nv_swncq_ncq_stop(struct ata_port *ap)
1760 {
1761 	struct nv_swncq_port_priv *pp = ap->private_data;
1762 	unsigned int i;
1763 	u32 sactive;
1764 	u32 done_mask;
1765 
1766 	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1767 		     ap->qc_active, ap->link.sactive);
1768 	ata_port_err(ap,
1769 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1770 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1771 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1772 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1773 
1774 	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1775 		     ap->ops->sff_check_status(ap),
1776 		     ioread8(ap->ioaddr.error_addr));
1777 
1778 	sactive = readl(pp->sactive_block);
1779 	done_mask = pp->qc_active ^ sactive;
1780 
1781 	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1782 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1783 		u8 err = 0;
1784 		if (pp->qc_active & (1 << i))
1785 			err = 0;
1786 		else if (done_mask & (1 << i))
1787 			err = 1;
1788 		else
1789 			continue;
1790 
1791 		ata_port_err(ap,
1792 			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1793 			     (pp->dhfis_bits >> i) & 0x1,
1794 			     (pp->dmafis_bits >> i) & 0x1,
1795 			     (pp->sdbfis_bits >> i) & 0x1,
1796 			     (sactive >> i) & 0x1,
1797 			     (err ? "error! tag doesn't exist" : " "));
1798 	}
1799 
1800 	nv_swncq_pp_reinit(ap);
1801 	ap->ops->sff_irq_clear(ap);
1802 	__ata_bmdma_stop(ap);
1803 	nv_swncq_irq_clear(ap, 0xffff);
1804 }
1805 
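/*
 * SWNCQ error handler: if NCQ commands were in flight, stop NCQ
 * processing and force a reset before the standard BMDMA error
 * handler takes over.
 */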
1806 static void nv_swncq_error_handler(struct ata_port *ap)
1807 {
1808 	struct ata_eh_context *ehc = &ap->link.eh_context;
1809 
1810 	if (ap->link.sactive) {
1811 		nv_swncq_ncq_stop(ap);
1812 		ehc->i.action |= ATA_EH_RESET;
1813 	}
1814 
1815 	ata_bmdma_error_handler(ap);
1816 }
1817 
1818 #ifdef CONFIG_PM
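/*
 * Port suspend/resume: mask or unmask the MCP55 interrupt sources and
 * toggle the SWNCQ enable bits in the host control register.
 */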
1819 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1820 {
1821 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1822 	u32 tmp;
1823 
1824 	/* clear irq */
1825 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1826 
1827 	/* disable irq */
1828 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1829 
1830 	/* disable swncq */
1831 	tmp = readl(mmio + NV_CTL_MCP55);
1832 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1833 	writel(tmp, mmio + NV_CTL_MCP55);
1834 
1835 	return 0;
1836 }
1837 
1838 static int nv_swncq_port_resume(struct ata_port *ap)
1839 {
1840 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1841 	u32 tmp;
1842 
1843 	/* clear irq */
1844 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1845 
1846 	/* enable irq */
1847 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1848 
1849 	/* enable swncq */
1850 	tmp = readl(mmio + NV_CTL_MCP55);
1851 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1852 
1853 	return 0;
1854 }
1855 #endif
1856 
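/*
 * One-time SWNCQ setup: clear the ECO 398 bit, enable SWNCQ on both
 * ports, unmask the MCP55 interrupt sources and clear any pending
 * port interrupts.
 */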
1857 static void nv_swncq_host_init(struct ata_host *host)
1858 {
1859 	u32 tmp;
1860 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1861 	struct pci_dev *pdev = to_pci_dev(host->dev);
1862 	u8 regval;
1863 
1864 	/* disable ECO 398 (clear bit 7 of config register 0x7f) */
1865 	pci_read_config_byte(pdev, 0x7f, &regval);
1866 	regval &= ~(1 << 7);
1867 	pci_write_config_byte(pdev, 0x7f, regval);
1868 
1869 	/* enable swncq */
1870 	tmp = readl(mmio + NV_CTL_MCP55);
1871 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1872 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1873 
1874 	/* enable irq intr */
1875 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1876 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1877 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1878 
1879 	/* clear port irq */
1880 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1881 }
1882 
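/*
 * Disable NCQ (force queue depth 1) for Maxtor drives on MCP51, and
 * on MCP55 revisions up to a2.
 */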
1883 static int nv_swncq_slave_config(struct scsi_device *sdev)
1884 {
1885 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1886 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1887 	struct ata_device *dev;
1888 	int rc;
1889 	u8 rev;
1890 	u8 check_maxtor = 0;
1891 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1892 
1893 	rc = ata_scsi_slave_config(sdev);
1894 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1895 		/* Not a proper libata device, ignore */
1896 		return rc;
1897 
1898 	dev = &ap->link.device[sdev->id];
1899 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1900 		return rc;
1901 
1902 	/* if MCP51 and Maxtor, then disable ncq */
1903 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1904 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1905 		check_maxtor = 1;
1906 
1907 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1908 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1909 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1910 		pci_read_config_byte(pdev, 0x8, &rev);
1911 		if (rev <= 0xa2)
1912 			check_maxtor = 1;
1913 	}
1914 
1915 	if (!check_maxtor)
1916 		return rc;
1917 
1918 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1919 
1920 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1921 		ata_scsi_change_queue_depth(sdev, 1);
1922 		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1923 			       sdev->queue_depth);
1924 	}
1925 
1926 	return rc;
1927 }
1928 
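/*
 * Allocate per-port SWNCQ state: BMDMA resources for the non-NCQ
 * fallback path, one PRD table per NCQ tag, and the register block
 * addresses for SActive, interrupt status and the DMA setup tag.
 */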
1929 static int nv_swncq_port_start(struct ata_port *ap)
1930 {
1931 	struct device *dev = ap->host->dev;
1932 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1933 	struct nv_swncq_port_priv *pp;
1934 	int rc;
1935 
1936 	/* we might fall back to bmdma, allocate bmdma resources */
1937 	rc = ata_bmdma_port_start(ap);
1938 	if (rc)
1939 		return rc;
1940 
1941 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1942 	if (!pp)
1943 		return -ENOMEM;
1944 
1945 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1946 				      &pp->prd_dma, GFP_KERNEL);
1947 	if (!pp->prd)
1948 		return -ENOMEM;
1949 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1950 
1951 	ap->private_data = pp;
1952 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1953 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1954 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1955 
1956 	return 0;
1957 }
1958 
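/*
 * NCQ commands get our private per-tag PRD tables; everything else
 * goes through the normal BMDMA prep path.
 */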
1959 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1960 {
1961 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1962 		ata_bmdma_qc_prep(qc);
1963 		return;
1964 	}
1965 
1966 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1967 		return;
1968 
1969 	nv_swncq_fill_sg(qc);
1970 }
1971 
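/*
 * Build the BMDMA PRD table for this command's tag, splitting each
 * scatterlist entry so that no PRD crosses a 64K boundary, and mark
 * the final entry with ATA_PRD_EOT.
 */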
1972 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1973 {
1974 	struct ata_port *ap = qc->ap;
1975 	struct scatterlist *sg;
1976 	struct nv_swncq_port_priv *pp = ap->private_data;
1977 	struct ata_bmdma_prd *prd;
1978 	unsigned int si, idx;
1979 
1980 	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1981 
1982 	idx = 0;
1983 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1984 		u32 addr, offset;
1985 		u32 sg_len, len;
1986 
1987 		addr = (u32)sg_dma_address(sg);
1988 		sg_len = sg_dma_len(sg);
1989 
1990 		while (sg_len) {
1991 			offset = addr & 0xffff;
1992 			len = sg_len;
1993 			if ((offset + sg_len) > 0x10000)
1994 				len = 0x10000 - offset;
1995 
1996 			prd[idx].addr = cpu_to_le32(addr);
1997 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1998 
1999 			idx++;
2000 			sg_len -= len;
2001 			addr += len;
2002 		}
2003 	}
2004 
2005 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2006 }
2007 
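/*
 * Issue one NCQ command: set its tag in SActive, record it in the
 * driver's bookkeeping bitmaps, then load the taskfile and send the
 * command to the drive.
 */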
2008 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2009 					  struct ata_queued_cmd *qc)
2010 {
2011 	struct nv_swncq_port_priv *pp = ap->private_data;
2012 
2013 	if (qc == NULL)
2014 		return 0;
2015 
2016 	DPRINTK("Enter\n");
2017 
2018 	writel((1 << qc->hw_tag), pp->sactive_block);
2019 	pp->last_issue_tag = qc->hw_tag;
2020 	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2021 	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2022 	pp->qc_active |= (0x1 << qc->hw_tag);
2023 
2024 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2025 	ap->ops->sff_exec_command(ap, &qc->tf);
2026 
2027 	DPRINTK("Issued tag %u\n", qc->hw_tag);
2028 
2029 	return 0;
2030 }
2031 
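/*
 * Issue immediately when the port is idle; otherwise defer the command
 * until those already in flight complete.
 */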
2032 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2033 {
2034 	struct ata_port *ap = qc->ap;
2035 	struct nv_swncq_port_priv *pp = ap->private_data;
2036 
2037 	if (qc->tf.protocol != ATA_PROT_NCQ)
2038 		return ata_bmdma_qc_issue(qc);
2039 
2040 	DPRINTK("Enter\n");
2041 
2042 	if (!pp->qc_active)
2043 		nv_swncq_issue_atacmd(ap, qc);
2044 	else
2045 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2046 
2047 	return 0;
2048 }
2049 
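/*
 * A hotplug event was signalled: record which one, clear SError and
 * freeze the port so EH can recover.
 */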
2050 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2051 {
2052 	u32 serror;
2053 	struct ata_eh_info *ehi = &ap->link.eh_info;
2054 
2055 	ata_ehi_clear_desc(ehi);
2056 
2057 	/* AHCI needs SError cleared; otherwise, it might lock up */
2058 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2059 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2060 
2061 	/* analyze @fis */
2062 	if (fis & NV_SWNCQ_IRQ_ADDED)
2063 		ata_ehi_push_desc(ehi, "hot plug");
2064 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2065 		ata_ehi_push_desc(ehi, "hot unplug");
2066 
2067 	ata_ehi_hotplugged(ehi);
2068 
2069 	/* okay, let's hand over to EH */
2070 	ehi->serror |= serror;
2071 
2072 	ata_port_freeze(ap);
2073 }
2074 
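/*
 * Handle a Set Device Bits FIS: check for DMA errors, complete every
 * command whose SActive bit the drive has cleared, then either reissue
 * the last command (if its D2H Register FIS never arrived) or start
 * the next deferred command.
 */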
2075 static int nv_swncq_sdbfis(struct ata_port *ap)
2076 {
2077 	struct ata_queued_cmd *qc;
2078 	struct nv_swncq_port_priv *pp = ap->private_data;
2079 	struct ata_eh_info *ehi = &ap->link.eh_info;
2080 	u32 sactive;
2081 	u32 done_mask;
2082 	u8 host_stat;
2083 	u8 lack_dhfis = 0;
2084 
2085 	host_stat = ap->ops->bmdma_status(ap);
2086 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2087 		/* error when transferring data to/from memory */
2088 		ata_ehi_clear_desc(ehi);
2089 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2090 		ehi->err_mask |= AC_ERR_HOST_BUS;
2091 		ehi->action |= ATA_EH_RESET;
2092 		return -EINVAL;
2093 	}
2094 
2095 	ap->ops->sff_irq_clear(ap);
2096 	__ata_bmdma_stop(ap);
2097 
2098 	sactive = readl(pp->sactive_block);
2099 	done_mask = pp->qc_active ^ sactive;
2100 
2101 	pp->qc_active &= ~done_mask;
2102 	pp->dhfis_bits &= ~done_mask;
2103 	pp->dmafis_bits &= ~done_mask;
2104 	pp->sdbfis_bits |= done_mask;
2105 	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2106 
2107 	if (!ap->qc_active) {
2108 		DPRINTK("over\n");
2109 		nv_swncq_pp_reinit(ap);
2110 		return 0;
2111 	}
2112 
2113 	if (pp->qc_active & pp->dhfis_bits)
2114 		return 0;
2115 
2116 	if ((pp->ncq_flags & ncq_saw_backout) ||
2117 	    (pp->qc_active ^ pp->dhfis_bits))
2118 		/* If the controller failed to receive a Device-to-Host
2119 		 * Register FIS, the driver must reissue the command.
2120 		 */
2121 		lack_dhfis = 1;
2122 
2123 	DPRINTK("id 0x%x QC: qc_active 0x%x, "
2124 		"SWNCQ:qc_active 0x%X defer_bits %X "
2125 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2126 		ap->print_id, ap->qc_active, pp->qc_active,
2127 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2128 		pp->dmafis_bits, pp->last_issue_tag);
2129 
2130 	nv_swncq_fis_reinit(ap);
2131 
2132 	if (lack_dhfis) {
2133 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2134 		nv_swncq_issue_atacmd(ap, qc);
2135 		return 0;
2136 	}
2137 
2138 	if (pp->defer_queue.defer_bits) {
2139 		/* send deferral queue command */
2140 		qc = nv_swncq_qc_from_dq(ap);
2141 		WARN_ON(qc == NULL);
2142 		nv_swncq_issue_atacmd(ap, qc);
2143 	}
2144 
2145 	return 0;
2146 }
2147 
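/* Read the NCQ tag latched by the controller for the DMA Setup FIS. */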
2148 static inline u32 nv_swncq_tag(struct ata_port *ap)
2149 {
2150 	struct nv_swncq_port_priv *pp = ap->private_data;
2151 	u32 tag;
2152 
2153 	tag = readb(pp->tag_block) >> 2;
2154 	return (tag & 0x1f);
2155 }
2156 
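/*
 * A DMA Setup FIS arrived: point the BMDMA engine at the PRD table for
 * the tagged command, set the transfer direction and start the DMA.
 */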
2157 static void nv_swncq_dmafis(struct ata_port *ap)
2158 {
2159 	struct ata_queued_cmd *qc;
2160 	unsigned int rw;
2161 	u8 dmactl;
2162 	u32 tag;
2163 	struct nv_swncq_port_priv *pp = ap->private_data;
2164 
2165 	__ata_bmdma_stop(ap);
2166 	tag = nv_swncq_tag(ap);
2167 
2168 	DPRINTK("dma setup tag 0x%x\n", tag);
2169 	qc = ata_qc_from_tag(ap, tag);
2170 
2171 	if (unlikely(!qc))
2172 		return;
2173 
2174 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2175 
2176 	/* load PRD table addr. */
2177 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2178 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2179 
2180 	/* specify data direction, triple-check start bit is clear */
2181 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2182 	dmactl &= ~ATA_DMA_WR;
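	/* ATA_DMA_WR makes the engine write to memory (device-to-host) */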
2183 	if (!rw)
2184 		dmactl |= ATA_DMA_WR;
2185 
2186 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2187 }
2188 
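/*
 * Per-port SWNCQ interrupt handler: acknowledge the FIS notification
 * bits, handle hotplug and device errors, then advance the NCQ state
 * machine on SDB, D2H Register and DMA Setup FIS notifications.
 */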
2189 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2190 {
2191 	struct nv_swncq_port_priv *pp = ap->private_data;
2192 	struct ata_queued_cmd *qc;
2193 	struct ata_eh_info *ehi = &ap->link.eh_info;
2194 	u32 serror;
2195 	u8 ata_stat;
2196 
2197 	ata_stat = ap->ops->sff_check_status(ap);
2198 	nv_swncq_irq_clear(ap, fis);
2199 	if (!fis)
2200 		return;
2201 
2202 	if (ap->pflags & ATA_PFLAG_FROZEN)
2203 		return;
2204 
2205 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2206 		nv_swncq_hotplug(ap, fis);
2207 		return;
2208 	}
2209 
2210 	if (!pp->qc_active)
2211 		return;
2212 
2213 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2214 		return;
2215 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2216 
2217 	if (ata_stat & ATA_ERR) {
2218 		ata_ehi_clear_desc(ehi);
2219 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2220 		ehi->err_mask |= AC_ERR_DEV;
2221 		ehi->serror |= serror;
2222 		ehi->action |= ATA_EH_RESET;
2223 		ata_port_freeze(ap);
2224 		return;
2225 	}
2226 
2227 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2228 		/* The controller backed out the command; it must
2229 		 * be reissued later.
2230 		 */
2231 		pp->ncq_flags |= ncq_saw_backout;
2232 	}
2233 
2234 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2235 		pp->ncq_flags |= ncq_saw_sdb;
2236 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2237 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2238 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2239 			pp->dmafis_bits, readl(pp->sactive_block));
2240 		if (nv_swncq_sdbfis(ap) < 0)
2241 			goto irq_error;
2242 	}
2243 
2244 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2245 		/* The interrupt indicates the new command
2246 		 * was transmitted correctly to the drive.
2247 		 */
2248 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2249 		pp->ncq_flags |= ncq_saw_d2h;
2250 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2251 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2252 			ehi->err_mask |= AC_ERR_HSM;
2253 			ehi->action |= ATA_EH_RESET;
2254 			goto irq_error;
2255 		}
2256 
2257 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2258 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2259 			ata_stat = ap->ops->sff_check_status(ap);
2260 			if (ata_stat & ATA_BUSY)
2261 				goto irq_exit;
2262 
2263 			if (pp->defer_queue.defer_bits) {
2264 				DPRINTK("send next command\n");
2265 				qc = nv_swncq_qc_from_dq(ap);
2266 				nv_swncq_issue_atacmd(ap, qc);
2267 			}
2268 		}
2269 	}
2270 
2271 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2272 		/* program the DMA controller with appropriate PRD buffers
2273 		 * and start the DMA transfer for the requested command.
2274 		 */
2275 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2276 		pp->ncq_flags |= ncq_saw_dmas;
2277 		nv_swncq_dmafis(ap);
2278 	}
2279 
2280 irq_exit:
2281 	return;
2282 irq_error:
2283 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2284 	ata_port_freeze(ap);
2285 	return;
2286 }
2287 
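/*
 * Top-level MCP55 interrupt handler: read the shared status word once
 * and hand each port its 16-bit slice, taking the SWNCQ path only when
 * that port has NCQ commands in flight.
 */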
2288 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2289 {
2290 	struct ata_host *host = dev_instance;
2291 	unsigned int i;
2292 	unsigned int handled = 0;
2293 	unsigned long flags;
2294 	u32 irq_stat;
2295 
2296 	spin_lock_irqsave(&host->lock, flags);
2297 
2298 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2299 
2300 	for (i = 0; i < host->n_ports; i++) {
2301 		struct ata_port *ap = host->ports[i];
2302 
2303 		if (ap->link.sactive) {
2304 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2305 			handled = 1;
2306 		} else {
2307 			if (irq_stat)	/* preserve hotplug bits */
2308 				nv_swncq_irq_clear(ap, 0xfff0);
2309 
2310 			handled += nv_host_intr(ap, (u8)irq_stat);
2311 		}
2312 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2313 	}
2314 
2315 	spin_unlock_irqrestore(&host->lock, flags);
2316 
2317 	return IRQ_RETVAL(handled);
2318 }
2319 
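/*
 * PCI probe: distinguish the SATA function from its IDE siblings, pick
 * the programming model (ADMA on CK804, SWNCQ on MCP5x, plain BMDMA
 * otherwise), map BAR5 and activate the host.
 */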
2320 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2321 {
2322 	const struct ata_port_info *ppi[] = { NULL, NULL };
2323 	struct nv_pi_priv *ipriv;
2324 	struct ata_host *host;
2325 	struct nv_host_priv *hpriv;
2326 	int rc;
2327 	u32 bar;
2328 	void __iomem *base;
2329 	unsigned long type = ent->driver_data;
2330 
2331 	/* Make sure this is a SATA controller by checking that all six
2332 	 * BARs are assigned (NVIDIA SATA controllers always have six);
2333 	 * otherwise it is an IDE controller and we ignore it. */
2334 	for (bar = 0; bar < 6; bar++)
2335 		if (pci_resource_start(pdev, bar) == 0)
2336 			return -ENODEV;
2337 
2338 	ata_print_version_once(&pdev->dev, DRV_VERSION);
2339 
2340 	rc = pcim_enable_device(pdev);
2341 	if (rc)
2342 		return rc;
2343 
2344 	/* determine type and allocate host */
2345 	if (type == CK804 && adma_enabled) {
2346 		dev_notice(&pdev->dev, "Using ADMA mode\n");
2347 		type = ADMA;
2348 	} else if (type == MCP5x && swncq_enabled) {
2349 		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2350 		type = SWNCQ;
2351 	}
2352 
2353 	ppi[0] = &nv_port_info[type];
2354 	ipriv = ppi[0]->private_data;
2355 	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2356 	if (rc)
2357 		return rc;
2358 
2359 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2360 	if (!hpriv)
2361 		return -ENOMEM;
2362 	hpriv->type = type;
2363 	host->private_data = hpriv;
2364 
2365 	/* request and iomap NV_MMIO_BAR */
2366 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2367 	if (rc)
2368 		return rc;
2369 
2370 	/* configure SCR access */
2371 	base = host->iomap[NV_MMIO_BAR];
2372 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2373 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2374 
2375 	/* enable SATA space for CK804 */
2376 	if (type >= CK804) {
2377 		u8 regval;
2378 
2379 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2380 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2381 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2382 	}
2383 
2384 	/* init ADMA */
2385 	if (type == ADMA) {
2386 		rc = nv_adma_host_init(host);
2387 		if (rc)
2388 			return rc;
2389 	} else if (type == SWNCQ)
2390 		nv_swncq_host_init(host);
2391 
2392 	if (msi_enabled) {
2393 		dev_notice(&pdev->dev, "Using MSI\n");
2394 		pci_enable_msi(pdev);
2395 	}
2396 
2397 	pci_set_master(pdev);
2398 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2399 }
2400 
2401 #ifdef CONFIG_PM_SLEEP
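/*
 * On resume from suspend, re-enable the extended SATA register space
 * and restore the per-port ADMA enable bits before restarting the host.
 */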
2402 static int nv_pci_device_resume(struct pci_dev *pdev)
2403 {
2404 	struct ata_host *host = pci_get_drvdata(pdev);
2405 	struct nv_host_priv *hpriv = host->private_data;
2406 	int rc;
2407 
2408 	rc = ata_pci_device_do_resume(pdev);
2409 	if (rc)
2410 		return rc;
2411 
2412 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2413 		if (hpriv->type >= CK804) {
2414 			u8 regval;
2415 
2416 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2417 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2418 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2419 		}
2420 		if (hpriv->type == ADMA) {
2421 			u32 tmp32;
2422 			struct nv_adma_port_priv *pp;
2423 			/* enable/disable ADMA on the ports appropriately */
2424 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2425 
2426 			pp = host->ports[0]->private_data;
2427 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2428 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2429 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2430 			else
2431 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2432 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2433 			pp = host->ports[1]->private_data;
2434 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2435 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2436 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2437 			else
2438 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2439 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2440 
2441 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2442 		}
2443 	}
2444 
2445 	ata_host_resume(host);
2446 
2447 	return 0;
2448 }
2449 #endif
2450 
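/* Host teardown for CK804: hide the extended SATA register space again. */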
2451 static void nv_ck804_host_stop(struct ata_host *host)
2452 {
2453 	struct pci_dev *pdev = to_pci_dev(host->dev);
2454 	u8 regval;
2455 
2456 	/* disable SATA space for CK804 */
2457 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2458 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2459 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2460 }
2461 
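/*
 * Host teardown for ADMA: disable ADMA on both ports before disabling
 * the CK804 SATA register space.
 */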
2462 static void nv_adma_host_stop(struct ata_host *host)
2463 {
2464 	struct pci_dev *pdev = to_pci_dev(host->dev);
2465 	u32 tmp32;
2466 
2467 	/* disable ADMA on the ports */
2468 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2469 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2470 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2471 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2472 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2473 
2474 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2475 
2476 	nv_ck804_host_stop(host);
2477 }
2478 
2479 module_pci_driver(nv_pci_driver);
2480 
2481 module_param_named(adma, adma_enabled, bool, 0444);
2482 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2483 module_param_named(swncq, swncq_enabled, bool, 0444);
2484 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2485 module_param_named(msi, msi_enabled, bool, 0444);
2486 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2487