xref: /openbmc/linux/drivers/ata/sata_nv.c (revision 22246614)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a fashion
28  *  similar to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= 0x1f,
61 	NV_MWDMA_MASK			= 0x07,
62 	NV_UDMA_MASK			= 0x7f,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
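/* Flag and address bits used in the 16-bit register/value entries that
   nv_adma_tf_to_cpb() packs into a CPB's taskfile area. */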
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments the remainder are stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is length of taskfile in 64 bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
240 
241 
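/* ADMA per-port private data: the CPB array and external APRD table (with
   their DMA addresses) plus pointers into the port's MMIO register blocks. */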
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
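/* FIFO used by the SWNCQ code to hold commands deferred from issue;
   defer_bits mirrors the tags currently sitting in the queue. */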
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_prd	*prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue to store deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
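/* test the interrupt bit for the given port in the ADMA general control
   register value */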
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
307 
308 static void nv_nf2_freeze(struct ata_port *ap);
309 static void nv_nf2_thaw(struct ata_port *ap);
310 static void nv_ck804_freeze(struct ata_port *ap);
311 static void nv_ck804_thaw(struct ata_port *ap);
312 static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 			unsigned long deadline);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	SWNCQ,
356 };
357 
358 static const struct pci_device_id nv_pci_tbl[] = {
359 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
373 
374 	{ } /* terminate list */
375 };
376 
377 static struct pci_driver nv_pci_driver = {
378 	.name			= DRV_NAME,
379 	.id_table		= nv_pci_tbl,
380 	.probe			= nv_init_one,
381 #ifdef CONFIG_PM
382 	.suspend		= ata_pci_device_suspend,
383 	.resume			= nv_pci_device_resume,
384 #endif
385 	.remove			= ata_pci_remove_one,
386 };
387 
388 static struct scsi_host_template nv_sht = {
389 	ATA_BMDMA_SHT(DRV_NAME),
390 };
391 
392 static struct scsi_host_template nv_adma_sht = {
393 	ATA_NCQ_SHT(DRV_NAME),
394 	.can_queue		= NV_ADMA_MAX_CPBS,
395 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
396 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
397 	.slave_configure	= nv_adma_slave_config,
398 };
399 
400 static struct scsi_host_template nv_swncq_sht = {
401 	ATA_NCQ_SHT(DRV_NAME),
402 	.can_queue		= ATA_MAX_QUEUE,
403 	.sg_tablesize		= LIBATA_MAX_PRD,
404 	.dma_boundary		= ATA_DMA_BOUNDARY,
405 	.slave_configure	= nv_swncq_slave_config,
406 };
407 
408 static struct ata_port_operations nv_generic_ops = {
409 	.inherits		= &ata_bmdma_port_ops,
410 	.hardreset		= nv_hardreset,
411 	.scr_read		= nv_scr_read,
412 	.scr_write		= nv_scr_write,
413 };
414 
415 static struct ata_port_operations nv_nf2_ops = {
416 	.inherits		= &nv_generic_ops,
417 	.freeze			= nv_nf2_freeze,
418 	.thaw			= nv_nf2_thaw,
419 };
420 
421 static struct ata_port_operations nv_ck804_ops = {
422 	.inherits		= &nv_generic_ops,
423 	.freeze			= nv_ck804_freeze,
424 	.thaw			= nv_ck804_thaw,
425 	.host_stop		= nv_ck804_host_stop,
426 };
427 
428 static struct ata_port_operations nv_adma_ops = {
429 	.inherits		= &nv_generic_ops,
430 
431 	.check_atapi_dma	= nv_adma_check_atapi_dma,
432 	.sff_tf_read		= nv_adma_tf_read,
433 	.qc_defer		= ata_std_qc_defer,
434 	.qc_prep		= nv_adma_qc_prep,
435 	.qc_issue		= nv_adma_qc_issue,
436 	.sff_irq_clear		= nv_adma_irq_clear,
437 
438 	.freeze			= nv_adma_freeze,
439 	.thaw			= nv_adma_thaw,
440 	.error_handler		= nv_adma_error_handler,
441 	.post_internal_cmd	= nv_adma_post_internal_cmd,
442 
443 	.port_start		= nv_adma_port_start,
444 	.port_stop		= nv_adma_port_stop,
445 #ifdef CONFIG_PM
446 	.port_suspend		= nv_adma_port_suspend,
447 	.port_resume		= nv_adma_port_resume,
448 #endif
449 	.host_stop		= nv_adma_host_stop,
450 };
451 
452 static struct ata_port_operations nv_swncq_ops = {
453 	.inherits		= &nv_generic_ops,
454 
455 	.qc_defer		= ata_std_qc_defer,
456 	.qc_prep		= nv_swncq_qc_prep,
457 	.qc_issue		= nv_swncq_qc_issue,
458 
459 	.freeze			= nv_mcp55_freeze,
460 	.thaw			= nv_mcp55_thaw,
461 	.error_handler		= nv_swncq_error_handler,
462 
463 #ifdef CONFIG_PM
464 	.port_suspend		= nv_swncq_port_suspend,
465 	.port_resume		= nv_swncq_port_resume,
466 #endif
467 	.port_start		= nv_swncq_port_start,
468 };
469 
470 struct nv_pi_priv {
471 	irq_handler_t			irq_handler;
472 	struct scsi_host_template	*sht;
473 };
474 
475 #define NV_PI_PRIV(_irq_handler, _sht) \
476 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
477 
478 static const struct ata_port_info nv_port_info[] = {
479 	/* generic */
480 	{
481 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
482 		.pio_mask	= NV_PIO_MASK,
483 		.mwdma_mask	= NV_MWDMA_MASK,
484 		.udma_mask	= NV_UDMA_MASK,
485 		.port_ops	= &nv_generic_ops,
486 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
487 	},
488 	/* nforce2/3 */
489 	{
490 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
491 		.pio_mask	= NV_PIO_MASK,
492 		.mwdma_mask	= NV_MWDMA_MASK,
493 		.udma_mask	= NV_UDMA_MASK,
494 		.port_ops	= &nv_nf2_ops,
495 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
496 	},
497 	/* ck804 */
498 	{
499 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
500 		.pio_mask	= NV_PIO_MASK,
501 		.mwdma_mask	= NV_MWDMA_MASK,
502 		.udma_mask	= NV_UDMA_MASK,
503 		.port_ops	= &nv_ck804_ops,
504 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
505 	},
506 	/* ADMA */
507 	{
508 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
509 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
510 		.pio_mask	= NV_PIO_MASK,
511 		.mwdma_mask	= NV_MWDMA_MASK,
512 		.udma_mask	= NV_UDMA_MASK,
513 		.port_ops	= &nv_adma_ops,
514 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
515 	},
516 	/* SWNCQ */
517 	{
518 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
519 				  ATA_FLAG_NCQ,
520 		.pio_mask	= NV_PIO_MASK,
521 		.mwdma_mask	= NV_MWDMA_MASK,
522 		.udma_mask	= NV_UDMA_MASK,
523 		.port_ops	= &nv_swncq_ops,
524 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
525 	},
526 };
527 
528 MODULE_AUTHOR("NVIDIA");
529 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
530 MODULE_LICENSE("GPL");
531 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
532 MODULE_VERSION(DRV_VERSION);
533 
534 static int adma_enabled;
535 static int swncq_enabled = 1;
536 
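/* Switch the port out of ADMA mode into legacy register mode: wait for the
   engine to go idle, clear the GO bit, then wait for the LEGACY status bit. */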
537 static void nv_adma_register_mode(struct ata_port *ap)
538 {
539 	struct nv_adma_port_priv *pp = ap->private_data;
540 	void __iomem *mmio = pp->ctl_block;
541 	u16 tmp, status;
542 	int count = 0;
543 
544 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
545 		return;
546 
547 	status = readw(mmio + NV_ADMA_STAT);
548 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
549 		ndelay(50);
550 		status = readw(mmio + NV_ADMA_STAT);
551 		count++;
552 	}
553 	if (count == 20)
554 		ata_port_printk(ap, KERN_WARNING,
555 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
556 			status);
557 
558 	tmp = readw(mmio + NV_ADMA_CTL);
559 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
560 
561 	count = 0;
562 	status = readw(mmio + NV_ADMA_STAT);
563 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
564 		ndelay(50);
565 		status = readw(mmio + NV_ADMA_STAT);
566 		count++;
567 	}
568 	if (count == 20)
569 		ata_port_printk(ap, KERN_WARNING,
570 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
571 			 status);
572 
573 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
574 }
575 
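/* Switch the port from register mode into ADMA mode: set the GO bit and wait
   until LEGACY clears and the engine reports IDLE. */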
576 static void nv_adma_mode(struct ata_port *ap)
577 {
578 	struct nv_adma_port_priv *pp = ap->private_data;
579 	void __iomem *mmio = pp->ctl_block;
580 	u16 tmp, status;
581 	int count = 0;
582 
583 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
584 		return;
585 
586 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
587 
588 	tmp = readw(mmio + NV_ADMA_CTL);
589 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
590 
591 	status = readw(mmio + NV_ADMA_STAT);
592 	while (((status & NV_ADMA_STAT_LEGACY) ||
593 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
594 		ndelay(50);
595 		status = readw(mmio + NV_ADMA_STAT);
596 		count++;
597 	}
598 	if (count == 20)
599 		ata_port_printk(ap, KERN_WARNING,
600 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
601 			status);
602 
603 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
604 }
605 
606 static int nv_adma_slave_config(struct scsi_device *sdev)
607 {
608 	struct ata_port *ap = ata_shost_to_port(sdev->host);
609 	struct nv_adma_port_priv *pp = ap->private_data;
610 	struct nv_adma_port_priv *port0, *port1;
611 	struct scsi_device *sdev0, *sdev1;
612 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
613 	unsigned long segment_boundary, flags;
614 	unsigned short sg_tablesize;
615 	int rc;
616 	int adma_enable;
617 	u32 current_reg, new_reg, config_mask;
618 
619 	rc = ata_scsi_slave_config(sdev);
620 
621 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
622 		/* Not a proper libata device, ignore */
623 		return rc;
624 
625 	spin_lock_irqsave(ap->lock, flags);
626 
627 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
628 		/*
629 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
630 		 * Therefore ATAPI commands are sent through the legacy interface.
631 		 * However, the legacy interface only supports 32-bit DMA.
632 		 * Restrict DMA parameters as required by the legacy interface
633 		 * when an ATAPI device is connected.
634 		 */
635 		segment_boundary = ATA_DMA_BOUNDARY;
636 		/* Subtract 1 since an extra entry may be needed for padding, see
637 		   libata-scsi.c */
638 		sg_tablesize = LIBATA_MAX_PRD - 1;
639 
640 		/* Since the legacy DMA engine is in use, we need to disable ADMA
641 		   on the port. */
642 		adma_enable = 0;
643 		nv_adma_register_mode(ap);
644 	} else {
645 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
646 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
647 		adma_enable = 1;
648 	}
649 
650 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
651 
652 	if (ap->port_no == 1)
653 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
654 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
655 	else
656 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
657 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
658 
659 	if (adma_enable) {
660 		new_reg = current_reg | config_mask;
661 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
662 	} else {
663 		new_reg = current_reg & ~config_mask;
664 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
665 	}
666 
667 	if (current_reg != new_reg)
668 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
669 
670 	port0 = ap->host->ports[0]->private_data;
671 	port1 = ap->host->ports[1]->private_data;
672 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
673 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
674 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
675 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
676 		/** We have to set the DMA mask to 32-bit if either port is in
677 		    ATAPI mode, since they are on the same PCI device which is
678 		    used for DMA mapping. If we set the mask we also need to set
679 		    the bounce limit on both ports to ensure that the block
680 		    layer doesn't feed addresses that cause DMA mapping to
681 		    choke. If either SCSI device is not allocated yet, it's OK
682 		    since that port will discover its correct setting when it
683 		    does get allocated.
684 		    Note: Setting 32-bit mask should not fail. */
685 		if (sdev0)
686 			blk_queue_bounce_limit(sdev0->request_queue,
687 					       ATA_DMA_MASK);
688 		if (sdev1)
689 			blk_queue_bounce_limit(sdev1->request_queue,
690 					       ATA_DMA_MASK);
691 
692 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
693 	} else {
694 		/** This shouldn't fail as it was set to this value before */
695 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
696 		if (sdev0)
697 			blk_queue_bounce_limit(sdev0->request_queue,
698 					       pp->adma_dma_mask);
699 		if (sdev1)
700 			blk_queue_bounce_limit(sdev1->request_queue,
701 					       pp->adma_dma_mask);
702 	}
703 
704 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
705 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
706 	ata_port_printk(ap, KERN_INFO,
707 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
708 		(unsigned long long)*ap->host->dev->dma_mask,
709 		segment_boundary, sg_tablesize);
710 
711 	spin_unlock_irqrestore(ap->lock, flags);
712 
713 	return rc;
714 }
715 
716 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
717 {
718 	struct nv_adma_port_priv *pp = qc->ap->private_data;
719 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
720 }
721 
722 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
723 {
724 	/* Other than when internal or pass-through commands are executed,
725 	   the only time this function will be called in ADMA mode will be
726 	   if a command fails. In the failure case we don't care about going
727 	   into register mode with ADMA commands pending, as the commands will
728 	   all shortly be aborted anyway. We assume that NCQ commands are not
729 	   issued via passthrough, which is the only way that switching into
730 	   ADMA mode could abort outstanding commands. */
731 	nv_adma_register_mode(ap);
732 
733 	ata_sff_tf_read(ap, tf);
734 }
735 
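/* Translate a taskfile into the register/value word sequence stored in a
   CPB, padding unused slots with IGN entries.  Returns the number of words
   written. */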
736 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
737 {
738 	unsigned int idx = 0;
739 
740 	if (tf->flags & ATA_TFLAG_ISADDR) {
741 		if (tf->flags & ATA_TFLAG_LBA48) {
742 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
743 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
744 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
745 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
746 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
747 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
748 		} else
749 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
750 
751 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
752 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
753 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
754 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
755 	}
756 
757 	if (tf->flags & ATA_TFLAG_DEVICE)
758 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
759 
760 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
761 
762 	while (idx < 12)
763 		cpb[idx++] = cpu_to_le16(IGN);
764 
765 	return idx;
766 }
767 
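/* Examine the response flags of one CPB: complete the qc on success, or push
   an EH description and abort/freeze the port on error.  Returns nonzero if
   error handling was invoked. */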
768 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
769 {
770 	struct nv_adma_port_priv *pp = ap->private_data;
771 	u8 flags = pp->cpb[cpb_num].resp_flags;
772 
773 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
774 
775 	if (unlikely((force_err ||
776 		     flags & (NV_CPB_RESP_ATA_ERR |
777 			      NV_CPB_RESP_CMD_ERR |
778 			      NV_CPB_RESP_CPB_ERR)))) {
779 		struct ata_eh_info *ehi = &ap->link.eh_info;
780 		int freeze = 0;
781 
782 		ata_ehi_clear_desc(ehi);
783 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
784 		if (flags & NV_CPB_RESP_ATA_ERR) {
785 			ata_ehi_push_desc(ehi, "ATA error");
786 			ehi->err_mask |= AC_ERR_DEV;
787 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
788 			ata_ehi_push_desc(ehi, "CMD error");
789 			ehi->err_mask |= AC_ERR_DEV;
790 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
791 			ata_ehi_push_desc(ehi, "CPB error");
792 			ehi->err_mask |= AC_ERR_SYSTEM;
793 			freeze = 1;
794 		} else {
795 			/* notifier error, but no error in CPB flags? */
796 			ata_ehi_push_desc(ehi, "unknown");
797 			ehi->err_mask |= AC_ERR_OTHER;
798 			freeze = 1;
799 		}
800 		/* Kill all commands. EH will determine what actually failed. */
801 		if (freeze)
802 			ata_port_freeze(ap);
803 		else
804 			ata_port_abort(ap);
805 		return 1;
806 	}
807 
808 	if (likely(flags & NV_CPB_RESP_DONE)) {
809 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
810 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
811 		if (likely(qc)) {
812 			DPRINTK("Completing qc from tag %d\n", cpb_num);
813 			ata_qc_complete(qc);
814 		} else {
815 			struct ata_eh_info *ehi = &ap->link.eh_info;
816 			/* Notifier bits set without a command may indicate the drive
817 			   is misbehaving. Raise host state machine violation on this
818 			   condition. */
819 			ata_port_printk(ap, KERN_ERR,
820 					"notifier for tag %d with no cmd?\n",
821 					cpb_num);
822 			ehi->err_mask |= AC_ERR_HSM;
823 			ehi->action |= ATA_EH_RESET;
824 			ata_port_freeze(ap);
825 			return 1;
826 		}
827 	}
828 	return 0;
829 }
830 
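/* Handle one port's share of a legacy interrupt: freeze on hotplug/unplug
   events, otherwise pass device interrupts to the SFF handler.  Returns
   nonzero if the interrupt was handled. */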
831 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
832 {
833 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
834 
835 	/* freeze if hotplugged */
836 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
837 		ata_port_freeze(ap);
838 		return 1;
839 	}
840 
841 	/* bail out if not our interrupt */
842 	if (!(irq_stat & NV_INT_DEV))
843 		return 0;
844 
845 	/* DEV interrupt w/ no active qc? */
846 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
847 		ata_sff_check_status(ap);
848 		return 1;
849 	}
850 
851 	/* handle interrupt */
852 	return ata_sff_host_intr(ap, qc);
853 }
854 
855 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
856 {
857 	struct ata_host *host = dev_instance;
858 	int i, handled = 0;
859 	u32 notifier_clears[2];
860 
861 	spin_lock(&host->lock);
862 
863 	for (i = 0; i < host->n_ports; i++) {
864 		struct ata_port *ap = host->ports[i];
865 		notifier_clears[i] = 0;
866 
867 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
868 			struct nv_adma_port_priv *pp = ap->private_data;
869 			void __iomem *mmio = pp->ctl_block;
870 			u16 status;
871 			u32 gen_ctl;
872 			u32 notifier, notifier_error;
873 
874 			/* if ADMA is disabled, use standard ata interrupt handler */
875 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
876 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
877 					>> (NV_INT_PORT_SHIFT * i);
878 				handled += nv_host_intr(ap, irq_stat);
879 				continue;
880 			}
881 
882 			/* if in ATA register mode, check for standard interrupts */
883 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
884 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
885 					>> (NV_INT_PORT_SHIFT * i);
886 				if (ata_tag_valid(ap->link.active_tag))
887 					/** NV_INT_DEV indication seems unreliable at times
888 					    at least in ADMA mode. Force it on always when a
889 					    command is active, to prevent losing interrupts. */
890 					irq_stat |= NV_INT_DEV;
891 				handled += nv_host_intr(ap, irq_stat);
892 			}
893 
894 			notifier = readl(mmio + NV_ADMA_NOTIFIER);
895 			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
896 			notifier_clears[i] = notifier | notifier_error;
897 
898 			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
899 
900 			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
901 			    !notifier_error)
902 				/* Nothing to do */
903 				continue;
904 
905 			status = readw(mmio + NV_ADMA_STAT);
906 
907 			/* Clear status. Ensure the controller sees the clearing before we start
908 			   looking at any of the CPB statuses, so that any CPB completions after
909 			   this point in the handler will raise another interrupt. */
910 			writew(status, mmio + NV_ADMA_STAT);
911 			readw(mmio + NV_ADMA_STAT); /* flush posted write */
912 			rmb();
913 
914 			handled++; /* irq handled if we got here */
915 
916 			/* freeze if hotplugged or controller error */
917 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
918 					       NV_ADMA_STAT_HOTUNPLUG |
919 					       NV_ADMA_STAT_TIMEOUT |
920 					       NV_ADMA_STAT_SERROR))) {
921 				struct ata_eh_info *ehi = &ap->link.eh_info;
922 
923 				ata_ehi_clear_desc(ehi);
924 				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
925 				if (status & NV_ADMA_STAT_TIMEOUT) {
926 					ehi->err_mask |= AC_ERR_SYSTEM;
927 					ata_ehi_push_desc(ehi, "timeout");
928 				} else if (status & NV_ADMA_STAT_HOTPLUG) {
929 					ata_ehi_hotplugged(ehi);
930 					ata_ehi_push_desc(ehi, "hotplug");
931 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
932 					ata_ehi_hotplugged(ehi);
933 					ata_ehi_push_desc(ehi, "hot unplug");
934 				} else if (status & NV_ADMA_STAT_SERROR) {
935 					/* let libata analyze SError and figure out the cause */
936 					ata_ehi_push_desc(ehi, "SError");
937 				} else
938 					ata_ehi_push_desc(ehi, "unknown");
939 				ata_port_freeze(ap);
940 				continue;
941 			}
942 
943 			if (status & (NV_ADMA_STAT_DONE |
944 				      NV_ADMA_STAT_CPBERR |
945 				      NV_ADMA_STAT_CMD_COMPLETE)) {
946 				u32 check_commands = notifier_clears[i];
947 				int pos, error = 0;
948 
949 				if (status & NV_ADMA_STAT_CPBERR) {
950 					/* Check all active commands */
951 					if (ata_tag_valid(ap->link.active_tag))
952 						check_commands = 1 <<
953 							ap->link.active_tag;
954 					else
955 						check_commands = ap->
956 							link.sactive;
957 				}
958 
959 				/** Check CPBs for completed commands */
960 				while ((pos = ffs(check_commands)) && !error) {
961 					pos--;
962 					error = nv_adma_check_cpb(ap, pos,
963 						notifier_error & (1 << pos));
964 					check_commands &= ~(1 << pos);
965 				}
966 			}
967 		}
968 	}
969 
970 	if (notifier_clears[0] || notifier_clears[1]) {
971 		/* Note: Both notifier clear registers must be written
972 		   if either is set, even if one is zero, according to NVIDIA. */
973 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
974 		writel(notifier_clears[0], pp->notifier_clear_block);
975 		pp = host->ports[1]->private_data;
976 		writel(notifier_clears[1], pp->notifier_clear_block);
977 	}
978 
979 	spin_unlock(&host->lock);
980 
981 	return IRQ_RETVAL(handled);
982 }
983 
984 static void nv_adma_freeze(struct ata_port *ap)
985 {
986 	struct nv_adma_port_priv *pp = ap->private_data;
987 	void __iomem *mmio = pp->ctl_block;
988 	u16 tmp;
989 
990 	nv_ck804_freeze(ap);
991 
992 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
993 		return;
994 
995 	/* clear any outstanding CK804 notifications */
996 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
997 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
998 
999 	/* Disable interrupt */
1000 	tmp = readw(mmio + NV_ADMA_CTL);
1001 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1002 		mmio + NV_ADMA_CTL);
1003 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1004 }
1005 
1006 static void nv_adma_thaw(struct ata_port *ap)
1007 {
1008 	struct nv_adma_port_priv *pp = ap->private_data;
1009 	void __iomem *mmio = pp->ctl_block;
1010 	u16 tmp;
1011 
1012 	nv_ck804_thaw(ap);
1013 
1014 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1015 		return;
1016 
1017 	/* Enable interrupt */
1018 	tmp = readw(mmio + NV_ADMA_CTL);
1019 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1020 		mmio + NV_ADMA_CTL);
1021 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1022 }
1023 
1024 static void nv_adma_irq_clear(struct ata_port *ap)
1025 {
1026 	struct nv_adma_port_priv *pp = ap->private_data;
1027 	void __iomem *mmio = pp->ctl_block;
1028 	u32 notifier_clears[2];
1029 
1030 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1031 		ata_sff_irq_clear(ap);
1032 		return;
1033 	}
1034 
1035 	/* clear any outstanding CK804 notifications */
1036 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1037 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1038 
1039 	/* clear ADMA status */
1040 	writew(0xffff, mmio + NV_ADMA_STAT);
1041 
1042 	/* clear notifiers - note both ports need to be written with
1043 	   something even though we are only clearing on one */
1044 	if (ap->port_no == 0) {
1045 		notifier_clears[0] = 0xFFFFFFFF;
1046 		notifier_clears[1] = 0;
1047 	} else {
1048 		notifier_clears[0] = 0;
1049 		notifier_clears[1] = 0xFFFFFFFF;
1050 	}
1051 	pp = ap->host->ports[0]->private_data;
1052 	writel(notifier_clears[0], pp->notifier_clear_block);
1053 	pp = ap->host->ports[1]->private_data;
1054 	writel(notifier_clears[1], pp->notifier_clear_block);
1055 }
1056 
1057 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1058 {
1059 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1060 
1061 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1062 		ata_sff_post_internal_cmd(qc);
1063 }
1064 
1065 static int nv_adma_port_start(struct ata_port *ap)
1066 {
1067 	struct device *dev = ap->host->dev;
1068 	struct nv_adma_port_priv *pp;
1069 	int rc;
1070 	void *mem;
1071 	dma_addr_t mem_dma;
1072 	void __iomem *mmio;
1073 	struct pci_dev *pdev = to_pci_dev(dev);
1074 	u16 tmp;
1075 
1076 	VPRINTK("ENTER\n");
1077 
1078 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1079 	   pad buffers */
1080 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1081 	if (rc)
1082 		return rc;
1083 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1084 	if (rc)
1085 		return rc;
1086 
1087 	rc = ata_port_start(ap);
1088 	if (rc)
1089 		return rc;
1090 
1091 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1092 	if (!pp)
1093 		return -ENOMEM;
1094 
1095 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1096 	       ap->port_no * NV_ADMA_PORT_SIZE;
1097 	pp->ctl_block = mmio;
1098 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1099 	pp->notifier_clear_block = pp->gen_block +
1100 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1101 
1102 	/* Now that the legacy PRD and padding buffer are allocated we can
1103 	   safely raise the DMA mask to allocate the CPB/APRD table.
1104 	   These calls are allowed to fail since we store the value that
1105 	   ends up being used as the bounce limit in slave_config later if
1106 	   needed. */
1107 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1108 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1109 	pp->adma_dma_mask = *dev->dma_mask;
1110 
1111 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1112 				  &mem_dma, GFP_KERNEL);
1113 	if (!mem)
1114 		return -ENOMEM;
1115 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1116 
1117 	/*
1118 	 * First item in chunk of DMA memory:
1119 	 * 128-byte command parameter block (CPB)
1120 	 * one for each command tag
1121 	 */
1122 	pp->cpb     = mem;
1123 	pp->cpb_dma = mem_dma;
1124 
1125 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1126 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1127 
1128 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1129 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1130 
1131 	/*
1132 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1133 	 */
1134 	pp->aprd = mem;
1135 	pp->aprd_dma = mem_dma;
1136 
1137 	ap->private_data = pp;
1138 
1139 	/* clear any outstanding interrupt conditions */
1140 	writew(0xffff, mmio + NV_ADMA_STAT);
1141 
1142 	/* initialize port variables */
1143 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1144 
1145 	/* clear CPB fetch count */
1146 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1147 
1148 	/* clear GO for register mode, enable interrupt */
1149 	tmp = readw(mmio + NV_ADMA_CTL);
1150 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1151 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1152 
1153 	tmp = readw(mmio + NV_ADMA_CTL);
1154 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1155 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1156 	udelay(1);
1157 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1158 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1159 
1160 	return 0;
1161 }
1162 
1163 static void nv_adma_port_stop(struct ata_port *ap)
1164 {
1165 	struct nv_adma_port_priv *pp = ap->private_data;
1166 	void __iomem *mmio = pp->ctl_block;
1167 
1168 	VPRINTK("ENTER\n");
1169 	writew(0, mmio + NV_ADMA_CTL);
1170 }
1171 
1172 #ifdef CONFIG_PM
1173 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1174 {
1175 	struct nv_adma_port_priv *pp = ap->private_data;
1176 	void __iomem *mmio = pp->ctl_block;
1177 
1178 	/* Go to register mode - clears GO */
1179 	nv_adma_register_mode(ap);
1180 
1181 	/* clear CPB fetch count */
1182 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1183 
1184 	/* disable interrupt, shut down port */
1185 	writew(0, mmio + NV_ADMA_CTL);
1186 
1187 	return 0;
1188 }
1189 
1190 static int nv_adma_port_resume(struct ata_port *ap)
1191 {
1192 	struct nv_adma_port_priv *pp = ap->private_data;
1193 	void __iomem *mmio = pp->ctl_block;
1194 	u16 tmp;
1195 
1196 	/* set CPB block location */
1197 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1198 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1199 
1200 	/* clear any outstanding interrupt conditions */
1201 	writew(0xffff, mmio + NV_ADMA_STAT);
1202 
1203 	/* initialize port variables */
1204 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1205 
1206 	/* clear CPB fetch count */
1207 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1208 
1209 	/* clear GO for register mode, enable interrupt */
1210 	tmp = readw(mmio + NV_ADMA_CTL);
1211 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1212 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1213 
1214 	tmp = readw(mmio + NV_ADMA_CTL);
1215 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1216 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1217 	udelay(1);
1218 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1219 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1220 
1221 	return 0;
1222 }
1223 #endif
1224 
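/* Point the port's SFF taskfile addresses at the shadow registers inside its
   ADMA register block. */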
1225 static void nv_adma_setup_port(struct ata_port *ap)
1226 {
1227 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1228 	struct ata_ioports *ioport = &ap->ioaddr;
1229 
1230 	VPRINTK("ENTER\n");
1231 
1232 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1233 
1234 	ioport->cmd_addr	= mmio;
1235 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1236 	ioport->error_addr	=
1237 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1238 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1239 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1240 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1241 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1242 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1243 	ioport->status_addr	=
1244 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1245 	ioport->altstatus_addr	=
1246 	ioport->ctl_addr	= mmio + 0x20;
1247 }
1248 
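/* Host-wide ADMA setup: enable the PORT*_EN and PORT*_PWB_EN bits in PCI
   config register NV_MCP_SATA_CFG_20 for both ports, then set up each port's
   register addresses. */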
1249 static int nv_adma_host_init(struct ata_host *host)
1250 {
1251 	struct pci_dev *pdev = to_pci_dev(host->dev);
1252 	unsigned int i;
1253 	u32 tmp32;
1254 
1255 	VPRINTK("ENTER\n");
1256 
1257 	/* enable ADMA on the ports */
1258 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1259 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1260 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1261 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1262 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1263 
1264 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1265 
1266 	for (i = 0; i < host->n_ports; i++)
1267 		nv_adma_setup_port(host->ports[i]);
1268 
1269 	return 0;
1270 }
1271 
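/* Fill one ADMA PRD (APRD) entry from a scatterlist segment. */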
1272 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1273 			      struct scatterlist *sg,
1274 			      int idx,
1275 			      struct nv_adma_prd *aprd)
1276 {
1277 	u8 flags = 0;
1278 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1279 		flags |= NV_APRD_WRITE;
1280 	if (idx == qc->n_elem - 1)
1281 		flags |= NV_APRD_END;
1282 	else if (idx != 4)
1283 		flags |= NV_APRD_CONT;
1284 
1285 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1286 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1287 	aprd->flags = flags;
1288 	aprd->packet_len = 0;
1289 }
1290 
1291 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1292 {
1293 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1294 	struct nv_adma_prd *aprd;
1295 	struct scatterlist *sg;
1296 	unsigned int si;
1297 
1298 	VPRINTK("ENTER\n");
1299 
1300 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1301 		aprd = (si < 5) ? &cpb->aprd[si] :
1302 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1303 		nv_adma_fill_aprd(qc, sg, si, aprd);
1304 	}
1305 	if (si > 5)
1306 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1307 	else
1308 		cpb->next_aprd = cpu_to_le64(0);
1309 }
1310 
1311 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1312 {
1313 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1314 
1315 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1316 	   or interrupt-driven no-data commands. */
1317 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1318 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1319 		return 1;
1320 
1321 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1322 	   (qc->tf.protocol == ATA_PROT_NODATA))
1323 		return 0;
1324 
1325 	return 1;
1326 }
1327 
1328 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1329 {
1330 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1331 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1332 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1333 		       NV_CPB_CTL_IEN;
1334 
1335 	if (nv_adma_use_reg_mode(qc)) {
1336 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1337 			(qc->flags & ATA_QCFLAG_DMAMAP));
1338 		nv_adma_register_mode(qc->ap);
1339 		ata_sff_qc_prep(qc);
1340 		return;
1341 	}
1342 
1343 	cpb->resp_flags = NV_CPB_RESP_DONE;
1344 	wmb();
1345 	cpb->ctl_flags = 0;
1346 	wmb();
1347 
1348 	cpb->len		= 3;
1349 	cpb->tag		= qc->tag;
1350 	cpb->next_cpb_idx	= 0;
1351 
1352 	/* turn on NCQ flags for NCQ commands */
1353 	if (qc->tf.protocol == ATA_PROT_NCQ)
1354 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1355 
1356 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1357 
1358 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1359 
1360 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1361 		nv_adma_fill_sg(qc, cpb);
1362 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1363 	} else
1364 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1365 
1366 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1367 	   until we are finished filling in all of the contents */
1368 	wmb();
1369 	cpb->ctl_flags = ctl_flags;
1370 	wmb();
1371 	cpb->resp_flags = 0;
1372 }
1373 
1374 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1375 {
1376 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1377 	void __iomem *mmio = pp->ctl_block;
1378 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1379 
1380 	VPRINTK("ENTER\n");
1381 
1382 	/* We can't handle result taskfile with NCQ commands, since
1383 	   retrieving the taskfile switches us out of ADMA mode and would abort
1384 	   existing commands. */
1385 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1386 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1387 		ata_dev_printk(qc->dev, KERN_ERR,
1388 			"NCQ w/ RESULT_TF not allowed\n");
1389 		return AC_ERR_SYSTEM;
1390 	}
1391 
1392 	if (nv_adma_use_reg_mode(qc)) {
1393 		/* use ATA register mode */
1394 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1395 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1396 			(qc->flags & ATA_QCFLAG_DMAMAP));
1397 		nv_adma_register_mode(qc->ap);
1398 		return ata_sff_qc_issue(qc);
1399 	} else
1400 		nv_adma_mode(qc->ap);
1401 
1402 	/* write append register, command tag in lower 8 bits
1403 	   and (number of cpbs to append -1) in top 8 bits */
1404 	wmb();
1405 
1406 	if (curr_ncq != pp->last_issue_ncq) {
1407 		/* Seems to need some delay before switching between NCQ and
1408 		   non-NCQ commands, else we get command timeouts and such. */
1409 		udelay(20);
1410 		pp->last_issue_ncq = curr_ncq;
1411 	}
1412 
1413 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1414 
1415 	DPRINTK("Issued tag %u\n", qc->tag);
1416 
1417 	return 0;
1418 }
1419 
1420 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1421 {
1422 	struct ata_host *host = dev_instance;
1423 	unsigned int i;
1424 	unsigned int handled = 0;
1425 	unsigned long flags;
1426 
1427 	spin_lock_irqsave(&host->lock, flags);
1428 
1429 	for (i = 0; i < host->n_ports; i++) {
1430 		struct ata_port *ap;
1431 
1432 		ap = host->ports[i];
1433 		if (ap &&
1434 		    !(ap->flags & ATA_FLAG_DISABLED)) {
1435 			struct ata_queued_cmd *qc;
1436 
1437 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1438 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1439 				handled += ata_sff_host_intr(ap, qc);
1440 			else
1441 				// No request pending?  Clear interrupt status
1442 				// anyway, in case there's one pending.
1443 				ap->ops->sff_check_status(ap);
1444 		}
1445 
1446 	}
1447 
1448 	spin_unlock_irqrestore(&host->lock, flags);
1449 
1450 	return IRQ_RETVAL(handled);
1451 }
1452 
1453 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1454 {
1455 	int i, handled = 0;
1456 
1457 	for (i = 0; i < host->n_ports; i++) {
1458 		struct ata_port *ap = host->ports[i];
1459 
1460 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1461 			handled += nv_host_intr(ap, irq_stat);
1462 
1463 		irq_stat >>= NV_INT_PORT_SHIFT;
1464 	}
1465 
1466 	return IRQ_RETVAL(handled);
1467 }
1468 
1469 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1470 {
1471 	struct ata_host *host = dev_instance;
1472 	u8 irq_stat;
1473 	irqreturn_t ret;
1474 
1475 	spin_lock(&host->lock);
1476 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1477 	ret = nv_do_interrupt(host, irq_stat);
1478 	spin_unlock(&host->lock);
1479 
1480 	return ret;
1481 }
1482 
1483 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1484 {
1485 	struct ata_host *host = dev_instance;
1486 	u8 irq_stat;
1487 	irqreturn_t ret;
1488 
1489 	spin_lock(&host->lock);
1490 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1491 	ret = nv_do_interrupt(host, irq_stat);
1492 	spin_unlock(&host->lock);
1493 
1494 	return ret;
1495 }
1496 
1497 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1498 {
1499 	if (sc_reg > SCR_CONTROL)
1500 		return -EINVAL;
1501 
1502 	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1503 	return 0;
1504 }
1505 
1506 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1507 {
1508 	if (sc_reg > SCR_CONTROL)
1509 		return -EINVAL;
1510 
1511 	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1512 	return 0;
1513 }
1514 
1515 static void nv_nf2_freeze(struct ata_port *ap)
1516 {
1517 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1518 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1519 	u8 mask;
1520 
1521 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1522 	mask &= ~(NV_INT_ALL << shift);
1523 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1524 }
1525 
1526 static void nv_nf2_thaw(struct ata_port *ap)
1527 {
1528 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1529 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1530 	u8 mask;
1531 
1532 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1533 
1534 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1535 	mask |= (NV_INT_MASK << shift);
1536 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1537 }
1538 
1539 static void nv_ck804_freeze(struct ata_port *ap)
1540 {
1541 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1542 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1543 	u8 mask;
1544 
1545 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1546 	mask &= ~(NV_INT_ALL << shift);
1547 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1548 }
1549 
1550 static void nv_ck804_thaw(struct ata_port *ap)
1551 {
1552 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1553 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1554 	u8 mask;
1555 
1556 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1557 
1558 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1559 	mask |= (NV_INT_MASK << shift);
1560 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1561 }
1562 
1563 static void nv_mcp55_freeze(struct ata_port *ap)
1564 {
1565 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1566 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1567 	u32 mask;
1568 
1569 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1570 
1571 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1572 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1573 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1574 	ata_sff_freeze(ap);
1575 }
1576 
1577 static void nv_mcp55_thaw(struct ata_port *ap)
1578 {
1579 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1580 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1581 	u32 mask;
1582 
1583 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1584 
1585 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1586 	mask |= (NV_INT_MASK_MCP55 << shift);
1587 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1588 	ata_sff_thaw(ap);
1589 }
1590 
1591 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1592 			unsigned long deadline)
1593 {
1594 	int rc;
1595 
1596 	/* SATA hardreset fails to retrieve proper device signature on
1597 	 * some controllers.  Request follow up SRST.  For more info,
1598 	 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1599 	 */
1600 	rc = sata_sff_hardreset(link, class, deadline);
1601 	if (rc)
1602 		return rc;
1603 	return -EAGAIN;
1604 }
1605 
1606 static void nv_adma_error_handler(struct ata_port *ap)
1607 {
1608 	struct nv_adma_port_priv *pp = ap->private_data;
1609 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1610 		void __iomem *mmio = pp->ctl_block;
1611 		int i;
1612 		u16 tmp;
1613 
1614 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1615 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1616 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1617 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1618 			u32 status = readw(mmio + NV_ADMA_STAT);
1619 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1620 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1621 
1622 			ata_port_printk(ap, KERN_ERR,
1623 				"EH in ADMA mode, notifier 0x%X "
1624 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1625 				"next cpb count 0x%X next cpb idx 0x%x\n",
1626 				notifier, notifier_error, gen_ctl, status,
1627 				cpb_count, next_cpb_idx);
1628 
1629 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1630 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1631 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1632 				    ap->link.sactive & (1 << i))
1633 					ata_port_printk(ap, KERN_ERR,
1634 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1635 						i, cpb->ctl_flags, cpb->resp_flags);
1636 			}
1637 		}
1638 
1639 		/* Push us back into port register mode for error handling. */
1640 		nv_adma_register_mode(ap);
1641 
1642 		/* Mark all of the CPBs as invalid to prevent them from
1643 		   being executed */
1644 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1645 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1646 
1647 		/* clear CPB fetch count */
1648 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1649 
1650 		/* Reset channel */
1651 		tmp = readw(mmio + NV_ADMA_CTL);
1652 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1653 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1654 		udelay(1);
1655 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1656 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1657 	}
1658 
1659 	ata_sff_error_handler(ap);
1660 }
1661 
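/* Append a queued command to the SWNCQ defer FIFO. */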
1662 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1663 {
1664 	struct nv_swncq_port_priv *pp = ap->private_data;
1665 	struct defer_queue *dq = &pp->defer_queue;
1666 
1667 	/* queue is full */
1668 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1669 	dq->defer_bits |= (1 << qc->tag);
1670 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1671 }
1672 
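/* Remove and return the oldest deferred command from the SWNCQ defer FIFO,
   or NULL if the queue is empty. */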
1673 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1674 {
1675 	struct nv_swncq_port_priv *pp = ap->private_data;
1676 	struct defer_queue *dq = &pp->defer_queue;
1677 	unsigned int tag;
1678 
1679 	if (dq->head == dq->tail)	/* null queue */
1680 		return NULL;
1681 
1682 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1683 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1684 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1685 	dq->defer_bits &= ~(1 << tag);
1686 
1687 	return ata_qc_from_tag(ap, tag);
1688 }
1689 
1690 static void nv_swncq_fis_reinit(struct ata_port *ap)
1691 {
1692 	struct nv_swncq_port_priv *pp = ap->private_data;
1693 
1694 	pp->dhfis_bits = 0;
1695 	pp->dmafis_bits = 0;
1696 	pp->sdbfis_bits = 0;
1697 	pp->ncq_flags = 0;
1698 }
1699 
1700 static void nv_swncq_pp_reinit(struct ata_port *ap)
1701 {
1702 	struct nv_swncq_port_priv *pp = ap->private_data;
1703 	struct defer_queue *dq = &pp->defer_queue;
1704 
1705 	dq->head = 0;
1706 	dq->tail = 0;
1707 	dq->defer_bits = 0;
1708 	pp->qc_active = 0;
1709 	pp->last_issue_tag = ATA_TAG_POISON;
1710 	nv_swncq_fis_reinit(ap);
1711 }
1712 
1713 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1714 {
1715 	struct nv_swncq_port_priv *pp = ap->private_data;
1716 
1717 	writew(fis, pp->irq_block);
1718 }
1719 
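/* ata_bmdma_stop() only looks at qc->ap, so a throwaway on-stack qc is
 * enough to stop the BMDMA engine for this port.
 */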
1720 static void __ata_bmdma_stop(struct ata_port *ap)
1721 {
1722 	struct ata_queued_cmd qc;
1723 
1724 	qc.ap = ap;
1725 	ata_bmdma_stop(&qc);
1726 }
1727 
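/* Quiesce the SWNCQ engine before error handling: dump the per-tag FIS
 * bookkeeping for debugging, reset the port private state, stop BMDMA and
 * clear any pending port interrupts.
 */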
1728 static void nv_swncq_ncq_stop(struct ata_port *ap)
1729 {
1730 	struct nv_swncq_port_priv *pp = ap->private_data;
1731 	unsigned int i;
1732 	u32 sactive;
1733 	u32 done_mask;
1734 
1735 	ata_port_printk(ap, KERN_ERR,
1736 			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1737 			ap->qc_active, ap->link.sactive);
1738 	ata_port_printk(ap, KERN_ERR,
1739 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1740 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1741 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1742 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1743 
1744 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1745 			ap->ops->sff_check_status(ap),
1746 			ioread8(ap->ioaddr.error_addr));
1747 
1748 	sactive = readl(pp->sactive_block);
1749 	done_mask = pp->qc_active ^ sactive;
1750 
1751 	ata_port_printk(ap, KERN_ERR, "tag: dhfis dmafis sdbfis sactive\n");
1752 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1753 		u8 err = 0;
1754 		if (pp->qc_active & (1 << i))
1755 			err = 0;
1756 		else if (done_mask & (1 << i))
1757 			err = 1;
1758 		else
1759 			continue;
1760 
1761 		ata_port_printk(ap, KERN_ERR,
1762 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1763 				(pp->dhfis_bits >> i) & 0x1,
1764 				(pp->dmafis_bits >> i) & 0x1,
1765 				(pp->sdbfis_bits >> i) & 0x1,
1766 				(sactive >> i) & 0x1,
1767 				(err ? "error! tag doesn't exist" : " "));
1768 	}
1769 
1770 	nv_swncq_pp_reinit(ap);
1771 	ap->ops->sff_irq_clear(ap);
1772 	__ata_bmdma_stop(ap);
1773 	nv_swncq_irq_clear(ap, 0xffff);
1774 }
1775 
1776 static void nv_swncq_error_handler(struct ata_port *ap)
1777 {
1778 	struct ata_eh_context *ehc = &ap->link.eh_context;
1779 
1780 	if (ap->link.sactive) {
1781 		nv_swncq_ncq_stop(ap);
1782 		ehc->i.action |= ATA_EH_RESET;
1783 	}
1784 
1785 	ata_sff_error_handler(ap);
1786 }
1787 
1788 #ifdef CONFIG_PM
1789 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1790 {
1791 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1792 	u32 tmp;
1793 
1794 	/* clear irq */
1795 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1796 
1797 	/* disable irq */
1798 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1799 
1800 	/* disable swncq */
1801 	tmp = readl(mmio + NV_CTL_MCP55);
1802 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1803 	writel(tmp, mmio + NV_CTL_MCP55);
1804 
1805 	return 0;
1806 }
1807 
1808 static int nv_swncq_port_resume(struct ata_port *ap)
1809 {
1810 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1811 	u32 tmp;
1812 
1813 	/* clear irq */
1814 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1815 
1816 	/* enable irq */
1817 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1818 
1819 	/* enable swncq */
1820 	tmp = readl(mmio + NV_CTL_MCP55);
1821 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1822 
1823 	return 0;
1824 }
1825 #endif
1826 
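/* One-time MCP55 setup for software NCQ: disable ECO 398 via PCI config
 * register 0x7f, enable the SWNCQ control bits for both ports, unmask the
 * interrupt sources used by the SWNCQ path and clear any stale port
 * interrupt status.
 */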
1827 static void nv_swncq_host_init(struct ata_host *host)
1828 {
1829 	u32 tmp;
1830 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1831 	struct pci_dev *pdev = to_pci_dev(host->dev);
1832 	u8 regval;
1833 
1834 	/* disable ECO 398 */
1835 	pci_read_config_byte(pdev, 0x7f, &regval);
1836 	regval &= ~(1 << 7);
1837 	pci_write_config_byte(pdev, 0x7f, regval);
1838 
1839 	/* enable swncq */
1840 	tmp = readl(mmio + NV_CTL_MCP55);
1841 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1842 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1843 
1844 	/* enable irq intr */
1845 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1846 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1847 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1848 
1849 	/* clear port irq */
1850 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1851 }
1852 
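/* SCSI slave configuration hook: for Maxtor drives on MCP51, and on MCP55
 * silicon up to revision 0xa2, the queue depth is forced to 1, effectively
 * disabling SWNCQ for those combinations.
 */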
1853 static int nv_swncq_slave_config(struct scsi_device *sdev)
1854 {
1855 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1856 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1857 	struct ata_device *dev;
1858 	int rc;
1859 	u8 rev;
1860 	u8 check_maxtor = 0;
1861 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1862 
1863 	rc = ata_scsi_slave_config(sdev);
1864 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1865 		/* Not a proper libata device, ignore */
1866 		return rc;
1867 
1868 	dev = &ap->link.device[sdev->id];
1869 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1870 		return rc;
1871 
1872 	/* if MCP51 and Maxtor, then disable ncq */
1873 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1874 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1875 		check_maxtor = 1;
1876 
1877 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1878 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1879 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1880 		pci_read_config_byte(pdev, 0x8, &rev);
1881 		if (rev <= 0xa2)
1882 			check_maxtor = 1;
1883 	}
1884 
1885 	if (!check_maxtor)
1886 		return rc;
1887 
1888 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1889 
1890 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1891 		ata_scsi_change_queue_depth(sdev, 1);
1892 		ata_dev_printk(dev, KERN_NOTICE,
1893 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1894 	}
1895 
1896 	return rc;
1897 }
1898 
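/* Allocate the per-port SWNCQ state: one PRD table per possible tag plus
 * cached addresses of the SActive, interrupt status and tag registers.
 */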
1899 static int nv_swncq_port_start(struct ata_port *ap)
1900 {
1901 	struct device *dev = ap->host->dev;
1902 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1903 	struct nv_swncq_port_priv *pp;
1904 	int rc;
1905 
1906 	rc = ata_port_start(ap);
1907 	if (rc)
1908 		return rc;
1909 
1910 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1911 	if (!pp)
1912 		return -ENOMEM;
1913 
1914 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1915 				      &pp->prd_dma, GFP_KERNEL);
1916 	if (!pp->prd)
1917 		return -ENOMEM;
1918 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1919 
1920 	ap->private_data = pp;
1921 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1922 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1923 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1924 
1925 	return 0;
1926 }
1927 
1928 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1929 {
1930 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1931 		ata_sff_qc_prep(qc);
1932 		return;
1933 	}
1934 
1935 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1936 		return;
1937 
1938 	nv_swncq_fill_sg(qc);
1939 }
1940 
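/* Build the PRD table for an NCQ command.  Every tag has its own table
 * inside pp->prd; scatterlist entries are split so no single PRD crosses a
 * 64K boundary, and the final entry is flagged with ATA_PRD_EOT.
 */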
1941 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1942 {
1943 	struct ata_port *ap = qc->ap;
1944 	struct scatterlist *sg;
1945 	struct nv_swncq_port_priv *pp = ap->private_data;
1946 	struct ata_prd *prd;
1947 	unsigned int si, idx;
1948 
1949 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
1950 
1951 	idx = 0;
1952 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1953 		u32 addr, offset;
1954 		u32 sg_len, len;
1955 
1956 		addr = (u32)sg_dma_address(sg);
1957 		sg_len = sg_dma_len(sg);
1958 
1959 		while (sg_len) {
1960 			offset = addr & 0xffff;
1961 			len = sg_len;
1962 			if ((offset + sg_len) > 0x10000)
1963 				len = 0x10000 - offset;
1964 
1965 			prd[idx].addr = cpu_to_le32(addr);
1966 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1967 
1968 			idx++;
1969 			sg_len -= len;
1970 			addr += len;
1971 		}
1972 	}
1973 
1974 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1975 }
1976 
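/* Issue one NCQ command to the drive: set its bit in SActive, record it as
 * the most recently issued tag, reset its FIS bookkeeping bits and write
 * the taskfile out through the SFF ops.
 */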
1977 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1978 					  struct ata_queued_cmd *qc)
1979 {
1980 	struct nv_swncq_port_priv *pp = ap->private_data;
1981 
1982 	if (qc == NULL)
1983 		return 0;
1984 
1985 	DPRINTK("Enter\n");
1986 
1987 	writel((1 << qc->tag), pp->sactive_block);
1988 	pp->last_issue_tag = qc->tag;
1989 	pp->dhfis_bits &= ~(1 << qc->tag);
1990 	pp->dmafis_bits &= ~(1 << qc->tag);
1991 	pp->qc_active |= (0x1 << qc->tag);
1992 
1993 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
1994 	ap->ops->sff_exec_command(ap, &qc->tf);
1995 
1996 	DPRINTK("Issued tag %u\n", qc->tag);
1997 
1998 	return 0;
1999 }
2000 
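/* qc_issue entry point: NCQ commands are issued immediately when the port
 * is idle, otherwise they go onto the defer queue; non-NCQ commands take
 * the normal SFF issue path.
 */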
2001 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2002 {
2003 	struct ata_port *ap = qc->ap;
2004 	struct nv_swncq_port_priv *pp = ap->private_data;
2005 
2006 	if (qc->tf.protocol != ATA_PROT_NCQ)
2007 		return ata_sff_qc_issue(qc);
2008 
2009 	DPRINTK("Enter\n");
2010 
2011 	if (!pp->qc_active)
2012 		nv_swncq_issue_atacmd(ap, qc);
2013 	else
2014 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2015 
2016 	return 0;
2017 }
2018 
2019 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2020 {
2021 	u32 serror;
2022 	struct ata_eh_info *ehi = &ap->link.eh_info;
2023 
2024 	ata_ehi_clear_desc(ehi);
2025 
2026 	/* AHCI needs SError cleared; otherwise, it might lock up */
2027 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2028 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2029 
2030 	/* analyze @fis */
2031 	if (fis & NV_SWNCQ_IRQ_ADDED)
2032 		ata_ehi_push_desc(ehi, "hot plug");
2033 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2034 		ata_ehi_push_desc(ehi, "hot unplug");
2035 
2036 	ata_ehi_hotplugged(ehi);
2037 
2038 	/* okay, let's hand over to EH */
2039 	ehi->serror |= serror;
2040 
2041 	ata_port_freeze(ap);
2042 }
2043 
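/* Handle a Set Device Bits FIS.  Commands whose bits have dropped out of
 * SActive are completed; afterwards either the command that never received
 * its D2H Register FIS is re-issued or the next deferred command is
 * started.  Returns the number of completed commands, or a negative value
 * when the state looks inconsistent and EH should take over.
 */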
2044 static int nv_swncq_sdbfis(struct ata_port *ap)
2045 {
2046 	struct ata_queued_cmd *qc;
2047 	struct nv_swncq_port_priv *pp = ap->private_data;
2048 	struct ata_eh_info *ehi = &ap->link.eh_info;
2049 	u32 sactive;
2050 	int nr_done = 0;
2051 	u32 done_mask;
2052 	int i;
2053 	u8 host_stat;
2054 	u8 lack_dhfis = 0;
2055 
2056 	host_stat = ap->ops->bmdma_status(ap);
2057 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2058 		/* error when transferring data to/from memory */
2059 		ata_ehi_clear_desc(ehi);
2060 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2061 		ehi->err_mask |= AC_ERR_HOST_BUS;
2062 		ehi->action |= ATA_EH_RESET;
2063 		return -EINVAL;
2064 	}
2065 
2066 	ap->ops->sff_irq_clear(ap);
2067 	__ata_bmdma_stop(ap);
2068 
2069 	sactive = readl(pp->sactive_block);
2070 	done_mask = pp->qc_active ^ sactive;
2071 
2072 	if (unlikely(done_mask & sactive)) {
2073 		ata_ehi_clear_desc(ehi);
2074 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2075 				  "(%08x->%08x)", pp->qc_active, sactive);
2076 		ehi->err_mask |= AC_ERR_HSM;
2077 		ehi->action |= ATA_EH_RESET;
2078 		return -EINVAL;
2079 	}
2080 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
2081 		if (!(done_mask & (1 << i)))
2082 			continue;
2083 
2084 		qc = ata_qc_from_tag(ap, i);
2085 		if (qc) {
2086 			ata_qc_complete(qc);
2087 			pp->qc_active &= ~(1 << i);
2088 			pp->dhfis_bits &= ~(1 << i);
2089 			pp->dmafis_bits &= ~(1 << i);
2090 			pp->sdbfis_bits |= (1 << i);
2091 			nr_done++;
2092 		}
2093 	}
2094 
2095 	if (!ap->qc_active) {
2096 		DPRINTK("over\n");
2097 		nv_swncq_pp_reinit(ap);
2098 		return nr_done;
2099 	}
2100 
2101 	if (pp->qc_active & pp->dhfis_bits)
2102 		return nr_done;
2103 
2104 	if ((pp->ncq_flags & ncq_saw_backout) ||
2105 	    (pp->qc_active ^ pp->dhfis_bits))
2106 		/* if the controller can't get a Device-to-Host Register FIS,
2107 		 * the driver needs to reissue the command.
2108 		 */
2109 		lack_dhfis = 1;
2110 
2111 	DPRINTK("id 0x%x QC: qc_active 0x%x, "
2112 		"SWNCQ:qc_active 0x%X defer_bits %X "
2113 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2114 		ap->print_id, ap->qc_active, pp->qc_active,
2115 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2116 		pp->dmafis_bits, pp->last_issue_tag);
2117 
2118 	nv_swncq_fis_reinit(ap);
2119 
2120 	if (lack_dhfis) {
2121 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2122 		nv_swncq_issue_atacmd(ap, qc);
2123 		return nr_done;
2124 	}
2125 
2126 	if (pp->defer_queue.defer_bits) {
2127 		/* send deferral queue command */
2128 		qc = nv_swncq_qc_from_dq(ap);
2129 		WARN_ON(qc == NULL);
2130 		nv_swncq_issue_atacmd(ap, qc);
2131 	}
2132 
2133 	return nr_done;
2134 }
2135 
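/* Read the tag the controller reports in the MCP55 NCQ register block;
 * used to find out which queued command a DMA Setup FIS belongs to.
 */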
2136 static inline u32 nv_swncq_tag(struct ata_port *ap)
2137 {
2138 	struct nv_swncq_port_priv *pp = ap->private_data;
2139 	u32 tag;
2140 
2141 	tag = readb(pp->tag_block) >> 2;
2142 	return (tag & 0x1f);
2143 }
2144 
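/* React to a DMA Setup FIS: look up the reported tag, point the BMDMA
 * engine at that command's PRD table, set the transfer direction and start
 * the transfer.  Returns 1 if DMA was started, 0 if no matching command
 * was found.
 */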
2145 static int nv_swncq_dmafis(struct ata_port *ap)
2146 {
2147 	struct ata_queued_cmd *qc;
2148 	unsigned int rw;
2149 	u8 dmactl;
2150 	u32 tag;
2151 	struct nv_swncq_port_priv *pp = ap->private_data;
2152 
2153 	__ata_bmdma_stop(ap);
2154 	tag = nv_swncq_tag(ap);
2155 
2156 	DPRINTK("dma setup tag 0x%x\n", tag);
2157 	qc = ata_qc_from_tag(ap, tag);
2158 
2159 	if (unlikely(!qc))
2160 		return 0;
2161 
2162 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2163 
2164 	/* load PRD table addr. */
2165 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2166 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2167 
2168 	/* specify data direction, triple-check start bit is clear */
2169 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2170 	dmactl &= ~ATA_DMA_WR;
2171 	if (!rw)
2172 		dmactl |= ATA_DMA_WR;
2173 
2174 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2175 
2176 	return 1;
2177 }
2178 
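/* Per-port SWNCQ interrupt handling: acknowledge the FIS status bits, deal
 * with hotplug and device errors first, then process BACKOUT, SDB FIS, D2H
 * Register FIS and DMA Setup FIS conditions, completing finished commands
 * and issuing deferred ones along the way.
 */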
2179 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2180 {
2181 	struct nv_swncq_port_priv *pp = ap->private_data;
2182 	struct ata_queued_cmd *qc;
2183 	struct ata_eh_info *ehi = &ap->link.eh_info;
2184 	u32 serror;
2185 	u8 ata_stat;
2186 	int rc = 0;
2187 
2188 	ata_stat = ap->ops->sff_check_status(ap);
2189 	nv_swncq_irq_clear(ap, fis);
2190 	if (!fis)
2191 		return;
2192 
2193 	if (ap->pflags & ATA_PFLAG_FROZEN)
2194 		return;
2195 
2196 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2197 		nv_swncq_hotplug(ap, fis);
2198 		return;
2199 	}
2200 
2201 	if (!pp->qc_active)
2202 		return;
2203 
2204 	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2205 		return;
2206 	ap->ops->scr_write(ap, SCR_ERROR, serror);
2207 
2208 	if (ata_stat & ATA_ERR) {
2209 		ata_ehi_clear_desc(ehi);
2210 		ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2211 		ehi->err_mask |= AC_ERR_DEV;
2212 		ehi->serror |= serror;
2213 		ehi->action |= ATA_EH_RESET;
2214 		ata_port_freeze(ap);
2215 		return;
2216 	}
2217 
2218 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2219 		/* On a BACKOUT interrupt the driver must issue
2220 		 * the command again some time later.
2221 		 */
2222 		pp->ncq_flags |= ncq_saw_backout;
2223 	}
2224 
2225 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2226 		pp->ncq_flags |= ncq_saw_sdb;
2227 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2228 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2229 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2230 			pp->dmafis_bits, readl(pp->sactive_block));
2231 		rc = nv_swncq_sdbfis(ap);
2232 		if (rc < 0)
2233 			goto irq_error;
2234 	}
2235 
2236 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2237 		/* The interrupt indicates the new command
2238 		 * was transmitted correctly to the drive.
2239 		 */
2240 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2241 		pp->ncq_flags |= ncq_saw_d2h;
2242 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2243 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2244 			ehi->err_mask |= AC_ERR_HSM;
2245 			ehi->action |= ATA_EH_RESET;
2246 			goto irq_error;
2247 		}
2248 
2249 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2250 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2251 			ata_stat = ap->ops->sff_check_status(ap);
2252 			if (ata_stat & ATA_BUSY)
2253 				goto irq_exit;
2254 
2255 			if (pp->defer_queue.defer_bits) {
2256 				DPRINTK("send next command\n");
2257 				qc = nv_swncq_qc_from_dq(ap);
2258 				nv_swncq_issue_atacmd(ap, qc);
2259 			}
2260 		}
2261 	}
2262 
2263 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2264 		/* program the DMA controller with the appropriate PRD buffers
2265 		 * and start the DMA transfer for the requested command.
2266 		 */
2267 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2268 		pp->ncq_flags |= ncq_saw_dmas;
2269 		rc = nv_swncq_dmafis(ap);
2270 	}
2271 
2272 irq_exit:
2273 	return;
2274 irq_error:
2275 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2276 	ata_port_freeze(ap);
2277 	return;
2278 }
2279 
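/* Top-level MCP55 interrupt handler.  The status register carries one
 * field per port (NV_INT_PORT_SHIFT_MCP55 bits wide); ports with NCQ
 * commands outstanding go through the SWNCQ path, the rest fall back to
 * the generic nv_host_intr() handling.
 */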
2280 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2281 {
2282 	struct ata_host *host = dev_instance;
2283 	unsigned int i;
2284 	unsigned int handled = 0;
2285 	unsigned long flags;
2286 	u32 irq_stat;
2287 
2288 	spin_lock_irqsave(&host->lock, flags);
2289 
2290 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2291 
2292 	for (i = 0; i < host->n_ports; i++) {
2293 		struct ata_port *ap = host->ports[i];
2294 
2295 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2296 			if (ap->link.sactive) {
2297 				nv_swncq_host_interrupt(ap, (u16)irq_stat);
2298 				handled = 1;
2299 			} else {
2300 				if (irq_stat)	/* preserve hotplug bits */
2301 					nv_swncq_irq_clear(ap, 0xfff0);
2302 
2303 				handled += nv_host_intr(ap, (u8)irq_stat);
2304 			}
2305 		}
2306 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2307 	}
2308 
2309 	spin_unlock_irqrestore(&host->lock, flags);
2310 
2311 	return IRQ_RETVAL(handled);
2312 }
2313 
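/* PCI probe: reject controllers without all six BARs (IDE mode), pick the
 * GENERIC/CK804/ADMA/SWNCQ flavour from the device table and module
 * parameters, map the MMIO BAR, set up SCR access and hand the host over
 * to libata.
 */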
2314 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2315 {
2316 	static int printed_version;
2317 	const struct ata_port_info *ppi[] = { NULL, NULL };
2318 	struct nv_pi_priv *ipriv;
2319 	struct ata_host *host;
2320 	struct nv_host_priv *hpriv;
2321 	int rc;
2322 	u32 bar;
2323 	void __iomem *base;
2324 	unsigned long type = ent->driver_data;
2325 
2326 	// Make sure this is a SATA controller by counting the number of BARs
2327 	// (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2328 	// it's an IDE controller and we ignore it.
2329 	for (bar = 0; bar < 6; bar++)
2330 		if (pci_resource_start(pdev, bar) == 0)
2331 			return -ENODEV;
2332 
2333 	if (!printed_version++)
2334 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2335 
2336 	rc = pcim_enable_device(pdev);
2337 	if (rc)
2338 		return rc;
2339 
2340 	/* determine type and allocate host */
2341 	if (type == CK804 && adma_enabled) {
2342 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2343 		type = ADMA;
2344 	}
2345 
2346 	if (type == SWNCQ) {
2347 		if (swncq_enabled)
2348 			dev_printk(KERN_NOTICE, &pdev->dev,
2349 				   "Using SWNCQ mode\n");
2350 		else
2351 			type = GENERIC;
2352 	}
2353 
2354 	ppi[0] = &nv_port_info[type];
2355 	ipriv = ppi[0]->private_data;
2356 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2357 	if (rc)
2358 		return rc;
2359 
2360 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2361 	if (!hpriv)
2362 		return -ENOMEM;
2363 	hpriv->type = type;
2364 	host->private_data = hpriv;
2365 
2366 	/* request and iomap NV_MMIO_BAR */
2367 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2368 	if (rc)
2369 		return rc;
2370 
2371 	/* configure SCR access */
2372 	base = host->iomap[NV_MMIO_BAR];
2373 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2374 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2375 
2376 	/* enable SATA space for CK804 */
2377 	if (type >= CK804) {
2378 		u8 regval;
2379 
2380 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2381 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2382 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2383 	}
2384 
2385 	/* init ADMA */
2386 	if (type == ADMA) {
2387 		rc = nv_adma_host_init(host);
2388 		if (rc)
2389 			return rc;
2390 	} else if (type == SWNCQ)
2391 		nv_swncq_host_init(host);
2392 
2393 	pci_set_master(pdev);
2394 	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2395 				 IRQF_SHARED, ipriv->sht);
2396 }
2397 
2398 #ifdef CONFIG_PM
2399 static int nv_pci_device_resume(struct pci_dev *pdev)
2400 {
2401 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2402 	struct nv_host_priv *hpriv = host->private_data;
2403 	int rc;
2404 
2405 	rc = ata_pci_device_do_resume(pdev);
2406 	if (rc)
2407 		return rc;
2408 
2409 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2410 		if (hpriv->type >= CK804) {
2411 			u8 regval;
2412 
2413 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2414 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2415 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2416 		}
2417 		if (hpriv->type == ADMA) {
2418 			u32 tmp32;
2419 			struct nv_adma_port_priv *pp;
2420 			/* enable/disable ADMA on the ports appropriately */
2421 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2422 
2423 			pp = host->ports[0]->private_data;
2424 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2425 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2426 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2427 			else
2428 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2429 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2430 			pp = host->ports[1]->private_data;
2431 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2432 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2433 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2434 			else
2435 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2436 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2437 
2438 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2439 		}
2440 	}
2441 
2442 	ata_host_resume(host);
2443 
2444 	return 0;
2445 }
2446 #endif
2447 
2448 static void nv_ck804_host_stop(struct ata_host *host)
2449 {
2450 	struct pci_dev *pdev = to_pci_dev(host->dev);
2451 	u8 regval;
2452 
2453 	/* disable SATA space for CK804 */
2454 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2455 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2456 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2457 }
2458 
2459 static void nv_adma_host_stop(struct ata_host *host)
2460 {
2461 	struct pci_dev *pdev = to_pci_dev(host->dev);
2462 	u32 tmp32;
2463 
2464 	/* disable ADMA on the ports */
2465 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2466 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2467 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2468 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2469 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2470 
2471 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2472 
2473 	nv_ck804_host_stop(host);
2474 }
2475 
2476 static int __init nv_init(void)
2477 {
2478 	return pci_register_driver(&nv_pci_driver);
2479 }
2480 
2481 static void __exit nv_exit(void)
2482 {
2483 	pci_unregister_driver(&nv_pci_driver);
2484 }
2485 
2486 module_init(nv_init);
2487 module_exit(nv_exit);
2488 module_param_named(adma, adma_enabled, bool, 0444);
2489 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2490 module_param_named(swncq, swncq_enabled, bool, 0444);
2491 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2492 
2493