xref: /openbmc/linux/drivers/ata/sata_nv.c (revision 7fe2f639)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a similar
28  *  fashion to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/interrupt.h>
47 #include <linux/device.h>
48 #include <scsi/scsi_host.h>
49 #include <scsi/scsi_device.h>
50 #include <linux/libata.h>
51 
52 #define DRV_NAME			"sata_nv"
53 #define DRV_VERSION			"3.5"
54 
55 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
56 
57 enum {
58 	NV_MMIO_BAR			= 5,
59 
60 	NV_PORTS			= 2,
61 	NV_PIO_MASK			= ATA_PIO4,
62 	NV_MWDMA_MASK			= ATA_MWDMA2,
63 	NV_UDMA_MASK			= ATA_UDMA6,
64 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
65 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
66 
67 	/* INT_STATUS/ENABLE */
68 	NV_INT_STATUS			= 0x10,
69 	NV_INT_ENABLE			= 0x11,
70 	NV_INT_STATUS_CK804		= 0x440,
71 	NV_INT_ENABLE_CK804		= 0x441,
72 
73 	/* INT_STATUS/ENABLE bits */
74 	NV_INT_DEV			= 0x01,
75 	NV_INT_PM			= 0x02,
76 	NV_INT_ADDED			= 0x04,
77 	NV_INT_REMOVED			= 0x08,
78 
79 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
80 
81 	NV_INT_ALL			= 0x0f,
82 	NV_INT_MASK			= NV_INT_DEV |
83 					  NV_INT_ADDED | NV_INT_REMOVED,
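	/*
	 * Illustrative note: the 8-bit INT_STATUS/ENABLE registers pack one
	 * 4-bit field per port, so port 1's bits are reached with
	 * (irq_stat >> (1 * NV_INT_PORT_SHIFT)) & NV_INT_ALL, matching how
	 * nv_do_interrupt() below shifts irq_stat once per port.
	 */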
84 
85 	/* INT_CONFIG */
86 	NV_INT_CONFIG			= 0x12,
87 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
88 
89 	// For PCI config register 20
90 	NV_MCP_SATA_CFG_20		= 0x50,
91 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
92 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
93 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
94 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
95 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
96 
97 	NV_ADMA_MAX_CPBS		= 32,
98 	NV_ADMA_CPB_SZ			= 128,
99 	NV_ADMA_APRD_SZ			= 16,
100 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
101 					   NV_ADMA_APRD_SZ,
102 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
103 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
104 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
105 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
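	/*
	 * Sketch of the arithmetic above: each 128-byte CPB is paired with
	 * (1024 - 128) / 16 = 56 external APRD entries (896 bytes), so
	 * NV_ADMA_PORT_PRIV_DMA_SZ works out to 32 * (128 + 896) = 32 KiB
	 * of DMA memory per port.
	 */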
106 
107 	/* BAR5 offset to ADMA general registers */
108 	NV_ADMA_GEN			= 0x400,
109 	NV_ADMA_GEN_CTL			= 0x00,
110 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
111 
112 	/* BAR5 offset to ADMA ports */
113 	NV_ADMA_PORT			= 0x480,
114 
115 	/* size of ADMA port register space  */
116 	NV_ADMA_PORT_SIZE		= 0x100,
117 
118 	/* ADMA port registers */
119 	NV_ADMA_CTL			= 0x40,
120 	NV_ADMA_CPB_COUNT		= 0x42,
121 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
122 	NV_ADMA_STAT			= 0x44,
123 	NV_ADMA_CPB_BASE_LOW		= 0x48,
124 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
125 	NV_ADMA_APPEND			= 0x50,
126 	NV_ADMA_NOTIFIER		= 0x68,
127 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
128 
129 	/* NV_ADMA_CTL register bits */
130 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
131 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
132 	NV_ADMA_CTL_GO			= (1 << 7),
133 	NV_ADMA_CTL_AIEN		= (1 << 8),
134 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
135 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
136 
137 	/* CPB response flag bits */
138 	NV_CPB_RESP_DONE		= (1 << 0),
139 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
140 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
141 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
142 
143 	/* CPB control flag bits */
144 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
145 	NV_CPB_CTL_QUEUE		= (1 << 1),
146 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
147 	NV_CPB_CTL_IEN			= (1 << 3),
148 	NV_CPB_CTL_FPDMA		= (1 << 4),
149 
150 	/* APRD flags */
151 	NV_APRD_WRITE			= (1 << 1),
152 	NV_APRD_END			= (1 << 2),
153 	NV_APRD_CONT			= (1 << 3),
154 
155 	/* NV_ADMA_STAT flags */
156 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
157 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
158 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
159 	NV_ADMA_STAT_CPBERR		= (1 << 4),
160 	NV_ADMA_STAT_SERROR		= (1 << 5),
161 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
162 	NV_ADMA_STAT_IDLE		= (1 << 8),
163 	NV_ADMA_STAT_LEGACY		= (1 << 9),
164 	NV_ADMA_STAT_STOPPED		= (1 << 10),
165 	NV_ADMA_STAT_DONE		= (1 << 12),
166 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
167 					  NV_ADMA_STAT_TIMEOUT,
168 
169 	/* port flags */
170 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
171 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
172 
173 	/* MCP55 reg offset */
174 	NV_CTL_MCP55			= 0x400,
175 	NV_INT_STATUS_MCP55		= 0x440,
176 	NV_INT_ENABLE_MCP55		= 0x444,
177 	NV_NCQ_REG_MCP55		= 0x448,
178 
179 	/* MCP55 */
180 	NV_INT_ALL_MCP55		= 0xffff,
181 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
182 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
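	/*
	 * 0xfffd clears bit 1 of each 16-bit per-port field, mirroring how
	 * NV_INT_MASK above leaves out NV_INT_PM for the legacy flavors.
	 */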
183 
184 	/* SWNCQ ENABLE BITS */
185 	NV_CTL_PRI_SWNCQ		= 0x02,
186 	NV_CTL_SEC_SWNCQ		= 0x04,
187 
188 	/* SW NCQ status bits */
189 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
190 	NV_SWNCQ_IRQ_PM			= (1 << 1),
191 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
192 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
193 
194 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
195 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
196 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
197 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
198 
199 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
200 					  NV_SWNCQ_IRQ_REMOVED,
201 
202 };
203 
204 /* ADMA Physical Region Descriptor - one SG segment */
205 struct nv_adma_prd {
206 	__le64			addr;
207 	__le32			len;
208 	u8			flags;
209 	u8			packet_len;
210 	__le16			reserved;
211 };
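/* 8 + 4 + 1 + 1 + 2 = 16 bytes, matching NV_ADMA_APRD_SZ above. */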
212 
213 enum nv_adma_regbits {
214 	CMDEND	= (1 << 15),		/* end of command list */
215 	WNB	= (1 << 14),		/* wait-not-BSY */
216 	IGN	= (1 << 13),		/* ignore this entry */
217 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
218 	DA2	= (1 << (2 + 8)),
219 	DA1	= (1 << (1 + 8)),
220 	DA0	= (1 << (0 + 8)),
221 };
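/*
 * nv_adma_tf_to_cpb() encodes the taskfile as an array of 16-bit entries:
 * the register value in the low byte, the PATA address signals above in
 * bits 8-12, and WNB/IGN/CMDEND qualifying the entry.  For illustration,
 * the final entry of every command looks like:
 *
 *	cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND)
 */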
222 
223 /* ADMA Command Parameter Block
224    The first 5 SG segments are stored inside the Command Parameter Block itself.
225    If there are more than 5 segments, the remainder is stored in a separate
226    memory area indicated by next_aprd. */
227 struct nv_adma_cpb {
228 	u8			resp_flags;    /* 0 */
229 	u8			reserved1;     /* 1 */
230 	u8			ctl_flags;     /* 2 */
231 	/* len is length of taskfile in 64 bit words */
232 	u8			len;		/* 3  */
233 	u8			tag;           /* 4 */
234 	u8			next_cpb_idx;  /* 5 */
235 	__le16			reserved2;     /* 6-7 */
236 	__le16			tf[12];        /* 8-31 */
237 	struct nv_adma_prd	aprd[5];       /* 32-111 */
238 	__le64			next_aprd;     /* 112-119 */
239 	__le64			reserved3;     /* 120-127 */
240 };
241 
242 
243 struct nv_adma_port_priv {
244 	struct nv_adma_cpb	*cpb;
245 	dma_addr_t		cpb_dma;
246 	struct nv_adma_prd	*aprd;
247 	dma_addr_t		aprd_dma;
248 	void __iomem		*ctl_block;
249 	void __iomem		*gen_block;
250 	void __iomem		*notifier_clear_block;
251 	u64			adma_dma_mask;
252 	u8			flags;
253 	int			last_issue_ncq;
254 };
255 
256 struct nv_host_priv {
257 	unsigned long		type;
258 };
259 
260 struct defer_queue {
261 	u32		defer_bits;
262 	unsigned int	head;
263 	unsigned int	tail;
264 	unsigned int	tag[ATA_MAX_QUEUE];
265 };
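/*
 * head and tail only ever grow; nv_swncq_qc_to_dq()/_from_dq() take the
 * ring index modulo ATA_MAX_QUEUE (a power of two), e.g.
 * dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag, while defer_bits
 * mirrors which tags currently sit in the queue.
 */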
266 
267 enum ncq_saw_flag_list {
268 	ncq_saw_d2h	= (1U << 0),
269 	ncq_saw_dmas	= (1U << 1),
270 	ncq_saw_sdb	= (1U << 2),
271 	ncq_saw_backout	= (1U << 3),
272 };
273 
274 struct nv_swncq_port_priv {
275 	struct ata_bmdma_prd *prd;	 /* our SG list */
276 	dma_addr_t	prd_dma; /* and its DMA mapping */
277 	void __iomem	*sactive_block;
278 	void __iomem	*irq_block;
279 	void __iomem	*tag_block;
280 	u32		qc_active;
281 
282 	unsigned int	last_issue_tag;
283 
284 	/* FIFO circular queue to store deferred commands */
285 	struct defer_queue defer_queue;
286 
287 	/* for NCQ interrupt analysis */
288 	u32		dhfis_bits;
289 	u32		dmafis_bits;
290 	u32		sdbfis_bits;
291 
292 	unsigned int	ncq_flags;
293 };
294 
295 
296 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
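/* i.e. the per-port ADMA interrupt lives at bit 19 (port 0) or bit 31
   (port 1) of the ADMA general control register. */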
297 
298 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
299 #ifdef CONFIG_PM
300 static int nv_pci_device_resume(struct pci_dev *pdev);
301 #endif
302 static void nv_ck804_host_stop(struct ata_host *host);
303 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
305 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
306 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
307 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
308 
309 static int nv_hardreset(struct ata_link *link, unsigned int *class,
310 			unsigned long deadline);
311 static void nv_nf2_freeze(struct ata_port *ap);
312 static void nv_nf2_thaw(struct ata_port *ap);
313 static void nv_ck804_freeze(struct ata_port *ap);
314 static void nv_ck804_thaw(struct ata_port *ap);
315 static int nv_adma_slave_config(struct scsi_device *sdev);
316 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
317 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
318 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
319 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
320 static void nv_adma_irq_clear(struct ata_port *ap);
321 static int nv_adma_port_start(struct ata_port *ap);
322 static void nv_adma_port_stop(struct ata_port *ap);
323 #ifdef CONFIG_PM
324 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
325 static int nv_adma_port_resume(struct ata_port *ap);
326 #endif
327 static void nv_adma_freeze(struct ata_port *ap);
328 static void nv_adma_thaw(struct ata_port *ap);
329 static void nv_adma_error_handler(struct ata_port *ap);
330 static void nv_adma_host_stop(struct ata_host *host);
331 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
332 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
333 
334 static void nv_mcp55_thaw(struct ata_port *ap);
335 static void nv_mcp55_freeze(struct ata_port *ap);
336 static void nv_swncq_error_handler(struct ata_port *ap);
337 static int nv_swncq_slave_config(struct scsi_device *sdev);
338 static int nv_swncq_port_start(struct ata_port *ap);
339 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
340 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
341 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
342 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
343 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
344 #ifdef CONFIG_PM
345 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
346 static int nv_swncq_port_resume(struct ata_port *ap);
347 #endif
348 
349 enum nv_host_type
350 {
351 	GENERIC,
352 	NFORCE2,
353 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
354 	CK804,
355 	ADMA,
356 	MCP5x,
357 	SWNCQ,
358 };
359 
360 static const struct pci_device_id nv_pci_tbl[] = {
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
373 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
374 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
375 
376 	{ } /* terminate list */
377 };
378 
379 static struct pci_driver nv_pci_driver = {
380 	.name			= DRV_NAME,
381 	.id_table		= nv_pci_tbl,
382 	.probe			= nv_init_one,
383 #ifdef CONFIG_PM
384 	.suspend		= ata_pci_device_suspend,
385 	.resume			= nv_pci_device_resume,
386 #endif
387 	.remove			= ata_pci_remove_one,
388 };
389 
390 static struct scsi_host_template nv_sht = {
391 	ATA_BMDMA_SHT(DRV_NAME),
392 };
393 
394 static struct scsi_host_template nv_adma_sht = {
395 	ATA_NCQ_SHT(DRV_NAME),
396 	.can_queue		= NV_ADMA_MAX_CPBS,
397 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
398 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
399 	.slave_configure	= nv_adma_slave_config,
400 };
401 
402 static struct scsi_host_template nv_swncq_sht = {
403 	ATA_NCQ_SHT(DRV_NAME),
404 	.can_queue		= ATA_MAX_QUEUE,
405 	.sg_tablesize		= LIBATA_MAX_PRD,
406 	.dma_boundary		= ATA_DMA_BOUNDARY,
407 	.slave_configure	= nv_swncq_slave_config,
408 };
409 
410 /*
411  * NV SATA controllers have various problems with the hardreset
412  * protocol depending on the specific controller and device.
413  *
414  * GENERIC:
415  *
416  *  bko11195 reports that link doesn't come online after hardreset on
417  *  generic nv's and there have been several other similar reports on
418  *  linux-ide.
419  *
420  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
421  *  softreset.
422  *
423  * NF2/3:
424  *
425  *  bko3352 reports nf2/3 controllers can't determine device signature
426  *  reliably after hardreset.  The following thread reports detection
427  *  failure on cold boot with the standard debouncing timing.
428  *
429  *  http://thread.gmane.org/gmane.linux.ide/34098
430  *
431  *  bko12176 reports that hardreset fails to bring up the link during
432  *  boot on nf2.
433  *
434  * CK804:
435  *
436  *  For initial probing after boot and hot plugging, hardreset mostly
437  *  works fine on CK804 but curiously, reprobing on the initial port
438  *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
439  *  FIS in a somewhat nondeterministic way.
440  *
441  * SWNCQ:
442  *
443  *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
444  *  hardreset should be used, but hardreset can't report the proper
445  *  signature, which suggests that mcp5x is closer to nf2 as far as
446  *  reset quirkiness is concerned.
447  *
448  *  bko12703 reports that boot probing fails for intel SSD with
449  *  hardreset.  Link fails to come online.  Softreset works fine.
450  *
451  * The failures are varied but the following patterns seem true for
452  * all flavors.
453  *
454  * - Softreset during boot always works.
455  *
456  * - Hardreset during boot sometimes fails to bring up the link on
457  *   certain combinations, and device signature acquisition is
458  *   unreliable.
459  *
460  * - Hardreset is often necessary after hotplug.
461  *
462  * So, preferring softreset for boot probing and error handling (as
463  * hardreset might bring down the link) but using hardreset for
464  * post-boot probing should work around the above issues in most
465  * cases.  Define nv_hardreset() which only kicks in for post-boot
466  * probing and use it for all variants.
467  */
468 static struct ata_port_operations nv_generic_ops = {
469 	.inherits		= &ata_bmdma_port_ops,
470 	.lost_interrupt		= ATA_OP_NULL,
471 	.scr_read		= nv_scr_read,
472 	.scr_write		= nv_scr_write,
473 	.hardreset		= nv_hardreset,
474 };
475 
476 static struct ata_port_operations nv_nf2_ops = {
477 	.inherits		= &nv_generic_ops,
478 	.freeze			= nv_nf2_freeze,
479 	.thaw			= nv_nf2_thaw,
480 };
481 
482 static struct ata_port_operations nv_ck804_ops = {
483 	.inherits		= &nv_generic_ops,
484 	.freeze			= nv_ck804_freeze,
485 	.thaw			= nv_ck804_thaw,
486 	.host_stop		= nv_ck804_host_stop,
487 };
488 
489 static struct ata_port_operations nv_adma_ops = {
490 	.inherits		= &nv_ck804_ops,
491 
492 	.check_atapi_dma	= nv_adma_check_atapi_dma,
493 	.sff_tf_read		= nv_adma_tf_read,
494 	.qc_defer		= ata_std_qc_defer,
495 	.qc_prep		= nv_adma_qc_prep,
496 	.qc_issue		= nv_adma_qc_issue,
497 	.sff_irq_clear		= nv_adma_irq_clear,
498 
499 	.freeze			= nv_adma_freeze,
500 	.thaw			= nv_adma_thaw,
501 	.error_handler		= nv_adma_error_handler,
502 	.post_internal_cmd	= nv_adma_post_internal_cmd,
503 
504 	.port_start		= nv_adma_port_start,
505 	.port_stop		= nv_adma_port_stop,
506 #ifdef CONFIG_PM
507 	.port_suspend		= nv_adma_port_suspend,
508 	.port_resume		= nv_adma_port_resume,
509 #endif
510 	.host_stop		= nv_adma_host_stop,
511 };
512 
513 static struct ata_port_operations nv_swncq_ops = {
514 	.inherits		= &nv_generic_ops,
515 
516 	.qc_defer		= ata_std_qc_defer,
517 	.qc_prep		= nv_swncq_qc_prep,
518 	.qc_issue		= nv_swncq_qc_issue,
519 
520 	.freeze			= nv_mcp55_freeze,
521 	.thaw			= nv_mcp55_thaw,
522 	.error_handler		= nv_swncq_error_handler,
523 
524 #ifdef CONFIG_PM
525 	.port_suspend		= nv_swncq_port_suspend,
526 	.port_resume		= nv_swncq_port_resume,
527 #endif
528 	.port_start		= nv_swncq_port_start,
529 };
530 
531 struct nv_pi_priv {
532 	irq_handler_t			irq_handler;
533 	struct scsi_host_template	*sht;
534 };
535 
536 #define NV_PI_PRIV(_irq_handler, _sht) \
537 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
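/* The compound literal gives each nv_port_info[] entry below an anonymous,
   statically allocated nv_pi_priv, e.g.:
   .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht) */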
538 
539 static const struct ata_port_info nv_port_info[] = {
540 	/* generic */
541 	{
542 		.flags		= ATA_FLAG_SATA,
543 		.pio_mask	= NV_PIO_MASK,
544 		.mwdma_mask	= NV_MWDMA_MASK,
545 		.udma_mask	= NV_UDMA_MASK,
546 		.port_ops	= &nv_generic_ops,
547 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
548 	},
549 	/* nforce2/3 */
550 	{
551 		.flags		= ATA_FLAG_SATA,
552 		.pio_mask	= NV_PIO_MASK,
553 		.mwdma_mask	= NV_MWDMA_MASK,
554 		.udma_mask	= NV_UDMA_MASK,
555 		.port_ops	= &nv_nf2_ops,
556 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
557 	},
558 	/* ck804 */
559 	{
560 		.flags		= ATA_FLAG_SATA,
561 		.pio_mask	= NV_PIO_MASK,
562 		.mwdma_mask	= NV_MWDMA_MASK,
563 		.udma_mask	= NV_UDMA_MASK,
564 		.port_ops	= &nv_ck804_ops,
565 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
566 	},
567 	/* ADMA */
568 	{
569 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
570 		.pio_mask	= NV_PIO_MASK,
571 		.mwdma_mask	= NV_MWDMA_MASK,
572 		.udma_mask	= NV_UDMA_MASK,
573 		.port_ops	= &nv_adma_ops,
574 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
575 	},
576 	/* MCP5x */
577 	{
578 		.flags		= ATA_FLAG_SATA,
579 		.pio_mask	= NV_PIO_MASK,
580 		.mwdma_mask	= NV_MWDMA_MASK,
581 		.udma_mask	= NV_UDMA_MASK,
582 		.port_ops	= &nv_generic_ops,
583 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
584 	},
585 	/* SWNCQ */
586 	{
587 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
588 		.pio_mask	= NV_PIO_MASK,
589 		.mwdma_mask	= NV_MWDMA_MASK,
590 		.udma_mask	= NV_UDMA_MASK,
591 		.port_ops	= &nv_swncq_ops,
592 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
593 	},
594 };
595 
596 MODULE_AUTHOR("NVIDIA");
597 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
598 MODULE_LICENSE("GPL");
599 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
600 MODULE_VERSION(DRV_VERSION);
601 
602 static int adma_enabled;
603 static int swncq_enabled = 1;
604 static int msi_enabled;
605 
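/*
 * Switch a port from ADMA operation back to legacy register mode.  The
 * polling loops below bound each wait to roughly 20 * ndelay(50), i.e.
 * about a microsecond plus register read latency, before warning and
 * carrying on anyway.
 */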
606 static void nv_adma_register_mode(struct ata_port *ap)
607 {
608 	struct nv_adma_port_priv *pp = ap->private_data;
609 	void __iomem *mmio = pp->ctl_block;
610 	u16 tmp, status;
611 	int count = 0;
612 
613 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
614 		return;
615 
616 	status = readw(mmio + NV_ADMA_STAT);
617 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
618 		ndelay(50);
619 		status = readw(mmio + NV_ADMA_STAT);
620 		count++;
621 	}
622 	if (count == 20)
623 		ata_port_printk(ap, KERN_WARNING,
624 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
625 			status);
626 
627 	tmp = readw(mmio + NV_ADMA_CTL);
628 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
629 
630 	count = 0;
631 	status = readw(mmio + NV_ADMA_STAT);
632 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
633 		ndelay(50);
634 		status = readw(mmio + NV_ADMA_STAT);
635 		count++;
636 	}
637 	if (count == 20)
638 		ata_port_printk(ap, KERN_WARNING,
639 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
640 			 status);
641 
642 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
643 }
644 
645 static void nv_adma_mode(struct ata_port *ap)
646 {
647 	struct nv_adma_port_priv *pp = ap->private_data;
648 	void __iomem *mmio = pp->ctl_block;
649 	u16 tmp, status;
650 	int count = 0;
651 
652 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
653 		return;
654 
655 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
656 
657 	tmp = readw(mmio + NV_ADMA_CTL);
658 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
659 
660 	status = readw(mmio + NV_ADMA_STAT);
661 	while (((status & NV_ADMA_STAT_LEGACY) ||
662 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
663 		ndelay(50);
664 		status = readw(mmio + NV_ADMA_STAT);
665 		count++;
666 	}
667 	if (count == 20)
668 		ata_port_printk(ap, KERN_WARNING,
669 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
670 			status);
671 
672 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
673 }
674 
675 static int nv_adma_slave_config(struct scsi_device *sdev)
676 {
677 	struct ata_port *ap = ata_shost_to_port(sdev->host);
678 	struct nv_adma_port_priv *pp = ap->private_data;
679 	struct nv_adma_port_priv *port0, *port1;
680 	struct scsi_device *sdev0, *sdev1;
681 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
682 	unsigned long segment_boundary, flags;
683 	unsigned short sg_tablesize;
684 	int rc;
685 	int adma_enable;
686 	u32 current_reg, new_reg, config_mask;
687 
688 	rc = ata_scsi_slave_config(sdev);
689 
690 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
691 		/* Not a proper libata device, ignore */
692 		return rc;
693 
694 	spin_lock_irqsave(ap->lock, flags);
695 
696 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
697 		/*
698 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
699 		 * Therefore ATAPI commands are sent through the legacy interface.
700 		 * However, the legacy interface only supports 32-bit DMA.
701 		 * Restrict DMA parameters as required by the legacy interface
702 		 * when an ATAPI device is connected.
703 		 */
704 		segment_boundary = ATA_DMA_BOUNDARY;
705 		/* Subtract 1 since an extra entry may be needed for padding, see
706 		   libata-scsi.c */
707 		sg_tablesize = LIBATA_MAX_PRD - 1;
708 
709 		/* Since the legacy DMA engine is in use, we need to disable ADMA
710 		   on the port. */
711 		adma_enable = 0;
712 		nv_adma_register_mode(ap);
713 	} else {
714 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
715 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
716 		adma_enable = 1;
717 	}
718 
719 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
720 
721 	if (ap->port_no == 1)
722 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
723 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
724 	else
725 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
726 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
727 
728 	if (adma_enable) {
729 		new_reg = current_reg | config_mask;
730 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
731 	} else {
732 		new_reg = current_reg & ~config_mask;
733 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
734 	}
735 
736 	if (current_reg != new_reg)
737 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
738 
739 	port0 = ap->host->ports[0]->private_data;
740 	port1 = ap->host->ports[1]->private_data;
741 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
742 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
743 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
744 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
745 		/** We have to set the DMA mask to 32-bit if either port is in
746 		    ATAPI mode, since they are on the same PCI device which is
747 		    used for DMA mapping. If we set the mask we also need to set
748 		    the bounce limit on both ports to ensure that the block
749 		    layer doesn't feed addresses that cause DMA mapping to
750 		    choke. If either SCSI device is not allocated yet, it's OK
751 		    since that port will discover its correct setting when it
752 		    does get allocated.
753 		    Note: Setting 32-bit mask should not fail. */
754 		if (sdev0)
755 			blk_queue_bounce_limit(sdev0->request_queue,
756 					       ATA_DMA_MASK);
757 		if (sdev1)
758 			blk_queue_bounce_limit(sdev1->request_queue,
759 					       ATA_DMA_MASK);
760 
761 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
762 	} else {
763 		/** This shouldn't fail as it was set to this value before */
764 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
765 		if (sdev0)
766 			blk_queue_bounce_limit(sdev0->request_queue,
767 					       pp->adma_dma_mask);
768 		if (sdev1)
769 			blk_queue_bounce_limit(sdev1->request_queue,
770 					       pp->adma_dma_mask);
771 	}
772 
773 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
774 	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
775 	ata_port_printk(ap, KERN_INFO,
776 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
777 		(unsigned long long)*ap->host->dev->dma_mask,
778 		segment_boundary, sg_tablesize);
779 
780 	spin_unlock_irqrestore(ap->lock, flags);
781 
782 	return rc;
783 }
784 
785 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
786 {
787 	struct nv_adma_port_priv *pp = qc->ap->private_data;
788 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
789 }
790 
791 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
792 {
793 	/* Other than when internal or pass-through commands are executed,
794 	   the only time this function will be called in ADMA mode will be
795 	   if a command fails. In the failure case we don't care about going
796 	   into register mode with ADMA commands pending, as the commands will
797 	   all shortly be aborted anyway. We assume that NCQ commands are not
798 	   issued via passthrough, which is the only way that switching into
799 	   ADMA mode could abort outstanding commands. */
800 	nv_adma_register_mode(ap);
801 
802 	ata_sff_tf_read(ap, tf);
803 }
804 
805 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
806 {
807 	unsigned int idx = 0;
808 
809 	if (tf->flags & ATA_TFLAG_ISADDR) {
810 		if (tf->flags & ATA_TFLAG_LBA48) {
811 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
812 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
813 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
814 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
815 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
816 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
817 		} else
818 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
819 
820 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
821 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
822 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
823 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
824 	}
825 
826 	if (tf->flags & ATA_TFLAG_DEVICE)
827 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
828 
829 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
830 
831 	while (idx < 12)
832 		cpb[idx++] = cpu_to_le16(IGN);
833 
834 	return idx;
835 }
836 
837 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
838 {
839 	struct nv_adma_port_priv *pp = ap->private_data;
840 	u8 flags = pp->cpb[cpb_num].resp_flags;
841 
842 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
843 
844 	if (unlikely((force_err ||
845 		     flags & (NV_CPB_RESP_ATA_ERR |
846 			      NV_CPB_RESP_CMD_ERR |
847 			      NV_CPB_RESP_CPB_ERR)))) {
848 		struct ata_eh_info *ehi = &ap->link.eh_info;
849 		int freeze = 0;
850 
851 		ata_ehi_clear_desc(ehi);
852 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
853 		if (flags & NV_CPB_RESP_ATA_ERR) {
854 			ata_ehi_push_desc(ehi, "ATA error");
855 			ehi->err_mask |= AC_ERR_DEV;
856 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
857 			ata_ehi_push_desc(ehi, "CMD error");
858 			ehi->err_mask |= AC_ERR_DEV;
859 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
860 			ata_ehi_push_desc(ehi, "CPB error");
861 			ehi->err_mask |= AC_ERR_SYSTEM;
862 			freeze = 1;
863 		} else {
864 			/* notifier error, but no error in CPB flags? */
865 			ata_ehi_push_desc(ehi, "unknown");
866 			ehi->err_mask |= AC_ERR_OTHER;
867 			freeze = 1;
868 		}
869 		/* Kill all commands. EH will determine what actually failed. */
870 		if (freeze)
871 			ata_port_freeze(ap);
872 		else
873 			ata_port_abort(ap);
874 		return -1;
875 	}
876 
877 	if (likely(flags & NV_CPB_RESP_DONE))
878 		return 1;
879 	return 0;
880 }
881 
882 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
883 {
884 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
885 
886 	/* freeze if hotplugged */
887 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
888 		ata_port_freeze(ap);
889 		return 1;
890 	}
891 
892 	/* bail out if not our interrupt */
893 	if (!(irq_stat & NV_INT_DEV))
894 		return 0;
895 
896 	/* DEV interrupt w/ no active qc? */
897 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
898 		ata_sff_check_status(ap);
899 		return 1;
900 	}
901 
902 	/* handle interrupt */
903 	return ata_bmdma_port_intr(ap, qc);
904 }
905 
906 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
907 {
908 	struct ata_host *host = dev_instance;
909 	int i, handled = 0;
910 	u32 notifier_clears[2];
911 
912 	spin_lock(&host->lock);
913 
914 	for (i = 0; i < host->n_ports; i++) {
915 		struct ata_port *ap = host->ports[i];
916 		struct nv_adma_port_priv *pp = ap->private_data;
917 		void __iomem *mmio = pp->ctl_block;
918 		u16 status;
919 		u32 gen_ctl;
920 		u32 notifier, notifier_error;
921 
922 		notifier_clears[i] = 0;
923 
924 		/* if ADMA is disabled, use standard ata interrupt handler */
925 		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
926 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
927 				>> (NV_INT_PORT_SHIFT * i);
928 			handled += nv_host_intr(ap, irq_stat);
929 			continue;
930 		}
931 
932 		/* if in ATA register mode, check for standard interrupts */
933 		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
934 			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
935 				>> (NV_INT_PORT_SHIFT * i);
936 			if (ata_tag_valid(ap->link.active_tag))
937 				/** NV_INT_DEV indication seems unreliable
938 				    at times, at least in ADMA mode. Always
939 				    force it on when a command is active, to
940 				    prevent losing interrupts. */
941 				irq_stat |= NV_INT_DEV;
942 			handled += nv_host_intr(ap, irq_stat);
943 		}
944 
945 		notifier = readl(mmio + NV_ADMA_NOTIFIER);
946 		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
947 		notifier_clears[i] = notifier | notifier_error;
948 
949 		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
950 
951 		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
952 		    !notifier_error)
953 			/* Nothing to do */
954 			continue;
955 
956 		status = readw(mmio + NV_ADMA_STAT);
957 
958 		/*
959 		 * Clear status. Ensure the controller sees the
960 		 * clearing before we start looking at any of the CPB
961 		 * statuses, so that any CPB completions after this
962 		 * point in the handler will raise another interrupt.
963 		 */
964 		writew(status, mmio + NV_ADMA_STAT);
965 		readw(mmio + NV_ADMA_STAT); /* flush posted write */
966 		rmb();
967 
968 		handled++; /* irq handled if we got here */
969 
970 		/* freeze if hotplugged or controller error */
971 		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
972 				       NV_ADMA_STAT_HOTUNPLUG |
973 				       NV_ADMA_STAT_TIMEOUT |
974 				       NV_ADMA_STAT_SERROR))) {
975 			struct ata_eh_info *ehi = &ap->link.eh_info;
976 
977 			ata_ehi_clear_desc(ehi);
978 			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
979 			if (status & NV_ADMA_STAT_TIMEOUT) {
980 				ehi->err_mask |= AC_ERR_SYSTEM;
981 				ata_ehi_push_desc(ehi, "timeout");
982 			} else if (status & NV_ADMA_STAT_HOTPLUG) {
983 				ata_ehi_hotplugged(ehi);
984 				ata_ehi_push_desc(ehi, "hotplug");
985 			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
986 				ata_ehi_hotplugged(ehi);
987 				ata_ehi_push_desc(ehi, "hot unplug");
988 			} else if (status & NV_ADMA_STAT_SERROR) {
989 				/* let EH analyze SError and figure out cause */
990 				ata_ehi_push_desc(ehi, "SError");
991 			} else
992 				ata_ehi_push_desc(ehi, "unknown");
993 			ata_port_freeze(ap);
994 			continue;
995 		}
996 
997 		if (status & (NV_ADMA_STAT_DONE |
998 			      NV_ADMA_STAT_CPBERR |
999 			      NV_ADMA_STAT_CMD_COMPLETE)) {
1000 			u32 check_commands = notifier_clears[i];
1001 			u32 done_mask = 0;
1002 			int pos, rc;
1003 
1004 			if (status & NV_ADMA_STAT_CPBERR) {
1005 				/* check all active commands */
1006 				if (ata_tag_valid(ap->link.active_tag))
1007 					check_commands = 1 <<
1008 						ap->link.active_tag;
1009 				else
1010 					check_commands = ap->link.sactive;
1011 			}
1012 
1013 			/* check CPBs for completed commands */
1014 			while ((pos = ffs(check_commands))) {
1015 				pos--;
1016 				rc = nv_adma_check_cpb(ap, pos,
1017 						notifier_error & (1 << pos));
1018 				if (rc > 0)
1019 					done_mask |= 1 << pos;
1020 				else if (unlikely(rc < 0))
1021 					check_commands = 0;
1022 				check_commands &= ~(1 << pos);
1023 			}
1024 			ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1025 		}
1026 	}
1027 
1028 	if (notifier_clears[0] || notifier_clears[1]) {
1029 		/* Note: Both notifier clear registers must be written
1030 		   if either is set, even if one is zero, according to NVIDIA. */
1031 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1032 		writel(notifier_clears[0], pp->notifier_clear_block);
1033 		pp = host->ports[1]->private_data;
1034 		writel(notifier_clears[1], pp->notifier_clear_block);
1035 	}
1036 
1037 	spin_unlock(&host->lock);
1038 
1039 	return IRQ_RETVAL(handled);
1040 }
1041 
1042 static void nv_adma_freeze(struct ata_port *ap)
1043 {
1044 	struct nv_adma_port_priv *pp = ap->private_data;
1045 	void __iomem *mmio = pp->ctl_block;
1046 	u16 tmp;
1047 
1048 	nv_ck804_freeze(ap);
1049 
1050 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1051 		return;
1052 
1053 	/* clear any outstanding CK804 notifications */
1054 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1055 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1056 
1057 	/* Disable interrupt */
1058 	tmp = readw(mmio + NV_ADMA_CTL);
1059 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1060 		mmio + NV_ADMA_CTL);
1061 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1062 }
1063 
1064 static void nv_adma_thaw(struct ata_port *ap)
1065 {
1066 	struct nv_adma_port_priv *pp = ap->private_data;
1067 	void __iomem *mmio = pp->ctl_block;
1068 	u16 tmp;
1069 
1070 	nv_ck804_thaw(ap);
1071 
1072 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1073 		return;
1074 
1075 	/* Enable interrupt */
1076 	tmp = readw(mmio + NV_ADMA_CTL);
1077 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1078 		mmio + NV_ADMA_CTL);
1079 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1080 }
1081 
1082 static void nv_adma_irq_clear(struct ata_port *ap)
1083 {
1084 	struct nv_adma_port_priv *pp = ap->private_data;
1085 	void __iomem *mmio = pp->ctl_block;
1086 	u32 notifier_clears[2];
1087 
1088 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1089 		ata_bmdma_irq_clear(ap);
1090 		return;
1091 	}
1092 
1093 	/* clear any outstanding CK804 notifications */
1094 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1095 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1096 
1097 	/* clear ADMA status */
1098 	writew(0xffff, mmio + NV_ADMA_STAT);
1099 
1100 	/* clear notifiers - note both ports need to be written with
1101 	   something even though we are only clearing on one */
1102 	if (ap->port_no == 0) {
1103 		notifier_clears[0] = 0xFFFFFFFF;
1104 		notifier_clears[1] = 0;
1105 	} else {
1106 		notifier_clears[0] = 0;
1107 		notifier_clears[1] = 0xFFFFFFFF;
1108 	}
1109 	pp = ap->host->ports[0]->private_data;
1110 	writel(notifier_clears[0], pp->notifier_clear_block);
1111 	pp = ap->host->ports[1]->private_data;
1112 	writel(notifier_clears[1], pp->notifier_clear_block);
1113 }
1114 
1115 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1116 {
1117 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1118 
1119 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1120 		ata_bmdma_post_internal_cmd(qc);
1121 }
1122 
1123 static int nv_adma_port_start(struct ata_port *ap)
1124 {
1125 	struct device *dev = ap->host->dev;
1126 	struct nv_adma_port_priv *pp;
1127 	int rc;
1128 	void *mem;
1129 	dma_addr_t mem_dma;
1130 	void __iomem *mmio;
1131 	struct pci_dev *pdev = to_pci_dev(dev);
1132 	u16 tmp;
1133 
1134 	VPRINTK("ENTER\n");
1135 
1136 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1137 	   pad buffers */
1138 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1139 	if (rc)
1140 		return rc;
1141 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1142 	if (rc)
1143 		return rc;
1144 
1145 	/* we might fall back to bmdma, so allocate bmdma resources */
1146 	rc = ata_bmdma_port_start(ap);
1147 	if (rc)
1148 		return rc;
1149 
1150 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1151 	if (!pp)
1152 		return -ENOMEM;
1153 
1154 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1155 	       ap->port_no * NV_ADMA_PORT_SIZE;
1156 	pp->ctl_block = mmio;
1157 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1158 	pp->notifier_clear_block = pp->gen_block +
1159 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1160 
1161 	/* Now that the legacy PRD and padding buffer are allocated we can
1162 	   safely raise the DMA mask to allocate the CPB/APRD table.
1163 	   These calls are allowed to fail since we store the resulting mask,
1164 	   which is later used as the bounce limit in slave_config if
1165 	   needed. */
1166 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1167 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1168 	pp->adma_dma_mask = *dev->dma_mask;
1169 
1170 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1171 				  &mem_dma, GFP_KERNEL);
1172 	if (!mem)
1173 		return -ENOMEM;
1174 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1175 
1176 	/*
1177 	 * First item in chunk of DMA memory:
1178 	 * 128-byte command parameter block (CPB)
1179 	 * one for each command tag
1180 	 */
1181 	pp->cpb     = mem;
1182 	pp->cpb_dma = mem_dma;
1183 
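	/* Program the 64-bit CPB base.  The (mem_dma >> 16) >> 16 split
	   avoids a shift-count warning when dma_addr_t is only 32 bits
	   wide. */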
1184 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1185 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1186 
1187 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1188 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1189 
1190 	/*
1191 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1192 	 */
1193 	pp->aprd = mem;
1194 	pp->aprd_dma = mem_dma;
1195 
1196 	ap->private_data = pp;
1197 
1198 	/* clear any outstanding interrupt conditions */
1199 	writew(0xffff, mmio + NV_ADMA_STAT);
1200 
1201 	/* initialize port variables */
1202 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1203 
1204 	/* clear CPB fetch count */
1205 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1206 
1207 	/* clear GO for register mode, enable interrupt */
1208 	tmp = readw(mmio + NV_ADMA_CTL);
1209 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1210 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1211 
1212 	tmp = readw(mmio + NV_ADMA_CTL);
1213 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1214 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1215 	udelay(1);
1216 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1217 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1218 
1219 	return 0;
1220 }
1221 
1222 static void nv_adma_port_stop(struct ata_port *ap)
1223 {
1224 	struct nv_adma_port_priv *pp = ap->private_data;
1225 	void __iomem *mmio = pp->ctl_block;
1226 
1227 	VPRINTK("ENTER\n");
1228 	writew(0, mmio + NV_ADMA_CTL);
1229 }
1230 
1231 #ifdef CONFIG_PM
1232 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1233 {
1234 	struct nv_adma_port_priv *pp = ap->private_data;
1235 	void __iomem *mmio = pp->ctl_block;
1236 
1237 	/* Go to register mode - clears GO */
1238 	nv_adma_register_mode(ap);
1239 
1240 	/* clear CPB fetch count */
1241 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1242 
1243 	/* disable interrupt, shut down port */
1244 	writew(0, mmio + NV_ADMA_CTL);
1245 
1246 	return 0;
1247 }
1248 
1249 static int nv_adma_port_resume(struct ata_port *ap)
1250 {
1251 	struct nv_adma_port_priv *pp = ap->private_data;
1252 	void __iomem *mmio = pp->ctl_block;
1253 	u16 tmp;
1254 
1255 	/* set CPB block location */
1256 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1257 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1258 
1259 	/* clear any outstanding interrupt conditions */
1260 	writew(0xffff, mmio + NV_ADMA_STAT);
1261 
1262 	/* initialize port variables */
1263 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1264 
1265 	/* clear CPB fetch count */
1266 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1267 
1268 	/* clear GO for register mode, enable interrupt */
1269 	tmp = readw(mmio + NV_ADMA_CTL);
1270 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1271 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1272 
1273 	tmp = readw(mmio + NV_ADMA_CTL);
1274 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1275 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1276 	udelay(1);
1277 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1278 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1279 
1280 	return 0;
1281 }
1282 #endif
1283 
1284 static void nv_adma_setup_port(struct ata_port *ap)
1285 {
1286 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1287 	struct ata_ioports *ioport = &ap->ioaddr;
1288 
1289 	VPRINTK("ENTER\n");
1290 
1291 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1292 
1293 	ioport->cmd_addr	= mmio;
1294 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1295 	ioport->error_addr	=
1296 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1297 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1298 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1299 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1300 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1301 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1302 	ioport->status_addr	=
1303 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1304 	ioport->altstatus_addr	=
1305 	ioport->ctl_addr	= mmio + 0x20;
1306 }
1307 
1308 static int nv_adma_host_init(struct ata_host *host)
1309 {
1310 	struct pci_dev *pdev = to_pci_dev(host->dev);
1311 	unsigned int i;
1312 	u32 tmp32;
1313 
1314 	VPRINTK("ENTER\n");
1315 
1316 	/* enable ADMA on the ports */
1317 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1318 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1319 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1320 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1321 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1322 
1323 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1324 
1325 	for (i = 0; i < host->n_ports; i++)
1326 		nv_adma_setup_port(host->ports[i]);
1327 
1328 	return 0;
1329 }
1330 
1331 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1332 			      struct scatterlist *sg,
1333 			      int idx,
1334 			      struct nv_adma_prd *aprd)
1335 {
1336 	u8 flags = 0;
1337 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1338 		flags |= NV_APRD_WRITE;
1339 	if (idx == qc->n_elem - 1)
1340 		flags |= NV_APRD_END;
1341 	else if (idx != 4)
1342 		flags |= NV_APRD_CONT;
1343 
1344 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1345 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1346 	aprd->flags = flags;
1347 	aprd->packet_len = 0;
1348 }
1349 
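/*
 * The first five S/G entries land in the CPB itself (cpb->aprd[0..4]); any
 * remainder spills into this tag's slice of the external APRD table.  As a
 * sketch, tag t with 8 segments uses cpb->aprd[0..4] plus
 * pp->aprd[NV_ADMA_SGTBL_LEN * t + 0..2], with next_aprd pointing at
 * pp->aprd_dma + NV_ADMA_SGTBL_SZ * t.
 */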
1350 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1351 {
1352 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1353 	struct nv_adma_prd *aprd;
1354 	struct scatterlist *sg;
1355 	unsigned int si;
1356 
1357 	VPRINTK("ENTER\n");
1358 
1359 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1360 		aprd = (si < 5) ? &cpb->aprd[si] :
1361 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1362 		nv_adma_fill_aprd(qc, sg, si, aprd);
1363 	}
1364 	if (si > 5)
1365 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1366 	else
1367 		cpb->next_aprd = cpu_to_le64(0);
1368 }
1369 
1370 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1371 {
1372 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1373 
1374 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1375 	   or interrupt-driven no-data commands. */
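	/* In practice: ATAPI and polled commands always take the legacy
	   path, while DMA-mapped transfers and interrupt-driven non-data
	   commands may go through ADMA. */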
1376 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1377 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1378 		return 1;
1379 
1380 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1381 	   (qc->tf.protocol == ATA_PROT_NODATA))
1382 		return 0;
1383 
1384 	return 1;
1385 }
1386 
1387 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1388 {
1389 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1390 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1391 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1392 		       NV_CPB_CTL_IEN;
1393 
1394 	if (nv_adma_use_reg_mode(qc)) {
1395 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1396 			(qc->flags & ATA_QCFLAG_DMAMAP));
1397 		nv_adma_register_mode(qc->ap);
1398 		ata_bmdma_qc_prep(qc);
1399 		return;
1400 	}
1401 
1402 	cpb->resp_flags = NV_CPB_RESP_DONE;
1403 	wmb();
1404 	cpb->ctl_flags = 0;
1405 	wmb();
1406 
1407 	cpb->len		= 3;
1408 	cpb->tag		= qc->tag;
1409 	cpb->next_cpb_idx	= 0;
1410 
1411 	/* turn on NCQ flags for NCQ commands */
1412 	if (qc->tf.protocol == ATA_PROT_NCQ)
1413 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1414 
1415 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1416 
1417 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1418 
1419 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1420 		nv_adma_fill_sg(qc, cpb);
1421 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1422 	} else
1423 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1424 
1425 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1426 	   until we are finished filling in all of the contents */
1427 	wmb();
1428 	cpb->ctl_flags = ctl_flags;
1429 	wmb();
1430 	cpb->resp_flags = 0;
1431 }
1432 
1433 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1434 {
1435 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1436 	void __iomem *mmio = pp->ctl_block;
1437 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1438 
1439 	VPRINTK("ENTER\n");
1440 
1441 	/* We can't handle result taskfile with NCQ commands, since
1442 	   retrieving the taskfile switches us out of ADMA mode and would abort
1443 	   existing commands. */
1444 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1445 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1446 		ata_dev_printk(qc->dev, KERN_ERR,
1447 			"NCQ w/ RESULT_TF not allowed\n");
1448 		return AC_ERR_SYSTEM;
1449 	}
1450 
1451 	if (nv_adma_use_reg_mode(qc)) {
1452 		/* use ATA register mode */
1453 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1454 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1455 			(qc->flags & ATA_QCFLAG_DMAMAP));
1456 		nv_adma_register_mode(qc->ap);
1457 		return ata_bmdma_qc_issue(qc);
1458 	} else
1459 		nv_adma_mode(qc->ap);
1460 
1461 	/* write append register, command tag in lower 8 bits
1462 	   and (number of cpbs to append -1) in top 8 bits */
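	/* (qc->tag fits in the low byte, so the high byte stays zero and
	   exactly one CPB is appended per issue.) */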
1463 	wmb();
1464 
1465 	if (curr_ncq != pp->last_issue_ncq) {
1466 		/* Seems to need some delay before switching between NCQ and
1467 		   non-NCQ commands, else we get command timeouts and such. */
1468 		udelay(20);
1469 		pp->last_issue_ncq = curr_ncq;
1470 	}
1471 
1472 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1473 
1474 	DPRINTK("Issued tag %u\n", qc->tag);
1475 
1476 	return 0;
1477 }
1478 
1479 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1480 {
1481 	struct ata_host *host = dev_instance;
1482 	unsigned int i;
1483 	unsigned int handled = 0;
1484 	unsigned long flags;
1485 
1486 	spin_lock_irqsave(&host->lock, flags);
1487 
1488 	for (i = 0; i < host->n_ports; i++) {
1489 		struct ata_port *ap = host->ports[i];
1490 		struct ata_queued_cmd *qc;
1491 
1492 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1493 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1494 			handled += ata_bmdma_port_intr(ap, qc);
1495 		} else {
1496 			/*
1497 			 * No request pending?  Clear interrupt status
1498 			 * anyway, in case there's one pending.
1499 			 */
1500 			ap->ops->sff_check_status(ap);
1501 		}
1502 	}
1503 
1504 	spin_unlock_irqrestore(&host->lock, flags);
1505 
1506 	return IRQ_RETVAL(handled);
1507 }
1508 
1509 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1510 {
1511 	int i, handled = 0;
1512 
1513 	for (i = 0; i < host->n_ports; i++) {
1514 		handled += nv_host_intr(host->ports[i], irq_stat);
1515 		irq_stat >>= NV_INT_PORT_SHIFT;
1516 	}
1517 
1518 	return IRQ_RETVAL(handled);
1519 }
1520 
1521 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1522 {
1523 	struct ata_host *host = dev_instance;
1524 	u8 irq_stat;
1525 	irqreturn_t ret;
1526 
1527 	spin_lock(&host->lock);
1528 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1529 	ret = nv_do_interrupt(host, irq_stat);
1530 	spin_unlock(&host->lock);
1531 
1532 	return ret;
1533 }
1534 
1535 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1536 {
1537 	struct ata_host *host = dev_instance;
1538 	u8 irq_stat;
1539 	irqreturn_t ret;
1540 
1541 	spin_lock(&host->lock);
1542 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1543 	ret = nv_do_interrupt(host, irq_stat);
1544 	spin_unlock(&host->lock);
1545 
1546 	return ret;
1547 }
1548 
1549 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1550 {
1551 	if (sc_reg > SCR_CONTROL)
1552 		return -EINVAL;
1553 
1554 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1555 	return 0;
1556 }
1557 
1558 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1559 {
1560 	if (sc_reg > SCR_CONTROL)
1561 		return -EINVAL;
1562 
1563 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1564 	return 0;
1565 }
1566 
1567 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1568 			unsigned long deadline)
1569 {
1570 	struct ata_eh_context *ehc = &link->eh_context;
1571 
1572 	/* Do hardreset iff it's post-boot probing; see the
1573 	 * comment above port ops for details.
1574 	 */
1575 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1576 	    !ata_dev_enabled(link->device))
1577 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1578 				    NULL, NULL);
1579 	else {
1580 		const unsigned long *timing = sata_ehc_deb_timing(ehc);
1581 		int rc;
1582 
1583 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1584 			ata_link_printk(link, KERN_INFO, "nv: skipping "
1585 					"hardreset on occupied port\n");
1586 
1587 		/* make sure the link is online */
1588 		rc = sata_link_resume(link, timing, deadline);
1589 		/* whine about phy resume failure but proceed */
1590 		if (rc && rc != -EOPNOTSUPP)
1591 			ata_link_printk(link, KERN_WARNING, "failed to resume "
1592 					"link (errno=%d)\n", rc);
1593 	}
1594 
1595 	/* device signature acquisition is unreliable */
1596 	return -EAGAIN;
1597 }
1598 
1599 static void nv_nf2_freeze(struct ata_port *ap)
1600 {
1601 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1602 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1603 	u8 mask;
1604 
1605 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1606 	mask &= ~(NV_INT_ALL << shift);
1607 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1608 }
1609 
1610 static void nv_nf2_thaw(struct ata_port *ap)
1611 {
1612 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1613 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1614 	u8 mask;
1615 
1616 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1617 
1618 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1619 	mask |= (NV_INT_MASK << shift);
1620 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1621 }
1622 
1623 static void nv_ck804_freeze(struct ata_port *ap)
1624 {
1625 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1626 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1627 	u8 mask;
1628 
1629 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1630 	mask &= ~(NV_INT_ALL << shift);
1631 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1632 }
1633 
1634 static void nv_ck804_thaw(struct ata_port *ap)
1635 {
1636 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1637 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1638 	u8 mask;
1639 
1640 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1641 
1642 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1643 	mask |= (NV_INT_MASK << shift);
1644 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1645 }
1646 
1647 static void nv_mcp55_freeze(struct ata_port *ap)
1648 {
1649 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1650 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1651 	u32 mask;
1652 
1653 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1654 
1655 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1656 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1657 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1658 }
1659 
1660 static void nv_mcp55_thaw(struct ata_port *ap)
1661 {
1662 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1663 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1664 	u32 mask;
1665 
1666 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1667 
1668 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1669 	mask |= (NV_INT_MASK_MCP55 << shift);
1670 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1671 }
1672 
1673 static void nv_adma_error_handler(struct ata_port *ap)
1674 {
1675 	struct nv_adma_port_priv *pp = ap->private_data;
1676 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1677 		void __iomem *mmio = pp->ctl_block;
1678 		int i;
1679 		u16 tmp;
1680 
1681 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1682 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1683 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1684 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1685 			u32 status = readw(mmio + NV_ADMA_STAT);
1686 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1687 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1688 
1689 			ata_port_printk(ap, KERN_ERR,
1690 				"EH in ADMA mode, notifier 0x%X "
1691 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1692 				"next cpb count 0x%X next cpb idx 0x%x\n",
1693 				notifier, notifier_error, gen_ctl, status,
1694 				cpb_count, next_cpb_idx);
1695 
1696 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1697 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1698 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1699 				    ap->link.sactive & (1 << i))
1700 					ata_port_printk(ap, KERN_ERR,
1701 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1702 						i, cpb->ctl_flags, cpb->resp_flags);
1703 			}
1704 		}
1705 
1706 		/* Push us back into port register mode for error handling. */
1707 		nv_adma_register_mode(ap);
1708 
1709 		/* Mark all of the CPBs as invalid to prevent them from
1710 		 * being executed */
1711 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1712 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1713 
1714 		/* clear CPB fetch count */
1715 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1716 
1717 		/* Reset channel */
1718 		tmp = readw(mmio + NV_ADMA_CTL);
1719 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1720 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1721 		udelay(1);
1722 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1723 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1724 	}
1725 
1726 	ata_bmdma_error_handler(ap);
1727 }
1728 
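/* Push a queued command onto the software defer queue. */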
1729 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1730 {
1731 	struct nv_swncq_port_priv *pp = ap->private_data;
1732 	struct defer_queue *dq = &pp->defer_queue;
1733 
1734 	/* queue is full */
1735 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1736 	dq->defer_bits |= (1 << qc->tag);
1737 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1738 }
1739 
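/* Pop the oldest deferred command, or return NULL if the queue is empty. */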
1740 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1741 {
1742 	struct nv_swncq_port_priv *pp = ap->private_data;
1743 	struct defer_queue *dq = &pp->defer_queue;
1744 	unsigned int tag;
1745 
1746 	if (dq->head == dq->tail)	/* null queue */
1747 		return NULL;
1748 
1749 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1750 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1751 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1752 	dq->defer_bits &= ~(1 << tag);
1753 
1754 	return ata_qc_from_tag(ap, tag);
1755 }
1756 
1757 static void nv_swncq_fis_reinit(struct ata_port *ap)
1758 {
1759 	struct nv_swncq_port_priv *pp = ap->private_data;
1760 
1761 	pp->dhfis_bits = 0;
1762 	pp->dmafis_bits = 0;
1763 	pp->sdbfis_bits = 0;
1764 	pp->ncq_flags = 0;
1765 }
1766 
1767 static void nv_swncq_pp_reinit(struct ata_port *ap)
1768 {
1769 	struct nv_swncq_port_priv *pp = ap->private_data;
1770 	struct defer_queue *dq = &pp->defer_queue;
1771 
1772 	dq->head = 0;
1773 	dq->tail = 0;
1774 	dq->defer_bits = 0;
1775 	pp->qc_active = 0;
1776 	pp->last_issue_tag = ATA_TAG_POISON;
1777 	nv_swncq_fis_reinit(ap);
1778 }
1779 
1780 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1781 {
1782 	struct nv_swncq_port_priv *pp = ap->private_data;
1783 
1784 	writew(fis, pp->irq_block);
1785 }
1786 
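/*
 * ata_bmdma_stop() only looks at qc->ap, so a dummy qc on the stack is
 * enough to stop the BMDMA engine on this port.
 */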
1787 static void __ata_bmdma_stop(struct ata_port *ap)
1788 {
1789 	struct ata_queued_cmd qc;
1790 
1791 	qc.ap = ap;
1792 	ata_bmdma_stop(&qc);
1793 }
1794 
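/*
 * Dump the SWNCQ bookkeeping state for debugging, then quiesce the port:
 * reset the per-port state, clear pending interrupts and stop BMDMA.
 */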
1795 static void nv_swncq_ncq_stop(struct ata_port *ap)
1796 {
1797 	struct nv_swncq_port_priv *pp = ap->private_data;
1798 	unsigned int i;
1799 	u32 sactive;
1800 	u32 done_mask;
1801 
1802 	ata_port_printk(ap, KERN_ERR,
1803 			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1804 			ap->qc_active, ap->link.sactive);
1805 	ata_port_printk(ap, KERN_ERR,
1806 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1807 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1808 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1809 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1810 
1811 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1812 			ap->ops->sff_check_status(ap),
1813 			ioread8(ap->ioaddr.error_addr));
1814 
1815 	sactive = readl(pp->sactive_block);
1816 	done_mask = pp->qc_active ^ sactive;
1817 
1818 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1819 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1820 		u8 err = 0;
1821 		if (pp->qc_active & (1 << i))
1822 			err = 0;
1823 		else if (done_mask & (1 << i))
1824 			err = 1;
1825 		else
1826 			continue;
1827 
1828 		ata_port_printk(ap, KERN_ERR,
1829 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1830 				(pp->dhfis_bits >> i) & 0x1,
1831 				(pp->dmafis_bits >> i) & 0x1,
1832 				(pp->sdbfis_bits >> i) & 0x1,
1833 				(sactive >> i) & 0x1,
1834 				(err ? "error! tag doesn't exist" : " "));
1835 	}
1836 
1837 	nv_swncq_pp_reinit(ap);
1838 	ap->ops->sff_irq_clear(ap);
1839 	__ata_bmdma_stop(ap);
1840 	nv_swncq_irq_clear(ap, 0xffff);
1841 }
1842 
1843 static void nv_swncq_error_handler(struct ata_port *ap)
1844 {
1845 	struct ata_eh_context *ehc = &ap->link.eh_context;
1846 
1847 	if (ap->link.sactive) {
1848 		nv_swncq_ncq_stop(ap);
1849 		ehc->i.action |= ATA_EH_RESET;
1850 	}
1851 
1852 	ata_bmdma_error_handler(ap);
1853 }
1854 
1855 #ifdef CONFIG_PM
1856 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1857 {
1858 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1859 	u32 tmp;
1860 
1861 	/* clear irq */
1862 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1863 
1864 	/* disable irq */
1865 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1866 
1867 	/* disable swncq */
1868 	tmp = readl(mmio + NV_CTL_MCP55);
1869 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1870 	writel(tmp, mmio + NV_CTL_MCP55);
1871 
1872 	return 0;
1873 }
1874 
1875 static int nv_swncq_port_resume(struct ata_port *ap)
1876 {
1877 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1878 	u32 tmp;
1879 
1880 	/* clear irq */
1881 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1882 
1883 	/* enable irq */
1884 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1885 
1886 	/* enable swncq */
1887 	tmp = readl(mmio + NV_CTL_MCP55);
1888 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1889 
1890 	return 0;
1891 }
1892 #endif
1893 
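/*
 * Per-host SWNCQ setup: clear the ECO 398 bit, enable SWNCQ on both
 * ports and unmask the port interrupts.
 */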
1894 static void nv_swncq_host_init(struct ata_host *host)
1895 {
1896 	u32 tmp;
1897 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1898 	struct pci_dev *pdev = to_pci_dev(host->dev);
1899 	u8 regval;
1900 
1901 	/* disable ECO 398 */
1902 	pci_read_config_byte(pdev, 0x7f, &regval);
1903 	regval &= ~(1 << 7);
1904 	pci_write_config_byte(pdev, 0x7f, regval);
1905 
1906 	/* enable swncq */
1907 	tmp = readl(mmio + NV_CTL_MCP55);
1908 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1909 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1910 
1911 	/* enable irq intr */
1912 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1913 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1914 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1915 
1916 	/* clear port irq */
1917 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1918 }
1919 
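/*
 * Drop the queue depth to 1 (effectively disabling NCQ) for Maxtor
 * drives attached to MCP51, or to MCP55 revisions up to A2.
 */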
1920 static int nv_swncq_slave_config(struct scsi_device *sdev)
1921 {
1922 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1923 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1924 	struct ata_device *dev;
1925 	int rc;
1926 	u8 rev;
1927 	u8 check_maxtor = 0;
1928 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1929 
1930 	rc = ata_scsi_slave_config(sdev);
1931 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1932 		/* Not a proper libata device, ignore */
1933 		return rc;
1934 
1935 	dev = &ap->link.device[sdev->id];
1936 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1937 		return rc;
1938 
1939 	/* if MCP51 and Maxtor, then disable ncq */
1940 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1941 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1942 		check_maxtor = 1;
1943 
1944 	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1945 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1946 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1947 		pci_read_config_byte(pdev, 0x8, &rev);
1948 		if (rev <= 0xa2)
1949 			check_maxtor = 1;
1950 	}
1951 
1952 	if (!check_maxtor)
1953 		return rc;
1954 
1955 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1956 
1957 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1958 		ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1959 		ata_dev_printk(dev, KERN_NOTICE,
1960 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1961 	}
1962 
1963 	return rc;
1964 }
1965 
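/*
 * Allocate per-port SWNCQ state: BMDMA resources for the non-NCQ
 * fallback path, one PRD table per tag, and cached pointers to the
 * SActive, interrupt status and tag register blocks.
 */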
1966 static int nv_swncq_port_start(struct ata_port *ap)
1967 {
1968 	struct device *dev = ap->host->dev;
1969 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1970 	struct nv_swncq_port_priv *pp;
1971 	int rc;
1972 
1973 	/* we might fallback to bmdma, allocate bmdma resources */
1974 	rc = ata_bmdma_port_start(ap);
1975 	if (rc)
1976 		return rc;
1977 
1978 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1979 	if (!pp)
1980 		return -ENOMEM;
1981 
1982 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1983 				      &pp->prd_dma, GFP_KERNEL);
1984 	if (!pp->prd)
1985 		return -ENOMEM;
1986 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1987 
1988 	ap->private_data = pp;
1989 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1990 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1991 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1992 
1993 	return 0;
1994 }
1995 
1996 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1997 {
1998 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1999 		ata_bmdma_qc_prep(qc);
2000 		return;
2001 	}
2002 
2003 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2004 		return;
2005 
2006 	nv_swncq_fill_sg(qc);
2007 }
2008 
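/*
 * Build the PRD table for an NCQ command's tag, splitting segments so
 * that no PRD entry crosses a 64K boundary.
 */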
2009 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2010 {
2011 	struct ata_port *ap = qc->ap;
2012 	struct scatterlist *sg;
2013 	struct nv_swncq_port_priv *pp = ap->private_data;
2014 	struct ata_bmdma_prd *prd;
2015 	unsigned int si, idx;
2016 
2017 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
2018 
2019 	idx = 0;
2020 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2021 		u32 addr, offset;
2022 		u32 sg_len, len;
2023 
2024 		addr = (u32)sg_dma_address(sg);
2025 		sg_len = sg_dma_len(sg);
2026 
2027 		while (sg_len) {
2028 			offset = addr & 0xffff;
2029 			len = sg_len;
2030 			if ((offset + sg_len) > 0x10000)
2031 				len = 0x10000 - offset;
2032 
2033 			prd[idx].addr = cpu_to_le32(addr);
2034 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2035 
2036 			idx++;
2037 			sg_len -= len;
2038 			addr += len;
2039 		}
2040 	}
2041 
2042 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2043 }
2044 
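/*
 * Issue one NCQ command: set its bit in SActive, update the driver's
 * tag bookkeeping, then load the taskfile and the command register.
 */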
2045 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2046 					  struct ata_queued_cmd *qc)
2047 {
2048 	struct nv_swncq_port_priv *pp = ap->private_data;
2049 
2050 	if (qc == NULL)
2051 		return 0;
2052 
2053 	DPRINTK("Enter\n");
2054 
2055 	writel((1 << qc->tag), pp->sactive_block);
2056 	pp->last_issue_tag = qc->tag;
2057 	pp->dhfis_bits &= ~(1 << qc->tag);
2058 	pp->dmafis_bits &= ~(1 << qc->tag);
2059 	pp->qc_active |= (0x1 << qc->tag);
2060 
2061 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2062 	ap->ops->sff_exec_command(ap, &qc->tf);
2063 
2064 	DPRINTK("Issued tag %u\n", qc->tag);
2065 
2066 	return 0;
2067 }
2068 
2069 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2070 {
2071 	struct ata_port *ap = qc->ap;
2072 	struct nv_swncq_port_priv *pp = ap->private_data;
2073 
2074 	if (qc->tf.protocol != ATA_PROT_NCQ)
2075 		return ata_bmdma_qc_issue(qc);
2076 
2077 	DPRINTK("Enter\n");
2078 
2079 	if (!pp->qc_active)
2080 		nv_swncq_issue_atacmd(ap, qc);
2081 	else
2082 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2083 
2084 	return 0;
2085 }
2086 
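/* Record a hot plug/unplug event and freeze the port for EH to handle it. */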
2087 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2088 {
2089 	u32 serror;
2090 	struct ata_eh_info *ehi = &ap->link.eh_info;
2091 
2092 	ata_ehi_clear_desc(ehi);
2093 
2094 	/* AHCI needs SError cleared; otherwise, it might lock up */
2095 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2096 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2097 
2098 	/* analyze @fis */
2099 	if (fis & NV_SWNCQ_IRQ_ADDED)
2100 		ata_ehi_push_desc(ehi, "hot plug");
2101 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2102 		ata_ehi_push_desc(ehi, "hot unplug");
2103 
2104 	ata_ehi_hotplugged(ehi);
2105 
2106 	/* okay, let's hand over to EH */
2107 	ehi->serror |= serror;
2108 
2109 	ata_port_freeze(ap);
2110 }
2111 
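/*
 * Handle a Set Device Bits FIS: complete the commands that have left
 * SActive, then either reissue the last command (if its D2H Register
 * FIS never arrived) or kick off the next deferred command.
 */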
2112 static int nv_swncq_sdbfis(struct ata_port *ap)
2113 {
2114 	struct ata_queued_cmd *qc;
2115 	struct nv_swncq_port_priv *pp = ap->private_data;
2116 	struct ata_eh_info *ehi = &ap->link.eh_info;
2117 	u32 sactive;
2118 	u32 done_mask;
2119 	u8 host_stat;
2120 	u8 lack_dhfis = 0;
2121 
2122 	host_stat = ap->ops->bmdma_status(ap);
2123 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2124 		/* error when transferring data to/from memory */
2125 		ata_ehi_clear_desc(ehi);
2126 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2127 		ehi->err_mask |= AC_ERR_HOST_BUS;
2128 		ehi->action |= ATA_EH_RESET;
2129 		return -EINVAL;
2130 	}
2131 
2132 	ap->ops->sff_irq_clear(ap);
2133 	__ata_bmdma_stop(ap);
2134 
2135 	sactive = readl(pp->sactive_block);
2136 	done_mask = pp->qc_active ^ sactive;
2137 
2138 	pp->qc_active &= ~done_mask;
2139 	pp->dhfis_bits &= ~done_mask;
2140 	pp->dmafis_bits &= ~done_mask;
2141 	pp->sdbfis_bits |= done_mask;
2142 	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2143 
2144 	if (!ap->qc_active) {
2145 		DPRINTK("over\n");
2146 		nv_swncq_pp_reinit(ap);
2147 		return 0;
2148 	}
2149 
2150 	if (pp->qc_active & pp->dhfis_bits)
2151 		return 0;
2152 
2153 	if ((pp->ncq_flags & ncq_saw_backout) ||
2154 	    (pp->qc_active ^ pp->dhfis_bits))
2155 		/* if the controller did not receive a Device-to-Host Register
2156 		 * FIS, the driver needs to reissue the command.
2157 		 */
2158 		lack_dhfis = 1;
2159 
2160 	DPRINTK("id 0x%x QC: qc_active 0x%x, "
2161 		"SWNCQ:qc_active 0x%X defer_bits %X "
2162 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2163 		ap->print_id, ap->qc_active, pp->qc_active,
2164 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2165 		pp->dmafis_bits, pp->last_issue_tag);
2166 
2167 	nv_swncq_fis_reinit(ap);
2168 
2169 	if (lack_dhfis) {
2170 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2171 		nv_swncq_issue_atacmd(ap, qc);
2172 		return 0;
2173 	}
2174 
2175 	if (pp->defer_queue.defer_bits) {
2176 		/* send deferral queue command */
2177 		qc = nv_swncq_qc_from_dq(ap);
2178 		WARN_ON(qc == NULL);
2179 		nv_swncq_issue_atacmd(ap, qc);
2180 	}
2181 
2182 	return 0;
2183 }
2184 
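/* Read back the tag of the command the controller wants to DMA next. */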
2185 static inline u32 nv_swncq_tag(struct ata_port *ap)
2186 {
2187 	struct nv_swncq_port_priv *pp = ap->private_data;
2188 	u32 tag;
2189 
2190 	tag = readb(pp->tag_block) >> 2;
2191 	return (tag & 0x1f);
2192 }
2193 
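/*
 * A DMA Setup FIS arrived: point the BMDMA engine at the PRD table for
 * the reported tag, set the transfer direction and start the transfer.
 */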
2194 static void nv_swncq_dmafis(struct ata_port *ap)
2195 {
2196 	struct ata_queued_cmd *qc;
2197 	unsigned int rw;
2198 	u8 dmactl;
2199 	u32 tag;
2200 	struct nv_swncq_port_priv *pp = ap->private_data;
2201 
2202 	__ata_bmdma_stop(ap);
2203 	tag = nv_swncq_tag(ap);
2204 
2205 	DPRINTK("dma setup tag 0x%x\n", tag);
2206 	qc = ata_qc_from_tag(ap, tag);
2207 
2208 	if (unlikely(!qc))
2209 		return;
2210 
2211 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2212 
2213 	/* load PRD table addr. */
2214 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2215 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2216 
2217 	/* specify data direction, triple-check start bit is clear */
2218 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2219 	dmactl &= ~ATA_DMA_WR;
2220 	if (!rw)
2221 		dmactl |= ATA_DMA_WR;
2222 
2223 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2224 }
2225 
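/*
 * Per-port SWNCQ interrupt handling: deal with hotplug and device errors
 * first, then drive the NCQ state machine from the D2H Register, DMA
 * Setup and Set Device Bits FIS notification bits.
 */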
2226 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2227 {
2228 	struct nv_swncq_port_priv *pp = ap->private_data;
2229 	struct ata_queued_cmd *qc;
2230 	struct ata_eh_info *ehi = &ap->link.eh_info;
2231 	u32 serror;
2232 	u8 ata_stat;
2233 
2234 	ata_stat = ap->ops->sff_check_status(ap);
2235 	nv_swncq_irq_clear(ap, fis);
2236 	if (!fis)
2237 		return;
2238 
2239 	if (ap->pflags & ATA_PFLAG_FROZEN)
2240 		return;
2241 
2242 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2243 		nv_swncq_hotplug(ap, fis);
2244 		return;
2245 	}
2246 
2247 	if (!pp->qc_active)
2248 		return;
2249 
2250 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2251 		return;
2252 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2253 
2254 	if (ata_stat & ATA_ERR) {
2255 		ata_ehi_clear_desc(ehi);
2256 		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2257 		ehi->err_mask |= AC_ERR_DEV;
2258 		ehi->serror |= serror;
2259 		ehi->action |= ATA_EH_RESET;
2260 		ata_port_freeze(ap);
2261 		return;
2262 	}
2263 
2264 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2265 		/* If a backout IRQ was received, the driver must
2266 		 * reissue the command some time later.
2267 		 */
2268 		pp->ncq_flags |= ncq_saw_backout;
2269 	}
2270 
2271 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2272 		pp->ncq_flags |= ncq_saw_sdb;
2273 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2274 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2275 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2276 			pp->dmafis_bits, readl(pp->sactive_block));
2277 		if (nv_swncq_sdbfis(ap) < 0)
2278 			goto irq_error;
2279 	}
2280 
2281 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2282 		/* The interrupt indicates the new command
2283 		 * was transmitted correctly to the drive.
2284 		 */
2285 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2286 		pp->ncq_flags |= ncq_saw_d2h;
2287 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2288 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2289 			ehi->err_mask |= AC_ERR_HSM;
2290 			ehi->action |= ATA_EH_RESET;
2291 			goto irq_error;
2292 		}
2293 
2294 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2295 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2296 			ata_stat = ap->ops->sff_check_status(ap);
2297 			if (ata_stat & ATA_BUSY)
2298 				goto irq_exit;
2299 
2300 			if (pp->defer_queue.defer_bits) {
2301 				DPRINTK("send next command\n");
2302 				qc = nv_swncq_qc_from_dq(ap);
2303 				nv_swncq_issue_atacmd(ap, qc);
2304 			}
2305 		}
2306 	}
2307 
2308 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2309 		/* program the DMA controller with the appropriate PRD buffers
2310 		 * and start the DMA transfer for the requested command.
2311 		 */
2312 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2313 		pp->ncq_flags |= ncq_saw_dmas;
2314 		nv_swncq_dmafis(ap);
2315 	}
2316 
2317 irq_exit:
2318 	return;
2319 irq_error:
2320 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2321 	ata_port_freeze(ap);
2322 	return;
2323 }
2324 
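/*
 * Top-level SWNCQ interrupt handler: ports with NCQ commands in flight
 * take the SWNCQ path; idle ports fall back to the generic handler.
 */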
2325 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2326 {
2327 	struct ata_host *host = dev_instance;
2328 	unsigned int i;
2329 	unsigned int handled = 0;
2330 	unsigned long flags;
2331 	u32 irq_stat;
2332 
2333 	spin_lock_irqsave(&host->lock, flags);
2334 
2335 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2336 
2337 	for (i = 0; i < host->n_ports; i++) {
2338 		struct ata_port *ap = host->ports[i];
2339 
2340 		if (ap->link.sactive) {
2341 			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2342 			handled = 1;
2343 		} else {
2344 			if (irq_stat)	/* preserve hotplug bits */
2345 				nv_swncq_irq_clear(ap, 0xfff0);
2346 
2347 			handled += nv_host_intr(ap, (u8)irq_stat);
2348 		}
2349 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2350 	}
2351 
2352 	spin_unlock_irqrestore(&host->lock, flags);
2353 
2354 	return IRQ_RETVAL(handled);
2355 }
2356 
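/*
 * PCI probe: select ADMA, SWNCQ or legacy mode for the controller type,
 * map BAR 5 and activate the host.
 */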
2357 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2358 {
2359 	static int printed_version;
2360 	const struct ata_port_info *ppi[] = { NULL, NULL };
2361 	struct nv_pi_priv *ipriv;
2362 	struct ata_host *host;
2363 	struct nv_host_priv *hpriv;
2364 	int rc;
2365 	u32 bar;
2366 	void __iomem *base;
2367 	unsigned long type = ent->driver_data;
2368 
2369 	/* Make sure this is a SATA controller by counting the number of BARs
2370 	 * (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2371 	 * it's an IDE controller and we ignore it. */
2372 	for (bar = 0; bar < 6; bar++)
2373 		if (pci_resource_start(pdev, bar) == 0)
2374 			return -ENODEV;
2375 
2376 	if (!printed_version++)
2377 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2378 
2379 	rc = pcim_enable_device(pdev);
2380 	if (rc)
2381 		return rc;
2382 
2383 	/* determine type and allocate host */
2384 	if (type == CK804 && adma_enabled) {
2385 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2386 		type = ADMA;
2387 	} else if (type == MCP5x && swncq_enabled) {
2388 		dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2389 		type = SWNCQ;
2390 	}
2391 
2392 	ppi[0] = &nv_port_info[type];
2393 	ipriv = ppi[0]->private_data;
2394 	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2395 	if (rc)
2396 		return rc;
2397 
2398 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2399 	if (!hpriv)
2400 		return -ENOMEM;
2401 	hpriv->type = type;
2402 	host->private_data = hpriv;
2403 
2404 	/* request and iomap NV_MMIO_BAR */
2405 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2406 	if (rc)
2407 		return rc;
2408 
2409 	/* configure SCR access */
2410 	base = host->iomap[NV_MMIO_BAR];
2411 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2412 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2413 
2414 	/* enable SATA space for CK804 */
2415 	if (type >= CK804) {
2416 		u8 regval;
2417 
2418 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2419 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2420 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2421 	}
2422 
2423 	/* init ADMA */
2424 	if (type == ADMA) {
2425 		rc = nv_adma_host_init(host);
2426 		if (rc)
2427 			return rc;
2428 	} else if (type == SWNCQ)
2429 		nv_swncq_host_init(host);
2430 
2431 	if (msi_enabled) {
2432 		dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2433 		pci_enable_msi(pdev);
2434 	}
2435 
2436 	pci_set_master(pdev);
2437 	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2438 }
2439 
2440 #ifdef CONFIG_PM
2441 static int nv_pci_device_resume(struct pci_dev *pdev)
2442 {
2443 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2444 	struct nv_host_priv *hpriv = host->private_data;
2445 	int rc;
2446 
2447 	rc = ata_pci_device_do_resume(pdev);
2448 	if (rc)
2449 		return rc;
2450 
2451 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2452 		if (hpriv->type >= CK804) {
2453 			u8 regval;
2454 
2455 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2456 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2457 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2458 		}
2459 		if (hpriv->type == ADMA) {
2460 			u32 tmp32;
2461 			struct nv_adma_port_priv *pp;
2462 			/* enable/disable ADMA on the ports appropriately */
2463 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2464 
2465 			pp = host->ports[0]->private_data;
2466 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2467 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2468 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2469 			else
2470 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2471 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2472 			pp = host->ports[1]->private_data;
2473 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2474 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2475 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2476 			else
2477 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2478 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2479 
2480 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2481 		}
2482 	}
2483 
2484 	ata_host_resume(host);
2485 
2486 	return 0;
2487 }
2488 #endif
2489 
2490 static void nv_ck804_host_stop(struct ata_host *host)
2491 {
2492 	struct pci_dev *pdev = to_pci_dev(host->dev);
2493 	u8 regval;
2494 
2495 	/* disable SATA space for CK804 */
2496 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2497 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2498 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2499 }
2500 
2501 static void nv_adma_host_stop(struct ata_host *host)
2502 {
2503 	struct pci_dev *pdev = to_pci_dev(host->dev);
2504 	u32 tmp32;
2505 
2506 	/* disable ADMA on the ports */
2507 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2508 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2509 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2510 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2511 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2512 
2513 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2514 
2515 	nv_ck804_host_stop(host);
2516 }
2517 
2518 static int __init nv_init(void)
2519 {
2520 	return pci_register_driver(&nv_pci_driver);
2521 }
2522 
2523 static void __exit nv_exit(void)
2524 {
2525 	pci_unregister_driver(&nv_pci_driver);
2526 }
2527 
2528 module_init(nv_init);
2529 module_exit(nv_exit);
2530 module_param_named(adma, adma_enabled, bool, 0444);
2531 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2532 module_param_named(swncq, swncq_enabled, bool, 0444);
2533 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2534 module_param_named(msi, msi_enabled, bool, 0444);
2535 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2536 
2537