xref: /openbmc/linux/drivers/ata/sata_nv.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
1 /*
2  *  sata_nv.c - NVIDIA nForce SATA
3  *
4  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
5  *  Copyright 2004 Andrew Chew
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2, or (at your option)
11  *  any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; see the file COPYING.  If not, write to
20  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  *  libata documentation is available via 'make {ps|pdf}docs',
24  *  as Documentation/DocBook/libata.*
25  *
26  *  No hardware documentation available outside of NVIDIA.
27  *  This driver programs the NVIDIA SATA controller in a fashion
28  *  similar to other PCI IDE BMDMA controllers, with a few
29  *  NV-specific details such as register offsets, SATA phy location,
30  *  hotplug info, etc.
31  *
32  *  CK804/MCP04 controllers support an alternate programming interface
33  *  similar to the ADMA specification (with some modifications).
34  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  *  sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50 
51 #define DRV_NAME			"sata_nv"
52 #define DRV_VERSION			"3.5"
53 
54 #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55 
56 enum {
57 	NV_MMIO_BAR			= 5,
58 
59 	NV_PORTS			= 2,
60 	NV_PIO_MASK			= 0x1f,
61 	NV_MWDMA_MASK			= 0x07,
62 	NV_UDMA_MASK			= 0x7f,
63 	NV_PORT0_SCR_REG_OFFSET		= 0x00,
64 	NV_PORT1_SCR_REG_OFFSET		= 0x40,
65 
66 	/* INT_STATUS/ENABLE */
67 	NV_INT_STATUS			= 0x10,
68 	NV_INT_ENABLE			= 0x11,
69 	NV_INT_STATUS_CK804		= 0x440,
70 	NV_INT_ENABLE_CK804		= 0x441,
71 
72 	/* INT_STATUS/ENABLE bits */
73 	NV_INT_DEV			= 0x01,
74 	NV_INT_PM			= 0x02,
75 	NV_INT_ADDED			= 0x04,
76 	NV_INT_REMOVED			= 0x08,
77 
78 	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
79 
80 	NV_INT_ALL			= 0x0f,
81 	NV_INT_MASK			= NV_INT_DEV |
82 					  NV_INT_ADDED | NV_INT_REMOVED,
83 
84 	/* INT_CONFIG */
85 	NV_INT_CONFIG			= 0x12,
86 	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
87 
88 	// For PCI config register 20
89 	NV_MCP_SATA_CFG_20		= 0x50,
90 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
91 	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
92 	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
93 	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
94 	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
95 
96 	NV_ADMA_MAX_CPBS		= 32,
97 	NV_ADMA_CPB_SZ			= 128,
98 	NV_ADMA_APRD_SZ			= 16,
99 	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
100 					   NV_ADMA_APRD_SZ,
101 	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
102 	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
104 					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105 
106 	/* BAR5 offset to ADMA general registers */
107 	NV_ADMA_GEN			= 0x400,
108 	NV_ADMA_GEN_CTL			= 0x00,
109 	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
110 
111 	/* BAR5 offset to ADMA ports */
112 	NV_ADMA_PORT			= 0x480,
113 
114 	/* size of ADMA port register space  */
115 	NV_ADMA_PORT_SIZE		= 0x100,
116 
117 	/* ADMA port registers */
118 	NV_ADMA_CTL			= 0x40,
119 	NV_ADMA_CPB_COUNT		= 0x42,
120 	NV_ADMA_NEXT_CPB_IDX		= 0x43,
121 	NV_ADMA_STAT			= 0x44,
122 	NV_ADMA_CPB_BASE_LOW		= 0x48,
123 	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
124 	NV_ADMA_APPEND			= 0x50,
125 	NV_ADMA_NOTIFIER		= 0x68,
126 	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
127 
128 	/* NV_ADMA_CTL register bits */
129 	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
130 	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
131 	NV_ADMA_CTL_GO			= (1 << 7),
132 	NV_ADMA_CTL_AIEN		= (1 << 8),
133 	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
134 	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
135 
136 	/* CPB response flag bits */
137 	NV_CPB_RESP_DONE		= (1 << 0),
138 	NV_CPB_RESP_ATA_ERR		= (1 << 3),
139 	NV_CPB_RESP_CMD_ERR		= (1 << 4),
140 	NV_CPB_RESP_CPB_ERR		= (1 << 7),
141 
142 	/* CPB control flag bits */
143 	NV_CPB_CTL_CPB_VALID		= (1 << 0),
144 	NV_CPB_CTL_QUEUE		= (1 << 1),
145 	NV_CPB_CTL_APRD_VALID		= (1 << 2),
146 	NV_CPB_CTL_IEN			= (1 << 3),
147 	NV_CPB_CTL_FPDMA		= (1 << 4),
148 
149 	/* APRD flags */
150 	NV_APRD_WRITE			= (1 << 1),
151 	NV_APRD_END			= (1 << 2),
152 	NV_APRD_CONT			= (1 << 3),
153 
154 	/* NV_ADMA_STAT flags */
155 	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
156 	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
157 	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
158 	NV_ADMA_STAT_CPBERR		= (1 << 4),
159 	NV_ADMA_STAT_SERROR		= (1 << 5),
160 	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
161 	NV_ADMA_STAT_IDLE		= (1 << 8),
162 	NV_ADMA_STAT_LEGACY		= (1 << 9),
163 	NV_ADMA_STAT_STOPPED		= (1 << 10),
164 	NV_ADMA_STAT_DONE		= (1 << 12),
165 	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
166 					  NV_ADMA_STAT_TIMEOUT,
167 
168 	/* port flags */
169 	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
170 	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
171 
172 	/* MCP55 reg offset */
173 	NV_CTL_MCP55			= 0x400,
174 	NV_INT_STATUS_MCP55		= 0x440,
175 	NV_INT_ENABLE_MCP55		= 0x444,
176 	NV_NCQ_REG_MCP55		= 0x448,
177 
178 	/* MCP55 */
179 	NV_INT_ALL_MCP55		= 0xffff,
180 	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
181 	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
182 
183 	/* SWNCQ ENABLE BITS */
184 	NV_CTL_PRI_SWNCQ		= 0x02,
185 	NV_CTL_SEC_SWNCQ		= 0x04,
186 
187 	/* SW NCQ status bits */
188 	NV_SWNCQ_IRQ_DEV		= (1 << 0),
189 	NV_SWNCQ_IRQ_PM			= (1 << 1),
190 	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
191 	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
192 
193 	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
194 	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
195 	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
196 	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
197 
198 	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
199 					  NV_SWNCQ_IRQ_REMOVED,
200 
201 };
202 
203 /* ADMA Physical Region Descriptor - one SG segment */
204 struct nv_adma_prd {
205 	__le64			addr;
206 	__le32			len;
207 	u8			flags;
208 	u8			packet_len;
209 	__le16			reserved;
210 };
211 
212 enum nv_adma_regbits {
213 	CMDEND	= (1 << 15),		/* end of command list */
214 	WNB	= (1 << 14),		/* wait-not-BSY */
215 	IGN	= (1 << 13),		/* ignore this entry */
216 	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
217 	DA2	= (1 << (2 + 8)),
218 	DA1	= (1 << (1 + 8)),
219 	DA0	= (1 << (0 + 8)),
220 };
221 
222 /* ADMA Command Parameter Block
223    The first 5 SG segments are stored inside the Command Parameter Block itself.
224    If there are more than 5 segments, the remainder is stored in a separate
225    memory area indicated by next_aprd. */
226 struct nv_adma_cpb {
227 	u8			resp_flags;    /* 0 */
228 	u8			reserved1;     /* 1 */
229 	u8			ctl_flags;     /* 2 */
230 	/* len is length of taskfile in 64 bit words */
231 	u8			len;		/* 3  */
232 	u8			tag;           /* 4 */
233 	u8			next_cpb_idx;  /* 5 */
234 	__le16			reserved2;     /* 6-7 */
235 	__le16			tf[12];        /* 8-31 */
236 	struct nv_adma_prd	aprd[5];       /* 32-111 */
237 	__le64			next_aprd;     /* 112-119 */
238 	__le64			reserved3;     /* 120-127 */
239 };
240 
241 
242 struct nv_adma_port_priv {
243 	struct nv_adma_cpb	*cpb;
244 	dma_addr_t		cpb_dma;
245 	struct nv_adma_prd	*aprd;
246 	dma_addr_t		aprd_dma;
247 	void __iomem		*ctl_block;
248 	void __iomem		*gen_block;
249 	void __iomem		*notifier_clear_block;
250 	u64			adma_dma_mask;
251 	u8			flags;
252 	int			last_issue_ncq;
253 };
254 
255 struct nv_host_priv {
256 	unsigned long		type;
257 };
258 
259 struct defer_queue {
260 	u32		defer_bits;
261 	unsigned int	head;
262 	unsigned int	tail;
263 	unsigned int	tag[ATA_MAX_QUEUE];
264 };
265 
266 enum ncq_saw_flag_list {
267 	ncq_saw_d2h	= (1U << 0),
268 	ncq_saw_dmas	= (1U << 1),
269 	ncq_saw_sdb	= (1U << 2),
270 	ncq_saw_backout	= (1U << 3),
271 };
272 
273 struct nv_swncq_port_priv {
274 	struct ata_prd	*prd;	 /* our SG list */
275 	dma_addr_t	prd_dma; /* and its DMA mapping */
276 	void __iomem	*sactive_block;
277 	void __iomem	*irq_block;
278 	void __iomem	*tag_block;
279 	u32		qc_active;
280 
281 	unsigned int	last_issue_tag;
282 
283 	/* FIFO circular queue to store deferred commands */
284 	struct defer_queue defer_queue;
285 
286 	/* for NCQ interrupt analysis */
287 	u32		dhfis_bits;
288 	u32		dmafis_bits;
289 	u32		sdbfis_bits;
290 
291 	unsigned int	ncq_flags;
292 };
293 
294 
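/*
 * Per-port ADMA interrupt pending bit in the ADMA general control/status
 * register: bit 19 for port 0, bit 31 (19 + 12) for port 1.
 */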
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296 
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307 
308 static void nv_nf2_freeze(struct ata_port *ap);
309 static void nv_nf2_thaw(struct ata_port *ap);
310 static void nv_ck804_freeze(struct ata_port *ap);
311 static void nv_ck804_thaw(struct ata_port *ap);
312 static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 			unsigned long deadline);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332 
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347 
348 enum nv_host_type
349 {
350 	GENERIC,
351 	NFORCE2,
352 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
353 	CK804,
354 	ADMA,
355 	SWNCQ,
356 };
357 
358 static const struct pci_device_id nv_pci_tbl[] = {
359 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
360 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
361 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
362 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
363 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
364 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
365 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
366 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
367 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
368 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
369 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
370 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
371 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
372 	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
373 
374 	{ } /* terminate list */
375 };
376 
377 static struct pci_driver nv_pci_driver = {
378 	.name			= DRV_NAME,
379 	.id_table		= nv_pci_tbl,
380 	.probe			= nv_init_one,
381 #ifdef CONFIG_PM
382 	.suspend		= ata_pci_device_suspend,
383 	.resume			= nv_pci_device_resume,
384 #endif
385 	.remove			= ata_pci_remove_one,
386 };
387 
388 static struct scsi_host_template nv_sht = {
389 	ATA_BMDMA_SHT(DRV_NAME),
390 };
391 
392 static struct scsi_host_template nv_adma_sht = {
393 	ATA_NCQ_SHT(DRV_NAME),
394 	.can_queue		= NV_ADMA_MAX_CPBS,
395 	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
396 	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
397 	.slave_configure	= nv_adma_slave_config,
398 };
399 
400 static struct scsi_host_template nv_swncq_sht = {
401 	ATA_NCQ_SHT(DRV_NAME),
402 	.can_queue		= ATA_MAX_QUEUE,
403 	.sg_tablesize		= LIBATA_MAX_PRD,
404 	.dma_boundary		= ATA_DMA_BOUNDARY,
405 	.slave_configure	= nv_swncq_slave_config,
406 };
407 
408 /* OSDL bz3352 reports that some nv controllers can't determine device
409  * signature reliably and nv_hardreset is implemented to work around
410  * the problem.  This was reported on nf3 and it's unclear whether any
411  * other controllers are affected.  However, the workaround has been
412  * applied to all variants and there isn't much to gain by trying to
413  * find out exactly which ones are affected at this point, especially
414  * because NV has moved over to ahci for newer controllers.
415  */
416 static struct ata_port_operations nv_common_ops = {
417 	.inherits		= &ata_bmdma_port_ops,
418 	.hardreset		= nv_hardreset,
419 	.scr_read		= nv_scr_read,
420 	.scr_write		= nv_scr_write,
421 };
422 
423 /* OSDL bz11195 reports that link doesn't come online after hardreset
424  * on generic nv's and there have been several other similar reports
425  * on linux-ide.  Disable hardreset for generic nv's.
426  */
427 static struct ata_port_operations nv_generic_ops = {
428 	.inherits		= &nv_common_ops,
429 	.hardreset		= ATA_OP_NULL,
430 };
431 
432 static struct ata_port_operations nv_nf2_ops = {
433 	.inherits		= &nv_common_ops,
434 	.freeze			= nv_nf2_freeze,
435 	.thaw			= nv_nf2_thaw,
436 };
437 
438 static struct ata_port_operations nv_ck804_ops = {
439 	.inherits		= &nv_common_ops,
440 	.freeze			= nv_ck804_freeze,
441 	.thaw			= nv_ck804_thaw,
442 	.host_stop		= nv_ck804_host_stop,
443 };
444 
445 static struct ata_port_operations nv_adma_ops = {
446 	.inherits		= &nv_common_ops,
447 
448 	.check_atapi_dma	= nv_adma_check_atapi_dma,
449 	.sff_tf_read		= nv_adma_tf_read,
450 	.qc_defer		= ata_std_qc_defer,
451 	.qc_prep		= nv_adma_qc_prep,
452 	.qc_issue		= nv_adma_qc_issue,
453 	.sff_irq_clear		= nv_adma_irq_clear,
454 
455 	.freeze			= nv_adma_freeze,
456 	.thaw			= nv_adma_thaw,
457 	.error_handler		= nv_adma_error_handler,
458 	.post_internal_cmd	= nv_adma_post_internal_cmd,
459 
460 	.port_start		= nv_adma_port_start,
461 	.port_stop		= nv_adma_port_stop,
462 #ifdef CONFIG_PM
463 	.port_suspend		= nv_adma_port_suspend,
464 	.port_resume		= nv_adma_port_resume,
465 #endif
466 	.host_stop		= nv_adma_host_stop,
467 };
468 
469 static struct ata_port_operations nv_swncq_ops = {
470 	.inherits		= &nv_common_ops,
471 
472 	.qc_defer		= ata_std_qc_defer,
473 	.qc_prep		= nv_swncq_qc_prep,
474 	.qc_issue		= nv_swncq_qc_issue,
475 
476 	.freeze			= nv_mcp55_freeze,
477 	.thaw			= nv_mcp55_thaw,
478 	.error_handler		= nv_swncq_error_handler,
479 
480 #ifdef CONFIG_PM
481 	.port_suspend		= nv_swncq_port_suspend,
482 	.port_resume		= nv_swncq_port_resume,
483 #endif
484 	.port_start		= nv_swncq_port_start,
485 };
486 
487 struct nv_pi_priv {
488 	irq_handler_t			irq_handler;
489 	struct scsi_host_template	*sht;
490 };
491 
492 #define NV_PI_PRIV(_irq_handler, _sht) \
493 	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
494 
495 static const struct ata_port_info nv_port_info[] = {
496 	/* generic */
497 	{
498 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
499 		.pio_mask	= NV_PIO_MASK,
500 		.mwdma_mask	= NV_MWDMA_MASK,
501 		.udma_mask	= NV_UDMA_MASK,
502 		.port_ops	= &nv_generic_ops,
503 		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
504 	},
505 	/* nforce2/3 */
506 	{
507 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
508 		.pio_mask	= NV_PIO_MASK,
509 		.mwdma_mask	= NV_MWDMA_MASK,
510 		.udma_mask	= NV_UDMA_MASK,
511 		.port_ops	= &nv_nf2_ops,
512 		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
513 	},
514 	/* ck804 */
515 	{
516 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
517 		.pio_mask	= NV_PIO_MASK,
518 		.mwdma_mask	= NV_MWDMA_MASK,
519 		.udma_mask	= NV_UDMA_MASK,
520 		.port_ops	= &nv_ck804_ops,
521 		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
522 	},
523 	/* ADMA */
524 	{
525 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
526 				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
527 		.pio_mask	= NV_PIO_MASK,
528 		.mwdma_mask	= NV_MWDMA_MASK,
529 		.udma_mask	= NV_UDMA_MASK,
530 		.port_ops	= &nv_adma_ops,
531 		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
532 	},
533 	/* SWNCQ */
534 	{
535 		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
536 				  ATA_FLAG_NCQ,
537 		.pio_mask	= NV_PIO_MASK,
538 		.mwdma_mask	= NV_MWDMA_MASK,
539 		.udma_mask	= NV_UDMA_MASK,
540 		.port_ops	= &nv_swncq_ops,
541 		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
542 	},
543 };
544 
545 MODULE_AUTHOR("NVIDIA");
546 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
547 MODULE_LICENSE("GPL");
548 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
549 MODULE_VERSION(DRV_VERSION);
550 
551 static int adma_enabled;
552 static int swncq_enabled = 1;
553 
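/*
 * Drop the port out of ADMA mode and back into legacy register mode: wait
 * for the engine to go idle, clear the GO bit, then wait for the LEGACY
 * status bit before marking the port as being in register mode.
 */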
554 static void nv_adma_register_mode(struct ata_port *ap)
555 {
556 	struct nv_adma_port_priv *pp = ap->private_data;
557 	void __iomem *mmio = pp->ctl_block;
558 	u16 tmp, status;
559 	int count = 0;
560 
561 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
562 		return;
563 
564 	status = readw(mmio + NV_ADMA_STAT);
565 	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
566 		ndelay(50);
567 		status = readw(mmio + NV_ADMA_STAT);
568 		count++;
569 	}
570 	if (count == 20)
571 		ata_port_printk(ap, KERN_WARNING,
572 			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
573 			status);
574 
575 	tmp = readw(mmio + NV_ADMA_CTL);
576 	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
577 
578 	count = 0;
579 	status = readw(mmio + NV_ADMA_STAT);
580 	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
581 		ndelay(50);
582 		status = readw(mmio + NV_ADMA_STAT);
583 		count++;
584 	}
585 	if (count == 20)
586 		ata_port_printk(ap, KERN_WARNING,
587 			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
588 			 status);
589 
590 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
591 }
592 
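/*
 * Switch the port from register mode into ADMA mode: set the GO bit and
 * wait for LEGACY to clear and IDLE to assert.  Must not be called while
 * the port is set up for ATAPI (which is restricted to the legacy engine).
 */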
593 static void nv_adma_mode(struct ata_port *ap)
594 {
595 	struct nv_adma_port_priv *pp = ap->private_data;
596 	void __iomem *mmio = pp->ctl_block;
597 	u16 tmp, status;
598 	int count = 0;
599 
600 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
601 		return;
602 
603 	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
604 
605 	tmp = readw(mmio + NV_ADMA_CTL);
606 	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
607 
608 	status = readw(mmio + NV_ADMA_STAT);
609 	while (((status & NV_ADMA_STAT_LEGACY) ||
610 	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
611 		ndelay(50);
612 		status = readw(mmio + NV_ADMA_STAT);
613 		count++;
614 	}
615 	if (count == 20)
616 		ata_port_printk(ap, KERN_WARNING,
617 			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
618 			status);
619 
620 	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
621 }
622 
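/*
 * Per-device configuration hook.  ADMA cannot be used with ATAPI devices
 * (see the comment below), so when one is attached the port is switched
 * back to the legacy interface, ADMA is disabled in PCI config register
 * 0x50, and the DMA mask, segment boundary and SG table size are
 * restricted to what the legacy engine can handle.
 */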
623 static int nv_adma_slave_config(struct scsi_device *sdev)
624 {
625 	struct ata_port *ap = ata_shost_to_port(sdev->host);
626 	struct nv_adma_port_priv *pp = ap->private_data;
627 	struct nv_adma_port_priv *port0, *port1;
628 	struct scsi_device *sdev0, *sdev1;
629 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
630 	unsigned long segment_boundary, flags;
631 	unsigned short sg_tablesize;
632 	int rc;
633 	int adma_enable;
634 	u32 current_reg, new_reg, config_mask;
635 
636 	rc = ata_scsi_slave_config(sdev);
637 
638 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
639 		/* Not a proper libata device, ignore */
640 		return rc;
641 
642 	spin_lock_irqsave(ap->lock, flags);
643 
644 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
645 		/*
646 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
647 		 * Therefore ATAPI commands are sent through the legacy interface.
648 		 * However, the legacy interface only supports 32-bit DMA.
649 		 * Restrict DMA parameters as required by the legacy interface
650 		 * when an ATAPI device is connected.
651 		 */
652 		segment_boundary = ATA_DMA_BOUNDARY;
653 		/* Subtract 1 since an extra entry may be needed for padding, see
654 		   libata-scsi.c */
655 		sg_tablesize = LIBATA_MAX_PRD - 1;
656 
657 		/* Since the legacy DMA engine is in use, we need to disable ADMA
658 		   on the port. */
659 		adma_enable = 0;
660 		nv_adma_register_mode(ap);
661 	} else {
662 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
663 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
664 		adma_enable = 1;
665 	}
666 
667 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
668 
669 	if (ap->port_no == 1)
670 		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
671 			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
672 	else
673 		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
674 			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
675 
676 	if (adma_enable) {
677 		new_reg = current_reg | config_mask;
678 		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
679 	} else {
680 		new_reg = current_reg & ~config_mask;
681 		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
682 	}
683 
684 	if (current_reg != new_reg)
685 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
686 
687 	port0 = ap->host->ports[0]->private_data;
688 	port1 = ap->host->ports[1]->private_data;
689 	sdev0 = ap->host->ports[0]->link.device[0].sdev;
690 	sdev1 = ap->host->ports[1]->link.device[0].sdev;
691 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
692 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
693 		/* We have to set the DMA mask to 32-bit if either port is in
694 		 * ATAPI mode, since they are on the same PCI device which is
695 		 * used for DMA mapping. If we set the mask we also need to set
696 		 * the bounce limit on both ports to ensure that the block
697 		 * layer doesn't feed addresses that cause DMA mapping to
698 		 * choke. If either SCSI device is not allocated yet, it's OK
699 		 * since that port will discover its correct setting when it
700 		 * does get allocated.
701 		 * Note: Setting 32-bit mask should not fail. */
702 		if (sdev0)
703 			blk_queue_bounce_limit(sdev0->request_queue,
704 					       ATA_DMA_MASK);
705 		if (sdev1)
706 			blk_queue_bounce_limit(sdev1->request_queue,
707 					       ATA_DMA_MASK);
708 
709 		pci_set_dma_mask(pdev, ATA_DMA_MASK);
710 	} else {
711 		/* This shouldn't fail as it was set to this value before */
712 		pci_set_dma_mask(pdev, pp->adma_dma_mask);
713 		if (sdev0)
714 			blk_queue_bounce_limit(sdev0->request_queue,
715 					       pp->adma_dma_mask);
716 		if (sdev1)
717 			blk_queue_bounce_limit(sdev1->request_queue,
718 					       pp->adma_dma_mask);
719 	}
720 
721 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
722 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
723 	ata_port_printk(ap, KERN_INFO,
724 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
725 		(unsigned long long)*ap->host->dev->dma_mask,
726 		segment_boundary, sg_tablesize);
727 
728 	spin_unlock_irqrestore(ap->lock, flags);
729 
730 	return rc;
731 }
732 
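/*
 * Tell libata whether ATAPI DMA may be used: it is allowed only after
 * slave_config has switched the port to the legacy interface
 * (NV_ADMA_ATAPI_SETUP_COMPLETE).
 */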
733 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
734 {
735 	struct nv_adma_port_priv *pp = qc->ap->private_data;
736 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
737 }
738 
739 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
740 {
741 	/* Other than when internal or pass-through commands are executed,
742 	   the only time this function will be called in ADMA mode will be
743 	   if a command fails. In the failure case we don't care about going
744 	   into register mode with ADMA commands pending, as the commands will
745 	   all shortly be aborted anyway. We assume that NCQ commands are not
746 	   issued via passthrough, which is the only way that switching into
747 	   ADMA mode could abort outstanding commands. */
748 	nv_adma_register_mode(ap);
749 
750 	ata_sff_tf_read(ap, tf);
751 }
752 
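/*
 * Translate an ATA taskfile into the CPB's array of 16-bit
 * (register << 8 | value) entries.  CMDEND marks the command register
 * write as the final entry and any unused slots are filled with IGN so
 * the controller ignores them.
 */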
753 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
754 {
755 	unsigned int idx = 0;
756 
757 	if (tf->flags & ATA_TFLAG_ISADDR) {
758 		if (tf->flags & ATA_TFLAG_LBA48) {
759 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
760 			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
761 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
762 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
763 			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
764 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
765 		} else
766 			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
767 
768 		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
769 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
770 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
771 		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
772 	}
773 
774 	if (tf->flags & ATA_TFLAG_DEVICE)
775 		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
776 
777 	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
778 
779 	while (idx < 12)
780 		cpb[idx++] = cpu_to_le16(IGN);
781 
782 	return idx;
783 }
784 
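/*
 * Inspect the response flags of one CPB.  On an error (or when forced by
 * a notifier error bit) describe the failure to EH and freeze or abort
 * the port; on normal completion finish the queued command.  Returns 1 if
 * EH was invoked and the caller should stop scanning CPBs.
 */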
785 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
786 {
787 	struct nv_adma_port_priv *pp = ap->private_data;
788 	u8 flags = pp->cpb[cpb_num].resp_flags;
789 
790 	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
791 
792 	if (unlikely((force_err ||
793 		     flags & (NV_CPB_RESP_ATA_ERR |
794 			      NV_CPB_RESP_CMD_ERR |
795 			      NV_CPB_RESP_CPB_ERR)))) {
796 		struct ata_eh_info *ehi = &ap->link.eh_info;
797 		int freeze = 0;
798 
799 		ata_ehi_clear_desc(ehi);
800 		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
801 		if (flags & NV_CPB_RESP_ATA_ERR) {
802 			ata_ehi_push_desc(ehi, "ATA error");
803 			ehi->err_mask |= AC_ERR_DEV;
804 		} else if (flags & NV_CPB_RESP_CMD_ERR) {
805 			ata_ehi_push_desc(ehi, "CMD error");
806 			ehi->err_mask |= AC_ERR_DEV;
807 		} else if (flags & NV_CPB_RESP_CPB_ERR) {
808 			ata_ehi_push_desc(ehi, "CPB error");
809 			ehi->err_mask |= AC_ERR_SYSTEM;
810 			freeze = 1;
811 		} else {
812 			/* notifier error, but no error in CPB flags? */
813 			ata_ehi_push_desc(ehi, "unknown");
814 			ehi->err_mask |= AC_ERR_OTHER;
815 			freeze = 1;
816 		}
817 		/* Kill all commands. EH will determine what actually failed. */
818 		if (freeze)
819 			ata_port_freeze(ap);
820 		else
821 			ata_port_abort(ap);
822 		return 1;
823 	}
824 
825 	if (likely(flags & NV_CPB_RESP_DONE)) {
826 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
827 		VPRINTK("CPB flags done, flags=0x%x\n", flags);
828 		if (likely(qc)) {
829 			DPRINTK("Completing qc from tag %d\n", cpb_num);
830 			ata_qc_complete(qc);
831 		} else {
832 			struct ata_eh_info *ehi = &ap->link.eh_info;
833 			/* Notifier bits set without a command may indicate the drive
834 			   is misbehaving. Raise host state machine violation on this
835 			   condition. */
836 			ata_port_printk(ap, KERN_ERR,
837 					"notifier for tag %d with no cmd?\n",
838 					cpb_num);
839 			ehi->err_mask |= AC_ERR_HSM;
840 			ehi->action |= ATA_EH_RESET;
841 			ata_port_freeze(ap);
842 			return 1;
843 		}
844 	}
845 	return 0;
846 }
847 
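/*
 * Legacy (non-ADMA) per-port interrupt handling: freeze the port on
 * hotplug/unplug notification, ignore interrupts that are not for the
 * device, and hand device interrupts to the standard SFF handler.
 */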
848 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
849 {
850 	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
851 
852 	/* freeze if hotplugged */
853 	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
854 		ata_port_freeze(ap);
855 		return 1;
856 	}
857 
858 	/* bail out if not our interrupt */
859 	if (!(irq_stat & NV_INT_DEV))
860 		return 0;
861 
862 	/* DEV interrupt w/ no active qc? */
863 	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
864 		ata_sff_check_status(ap);
865 		return 1;
866 	}
867 
868 	/* handle interrupt */
869 	return ata_sff_host_intr(ap, qc);
870 }
871 
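/*
 * Main ADMA interrupt handler.  Ports running in legacy or register mode
 * are serviced through nv_host_intr(); otherwise the notifier and ADMA
 * status registers are read, error conditions are reported to EH, and
 * completed CPBs are handed to nv_adma_check_cpb().
 */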
872 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
873 {
874 	struct ata_host *host = dev_instance;
875 	int i, handled = 0;
876 	u32 notifier_clears[2];
877 
878 	spin_lock(&host->lock);
879 
880 	for (i = 0; i < host->n_ports; i++) {
881 		struct ata_port *ap = host->ports[i];
882 		notifier_clears[i] = 0;
883 
884 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
885 			struct nv_adma_port_priv *pp = ap->private_data;
886 			void __iomem *mmio = pp->ctl_block;
887 			u16 status;
888 			u32 gen_ctl;
889 			u32 notifier, notifier_error;
890 
891 			/* if ADMA is disabled, use standard ata interrupt handler */
892 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
893 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
894 					>> (NV_INT_PORT_SHIFT * i);
895 				handled += nv_host_intr(ap, irq_stat);
896 				continue;
897 			}
898 
899 			/* if in ATA register mode, check for standard interrupts */
900 			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
901 				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
902 					>> (NV_INT_PORT_SHIFT * i);
903 				if (ata_tag_valid(ap->link.active_tag))
904 					/* NV_INT_DEV indication seems unreliable at times,
905 					   at least in ADMA mode. Force it on always when a
906 					   command is active, to prevent losing interrupts. */
907 					irq_stat |= NV_INT_DEV;
908 				handled += nv_host_intr(ap, irq_stat);
909 			}
910 
911 			notifier = readl(mmio + NV_ADMA_NOTIFIER);
912 			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
913 			notifier_clears[i] = notifier | notifier_error;
914 
915 			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
916 
917 			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
918 			    !notifier_error)
919 				/* Nothing to do */
920 				continue;
921 
922 			status = readw(mmio + NV_ADMA_STAT);
923 
924 			/* Clear status. Ensure the controller sees the clearing before we start
925 			   looking at any of the CPB statuses, so that any CPB completions after
926 			   this point in the handler will raise another interrupt. */
927 			writew(status, mmio + NV_ADMA_STAT);
928 			readw(mmio + NV_ADMA_STAT); /* flush posted write */
929 			rmb();
930 
931 			handled++; /* irq handled if we got here */
932 
933 			/* freeze if hotplugged or controller error */
934 			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
935 					       NV_ADMA_STAT_HOTUNPLUG |
936 					       NV_ADMA_STAT_TIMEOUT |
937 					       NV_ADMA_STAT_SERROR))) {
938 				struct ata_eh_info *ehi = &ap->link.eh_info;
939 
940 				ata_ehi_clear_desc(ehi);
941 				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
942 				if (status & NV_ADMA_STAT_TIMEOUT) {
943 					ehi->err_mask |= AC_ERR_SYSTEM;
944 					ata_ehi_push_desc(ehi, "timeout");
945 				} else if (status & NV_ADMA_STAT_HOTPLUG) {
946 					ata_ehi_hotplugged(ehi);
947 					ata_ehi_push_desc(ehi, "hotplug");
948 				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
949 					ata_ehi_hotplugged(ehi);
950 					ata_ehi_push_desc(ehi, "hot unplug");
951 				} else if (status & NV_ADMA_STAT_SERROR) {
952 					/* let libata analyze SError and figure out the cause */
953 					ata_ehi_push_desc(ehi, "SError");
954 				} else
955 					ata_ehi_push_desc(ehi, "unknown");
956 				ata_port_freeze(ap);
957 				continue;
958 			}
959 
960 			if (status & (NV_ADMA_STAT_DONE |
961 				      NV_ADMA_STAT_CPBERR |
962 				      NV_ADMA_STAT_CMD_COMPLETE)) {
963 				u32 check_commands = notifier_clears[i];
964 				int pos, error = 0;
965 
966 				if (status & NV_ADMA_STAT_CPBERR) {
967 					/* Check all active commands */
968 					if (ata_tag_valid(ap->link.active_tag))
969 						check_commands = 1 <<
970 							ap->link.active_tag;
971 					else
972 						check_commands = ap->
973 							link.sactive;
974 				}
975 
976 				/* Check CPBs for completed commands */
977 				while ((pos = ffs(check_commands)) && !error) {
978 					pos--;
979 					error = nv_adma_check_cpb(ap, pos,
980 						notifier_error & (1 << pos));
981 					check_commands &= ~(1 << pos);
982 				}
983 			}
984 		}
985 	}
986 
987 	if (notifier_clears[0] || notifier_clears[1]) {
988 		/* Note: Both notifier clear registers must be written
989 		   if either is set, even if one is zero, according to NVIDIA. */
990 		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
991 		writel(notifier_clears[0], pp->notifier_clear_block);
992 		pp = host->ports[1]->private_data;
993 		writel(notifier_clears[1], pp->notifier_clear_block);
994 	}
995 
996 	spin_unlock(&host->lock);
997 
998 	return IRQ_RETVAL(handled);
999 }
1000 
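/*
 * freeze/thaw for ADMA ports: in addition to the CK804 interrupt masking,
 * the ADMA engine's own AIEN and HOTPLUG_IEN enables are cleared or set,
 * unless the port has been dropped back to legacy mode for an ATAPI device.
 */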
1001 static void nv_adma_freeze(struct ata_port *ap)
1002 {
1003 	struct nv_adma_port_priv *pp = ap->private_data;
1004 	void __iomem *mmio = pp->ctl_block;
1005 	u16 tmp;
1006 
1007 	nv_ck804_freeze(ap);
1008 
1009 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1010 		return;
1011 
1012 	/* clear any outstanding CK804 notifications */
1013 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1014 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1015 
1016 	/* Disable interrupt */
1017 	tmp = readw(mmio + NV_ADMA_CTL);
1018 	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1019 		mmio + NV_ADMA_CTL);
1020 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1021 }
1022 
1023 static void nv_adma_thaw(struct ata_port *ap)
1024 {
1025 	struct nv_adma_port_priv *pp = ap->private_data;
1026 	void __iomem *mmio = pp->ctl_block;
1027 	u16 tmp;
1028 
1029 	nv_ck804_thaw(ap);
1030 
1031 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1032 		return;
1033 
1034 	/* Enable interrupt */
1035 	tmp = readw(mmio + NV_ADMA_CTL);
1036 	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1037 		mmio + NV_ADMA_CTL);
1038 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1039 }
1040 
1041 static void nv_adma_irq_clear(struct ata_port *ap)
1042 {
1043 	struct nv_adma_port_priv *pp = ap->private_data;
1044 	void __iomem *mmio = pp->ctl_block;
1045 	u32 notifier_clears[2];
1046 
1047 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1048 		ata_sff_irq_clear(ap);
1049 		return;
1050 	}
1051 
1052 	/* clear any outstanding CK804 notifications */
1053 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1054 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1055 
1056 	/* clear ADMA status */
1057 	writew(0xffff, mmio + NV_ADMA_STAT);
1058 
1059 	/* clear notifiers - note both ports need to be written with
1060 	   something even though we are only clearing on one */
1061 	if (ap->port_no == 0) {
1062 		notifier_clears[0] = 0xFFFFFFFF;
1063 		notifier_clears[1] = 0;
1064 	} else {
1065 		notifier_clears[0] = 0;
1066 		notifier_clears[1] = 0xFFFFFFFF;
1067 	}
1068 	pp = ap->host->ports[0]->private_data;
1069 	writel(notifier_clears[0], pp->notifier_clear_block);
1070 	pp = ap->host->ports[1]->private_data;
1071 	writel(notifier_clears[1], pp->notifier_clear_block);
1072 }
1073 
1074 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1075 {
1076 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1077 
1078 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1079 		ata_sff_post_internal_cmd(qc);
1080 }
1081 
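/*
 * Allocate per-port ADMA state: locate the port's control, general and
 * notifier-clear register blocks, allocate the CPB array plus the APRD
 * scatter/gather tables in coherent DMA memory, and bring the channel up
 * in register mode with interrupts enabled.
 */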
1082 static int nv_adma_port_start(struct ata_port *ap)
1083 {
1084 	struct device *dev = ap->host->dev;
1085 	struct nv_adma_port_priv *pp;
1086 	int rc;
1087 	void *mem;
1088 	dma_addr_t mem_dma;
1089 	void __iomem *mmio;
1090 	struct pci_dev *pdev = to_pci_dev(dev);
1091 	u16 tmp;
1092 
1093 	VPRINTK("ENTER\n");
1094 
1095 	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1096 	   pad buffers */
1097 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1098 	if (rc)
1099 		return rc;
1100 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1101 	if (rc)
1102 		return rc;
1103 
1104 	rc = ata_port_start(ap);
1105 	if (rc)
1106 		return rc;
1107 
1108 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1109 	if (!pp)
1110 		return -ENOMEM;
1111 
1112 	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1113 	       ap->port_no * NV_ADMA_PORT_SIZE;
1114 	pp->ctl_block = mmio;
1115 	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1116 	pp->notifier_clear_block = pp->gen_block +
1117 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1118 
1119 	/* Now that the legacy PRD and padding buffer are allocated we can
1120 	   safely raise the DMA mask to allocate the CPB/APRD table.
1121 	   These are allowed to fail since we store the value that ends up
1122 	   being used as the bounce limit in slave_config later if
1123 	   needed. */
1124 	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1125 	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1126 	pp->adma_dma_mask = *dev->dma_mask;
1127 
1128 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1129 				  &mem_dma, GFP_KERNEL);
1130 	if (!mem)
1131 		return -ENOMEM;
1132 	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1133 
1134 	/*
1135 	 * First item in chunk of DMA memory:
1136 	 * 128-byte command parameter block (CPB)
1137 	 * one for each command tag
1138 	 */
1139 	pp->cpb     = mem;
1140 	pp->cpb_dma = mem_dma;
1141 
1142 	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1143 	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1144 
1145 	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1146 	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1147 
1148 	/*
1149 	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1150 	 */
1151 	pp->aprd = mem;
1152 	pp->aprd_dma = mem_dma;
1153 
1154 	ap->private_data = pp;
1155 
1156 	/* clear any outstanding interrupt conditions */
1157 	writew(0xffff, mmio + NV_ADMA_STAT);
1158 
1159 	/* initialize port variables */
1160 	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1161 
1162 	/* clear CPB fetch count */
1163 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1164 
1165 	/* clear GO for register mode, enable interrupt */
1166 	tmp = readw(mmio + NV_ADMA_CTL);
1167 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1168 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1169 
1170 	tmp = readw(mmio + NV_ADMA_CTL);
1171 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1172 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1173 	udelay(1);
1174 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1175 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1176 
1177 	return 0;
1178 }
1179 
1180 static void nv_adma_port_stop(struct ata_port *ap)
1181 {
1182 	struct nv_adma_port_priv *pp = ap->private_data;
1183 	void __iomem *mmio = pp->ctl_block;
1184 
1185 	VPRINTK("ENTER\n");
1186 	writew(0, mmio + NV_ADMA_CTL);
1187 }
1188 
1189 #ifdef CONFIG_PM
1190 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1191 {
1192 	struct nv_adma_port_priv *pp = ap->private_data;
1193 	void __iomem *mmio = pp->ctl_block;
1194 
1195 	/* Go to register mode - clears GO */
1196 	nv_adma_register_mode(ap);
1197 
1198 	/* clear CPB fetch count */
1199 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1200 
1201 	/* disable interrupt, shut down port */
1202 	writew(0, mmio + NV_ADMA_CTL);
1203 
1204 	return 0;
1205 }
1206 
1207 static int nv_adma_port_resume(struct ata_port *ap)
1208 {
1209 	struct nv_adma_port_priv *pp = ap->private_data;
1210 	void __iomem *mmio = pp->ctl_block;
1211 	u16 tmp;
1212 
1213 	/* set CPB block location */
1214 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1215 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1216 
1217 	/* clear any outstanding interrupt conditions */
1218 	writew(0xffff, mmio + NV_ADMA_STAT);
1219 
1220 	/* initialize port variables */
1221 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1222 
1223 	/* clear CPB fetch count */
1224 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1225 
1226 	/* clear GO for register mode, enable interrupt */
1227 	tmp = readw(mmio + NV_ADMA_CTL);
1228 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1229 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1230 
1231 	tmp = readw(mmio + NV_ADMA_CTL);
1232 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1233 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1234 	udelay(1);
1235 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1236 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1237 
1238 	return 0;
1239 }
1240 #endif
1241 
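/*
 * Point the SFF taskfile accessors at the shadow registers inside this
 * port's ADMA register space (one 32-bit slot per taskfile register,
 * control/altstatus at offset 0x20).
 */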
1242 static void nv_adma_setup_port(struct ata_port *ap)
1243 {
1244 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1245 	struct ata_ioports *ioport = &ap->ioaddr;
1246 
1247 	VPRINTK("ENTER\n");
1248 
1249 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1250 
1251 	ioport->cmd_addr	= mmio;
1252 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1253 	ioport->error_addr	=
1254 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1255 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1256 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1257 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1258 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1259 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1260 	ioport->status_addr	=
1261 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1262 	ioport->altstatus_addr	=
1263 	ioport->ctl_addr	= mmio + 0x20;
1264 }
1265 
1266 static int nv_adma_host_init(struct ata_host *host)
1267 {
1268 	struct pci_dev *pdev = to_pci_dev(host->dev);
1269 	unsigned int i;
1270 	u32 tmp32;
1271 
1272 	VPRINTK("ENTER\n");
1273 
1274 	/* enable ADMA on the ports */
1275 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1276 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1277 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1278 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1279 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1280 
1281 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1282 
1283 	for (i = 0; i < host->n_ports; i++)
1284 		nv_adma_setup_port(host->ports[i]);
1285 
1286 	return 0;
1287 }
1288 
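/*
 * Fill one APRD entry from a scatterlist segment: set WRITE for writes,
 * END on the final segment, and CONT on intermediate segments other than
 * the fifth (after which the list continues in the external APRD table).
 */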
1289 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1290 			      struct scatterlist *sg,
1291 			      int idx,
1292 			      struct nv_adma_prd *aprd)
1293 {
1294 	u8 flags = 0;
1295 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1296 		flags |= NV_APRD_WRITE;
1297 	if (idx == qc->n_elem - 1)
1298 		flags |= NV_APRD_END;
1299 	else if (idx != 4)
1300 		flags |= NV_APRD_CONT;
1301 
1302 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1303 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1304 	aprd->flags = flags;
1305 	aprd->packet_len = 0;
1306 }
1307 
1308 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1309 {
1310 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1311 	struct nv_adma_prd *aprd;
1312 	struct scatterlist *sg;
1313 	unsigned int si;
1314 
1315 	VPRINTK("ENTER\n");
1316 
1317 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1318 		aprd = (si < 5) ? &cpb->aprd[si] :
1319 			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1320 		nv_adma_fill_aprd(qc, sg, si, aprd);
1321 	}
1322 	if (si > 5)
1323 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1324 	else
1325 		cpb->next_aprd = cpu_to_le64(0);
1326 }
1327 
1328 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1329 {
1330 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1331 
1332 	/* ADMA engine can only be used for non-ATAPI DMA commands,
1333 	   or interrupt-driven no-data commands. */
1334 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1335 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1336 		return 1;
1337 
1338 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1339 	   (qc->tf.protocol == ATA_PROT_NODATA))
1340 		return 0;
1341 
1342 	return 1;
1343 }
1344 
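/*
 * Build the CPB for a queued command.  Commands that must use register
 * mode fall back to the standard SFF prep; otherwise the taskfile and,
 * for DMA commands, the APRD scatter/gather list are written into the
 * tagged CPB before the VALID bit is exposed to the controller.
 */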
1345 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1346 {
1347 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1348 	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1349 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1350 		       NV_CPB_CTL_IEN;
1351 
1352 	if (nv_adma_use_reg_mode(qc)) {
1353 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1354 			(qc->flags & ATA_QCFLAG_DMAMAP));
1355 		nv_adma_register_mode(qc->ap);
1356 		ata_sff_qc_prep(qc);
1357 		return;
1358 	}
1359 
1360 	cpb->resp_flags = NV_CPB_RESP_DONE;
1361 	wmb();
1362 	cpb->ctl_flags = 0;
1363 	wmb();
1364 
1365 	cpb->len		= 3;
1366 	cpb->tag		= qc->tag;
1367 	cpb->next_cpb_idx	= 0;
1368 
1369 	/* turn on NCQ flags for NCQ commands */
1370 	if (qc->tf.protocol == ATA_PROT_NCQ)
1371 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1372 
1373 	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1374 
1375 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1376 
1377 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1378 		nv_adma_fill_sg(qc, cpb);
1379 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1380 	} else
1381 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1382 
1383 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1384 	   until we are finished filling in all of the contents */
1385 	wmb();
1386 	cpb->ctl_flags = ctl_flags;
1387 	wmb();
1388 	cpb->resp_flags = 0;
1389 }
1390 
1391 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1392 {
1393 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1394 	void __iomem *mmio = pp->ctl_block;
1395 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1396 
1397 	VPRINTK("ENTER\n");
1398 
1399 	/* We can't handle result taskfile with NCQ commands, since
1400 	   retrieving the taskfile switches us out of ADMA mode and would abort
1401 	   existing commands. */
1402 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1403 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1404 		ata_dev_printk(qc->dev, KERN_ERR,
1405 			"NCQ w/ RESULT_TF not allowed\n");
1406 		return AC_ERR_SYSTEM;
1407 	}
1408 
1409 	if (nv_adma_use_reg_mode(qc)) {
1410 		/* use ATA register mode */
1411 		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1412 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1413 			(qc->flags & ATA_QCFLAG_DMAMAP));
1414 		nv_adma_register_mode(qc->ap);
1415 		return ata_sff_qc_issue(qc);
1416 	} else
1417 		nv_adma_mode(qc->ap);
1418 
1419 	/* write append register, command tag in lower 8 bits
1420 	   and (number of cpbs to append -1) in top 8 bits */
1421 	wmb();
1422 
1423 	if (curr_ncq != pp->last_issue_ncq) {
1424 		/* Seems to need some delay before switching between NCQ and
1425 		   non-NCQ commands, else we get command timeouts and such. */
1426 		udelay(20);
1427 		pp->last_issue_ncq = curr_ncq;
1428 	}
1429 
1430 	writew(qc->tag, mmio + NV_ADMA_APPEND);
1431 
1432 	DPRINTK("Issued tag %u\n", qc->tag);
1433 
1434 	return 0;
1435 }
1436 
1437 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1438 {
1439 	struct ata_host *host = dev_instance;
1440 	unsigned int i;
1441 	unsigned int handled = 0;
1442 	unsigned long flags;
1443 
1444 	spin_lock_irqsave(&host->lock, flags);
1445 
1446 	for (i = 0; i < host->n_ports; i++) {
1447 		struct ata_port *ap;
1448 
1449 		ap = host->ports[i];
1450 		if (ap &&
1451 		    !(ap->flags & ATA_FLAG_DISABLED)) {
1452 			struct ata_queued_cmd *qc;
1453 
1454 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
1455 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1456 				handled += ata_sff_host_intr(ap, qc);
1457 			else
1458 				// No request pending?  Clear interrupt status
1459 				// anyway, in case there's one pending.
1460 				ap->ops->sff_check_status(ap);
1461 		}
1462 
1463 	}
1464 
1465 	spin_unlock_irqrestore(&host->lock, flags);
1466 
1467 	return IRQ_RETVAL(handled);
1468 }
1469 
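/*
 * Shared nf2/ck804 interrupt body: the caller reads the chip's interrupt
 * status byte and each port consumes its own 4-bit slice of it.
 */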
1470 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1471 {
1472 	int i, handled = 0;
1473 
1474 	for (i = 0; i < host->n_ports; i++) {
1475 		struct ata_port *ap = host->ports[i];
1476 
1477 		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1478 			handled += nv_host_intr(ap, irq_stat);
1479 
1480 		irq_stat >>= NV_INT_PORT_SHIFT;
1481 	}
1482 
1483 	return IRQ_RETVAL(handled);
1484 }
1485 
1486 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1487 {
1488 	struct ata_host *host = dev_instance;
1489 	u8 irq_stat;
1490 	irqreturn_t ret;
1491 
1492 	spin_lock(&host->lock);
1493 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1494 	ret = nv_do_interrupt(host, irq_stat);
1495 	spin_unlock(&host->lock);
1496 
1497 	return ret;
1498 }
1499 
1500 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1501 {
1502 	struct ata_host *host = dev_instance;
1503 	u8 irq_stat;
1504 	irqreturn_t ret;
1505 
1506 	spin_lock(&host->lock);
1507 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1508 	ret = nv_do_interrupt(host, irq_stat);
1509 	spin_unlock(&host->lock);
1510 
1511 	return ret;
1512 }
1513 
1514 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1515 {
1516 	if (sc_reg > SCR_CONTROL)
1517 		return -EINVAL;
1518 
1519 	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1520 	return 0;
1521 }
1522 
1523 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1524 {
1525 	if (sc_reg > SCR_CONTROL)
1526 		return -EINVAL;
1527 
1528 	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1529 	return 0;
1530 }
1531 
1532 static void nv_nf2_freeze(struct ata_port *ap)
1533 {
1534 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1535 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1536 	u8 mask;
1537 
1538 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1539 	mask &= ~(NV_INT_ALL << shift);
1540 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1541 }
1542 
1543 static void nv_nf2_thaw(struct ata_port *ap)
1544 {
1545 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1546 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1547 	u8 mask;
1548 
1549 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1550 
1551 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1552 	mask |= (NV_INT_MASK << shift);
1553 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1554 }
1555 
1556 static void nv_ck804_freeze(struct ata_port *ap)
1557 {
1558 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1559 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1560 	u8 mask;
1561 
1562 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1563 	mask &= ~(NV_INT_ALL << shift);
1564 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1565 }
1566 
1567 static void nv_ck804_thaw(struct ata_port *ap)
1568 {
1569 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1570 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1571 	u8 mask;
1572 
1573 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1574 
1575 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1576 	mask |= (NV_INT_MASK << shift);
1577 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1578 }
1579 
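/*
 * MCP55 freeze/thaw: acknowledge and mask (or unmask) this port's 16-bit
 * slice of the MCP55 interrupt registers in BAR5, then let the SFF
 * helpers handle the rest.
 */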
1580 static void nv_mcp55_freeze(struct ata_port *ap)
1581 {
1582 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1583 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1584 	u32 mask;
1585 
1586 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1587 
1588 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1589 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1590 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1591 	ata_sff_freeze(ap);
1592 }
1593 
1594 static void nv_mcp55_thaw(struct ata_port *ap)
1595 {
1596 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1597 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1598 	u32 mask;
1599 
1600 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1601 
1602 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1603 	mask |= (NV_INT_MASK_MCP55 << shift);
1604 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1605 	ata_sff_thaw(ap);
1606 }
1607 
1608 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1609 			unsigned long deadline)
1610 {
1611 	int rc;
1612 
1613 	/* SATA hardreset fails to retrieve proper device signature on
1614 	 * some controllers.  Request follow up SRST.  For more info,
1615 	 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1616 	 */
1617 	rc = sata_sff_hardreset(link, class, deadline);
1618 	if (rc)
1619 		return rc;
1620 	return -EAGAIN;
1621 }
1622 
1623 static void nv_adma_error_handler(struct ata_port *ap)
1624 {
1625 	struct nv_adma_port_priv *pp = ap->private_data;
1626 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1627 		void __iomem *mmio = pp->ctl_block;
1628 		int i;
1629 		u16 tmp;
1630 
1631 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1632 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1633 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1634 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1635 			u32 status = readw(mmio + NV_ADMA_STAT);
1636 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1637 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1638 
1639 			ata_port_printk(ap, KERN_ERR,
1640 				"EH in ADMA mode, notifier 0x%X "
1641 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1642 				"next cpb count 0x%X next cpb idx 0x%x\n",
1643 				notifier, notifier_error, gen_ctl, status,
1644 				cpb_count, next_cpb_idx);
1645 
1646 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1647 				struct nv_adma_cpb *cpb = &pp->cpb[i];
1648 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1649 				    ap->link.sactive & (1 << i))
1650 					ata_port_printk(ap, KERN_ERR,
1651 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1652 						i, cpb->ctl_flags, cpb->resp_flags);
1653 			}
1654 		}
1655 
1656 		/* Push us back into port register mode for error handling. */
1657 		nv_adma_register_mode(ap);
1658 
1659 		/* Mark all of the CPBs as invalid to prevent them from
1660 		   being executed */
1661 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1662 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1663 
1664 		/* clear CPB fetch count */
1665 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1666 
1667 		/* Reset channel */
1668 		tmp = readw(mmio + NV_ADMA_CTL);
1669 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1670 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1671 		udelay(1);
1672 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1673 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1674 	}
1675 
1676 	ata_sff_error_handler(ap);
1677 }
1678 
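/*
 * Software-NCQ deferral queue: commands that cannot be issued yet are
 * parked in a small FIFO (a tag ring plus the defer_bits mask) by
 * nv_swncq_qc_to_dq() and fetched back out by nv_swncq_qc_from_dq().
 */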
1679 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1680 {
1681 	struct nv_swncq_port_priv *pp = ap->private_data;
1682 	struct defer_queue *dq = &pp->defer_queue;
1683 
1684 	/* queue is full */
1685 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1686 	dq->defer_bits |= (1 << qc->tag);
1687 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1688 }
1689 
1690 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1691 {
1692 	struct nv_swncq_port_priv *pp = ap->private_data;
1693 	struct defer_queue *dq = &pp->defer_queue;
1694 	unsigned int tag;
1695 
1696 	if (dq->head == dq->tail)	/* null queue */
1697 		return NULL;
1698 
1699 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1700 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1701 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1702 	dq->defer_bits &= ~(1 << tag);
1703 
1704 	return ata_qc_from_tag(ap, tag);
1705 }
1706 
1707 static void nv_swncq_fis_reinit(struct ata_port *ap)
1708 {
1709 	struct nv_swncq_port_priv *pp = ap->private_data;
1710 
1711 	pp->dhfis_bits = 0;
1712 	pp->dmafis_bits = 0;
1713 	pp->sdbfis_bits = 0;
1714 	pp->ncq_flags = 0;
1715 }
1716 
1717 static void nv_swncq_pp_reinit(struct ata_port *ap)
1718 {
1719 	struct nv_swncq_port_priv *pp = ap->private_data;
1720 	struct defer_queue *dq = &pp->defer_queue;
1721 
1722 	dq->head = 0;
1723 	dq->tail = 0;
1724 	dq->defer_bits = 0;
1725 	pp->qc_active = 0;
1726 	pp->last_issue_tag = ATA_TAG_POISON;
1727 	nv_swncq_fis_reinit(ap);
1728 }
1729 
1730 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1731 {
1732 	struct nv_swncq_port_priv *pp = ap->private_data;
1733 
1734 	writew(fis, pp->irq_block);
1735 }
1736 
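/*
 * __ata_bmdma_stop - stop the BMDMA engine for a port
 *
 * ata_bmdma_stop() only looks at qc->ap, so a dummy on-stack qc is
 * enough to stop the engine when no queued command is at hand.
 */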
1737 static void __ata_bmdma_stop(struct ata_port *ap)
1738 {
1739 	struct ata_queued_cmd qc;
1740 
1741 	qc.ap = ap;
1742 	ata_bmdma_stop(&qc);
1743 }
1744 
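/*
 * nv_swncq_ncq_stop - quiesce SWNCQ operation before error handling
 *
 * Dump the SWNCQ bookkeeping and per-tag FIS state for debugging, then
 * reset the port private state, clear interrupts and stop BMDMA.
 */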
1745 static void nv_swncq_ncq_stop(struct ata_port *ap)
1746 {
1747 	struct nv_swncq_port_priv *pp = ap->private_data;
1748 	unsigned int i;
1749 	u32 sactive;
1750 	u32 done_mask;
1751 
1752 	ata_port_printk(ap, KERN_ERR,
1753 			"EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1754 			ap->qc_active, ap->link.sactive);
1755 	ata_port_printk(ap, KERN_ERR,
1756 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1757 		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1758 		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1759 		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1760 
1761 	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1762 			ap->ops->sff_check_status(ap),
1763 			ioread8(ap->ioaddr.error_addr));
1764 
1765 	sactive = readl(pp->sactive_block);
1766 	done_mask = pp->qc_active ^ sactive;
1767 
1768 	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1769 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1770 		u8 err = 0;
1771 		if (pp->qc_active & (1 << i))
1772 			err = 0;
1773 		else if (done_mask & (1 << i))
1774 			err = 1;
1775 		else
1776 			continue;
1777 
1778 		ata_port_printk(ap, KERN_ERR,
1779 				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
1780 				(pp->dhfis_bits >> i) & 0x1,
1781 				(pp->dmafis_bits >> i) & 0x1,
1782 				(pp->sdbfis_bits >> i) & 0x1,
1783 				(sactive >> i) & 0x1,
1784 				(err ? "error! tag doesn't exist" : " "));
1785 	}
1786 
1787 	nv_swncq_pp_reinit(ap);
1788 	ap->ops->sff_irq_clear(ap);
1789 	__ata_bmdma_stop(ap);
1790 	nv_swncq_irq_clear(ap, 0xffff);
1791 }
1792 
1793 static void nv_swncq_error_handler(struct ata_port *ap)
1794 {
1795 	struct ata_eh_context *ehc = &ap->link.eh_context;
1796 
1797 	if (ap->link.sactive) {
1798 		nv_swncq_ncq_stop(ap);
1799 		ehc->i.action |= ATA_EH_RESET;
1800 	}
1801 
1802 	ata_sff_error_handler(ap);
1803 }
1804 
1805 #ifdef CONFIG_PM
1806 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1807 {
1808 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1809 	u32 tmp;
1810 
1811 	/* clear irq */
1812 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1813 
1814 	/* disable irq */
1815 	writel(0, mmio + NV_INT_ENABLE_MCP55);
1816 
1817 	/* disable swncq */
1818 	tmp = readl(mmio + NV_CTL_MCP55);
1819 	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1820 	writel(tmp, mmio + NV_CTL_MCP55);
1821 
1822 	return 0;
1823 }
1824 
1825 static int nv_swncq_port_resume(struct ata_port *ap)
1826 {
1827 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1828 	u32 tmp;
1829 
1830 	/* clear irq */
1831 	writel(~0, mmio + NV_INT_STATUS_MCP55);
1832 
1833 	/* enable irq */
1834 	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1835 
1836 	/* enable swncq */
1837 	tmp = readl(mmio + NV_CTL_MCP55);
1838 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1839 
1840 	return 0;
1841 }
1842 #endif
1843 
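/*
 * nv_swncq_host_init - one-time SWNCQ setup for an MCP55-class host
 *
 * Clears the ECO 398 bit in PCI config register 0x7f, enables SWNCQ on
 * both ports, unmasks the SWNCQ interrupt sources and acks any pending
 * port interrupts.
 */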
1844 static void nv_swncq_host_init(struct ata_host *host)
1845 {
1846 	u32 tmp;
1847 	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1848 	struct pci_dev *pdev = to_pci_dev(host->dev);
1849 	u8 regval;
1850 
1851 	/* disable ECO 398 */
1852 	pci_read_config_byte(pdev, 0x7f, &regval);
1853 	regval &= ~(1 << 7);
1854 	pci_write_config_byte(pdev, 0x7f, regval);
1855 
1856 	/* enable swncq */
1857 	tmp = readl(mmio + NV_CTL_MCP55);
1858 	VPRINTK("HOST_CTL:0x%X\n", tmp);
1859 	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1860 
1861 	/* enable irq intr */
1862 	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1863 	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1864 	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1865 
1866 	/* clear port irq */
1867 	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1868 }
1869 
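/*
 * nv_swncq_slave_config - per-device SCSI slave configuration
 *
 * NCQ is disabled (queue depth forced to 1) for Maxtor drives on MCP51
 * controllers and on MCP55 controllers with revision <= a2.
 */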
1870 static int nv_swncq_slave_config(struct scsi_device *sdev)
1871 {
1872 	struct ata_port *ap = ata_shost_to_port(sdev->host);
1873 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1874 	struct ata_device *dev;
1875 	int rc;
1876 	u8 rev;
1877 	u8 check_maxtor = 0;
1878 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1879 
1880 	rc = ata_scsi_slave_config(sdev);
1881 	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1882 		/* Not a proper libata device, ignore */
1883 		return rc;
1884 
1885 	dev = &ap->link.device[sdev->id];
1886 	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1887 		return rc;
1888 
1889 	/* if MCP51 and Maxtor, then disable NCQ */
1890 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1891 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1892 		check_maxtor = 1;
1893 
1894 	/* if MCP55 and rev <= a2 and Maxtor, then disable NCQ */
1895 	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1896 		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1897 		pci_read_config_byte(pdev, 0x8, &rev);
1898 		if (rev <= 0xa2)
1899 			check_maxtor = 1;
1900 	}
1901 
1902 	if (!check_maxtor)
1903 		return rc;
1904 
1905 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1906 
1907 	if (strncmp(model_num, "Maxtor", 6) == 0) {
1908 		ata_scsi_change_queue_depth(sdev, 1);
1909 		ata_dev_printk(dev, KERN_NOTICE,
1910 			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1911 	}
1912 
1913 	return rc;
1914 }
1915 
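/*
 * nv_swncq_port_start - allocate SWNCQ per-port state
 *
 * Sets up the port private data, a DMA-coherent PRD table per possible
 * tag, and the per-port SActive/interrupt/tag register pointers.
 */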
1916 static int nv_swncq_port_start(struct ata_port *ap)
1917 {
1918 	struct device *dev = ap->host->dev;
1919 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1920 	struct nv_swncq_port_priv *pp;
1921 	int rc;
1922 
1923 	rc = ata_port_start(ap);
1924 	if (rc)
1925 		return rc;
1926 
1927 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1928 	if (!pp)
1929 		return -ENOMEM;
1930 
1931 	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1932 				      &pp->prd_dma, GFP_KERNEL);
1933 	if (!pp->prd)
1934 		return -ENOMEM;
1935 	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1936 
1937 	ap->private_data = pp;
1938 	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1939 	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1940 	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1941 
1942 	return 0;
1943 }
1944 
1945 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1946 {
1947 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1948 		ata_sff_qc_prep(qc);
1949 		return;
1950 	}
1951 
1952 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1953 		return;
1954 
1955 	nv_swncq_fill_sg(qc);
1956 }
1957 
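/*
 * nv_swncq_fill_sg - build the BMDMA PRD table for an NCQ command
 *
 * Each tag gets its own PRD table; scatterlist entries are split so
 * that no PRD entry crosses a 64KB boundary, and the last entry is
 * marked with ATA_PRD_EOT.
 */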
1958 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1959 {
1960 	struct ata_port *ap = qc->ap;
1961 	struct scatterlist *sg;
1962 	struct nv_swncq_port_priv *pp = ap->private_data;
1963 	struct ata_prd *prd;
1964 	unsigned int si, idx;
1965 
1966 	prd = pp->prd + ATA_MAX_PRD * qc->tag;
1967 
1968 	idx = 0;
1969 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1970 		u32 addr, offset;
1971 		u32 sg_len, len;
1972 
1973 		addr = (u32)sg_dma_address(sg);
1974 		sg_len = sg_dma_len(sg);
1975 
1976 		while (sg_len) {
1977 			offset = addr & 0xffff;
1978 			len = sg_len;
1979 			if ((offset + sg_len) > 0x10000)
1980 				len = 0x10000 - offset;
1981 
1982 			prd[idx].addr = cpu_to_le32(addr);
1983 			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1984 
1985 			idx++;
1986 			sg_len -= len;
1987 			addr += len;
1988 		}
1989 	}
1990 
1991 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1992 }
1993 
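/*
 * nv_swncq_issue_atacmd - issue a single NCQ command to the drive
 *
 * Sets the command's bit in SActive, updates the per-tag bookkeeping,
 * then loads the taskfile and writes the command register through the
 * SFF ops.
 */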
1994 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1995 					  struct ata_queued_cmd *qc)
1996 {
1997 	struct nv_swncq_port_priv *pp = ap->private_data;
1998 
1999 	if (qc == NULL)
2000 		return 0;
2001 
2002 	DPRINTK("Enter\n");
2003 
2004 	writel((1 << qc->tag), pp->sactive_block);
2005 	pp->last_issue_tag = qc->tag;
2006 	pp->dhfis_bits &= ~(1 << qc->tag);
2007 	pp->dmafis_bits &= ~(1 << qc->tag);
2008 	pp->qc_active |= (0x1 << qc->tag);
2009 
2010 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2011 	ap->ops->sff_exec_command(ap, &qc->tf);
2012 
2013 	DPRINTK("Issued tag %u\n", qc->tag);
2014 
2015 	return 0;
2016 }
2017 
2018 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2019 {
2020 	struct ata_port *ap = qc->ap;
2021 	struct nv_swncq_port_priv *pp = ap->private_data;
2022 
2023 	if (qc->tf.protocol != ATA_PROT_NCQ)
2024 		return ata_sff_qc_issue(qc);
2025 
2026 	DPRINTK("Enter\n");
2027 
2028 	if (!pp->qc_active)
2029 		nv_swncq_issue_atacmd(ap, qc);
2030 	else
2031 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2032 
2033 	return 0;
2034 }
2035 
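/*
 * nv_swncq_hotplug - handle hot plug/unplug interrupts
 *
 * Records the event in the EH info, reads and clears SError, and
 * freezes the port so that EH can revalidate the link.
 */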
2036 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2037 {
2038 	u32 serror;
2039 	struct ata_eh_info *ehi = &ap->link.eh_info;
2040 
2041 	ata_ehi_clear_desc(ehi);
2042 
2043 	/* SError needs to be cleared; otherwise the port might lock up */
2044 	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2045 	sata_scr_write(&ap->link, SCR_ERROR, serror);
2046 
2047 	/* analyze @fis */
2048 	if (fis & NV_SWNCQ_IRQ_ADDED)
2049 		ata_ehi_push_desc(ehi, "hot plug");
2050 	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2051 		ata_ehi_push_desc(ehi, "hot unplug");
2052 
2053 	ata_ehi_hotplugged(ehi);
2054 
2055 	/* okay, let's hand over to EH */
2056 	ehi->serror |= serror;
2057 
2058 	ata_port_freeze(ap);
2059 }
2060 
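/*
 * nv_swncq_sdbfis - handle a Set Device Bits FIS
 *
 * Checks BMDMA status for transfer errors, completes every command
 * whose SActive bit the device has cleared (pp->qc_active ^ SActive),
 * then either reissues the last command (if its D2H Register FIS never
 * arrived) or issues the next deferred command.  Returns the number of
 * completed commands, or a negative errno on protocol errors.
 */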
2061 static int nv_swncq_sdbfis(struct ata_port *ap)
2062 {
2063 	struct ata_queued_cmd *qc;
2064 	struct nv_swncq_port_priv *pp = ap->private_data;
2065 	struct ata_eh_info *ehi = &ap->link.eh_info;
2066 	u32 sactive;
2067 	int nr_done = 0;
2068 	u32 done_mask;
2069 	int i;
2070 	u8 host_stat;
2071 	u8 lack_dhfis = 0;
2072 
2073 	host_stat = ap->ops->bmdma_status(ap);
2074 	if (unlikely(host_stat & ATA_DMA_ERR)) {
2075 		/* error when transferring data to/from memory */
2076 		ata_ehi_clear_desc(ehi);
2077 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2078 		ehi->err_mask |= AC_ERR_HOST_BUS;
2079 		ehi->action |= ATA_EH_RESET;
2080 		return -EINVAL;
2081 	}
2082 
2083 	ap->ops->sff_irq_clear(ap);
2084 	__ata_bmdma_stop(ap);
2085 
2086 	sactive = readl(pp->sactive_block);
2087 	done_mask = pp->qc_active ^ sactive;
2088 
2089 	if (unlikely(done_mask & sactive)) {
2090 		ata_ehi_clear_desc(ehi);
2091 		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
2092 				  "(%08x->%08x)", pp->qc_active, sactive);
2093 		ehi->err_mask |= AC_ERR_HSM;
2094 		ehi->action |= ATA_EH_RESET;
2095 		return -EINVAL;
2096 	}
2097 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
2098 		if (!(done_mask & (1 << i)))
2099 			continue;
2100 
2101 		qc = ata_qc_from_tag(ap, i);
2102 		if (qc) {
2103 			ata_qc_complete(qc);
2104 			pp->qc_active &= ~(1 << i);
2105 			pp->dhfis_bits &= ~(1 << i);
2106 			pp->dmafis_bits &= ~(1 << i);
2107 			pp->sdbfis_bits |= (1 << i);
2108 			nr_done++;
2109 		}
2110 	}
2111 
2112 	if (!ap->qc_active) {
2113 		DPRINTK("over\n");
2114 		nv_swncq_pp_reinit(ap);
2115 		return nr_done;
2116 	}
2117 
2118 	if (pp->qc_active & pp->dhfis_bits)
2119 		return nr_done;
2120 
2121 	if ((pp->ncq_flags & ncq_saw_backout) ||
2122 	    (pp->qc_active ^ pp->dhfis_bits))
2123 		/* if the controller can't get a Device-to-Host Register FIS,
2124 		 * the driver needs to reissue the command.
2125 		 */
2126 		lack_dhfis = 1;
2127 
2128 	DPRINTK("id 0x%x QC: qc_active 0x%x,"
2129 		"SWNCQ:qc_active 0x%X defer_bits %X "
2130 		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2131 		ap->print_id, ap->qc_active, pp->qc_active,
2132 		pp->defer_queue.defer_bits, pp->dhfis_bits,
2133 		pp->dmafis_bits, pp->last_issue_tag);
2134 
2135 	nv_swncq_fis_reinit(ap);
2136 
2137 	if (lack_dhfis) {
2138 		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2139 		nv_swncq_issue_atacmd(ap, qc);
2140 		return nr_done;
2141 	}
2142 
2143 	if (pp->defer_queue.defer_bits) {
2144 		/* issue the next command from the defer queue */
2145 		qc = nv_swncq_qc_from_dq(ap);
2146 		WARN_ON(qc == NULL);
2147 		nv_swncq_issue_atacmd(ap, qc);
2148 	}
2149 
2150 	return nr_done;
2151 }
2152 
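/*
 * nv_swncq_tag - read the tag the controller wants to transfer next
 *
 * The 5-bit tag is reported in bits 6:2 of the per-port tag register.
 */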
2153 static inline u32 nv_swncq_tag(struct ata_port *ap)
2154 {
2155 	struct nv_swncq_port_priv *pp = ap->private_data;
2156 	u32 tag;
2157 
2158 	tag = readb(pp->tag_block) >> 2;
2159 	return (tag & 0x1f);
2160 }
2161 
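/*
 * nv_swncq_dmafis - start BMDMA for the tag named in a DMA Setup FIS
 *
 * Stops any previous transfer, looks up the queued command for the
 * reported tag, programs the PRD table address and transfer direction,
 * and starts the BMDMA engine.  Returns 1 if a transfer was started,
 * 0 otherwise.
 */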
2162 static int nv_swncq_dmafis(struct ata_port *ap)
2163 {
2164 	struct ata_queued_cmd *qc;
2165 	unsigned int rw;
2166 	u8 dmactl;
2167 	u32 tag;
2168 	struct nv_swncq_port_priv *pp = ap->private_data;
2169 
2170 	__ata_bmdma_stop(ap);
2171 	tag = nv_swncq_tag(ap);
2172 
2173 	DPRINTK("dma setup tag 0x%x\n", tag);
2174 	qc = ata_qc_from_tag(ap, tag);
2175 
2176 	if (unlikely(!qc))
2177 		return 0;
2178 
2179 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2180 
2181 	/* load PRD table addr. */
2182 	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2183 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2184 
2185 	/* specify data direction, triple-check start bit is clear */
2186 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2187 	dmactl &= ~ATA_DMA_WR;
2188 	if (!rw)
2189 		dmactl |= ATA_DMA_WR;
2190 
2191 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2192 
2193 	return 1;
2194 }
2195 
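/*
 * nv_swncq_host_interrupt - per-port SWNCQ interrupt handling
 *
 * Acks the port interrupt bits, handles hotplug and device errors, and
 * dispatches SDB, D2H Register and DMA Setup FIS notifications to
 * complete finished commands, issue deferred commands and start DMA
 * transfers as appropriate.
 */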
2196 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2197 {
2198 	struct nv_swncq_port_priv *pp = ap->private_data;
2199 	struct ata_queued_cmd *qc;
2200 	struct ata_eh_info *ehi = &ap->link.eh_info;
2201 	u32 serror;
2202 	u8 ata_stat;
2203 	int rc = 0;
2204 
2205 	ata_stat = ap->ops->sff_check_status(ap);
2206 	nv_swncq_irq_clear(ap, fis);
2207 	if (!fis)
2208 		return;
2209 
2210 	if (ap->pflags & ATA_PFLAG_FROZEN)
2211 		return;
2212 
2213 	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2214 		nv_swncq_hotplug(ap, fis);
2215 		return;
2216 	}
2217 
2218 	if (!pp->qc_active)
2219 		return;
2220 
2221 	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2222 		return;
2223 	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2224 
2225 	if (ata_stat & ATA_ERR) {
2226 		ata_ehi_clear_desc(ehi);
2227 		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2228 		ehi->err_mask |= AC_ERR_DEV;
2229 		ehi->serror |= serror;
2230 		ehi->action |= ATA_EH_RESET;
2231 		ata_port_freeze(ap);
2232 		return;
2233 	}
2234 
2235 	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2236 		/* If a backout interrupt is received, the driver must
2237 		 * reissue the command again later.
2238 		 */
2239 		pp->ncq_flags |= ncq_saw_backout;
2240 	}
2241 
2242 	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2243 		pp->ncq_flags |= ncq_saw_sdb;
2244 		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2245 			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2246 			ap->print_id, pp->qc_active, pp->dhfis_bits,
2247 			pp->dmafis_bits, readl(pp->sactive_block));
2248 		rc = nv_swncq_sdbfis(ap);
2249 		if (rc < 0)
2250 			goto irq_error;
2251 	}
2252 
2253 	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2254 		/* The interrupt indicates the new command
2255 		 * was transmitted correctly to the drive.
2256 		 */
2257 		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2258 		pp->ncq_flags |= ncq_saw_d2h;
2259 		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2260 			ata_ehi_push_desc(ehi, "illegal fis transaction");
2261 			ehi->err_mask |= AC_ERR_HSM;
2262 			ehi->action |= ATA_EH_RESET;
2263 			goto irq_error;
2264 		}
2265 
2266 		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2267 		    !(pp->ncq_flags & ncq_saw_dmas)) {
2268 			ata_stat = ap->ops->sff_check_status(ap);
2269 			if (ata_stat & ATA_BUSY)
2270 				goto irq_exit;
2271 
2272 			if (pp->defer_queue.defer_bits) {
2273 				DPRINTK("send next command\n");
2274 				qc = nv_swncq_qc_from_dq(ap);
2275 				nv_swncq_issue_atacmd(ap, qc);
2276 			}
2277 		}
2278 	}
2279 
2280 	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2281 		/* program the DMA controller with the appropriate PRD buffers
2282 		 * and start the DMA transfer for the requested command.
2283 		 */
2284 		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2285 		pp->ncq_flags |= ncq_saw_dmas;
2286 		rc = nv_swncq_dmafis(ap);
2287 	}
2288 
2289 irq_exit:
2290 	return;
2291 irq_error:
2292 	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2293 	ata_port_freeze(ap);
2294 	return;
2295 }
2296 
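/*
 * nv_swncq_interrupt - top-level interrupt handler for SWNCQ hosts
 *
 * Reads the shared MCP55 interrupt status and, for each port, routes
 * the per-port bits either to the SWNCQ handler (when NCQ commands are
 * active) or to the legacy nv_host_intr() path.
 */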
2297 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2298 {
2299 	struct ata_host *host = dev_instance;
2300 	unsigned int i;
2301 	unsigned int handled = 0;
2302 	unsigned long flags;
2303 	u32 irq_stat;
2304 
2305 	spin_lock_irqsave(&host->lock, flags);
2306 
2307 	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2308 
2309 	for (i = 0; i < host->n_ports; i++) {
2310 		struct ata_port *ap = host->ports[i];
2311 
2312 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2313 			if (ap->link.sactive) {
2314 				nv_swncq_host_interrupt(ap, (u16)irq_stat);
2315 				handled = 1;
2316 			} else {
2317 				if (irq_stat)	/* preserve hotplug bits */
2318 					nv_swncq_irq_clear(ap, 0xfff0);
2319 
2320 				handled += nv_host_intr(ap, (u8)irq_stat);
2321 			}
2322 		}
2323 		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2324 	}
2325 
2326 	spin_unlock_irqrestore(&host->lock, flags);
2327 
2328 	return IRQ_RETVAL(handled);
2329 }
2330 
2331 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2332 {
2333 	static int printed_version;
2334 	const struct ata_port_info *ppi[] = { NULL, NULL };
2335 	struct nv_pi_priv *ipriv;
2336 	struct ata_host *host;
2337 	struct nv_host_priv *hpriv;
2338 	int rc;
2339 	u32 bar;
2340 	void __iomem *base;
2341 	unsigned long type = ent->driver_data;
2342 
2343 	/* Make sure this is a SATA controller by counting the number of BARs
2344 	 * (NVIDIA SATA controllers will always have six BARs).  Otherwise,
2345 	 * it's an IDE controller and we ignore it. */
2346 	for (bar = 0; bar < 6; bar++)
2347 		if (pci_resource_start(pdev, bar) == 0)
2348 			return -ENODEV;
2349 
2350 	if (!printed_version++)
2351 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2352 
2353 	rc = pcim_enable_device(pdev);
2354 	if (rc)
2355 		return rc;
2356 
2357 	/* determine type and allocate host */
2358 	if (type == CK804 && adma_enabled) {
2359 		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2360 		type = ADMA;
2361 	}
2362 
2363 	if (type == SWNCQ) {
2364 		if (swncq_enabled)
2365 			dev_printk(KERN_NOTICE, &pdev->dev,
2366 				   "Using SWNCQ mode\n");
2367 		else
2368 			type = GENERIC;
2369 	}
2370 
2371 	ppi[0] = &nv_port_info[type];
2372 	ipriv = ppi[0]->private_data;
2373 	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2374 	if (rc)
2375 		return rc;
2376 
2377 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2378 	if (!hpriv)
2379 		return -ENOMEM;
2380 	hpriv->type = type;
2381 	host->private_data = hpriv;
2382 
2383 	/* request and iomap NV_MMIO_BAR */
2384 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2385 	if (rc)
2386 		return rc;
2387 
2388 	/* configure SCR access */
2389 	base = host->iomap[NV_MMIO_BAR];
2390 	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2391 	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2392 
2393 	/* enable SATA space for CK804 */
2394 	if (type >= CK804) {
2395 		u8 regval;
2396 
2397 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2398 		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2399 		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2400 	}
2401 
2402 	/* init ADMA */
2403 	if (type == ADMA) {
2404 		rc = nv_adma_host_init(host);
2405 		if (rc)
2406 			return rc;
2407 	} else if (type == SWNCQ)
2408 		nv_swncq_host_init(host);
2409 
2410 	pci_set_master(pdev);
2411 	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2412 				 IRQF_SHARED, ipriv->sht);
2413 }
2414 
2415 #ifdef CONFIG_PM
2416 static int nv_pci_device_resume(struct pci_dev *pdev)
2417 {
2418 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2419 	struct nv_host_priv *hpriv = host->private_data;
2420 	int rc;
2421 
2422 	rc = ata_pci_device_do_resume(pdev);
2423 	if (rc)
2424 		return rc;
2425 
2426 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2427 		if (hpriv->type >= CK804) {
2428 			u8 regval;
2429 
2430 			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2431 			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2432 			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2433 		}
2434 		if (hpriv->type == ADMA) {
2435 			u32 tmp32;
2436 			struct nv_adma_port_priv *pp;
2437 			/* enable/disable ADMA on the ports appropriately */
2438 			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2439 
2440 			pp = host->ports[0]->private_data;
2441 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2442 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2443 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2444 			else
2445 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2446 					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2447 			pp = host->ports[1]->private_data;
2448 			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2449 				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2450 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2451 			else
2452 				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2453 					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2454 
2455 			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2456 		}
2457 	}
2458 
2459 	ata_host_resume(host);
2460 
2461 	return 0;
2462 }
2463 #endif
2464 
2465 static void nv_ck804_host_stop(struct ata_host *host)
2466 {
2467 	struct pci_dev *pdev = to_pci_dev(host->dev);
2468 	u8 regval;
2469 
2470 	/* disable SATA space for CK804 */
2471 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2472 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2473 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2474 }
2475 
2476 static void nv_adma_host_stop(struct ata_host *host)
2477 {
2478 	struct pci_dev *pdev = to_pci_dev(host->dev);
2479 	u32 tmp32;
2480 
2481 	/* disable ADMA on the ports */
2482 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2483 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2484 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2485 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2486 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2487 
2488 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2489 
2490 	nv_ck804_host_stop(host);
2491 }
2492 
2493 static int __init nv_init(void)
2494 {
2495 	return pci_register_driver(&nv_pci_driver);
2496 }
2497 
2498 static void __exit nv_exit(void)
2499 {
2500 	pci_unregister_driver(&nv_pci_driver);
2501 }
2502 
2503 module_init(nv_init);
2504 module_exit(nv_exit);
2505 module_param_named(adma, adma_enabled, bool, 0444);
2506 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2507 module_param_named(swncq, swncq_enabled, bool, 0444);
2508 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2509 
2510