xref: /openbmc/linux/drivers/ata/sata_nv.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1c82ee6d3SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2c6fd2807SJeff Garzik /*
3c6fd2807SJeff Garzik  *  sata_nv.c - NVIDIA nForce SATA
4c6fd2807SJeff Garzik  *
5c6fd2807SJeff Garzik  *  Copyright 2004 NVIDIA Corp.  All rights reserved.
6c6fd2807SJeff Garzik  *  Copyright 2004 Andrew Chew
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
919285f3cSMauro Carvalho Chehab  *  as Documentation/driver-api/libata.rst
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *  No hardware documentation available outside of NVIDIA.
12c6fd2807SJeff Garzik  *  This driver programs the NVIDIA SATA controller in a similar
13c6fd2807SJeff Garzik  *  fashion as with other PCI IDE BMDMA controllers, with a few
14c6fd2807SJeff Garzik  *  NV-specific details such as register offsets, SATA phy location,
15c6fd2807SJeff Garzik  *  hotplug info, etc.
16c6fd2807SJeff Garzik  *
17fbbb262dSRobert Hancock  *  CK804/MCP04 controllers support an alternate programming interface
18fbbb262dSRobert Hancock  *  similar to the ADMA specification (with some modifications).
19fbbb262dSRobert Hancock  *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20fbbb262dSRobert Hancock  *  sent through the legacy interface.
21c6fd2807SJeff Garzik  */
22c6fd2807SJeff Garzik 
23c6fd2807SJeff Garzik #include <linux/kernel.h>
24c6fd2807SJeff Garzik #include <linux/module.h>
255a0e3ad6STejun Heo #include <linux/gfp.h>
26c6fd2807SJeff Garzik #include <linux/pci.h>
27c6fd2807SJeff Garzik #include <linux/blkdev.h>
28c6fd2807SJeff Garzik #include <linux/delay.h>
29c6fd2807SJeff Garzik #include <linux/interrupt.h>
30c6fd2807SJeff Garzik #include <linux/device.h>
31c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
32fbbb262dSRobert Hancock #include <scsi/scsi_device.h>
33c6fd2807SJeff Garzik #include <linux/libata.h>
34c206a389SHannes Reinecke #include <trace/events/libata.h>
35c6fd2807SJeff Garzik 
36c6fd2807SJeff Garzik #define DRV_NAME			"sata_nv"
372a3103ceSJeff Garzik #define DRV_VERSION			"3.5"
38fbbb262dSRobert Hancock 
39fbbb262dSRobert Hancock #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
40c6fd2807SJeff Garzik 
/*
 * Register offsets, bit definitions and derived sizing constants for all
 * flavors of the nForce SATA controller (legacy BMDMA, CK804/MCP04 ADMA
 * and MCP55 software NCQ).
 */
enum {
	NV_MMIO_BAR			= 5,	/* PCI BAR holding the MMIO register space */

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,	/* CK804 moved these into BAR5 space */
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	/* NV_INT_PM deliberately excluded from the serviced mask */
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA sizing: each of the 32 CPBs is 128 bytes and is paired with
	 * a 1024-byte-aligned external APRD table of 16-byte entries. */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	/* +5 for the APRDs embedded in the CPB itself (struct nv_adma_cpb) */
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers (relative to the per-port block) */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS*/
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits*/
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
187c6fd2807SJeff Garzik 
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* DMA address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
196fbbb262dSRobert Hancock 
/* Bits encoded in the __le16 taskfile words of a CPB (nv_adma_cpb.tf) */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
206fbbb262dSRobert Hancock 
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd.
   Layout is fixed by hardware; byte offsets noted per field.  Total size
   must match NV_ADMA_CPB_SZ (128 bytes). */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0: NV_CPB_RESP_* written back by HW */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2: NV_CPB_CTL_* set by driver */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119: DMA addr of external APRD table */
	__le64			reserved3;     /* 120-127 */
};
225fbbb262dSRobert Hancock 
226fbbb262dSRobert Hancock 
/* Per-port private data for the ADMA programming interface */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (CPU view) */
	dma_addr_t		cpb_dma;	/* and its DMA mapping */
	struct nv_adma_prd	*aprd;		/* external APRD tables (CPU view) */
	dma_addr_t		aprd_dma;	/* and their DMA mapping */
	void __iomem		*ctl_block;	/* per-port ADMA registers */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* DMA mask used while in ADMA mode */
	u8			flags;		/* NV_ADMA_PORT_* state bits */
	int			last_issue_ncq;	/* was the last issued command NCQ? */
};
239fbbb262dSRobert Hancock 
/* Host private data: remembers which nv_host_type variant was probed */
struct nv_host_priv {
	unsigned long		type;
};
243cdf56bcfSRobert Hancock 
/* FIFO of deferred command tags for software NCQ (head/tail ring) */
struct defer_queue {
	u32		defer_bits;	/* bitmap of tags currently queued */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
250f140f0f1SKuan Luo 
/* ncq_flags bits: which FIS types have been observed during NCQ analysis */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
257f140f0f1SKuan Luo 
/* Per-port private data for the MCP55 software-NCQ implementation */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* bitmap of in-flight tags */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis: per-tag bitmaps of seen FIS types */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;	/* ncq_saw_* bits */
};
278f140f0f1SKuan Luo 
279f140f0f1SKuan Luo 
280fbbb262dSRobert Hancock #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
281fbbb262dSRobert Hancock 
282c6fd2807SJeff Garzik static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
28358eb8cd5SBartlomiej Zolnierkiewicz #ifdef CONFIG_PM_SLEEP
284cdf56bcfSRobert Hancock static int nv_pci_device_resume(struct pci_dev *pdev);
285438ac6d5STejun Heo #endif
286cca3974eSJeff Garzik static void nv_ck804_host_stop(struct ata_host *host);
2877d12e780SDavid Howells static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
2887d12e780SDavid Howells static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
2897d12e780SDavid Howells static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
29082ef04fbSTejun Heo static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
29182ef04fbSTejun Heo static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
292c6fd2807SJeff Garzik 
2937f4774b3STejun Heo static int nv_hardreset(struct ata_link *link, unsigned int *class,
294e8caa3c7STejun Heo 			unsigned long deadline);
295c6fd2807SJeff Garzik static void nv_nf2_freeze(struct ata_port *ap);
296c6fd2807SJeff Garzik static void nv_nf2_thaw(struct ata_port *ap);
297c6fd2807SJeff Garzik static void nv_ck804_freeze(struct ata_port *ap);
298c6fd2807SJeff Garzik static void nv_ck804_thaw(struct ata_port *ap);
299fbbb262dSRobert Hancock static int nv_adma_slave_config(struct scsi_device *sdev);
3002dec7555SRobert Hancock static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
30195364f36SJiri Slaby static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
302fbbb262dSRobert Hancock static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
303fbbb262dSRobert Hancock static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
304fbbb262dSRobert Hancock static void nv_adma_irq_clear(struct ata_port *ap);
305fbbb262dSRobert Hancock static int nv_adma_port_start(struct ata_port *ap);
306fbbb262dSRobert Hancock static void nv_adma_port_stop(struct ata_port *ap);
307438ac6d5STejun Heo #ifdef CONFIG_PM
308cdf56bcfSRobert Hancock static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
309cdf56bcfSRobert Hancock static int nv_adma_port_resume(struct ata_port *ap);
310438ac6d5STejun Heo #endif
31153014e25SRobert Hancock static void nv_adma_freeze(struct ata_port *ap);
31253014e25SRobert Hancock static void nv_adma_thaw(struct ata_port *ap);
313fbbb262dSRobert Hancock static void nv_adma_error_handler(struct ata_port *ap);
314fbbb262dSRobert Hancock static void nv_adma_host_stop(struct ata_host *host);
315f5ecac2dSRobert Hancock static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
316f2fb344bSRobert Hancock static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
317c6fd2807SJeff Garzik 
318f140f0f1SKuan Luo static void nv_mcp55_thaw(struct ata_port *ap);
319f140f0f1SKuan Luo static void nv_mcp55_freeze(struct ata_port *ap);
320f140f0f1SKuan Luo static void nv_swncq_error_handler(struct ata_port *ap);
321f140f0f1SKuan Luo static int nv_swncq_slave_config(struct scsi_device *sdev);
322f140f0f1SKuan Luo static int nv_swncq_port_start(struct ata_port *ap);
32395364f36SJiri Slaby static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
324f140f0f1SKuan Luo static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
325f140f0f1SKuan Luo static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
326f140f0f1SKuan Luo static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
327f140f0f1SKuan Luo static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
328f140f0f1SKuan Luo #ifdef CONFIG_PM
329f140f0f1SKuan Luo static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
330f140f0f1SKuan Luo static int nv_swncq_port_resume(struct ata_port *ap);
331f140f0f1SKuan Luo #endif
332f140f0f1SKuan Luo 
/* Controller variants; values index into nv_port_info[] below */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};
343c6fd2807SJeff Garzik 
/* PCI ID table; driver_data is an enum nv_host_type index into nv_port_info[] */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};
362c6fd2807SJeff Garzik 
/* PCI driver glue; suspend/resume only built when PM sleep is enabled */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
373c6fd2807SJeff Garzik 
/* SCSI host template for the legacy (non-NCQ) BMDMA interface */
static const struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
377c6fd2807SJeff Garzik 
/* SCSI host template for ADMA: queue depth and SG limits come from the
 * ADMA CPB/APRD sizing constants. */
static const struct scsi_host_template nv_adma_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth     = ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
388fbbb262dSRobert Hancock 
/* SCSI host template for software NCQ on MCP5x */
static const struct scsi_host_template nv_swncq_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth     = ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
399f140f0f1SKuan Luo 
4007f4774b3STejun Heo /*
4017f4774b3STejun Heo  * NV SATA controllers have various different problems with hardreset
4027f4774b3STejun Heo  * protocol depending on the specific controller and device.
4037f4774b3STejun Heo  *
4047f4774b3STejun Heo  * GENERIC:
4057f4774b3STejun Heo  *
4067f4774b3STejun Heo  *  bko11195 reports that link doesn't come online after hardreset on
4077f4774b3STejun Heo  *  generic nv's and there have been several other similar reports on
4087f4774b3STejun Heo  *  linux-ide.
4097f4774b3STejun Heo  *
4107f4774b3STejun Heo  *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
4117f4774b3STejun Heo  *  softreset.
4127f4774b3STejun Heo  *
4137f4774b3STejun Heo  * NF2/3:
4147f4774b3STejun Heo  *
4157f4774b3STejun Heo  *  bko3352 reports nf2/3 controllers can't determine device signature
4167f4774b3STejun Heo  *  reliably after hardreset.  The following thread reports detection
4177f4774b3STejun Heo  *  failure on cold boot with the standard debouncing timing.
4187f4774b3STejun Heo  *
4197f4774b3STejun Heo  *  http://thread.gmane.org/gmane.linux.ide/34098
4207f4774b3STejun Heo  *
4217f4774b3STejun Heo  *  bko12176 reports that hardreset fails to bring up the link during
4227f4774b3STejun Heo  *  boot on nf2.
4237f4774b3STejun Heo  *
4247f4774b3STejun Heo  * CK804:
4257f4774b3STejun Heo  *
4267f4774b3STejun Heo  *  For initial probing after boot and hot plugging, hardreset mostly
4277f4774b3STejun Heo  *  works fine on CK804 but curiously, reprobing on the initial port
4287f4774b3STejun Heo  *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
4297f4774b3STejun Heo  *  FIS in somewhat undeterministic way.
4307f4774b3STejun Heo  *
4317f4774b3STejun Heo  * SWNCQ:
4327f4774b3STejun Heo  *
4337f4774b3STejun Heo  *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
4347f4774b3STejun Heo  *  hardreset should be used and hardreset can't report proper
4357f4774b3STejun Heo  *  signature, which suggests that mcp5x is closer to nf2 as long as
4367f4774b3STejun Heo  *  reset quirkiness is concerned.
4377f4774b3STejun Heo  *
4387f4774b3STejun Heo  *  bko12703 reports that boot probing fails for intel SSD with
4397f4774b3STejun Heo  *  hardreset.  Link fails to come online.  Softreset works fine.
4407f4774b3STejun Heo  *
4417f4774b3STejun Heo  * The failures are varied but the following patterns seem true for
4427f4774b3STejun Heo  * all flavors.
4437f4774b3STejun Heo  *
4447f4774b3STejun Heo  * - Softreset during boot always works.
4457f4774b3STejun Heo  *
4467f4774b3STejun Heo  * - Hardreset during boot sometimes fails to bring up the link on
4477f4774b3STejun Heo  *   certain comibnations and device signature acquisition is
4487f4774b3STejun Heo  *   unreliable.
4497f4774b3STejun Heo  *
4507f4774b3STejun Heo  * - Hardreset is often necessary after hotplug.
4517f4774b3STejun Heo  *
4527f4774b3STejun Heo  * So, preferring softreset for boot probing and error handling (as
4537f4774b3STejun Heo  * hardreset might bring down the link) but using hardreset for
4547f4774b3STejun Heo  * post-boot probing should work around the above issues in most
4557f4774b3STejun Heo  * cases.  Define nv_hardreset() which only kicks in for post-boot
4567f4774b3STejun Heo  * probing and use it for all variants.
4577f4774b3STejun Heo  */
/* Base port ops shared by all variants; see the hardreset discussion above
 * for why nv_hardreset is used everywhere. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};
465c6fd2807SJeff Garzik 
/* nForce2/3: generic ops plus chip-specific freeze/thaw */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
471c6fd2807SJeff Garzik 
/* CK804: generic ops plus chip-specific freeze/thaw and host_stop */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
478c6fd2807SJeff Garzik 
/* ADMA: builds on CK804 ops, replacing the command path (qc_prep/issue),
 * taskfile read, IRQ handling, EH and port lifecycle with ADMA-aware
 * versions. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
502fbbb262dSRobert Hancock 
/* Software NCQ on MCP55: generic ops with an NCQ-capable command path,
 * MCP55 freeze/thaw and its own EH/PM hooks. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
520f140f0f1SKuan Luo 
/* Per-variant probe data carried in ata_port_info.private_data:
 * the interrupt handler and SCSI host template to register. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	const struct scsi_host_template	*sht;
};

/* Build an anonymous nv_pi_priv via a compound literal */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
52895947193STejun Heo 
/* Per-variant port configuration, indexed by enum nv_host_type.
 * Only the ADMA and SWNCQ entries advertise ATA_FLAG_NCQ. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x (falls back to generic ops when SWNCQ is not used) */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
585c6fd2807SJeff Garzik 
/* Standard module metadata; nv_pci_tbl drives PCI device binding. */
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/*
 * Module parameters (presumably registered via module_param_named()
 * elsewhere in this file -- not visible in this chunk).
 */
static bool adma_enabled;		/* ADMA support: off by default */
static bool swncq_enabled = true;	/* software NCQ: on by default */
static bool msi_enabled;		/* MSI interrupts: off by default */
595fbbb262dSRobert Hancock 
/*
 * nv_adma_register_mode - switch a port from ADMA mode to legacy
 * register mode.
 *
 * Waits (up to 20 polls of 50ns each) for the ADMA engine to go idle,
 * clears the GO bit in the ADMA control register, then waits for the
 * LEGACY status bit before flagging the port as being in register mode.
 * No-op if the port is already in register mode.
 */
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* Already in register mode?  Nothing to do. */
	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	/* Wait for the ADMA engine to become idle before switching. */
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
			      status);

	/* Clear GO to stop the ADMA engine. */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	/* Wait for the controller to confirm it is in legacy mode. */
	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			      status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
6332dec7555SRobert Hancock 
/*
 * nv_adma_mode - switch a port from legacy register mode back to ADMA
 * mode.
 *
 * Sets the GO bit in the ADMA control register and polls (up to 20
 * polls of 50ns each) until the LEGACY status bit clears and the IDLE
 * bit is set, then clears the register-mode flag.  No-op if the port
 * is already in ADMA mode.  Must not be called while the port is set
 * up for ATAPI/legacy-only operation (WARN_ON below).
 */
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* Already in ADMA mode?  Nothing to do. */
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* ADMA must not be re-entered once ATAPI/legacy setup is done. */
	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	/* Set GO to start the ADMA engine. */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	/* Wait for LEGACY to clear and IDLE to assert. */
	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_warn(ap,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
6632dec7555SRobert Hancock 
/*
 * nv_adma_slave_config - per-device SCSI configuration for ADMA ports.
 *
 * ADMA cannot carry ATAPI commands, so when an ATAPI device is attached
 * this routine restricts the segment boundary, s/g table size and DMA
 * mask to what the legacy (32-bit DMA) interface supports, switches the
 * port to register mode, and disables ADMA for the port through the
 * NV_MCP_SATA_CFG_20 PCI config register.  For ATA devices the full
 * ADMA limits are (re)applied instead.
 *
 * Returns the result of ata_scsi_slave_config() or of the DMA-mask
 * update.
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	/* Serialize against the interrupt handler and the other port. */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* Each port has its own enable/PWB-enable bits in CFG_20. */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* Only touch PCI config space if the value actually changed. */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping.  If either SCSI device is not allocated
		 * yet, it's OK since that port will discover its correct
		 * setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
754fbbb262dSRobert Hancock 
nv_adma_check_atapi_dma(struct ata_queued_cmd * qc)7552dec7555SRobert Hancock static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
7562dec7555SRobert Hancock {
7572dec7555SRobert Hancock 	struct nv_adma_port_priv *pp = qc->ap->private_data;
7582dec7555SRobert Hancock 	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
7592dec7555SRobert Hancock }
7602dec7555SRobert Hancock 
/*
 * nv_adma_tf_read - read the taskfile, forcing the port into register
 * mode first so the legacy taskfile registers are accessible.
 */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
774f2fb344bSRobert Hancock 
/*
 * nv_adma_tf_to_cpb - encode an ATA taskfile into CPB register-write
 * entries.
 *
 * Each 16-bit CPB entry is (register address << 8) | value, with WNB
 * set on the first entry, CMDEND on the command entry, and unused
 * slots (up to 12 total) filled with IGN.  Returns the number of
 * entries written (always 12, given the IGN padding loop).
 */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* LBA48: emit the HOB (previous) register values first. */
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	/* Command entry is last; CMDEND marks the end of register writes. */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

	/* Pad remaining slots so the CPB always contains 12 entries. */
	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
806fbbb262dSRobert Hancock 
/*
 * nv_adma_check_cpb - examine one CPB's response flags.
 *
 * @force_err: treat the CPB as failed even without error flags set
 *             (used when the notifier-error register flagged this tag).
 *
 * Returns 1 if the command completed successfully, 0 if it is still
 * pending, and -1 on error -- in which case EH has been invoked
 * (ata_port_freeze() for CPB/unknown errors, ata_port_abort() for
 * device errors) and the caller must stop processing CPBs.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}
851fbbb262dSRobert Hancock 
/*
 * nv_host_intr - handle a legacy (non-ADMA) interrupt for one port.
 *
 * @irq_stat: this port's slice of the CK804 interrupt status byte.
 *
 * Returns nonzero if the interrupt was handled (hotplug freeze, spurious
 * device interrupt acknowledged, or an active qc serviced via the BMDMA
 * path), 0 if the interrupt was not ours.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* Read status to acknowledge the interrupt. */
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}
8752dec7555SRobert Hancock 
/*
 * nv_adma_interrupt - shared interrupt handler for ADMA-capable hosts.
 *
 * For each port: routes to the legacy handler when ADMA is disabled or
 * the port is in register mode, otherwise reads and clears the ADMA
 * notifier/status registers, freezes the port on hotplug/timeout/SError
 * conditions, and completes any finished CPBs.  Both ports' notifier
 * clear registers are written together at the end, as required by the
 * hardware (see comment below).
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode. Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;	/* ffs() is 1-based */
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					/* error: EH invoked, stop scanning */
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1011fbbb262dSRobert Hancock 
/*
 * nv_adma_freeze - freeze the port: mask its interrupts.
 *
 * Performs the CK804 legacy freeze, then additionally clears pending
 * CK804 notifications and masks the ADMA interrupt enables -- unless
 * the port is running in legacy/ATAPI mode, where only the CK804
 * freeze applies.
 */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	/* Legacy-only port: nothing further to mask. */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
103353014e25SRobert Hancock 
/*
 * nv_adma_thaw - thaw the port: re-enable its interrupts.
 *
 * Mirror of nv_adma_freeze(): performs the CK804 legacy thaw, then
 * re-enables the ADMA interrupt and hotplug-interrupt enables unless
 * the port is running in legacy/ATAPI mode.
 */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	/* Legacy-only port: nothing further to unmask. */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
105153014e25SRobert Hancock 
nv_adma_irq_clear(struct ata_port * ap)1052fbbb262dSRobert Hancock static void nv_adma_irq_clear(struct ata_port *ap)
1053fbbb262dSRobert Hancock {
1054cdf56bcfSRobert Hancock 	struct nv_adma_port_priv *pp = ap->private_data;
1055cdf56bcfSRobert Hancock 	void __iomem *mmio = pp->ctl_block;
105653014e25SRobert Hancock 	u32 notifier_clears[2];
105753014e25SRobert Hancock 
105853014e25SRobert Hancock 	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
105937f65b8bSTejun Heo 		ata_bmdma_irq_clear(ap);
106053014e25SRobert Hancock 		return;
106153014e25SRobert Hancock 	}
106253014e25SRobert Hancock 
106353014e25SRobert Hancock 	/* clear any outstanding CK804 notifications */
106453014e25SRobert Hancock 	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
106553014e25SRobert Hancock 		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1066fbbb262dSRobert Hancock 
1067fbbb262dSRobert Hancock 	/* clear ADMA status */
106853014e25SRobert Hancock 	writew(0xffff, mmio + NV_ADMA_STAT);
1069fbbb262dSRobert Hancock 
107053014e25SRobert Hancock 	/* clear notifiers - note both ports need to be written with
107153014e25SRobert Hancock 	   something even though we are only clearing on one */
107253014e25SRobert Hancock 	if (ap->port_no == 0) {
107353014e25SRobert Hancock 		notifier_clears[0] = 0xFFFFFFFF;
107453014e25SRobert Hancock 		notifier_clears[1] = 0;
107553014e25SRobert Hancock 	} else {
107653014e25SRobert Hancock 		notifier_clears[0] = 0;
107753014e25SRobert Hancock 		notifier_clears[1] = 0xFFFFFFFF;
107853014e25SRobert Hancock 	}
107953014e25SRobert Hancock 	pp = ap->host->ports[0]->private_data;
108053014e25SRobert Hancock 	writel(notifier_clears[0], pp->notifier_clear_block);
108153014e25SRobert Hancock 	pp = ap->host->ports[1]->private_data;
108253014e25SRobert Hancock 	writel(notifier_clears[1], pp->notifier_clear_block);
1083fbbb262dSRobert Hancock }
1084fbbb262dSRobert Hancock 
nv_adma_post_internal_cmd(struct ata_queued_cmd * qc)1085f5ecac2dSRobert Hancock static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1086fbbb262dSRobert Hancock {
1087f5ecac2dSRobert Hancock 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1088fbbb262dSRobert Hancock 
1089f5ecac2dSRobert Hancock 	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1090fe06e5f9STejun Heo 		ata_bmdma_post_internal_cmd(qc);
1091fbbb262dSRobert Hancock }
1092fbbb262dSRobert Hancock 
/*
 * nv_adma_port_start - allocate and initialize per-port ADMA state.
 *
 * Sets a 32-bit DMA mask before allocating the legacy BMDMA/PRD
 * resources, then raises it to 64-bit for the CPB/APRD table
 * allocation.  Maps the port's ADMA control, general-control and
 * notifier-clear register blocks, allocates the DMA-coherent CPB and
 * s/g (APRD) memory, and brings the ADMA engine up in register mode
 * with interrupts enabled after a channel reset.
 *
 * Returns 0 on success or a negative errno.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * pad buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	/* devm-managed: freed automatically on detach. */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* Map this port's slice of the ADMA register space. */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * raise the DMA mask to allocate the CPB/APRD table.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	/* Remember the full ADMA mask for nv_adma_slave_config(). */
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	/* Program the CPB table base address (split into low/high words). */
	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* Pulse the channel-reset bit (>= 1us assert). */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1186fbbb262dSRobert Hancock 
nv_adma_port_stop(struct ata_port * ap)1187fbbb262dSRobert Hancock static void nv_adma_port_stop(struct ata_port *ap)
1188fbbb262dSRobert Hancock {
1189fbbb262dSRobert Hancock 	struct nv_adma_port_priv *pp = ap->private_data;
1190cdf56bcfSRobert Hancock 	void __iomem *mmio = pp->ctl_block;
1191fbbb262dSRobert Hancock 
1192fbbb262dSRobert Hancock 	writew(0, mmio + NV_ADMA_CTL);
1193fbbb262dSRobert Hancock }
1194fbbb262dSRobert Hancock 
1195438ac6d5STejun Heo #ifdef CONFIG_PM
/* Quiesce the ADMA port ahead of a system suspend. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *ctl = pp->ctl_block;

	/* Drop back to register mode - this also clears the GO bit. */
	nv_adma_register_mode(ap);

	/* Zero the CPB fetch count. */
	writew(0, ctl + NV_ADMA_CPB_COUNT);

	/* Mask interrupts and shut the port down. */
	writew(0, ctl + NV_ADMA_CTL);

	return 0;
}
1212cdf56bcfSRobert Hancock 
nv_adma_port_resume(struct ata_port * ap)1213cdf56bcfSRobert Hancock static int nv_adma_port_resume(struct ata_port *ap)
1214cdf56bcfSRobert Hancock {
1215cdf56bcfSRobert Hancock 	struct nv_adma_port_priv *pp = ap->private_data;
1216cdf56bcfSRobert Hancock 	void __iomem *mmio = pp->ctl_block;
1217cdf56bcfSRobert Hancock 	u16 tmp;
1218cdf56bcfSRobert Hancock 
1219cdf56bcfSRobert Hancock 	/* set CPB block location */
1220cdf56bcfSRobert Hancock 	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1221cdf56bcfSRobert Hancock 	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1222cdf56bcfSRobert Hancock 
1223cdf56bcfSRobert Hancock 	/* clear any outstanding interrupt conditions */
1224cdf56bcfSRobert Hancock 	writew(0xffff, mmio + NV_ADMA_STAT);
1225cdf56bcfSRobert Hancock 
1226cdf56bcfSRobert Hancock 	/* initialize port variables */
1227cdf56bcfSRobert Hancock 	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1228cdf56bcfSRobert Hancock 
1229cdf56bcfSRobert Hancock 	/* clear CPB fetch count */
1230cdf56bcfSRobert Hancock 	writew(0, mmio + NV_ADMA_CPB_COUNT);
1231cdf56bcfSRobert Hancock 
1232cdf56bcfSRobert Hancock 	/* clear GO for register mode, enable interrupt */
1233cdf56bcfSRobert Hancock 	tmp = readw(mmio + NV_ADMA_CTL);
12345ce0cf6fSRobert Hancock 	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
12355ce0cf6fSRobert Hancock 		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1236cdf56bcfSRobert Hancock 
1237cdf56bcfSRobert Hancock 	tmp = readw(mmio + NV_ADMA_CTL);
1238cdf56bcfSRobert Hancock 	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
12395ce0cf6fSRobert Hancock 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1240cdf56bcfSRobert Hancock 	udelay(1);
1241cdf56bcfSRobert Hancock 	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
12425ce0cf6fSRobert Hancock 	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1243cdf56bcfSRobert Hancock 
1244cdf56bcfSRobert Hancock 	return 0;
1245cdf56bcfSRobert Hancock }
1246438ac6d5STejun Heo #endif
1247fbbb262dSRobert Hancock 
nv_adma_setup_port(struct ata_port * ap)12489a829ccfSTejun Heo static void nv_adma_setup_port(struct ata_port *ap)
1249fbbb262dSRobert Hancock {
12509a829ccfSTejun Heo 	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
12519a829ccfSTejun Heo 	struct ata_ioports *ioport = &ap->ioaddr;
1252fbbb262dSRobert Hancock 
12539a829ccfSTejun Heo 	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1254fbbb262dSRobert Hancock 
12550d5ff566STejun Heo 	ioport->cmd_addr	= mmio;
12560d5ff566STejun Heo 	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1257fbbb262dSRobert Hancock 	ioport->error_addr	=
12580d5ff566STejun Heo 	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
12590d5ff566STejun Heo 	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
12600d5ff566STejun Heo 	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
12610d5ff566STejun Heo 	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
12620d5ff566STejun Heo 	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
12630d5ff566STejun Heo 	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1264fbbb262dSRobert Hancock 	ioport->status_addr	=
12650d5ff566STejun Heo 	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1266fbbb262dSRobert Hancock 	ioport->altstatus_addr	=
12670d5ff566STejun Heo 	ioport->ctl_addr	= mmio + 0x20;
1268fbbb262dSRobert Hancock }
1269fbbb262dSRobert Hancock 
nv_adma_host_init(struct ata_host * host)12709a829ccfSTejun Heo static int nv_adma_host_init(struct ata_host *host)
1271fbbb262dSRobert Hancock {
12729a829ccfSTejun Heo 	struct pci_dev *pdev = to_pci_dev(host->dev);
1273fbbb262dSRobert Hancock 	unsigned int i;
1274fbbb262dSRobert Hancock 	u32 tmp32;
1275fbbb262dSRobert Hancock 
1276fbbb262dSRobert Hancock 	/* enable ADMA on the ports */
1277fbbb262dSRobert Hancock 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278fbbb262dSRobert Hancock 	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279fbbb262dSRobert Hancock 		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280fbbb262dSRobert Hancock 		 NV_MCP_SATA_CFG_20_PORT1_EN |
1281fbbb262dSRobert Hancock 		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1282fbbb262dSRobert Hancock 
1283fbbb262dSRobert Hancock 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1284fbbb262dSRobert Hancock 
12859a829ccfSTejun Heo 	for (i = 0; i < host->n_ports; i++)
12869a829ccfSTejun Heo 		nv_adma_setup_port(host->ports[i]);
1287fbbb262dSRobert Hancock 
1288fbbb262dSRobert Hancock 	return 0;
1289fbbb262dSRobert Hancock }
1290fbbb262dSRobert Hancock 
nv_adma_fill_aprd(struct ata_queued_cmd * qc,struct scatterlist * sg,int idx,struct nv_adma_prd * aprd)1291fbbb262dSRobert Hancock static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292fbbb262dSRobert Hancock 			      struct scatterlist *sg,
1293fbbb262dSRobert Hancock 			      int idx,
1294fbbb262dSRobert Hancock 			      struct nv_adma_prd *aprd)
1295fbbb262dSRobert Hancock {
129641949ed5SRobert Hancock 	u8 flags = 0;
1297fbbb262dSRobert Hancock 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1298fbbb262dSRobert Hancock 		flags |= NV_APRD_WRITE;
1299fbbb262dSRobert Hancock 	if (idx == qc->n_elem - 1)
1300fbbb262dSRobert Hancock 		flags |= NV_APRD_END;
1301fbbb262dSRobert Hancock 	else if (idx != 4)
1302fbbb262dSRobert Hancock 		flags |= NV_APRD_CONT;
1303fbbb262dSRobert Hancock 
1304fbbb262dSRobert Hancock 	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1305fbbb262dSRobert Hancock 	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
13062dec7555SRobert Hancock 	aprd->flags = flags;
130741949ed5SRobert Hancock 	aprd->packet_len = 0;
1308fbbb262dSRobert Hancock }
1309fbbb262dSRobert Hancock 
nv_adma_fill_sg(struct ata_queued_cmd * qc,struct nv_adma_cpb * cpb)1310fbbb262dSRobert Hancock static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311fbbb262dSRobert Hancock {
1312fbbb262dSRobert Hancock 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1313fbbb262dSRobert Hancock 	struct nv_adma_prd *aprd;
1314fbbb262dSRobert Hancock 	struct scatterlist *sg;
1315ff2aeb1eSTejun Heo 	unsigned int si;
1316fbbb262dSRobert Hancock 
1317ff2aeb1eSTejun Heo 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1318ff2aeb1eSTejun Heo 		aprd = (si < 5) ? &cpb->aprd[si] :
13194e5b6260SJens Axboe 			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1320ff2aeb1eSTejun Heo 		nv_adma_fill_aprd(qc, sg, si, aprd);
1321fbbb262dSRobert Hancock 	}
1322ff2aeb1eSTejun Heo 	if (si > 5)
13234e5b6260SJens Axboe 		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
132441949ed5SRobert Hancock 	else
132541949ed5SRobert Hancock 		cpb->next_aprd = cpu_to_le64(0);
1326fbbb262dSRobert Hancock }
1327fbbb262dSRobert Hancock 
nv_adma_use_reg_mode(struct ata_queued_cmd * qc)1328382a6652SRobert Hancock static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1329382a6652SRobert Hancock {
1330382a6652SRobert Hancock 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1331382a6652SRobert Hancock 
1332382a6652SRobert Hancock 	/* ADMA engine can only be used for non-ATAPI DMA commands,
13333f3debdbSRobert Hancock 	   or interrupt-driven no-data commands. */
1334382a6652SRobert Hancock 	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
13353f3debdbSRobert Hancock 	   (qc->tf.flags & ATA_TFLAG_POLLING))
1336382a6652SRobert Hancock 		return 1;
1337382a6652SRobert Hancock 
1338382a6652SRobert Hancock 	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1339382a6652SRobert Hancock 	   (qc->tf.protocol == ATA_PROT_NODATA))
1340382a6652SRobert Hancock 		return 0;
1341382a6652SRobert Hancock 
1342382a6652SRobert Hancock 	return 1;
1343382a6652SRobert Hancock }
1344382a6652SRobert Hancock 
nv_adma_qc_prep(struct ata_queued_cmd * qc)134595364f36SJiri Slaby static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1346fbbb262dSRobert Hancock {
1347fbbb262dSRobert Hancock 	struct nv_adma_port_priv *pp = qc->ap->private_data;
13484e5b6260SJens Axboe 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1349fbbb262dSRobert Hancock 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1350fbbb262dSRobert Hancock 		       NV_CPB_CTL_IEN;
1351fbbb262dSRobert Hancock 
1352382a6652SRobert Hancock 	if (nv_adma_use_reg_mode(qc)) {
13533f3debdbSRobert Hancock 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
13543f3debdbSRobert Hancock 			(qc->flags & ATA_QCFLAG_DMAMAP));
13552dec7555SRobert Hancock 		nv_adma_register_mode(qc->ap);
1356f47451c4STejun Heo 		ata_bmdma_qc_prep(qc);
135795364f36SJiri Slaby 		return AC_ERR_OK;
1358fbbb262dSRobert Hancock 	}
1359fbbb262dSRobert Hancock 
136041949ed5SRobert Hancock 	cpb->resp_flags = NV_CPB_RESP_DONE;
136141949ed5SRobert Hancock 	wmb();
136241949ed5SRobert Hancock 	cpb->ctl_flags = 0;
136341949ed5SRobert Hancock 	wmb();
1364fbbb262dSRobert Hancock 
1365fbbb262dSRobert Hancock 	cpb->len		= 3;
13664e5b6260SJens Axboe 	cpb->tag		= qc->hw_tag;
1367fbbb262dSRobert Hancock 	cpb->next_cpb_idx	= 0;
1368fbbb262dSRobert Hancock 
1369fbbb262dSRobert Hancock 	/* turn on NCQ flags for NCQ commands */
1370fbbb262dSRobert Hancock 	if (qc->tf.protocol == ATA_PROT_NCQ)
1371fbbb262dSRobert Hancock 		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1372fbbb262dSRobert Hancock 
1373fbbb262dSRobert Hancock 	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1374fbbb262dSRobert Hancock 
1375382a6652SRobert Hancock 	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1376fbbb262dSRobert Hancock 		nv_adma_fill_sg(qc, cpb);
1377382a6652SRobert Hancock 		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1378382a6652SRobert Hancock 	} else
1379382a6652SRobert Hancock 		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1380fbbb262dSRobert Hancock 
13815796d1c4SJeff Garzik 	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
13825796d1c4SJeff Garzik 	   until we are finished filling in all of the contents */
1383fbbb262dSRobert Hancock 	wmb();
1384fbbb262dSRobert Hancock 	cpb->ctl_flags = ctl_flags;
138541949ed5SRobert Hancock 	wmb();
138641949ed5SRobert Hancock 	cpb->resp_flags = 0;
138795364f36SJiri Slaby 
138895364f36SJiri Slaby 	return AC_ERR_OK;
1389fbbb262dSRobert Hancock }
1390fbbb262dSRobert Hancock 
nv_adma_qc_issue(struct ata_queued_cmd * qc)1391fbbb262dSRobert Hancock static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1392fbbb262dSRobert Hancock {
13932dec7555SRobert Hancock 	struct nv_adma_port_priv *pp = qc->ap->private_data;
1394cdf56bcfSRobert Hancock 	void __iomem *mmio = pp->ctl_block;
13955e5c74a5SRobert Hancock 	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1396fbbb262dSRobert Hancock 
13973f3debdbSRobert Hancock 	/* We can't handle result taskfile with NCQ commands, since
13983f3debdbSRobert Hancock 	   retrieving the taskfile switches us out of ADMA mode and would abort
13993f3debdbSRobert Hancock 	   existing commands. */
14003f3debdbSRobert Hancock 	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
14013f3debdbSRobert Hancock 		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1402a9a79dfeSJoe Perches 		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
14033f3debdbSRobert Hancock 		return AC_ERR_SYSTEM;
14043f3debdbSRobert Hancock 	}
14053f3debdbSRobert Hancock 
1406382a6652SRobert Hancock 	if (nv_adma_use_reg_mode(qc)) {
1407fbbb262dSRobert Hancock 		/* use ATA register mode */
14083f3debdbSRobert Hancock 		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
14093f3debdbSRobert Hancock 			(qc->flags & ATA_QCFLAG_DMAMAP));
1410fbbb262dSRobert Hancock 		nv_adma_register_mode(qc->ap);
1411360ff783STejun Heo 		return ata_bmdma_qc_issue(qc);
1412fbbb262dSRobert Hancock 	} else
1413fbbb262dSRobert Hancock 		nv_adma_mode(qc->ap);
1414fbbb262dSRobert Hancock 
1415fbbb262dSRobert Hancock 	/* write append register, command tag in lower 8 bits
1416fbbb262dSRobert Hancock 	   and (number of cpbs to append -1) in top 8 bits */
1417fbbb262dSRobert Hancock 	wmb();
14185e5c74a5SRobert Hancock 
14195e5c74a5SRobert Hancock 	if (curr_ncq != pp->last_issue_ncq) {
14205796d1c4SJeff Garzik 		/* Seems to need some delay before switching between NCQ and
14215796d1c4SJeff Garzik 		   non-NCQ commands, else we get command timeouts and such. */
14225e5c74a5SRobert Hancock 		udelay(20);
14235e5c74a5SRobert Hancock 		pp->last_issue_ncq = curr_ncq;
14245e5c74a5SRobert Hancock 	}
14255e5c74a5SRobert Hancock 
14264e5b6260SJens Axboe 	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1427fbbb262dSRobert Hancock 
1428fbbb262dSRobert Hancock 	return 0;
1429fbbb262dSRobert Hancock }
1430fbbb262dSRobert Hancock 
nv_generic_interrupt(int irq,void * dev_instance)14317d12e780SDavid Howells static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1432c6fd2807SJeff Garzik {
1433cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
1434c6fd2807SJeff Garzik 	unsigned int i;
1435c6fd2807SJeff Garzik 	unsigned int handled = 0;
1436c6fd2807SJeff Garzik 	unsigned long flags;
1437c6fd2807SJeff Garzik 
1438cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
1439c6fd2807SJeff Garzik 
1440cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
14413e4ec344STejun Heo 		struct ata_port *ap = host->ports[i];
1442c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
1443c6fd2807SJeff Garzik 
14449af5c9c9STejun Heo 		qc = ata_qc_from_tag(ap, ap->link.active_tag);
14453e4ec344STejun Heo 		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1446c3b28894STejun Heo 			handled += ata_bmdma_port_intr(ap, qc);
14473e4ec344STejun Heo 		} else {
14483e4ec344STejun Heo 			/*
14493e4ec344STejun Heo 			 * No request pending?  Clear interrupt status
14503e4ec344STejun Heo 			 * anyway, in case there's one pending.
14513e4ec344STejun Heo 			 */
14525682ed33STejun Heo 			ap->ops->sff_check_status(ap);
1453c6fd2807SJeff Garzik 		}
1454c6fd2807SJeff Garzik 	}
1455c6fd2807SJeff Garzik 
1456cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
1457c6fd2807SJeff Garzik 
1458c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
1459c6fd2807SJeff Garzik }
1460c6fd2807SJeff Garzik 
nv_do_interrupt(struct ata_host * host,u8 irq_stat)1461cca3974eSJeff Garzik static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1462c6fd2807SJeff Garzik {
1463c6fd2807SJeff Garzik 	int i, handled = 0;
1464c6fd2807SJeff Garzik 
1465cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
14663e4ec344STejun Heo 		handled += nv_host_intr(host->ports[i], irq_stat);
1467c6fd2807SJeff Garzik 		irq_stat >>= NV_INT_PORT_SHIFT;
1468c6fd2807SJeff Garzik 	}
1469c6fd2807SJeff Garzik 
1470c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
1471c6fd2807SJeff Garzik }
1472c6fd2807SJeff Garzik 
nv_nf2_interrupt(int irq,void * dev_instance)14737d12e780SDavid Howells static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1474c6fd2807SJeff Garzik {
1475cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
1476c6fd2807SJeff Garzik 	u8 irq_stat;
1477c6fd2807SJeff Garzik 	irqreturn_t ret;
1478c6fd2807SJeff Garzik 
1479cca3974eSJeff Garzik 	spin_lock(&host->lock);
14800d5ff566STejun Heo 	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1481cca3974eSJeff Garzik 	ret = nv_do_interrupt(host, irq_stat);
1482cca3974eSJeff Garzik 	spin_unlock(&host->lock);
1483c6fd2807SJeff Garzik 
1484c6fd2807SJeff Garzik 	return ret;
1485c6fd2807SJeff Garzik }
1486c6fd2807SJeff Garzik 
nv_ck804_interrupt(int irq,void * dev_instance)14877d12e780SDavid Howells static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1488c6fd2807SJeff Garzik {
1489cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
1490c6fd2807SJeff Garzik 	u8 irq_stat;
1491c6fd2807SJeff Garzik 	irqreturn_t ret;
1492c6fd2807SJeff Garzik 
1493cca3974eSJeff Garzik 	spin_lock(&host->lock);
14940d5ff566STejun Heo 	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1495cca3974eSJeff Garzik 	ret = nv_do_interrupt(host, irq_stat);
1496cca3974eSJeff Garzik 	spin_unlock(&host->lock);
1497c6fd2807SJeff Garzik 
1498c6fd2807SJeff Garzik 	return ret;
1499c6fd2807SJeff Garzik }
1500c6fd2807SJeff Garzik 
/* Read a SATA SCR register; only SStatus..SControl are mapped. */
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));

	return 0;
}
1509c6fd2807SJeff Garzik 
/* Write a SATA SCR register; only SStatus..SControl are mapped. */
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));

	return 0;
}
1518c6fd2807SJeff Garzik 
nv_hardreset(struct ata_link * link,unsigned int * class,unsigned long deadline)15197f4774b3STejun Heo static int nv_hardreset(struct ata_link *link, unsigned int *class,
1520e8caa3c7STejun Heo 			unsigned long deadline)
1521e8caa3c7STejun Heo {
15227f4774b3STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1523e8caa3c7STejun Heo 
15247f4774b3STejun Heo 	/* Do hardreset iff it's post-boot probing, please read the
15257f4774b3STejun Heo 	 * comment above port ops for details.
15267f4774b3STejun Heo 	 */
15277f4774b3STejun Heo 	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
15287f4774b3STejun Heo 	    !ata_dev_enabled(link->device))
15297f4774b3STejun Heo 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
15307f4774b3STejun Heo 				    NULL, NULL);
15316489e326STejun Heo 	else {
1532*d14d41ccSSergey Shtylyov 		const unsigned int *timing = sata_ehc_deb_timing(ehc);
15336489e326STejun Heo 		int rc;
15346489e326STejun Heo 
15356489e326STejun Heo 		if (!(ehc->i.flags & ATA_EHI_QUIET))
1536a9a79dfeSJoe Perches 			ata_link_info(link,
1537a9a79dfeSJoe Perches 				      "nv: skipping hardreset on occupied port\n");
15386489e326STejun Heo 
15396489e326STejun Heo 		/* make sure the link is online */
15406489e326STejun Heo 		rc = sata_link_resume(link, timing, deadline);
15416489e326STejun Heo 		/* whine about phy resume failure but proceed */
15426489e326STejun Heo 		if (rc && rc != -EOPNOTSUPP)
1543a9a79dfeSJoe Perches 			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1544a9a79dfeSJoe Perches 				      rc);
15456489e326STejun Heo 	}
15467f4774b3STejun Heo 
15477f4774b3STejun Heo 	/* device signature acquisition is unreliable */
15487f4774b3STejun Heo 	return -EAGAIN;
1549e8caa3c7STejun Heo }
1550e8caa3c7STejun Heo 
nv_nf2_freeze(struct ata_port * ap)1551c6fd2807SJeff Garzik static void nv_nf2_freeze(struct ata_port *ap)
1552c6fd2807SJeff Garzik {
15530d5ff566STejun Heo 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1554c6fd2807SJeff Garzik 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1555c6fd2807SJeff Garzik 	u8 mask;
1556c6fd2807SJeff Garzik 
15570d5ff566STejun Heo 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1558c6fd2807SJeff Garzik 	mask &= ~(NV_INT_ALL << shift);
15590d5ff566STejun Heo 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1560c6fd2807SJeff Garzik }
1561c6fd2807SJeff Garzik 
nv_nf2_thaw(struct ata_port * ap)1562c6fd2807SJeff Garzik static void nv_nf2_thaw(struct ata_port *ap)
1563c6fd2807SJeff Garzik {
15640d5ff566STejun Heo 	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1565c6fd2807SJeff Garzik 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1566c6fd2807SJeff Garzik 	u8 mask;
1567c6fd2807SJeff Garzik 
15680d5ff566STejun Heo 	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1569c6fd2807SJeff Garzik 
15700d5ff566STejun Heo 	mask = ioread8(scr_addr + NV_INT_ENABLE);
1571c6fd2807SJeff Garzik 	mask |= (NV_INT_MASK << shift);
15720d5ff566STejun Heo 	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1573c6fd2807SJeff Garzik }
1574c6fd2807SJeff Garzik 
nv_ck804_freeze(struct ata_port * ap)1575c6fd2807SJeff Garzik static void nv_ck804_freeze(struct ata_port *ap)
1576c6fd2807SJeff Garzik {
15770d5ff566STejun Heo 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578c6fd2807SJeff Garzik 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1579c6fd2807SJeff Garzik 	u8 mask;
1580c6fd2807SJeff Garzik 
1581c6fd2807SJeff Garzik 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1582c6fd2807SJeff Garzik 	mask &= ~(NV_INT_ALL << shift);
1583c6fd2807SJeff Garzik 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1584c6fd2807SJeff Garzik }
1585c6fd2807SJeff Garzik 
nv_ck804_thaw(struct ata_port * ap)1586c6fd2807SJeff Garzik static void nv_ck804_thaw(struct ata_port *ap)
1587c6fd2807SJeff Garzik {
15880d5ff566STejun Heo 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1589c6fd2807SJeff Garzik 	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1590c6fd2807SJeff Garzik 	u8 mask;
1591c6fd2807SJeff Garzik 
1592c6fd2807SJeff Garzik 	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1593c6fd2807SJeff Garzik 
1594c6fd2807SJeff Garzik 	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1595c6fd2807SJeff Garzik 	mask |= (NV_INT_MASK << shift);
1596c6fd2807SJeff Garzik 	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1597c6fd2807SJeff Garzik }
1598c6fd2807SJeff Garzik 
nv_mcp55_freeze(struct ata_port * ap)1599f140f0f1SKuan Luo static void nv_mcp55_freeze(struct ata_port *ap)
1600f140f0f1SKuan Luo {
1601f140f0f1SKuan Luo 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1602f140f0f1SKuan Luo 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1603f140f0f1SKuan Luo 	u32 mask;
1604f140f0f1SKuan Luo 
1605f140f0f1SKuan Luo 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1606f140f0f1SKuan Luo 
1607f140f0f1SKuan Luo 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1608f140f0f1SKuan Luo 	mask &= ~(NV_INT_ALL_MCP55 << shift);
1609f140f0f1SKuan Luo 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1610f140f0f1SKuan Luo }
1611f140f0f1SKuan Luo 
nv_mcp55_thaw(struct ata_port * ap)1612f140f0f1SKuan Luo static void nv_mcp55_thaw(struct ata_port *ap)
1613f140f0f1SKuan Luo {
1614f140f0f1SKuan Luo 	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1615f140f0f1SKuan Luo 	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1616f140f0f1SKuan Luo 	u32 mask;
1617f140f0f1SKuan Luo 
1618f140f0f1SKuan Luo 	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1619f140f0f1SKuan Luo 
1620f140f0f1SKuan Luo 	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1621f140f0f1SKuan Luo 	mask |= (NV_INT_MASK_MCP55 << shift);
1622f140f0f1SKuan Luo 	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1623f140f0f1SKuan Luo }
1624f140f0f1SKuan Luo 
nv_adma_error_handler(struct ata_port * ap)1625fbbb262dSRobert Hancock static void nv_adma_error_handler(struct ata_port *ap)
1626fbbb262dSRobert Hancock {
1627fbbb262dSRobert Hancock 	struct nv_adma_port_priv *pp = ap->private_data;
1628fbbb262dSRobert Hancock 	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1629cdf56bcfSRobert Hancock 		void __iomem *mmio = pp->ctl_block;
1630fbbb262dSRobert Hancock 		int i;
1631fbbb262dSRobert Hancock 		u16 tmp;
16322cb27853SRobert Hancock 
16339af5c9c9STejun Heo 		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
16342cb27853SRobert Hancock 			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
16352cb27853SRobert Hancock 			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
16362cb27853SRobert Hancock 			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
16372cb27853SRobert Hancock 			u32 status = readw(mmio + NV_ADMA_STAT);
163808af7414SRobert Hancock 			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
163908af7414SRobert Hancock 			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
16402cb27853SRobert Hancock 
1641a9a79dfeSJoe Perches 			ata_port_err(ap,
16425796d1c4SJeff Garzik 				"EH in ADMA mode, notifier 0x%X "
164308af7414SRobert Hancock 				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
164408af7414SRobert Hancock 				"next cpb count 0x%X next cpb idx 0x%x\n",
164508af7414SRobert Hancock 				notifier, notifier_error, gen_ctl, status,
164608af7414SRobert Hancock 				cpb_count, next_cpb_idx);
16472cb27853SRobert Hancock 
16482cb27853SRobert Hancock 			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
16492cb27853SRobert Hancock 				struct nv_adma_cpb *cpb = &pp->cpb[i];
16509af5c9c9STejun Heo 				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
16519af5c9c9STejun Heo 				    ap->link.sactive & (1 << i))
1652a9a79dfeSJoe Perches 					ata_port_err(ap,
16532cb27853SRobert Hancock 						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
16542cb27853SRobert Hancock 						i, cpb->ctl_flags, cpb->resp_flags);
16552cb27853SRobert Hancock 			}
16562cb27853SRobert Hancock 		}
1657fbbb262dSRobert Hancock 
1658fbbb262dSRobert Hancock 		/* Push us back into port register mode for error handling. */
1659fbbb262dSRobert Hancock 		nv_adma_register_mode(ap);
1660fbbb262dSRobert Hancock 
16615796d1c4SJeff Garzik 		/* Mark all of the CPBs as invalid to prevent them from
16625796d1c4SJeff Garzik 		   being executed */
1663fbbb262dSRobert Hancock 		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1664fbbb262dSRobert Hancock 			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1665fbbb262dSRobert Hancock 
1666fbbb262dSRobert Hancock 		/* clear CPB fetch count */
1667fbbb262dSRobert Hancock 		writew(0, mmio + NV_ADMA_CPB_COUNT);
1668fbbb262dSRobert Hancock 
1669fbbb262dSRobert Hancock 		/* Reset channel */
1670fbbb262dSRobert Hancock 		tmp = readw(mmio + NV_ADMA_CTL);
1671fbbb262dSRobert Hancock 		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
16725ce0cf6fSRobert Hancock 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1673fbbb262dSRobert Hancock 		udelay(1);
1674fbbb262dSRobert Hancock 		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
16755ce0cf6fSRobert Hancock 		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1676fbbb262dSRobert Hancock 	}
1677fbbb262dSRobert Hancock 
1678fe06e5f9STejun Heo 	ata_bmdma_error_handler(ap);
1679fbbb262dSRobert Hancock }
1680fbbb262dSRobert Hancock 
nv_swncq_qc_to_dq(struct ata_port * ap,struct ata_queued_cmd * qc)1681f140f0f1SKuan Luo static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1682f140f0f1SKuan Luo {
1683f140f0f1SKuan Luo 	struct nv_swncq_port_priv *pp = ap->private_data;
1684f140f0f1SKuan Luo 	struct defer_queue *dq = &pp->defer_queue;
1685f140f0f1SKuan Luo 
1686f140f0f1SKuan Luo 	/* queue is full */
1687f140f0f1SKuan Luo 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
16884e5b6260SJens Axboe 	dq->defer_bits |= (1 << qc->hw_tag);
16894e5b6260SJens Axboe 	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1690f140f0f1SKuan Luo }
1691f140f0f1SKuan Luo 
nv_swncq_qc_from_dq(struct ata_port * ap)1692f140f0f1SKuan Luo static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1693f140f0f1SKuan Luo {
1694f140f0f1SKuan Luo 	struct nv_swncq_port_priv *pp = ap->private_data;
1695f140f0f1SKuan Luo 	struct defer_queue *dq = &pp->defer_queue;
1696f140f0f1SKuan Luo 	unsigned int tag;
1697f140f0f1SKuan Luo 
1698f140f0f1SKuan Luo 	if (dq->head == dq->tail)	/* null queue */
1699f140f0f1SKuan Luo 		return NULL;
1700f140f0f1SKuan Luo 
1701f140f0f1SKuan Luo 	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1702f140f0f1SKuan Luo 	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1703f140f0f1SKuan Luo 	WARN_ON(!(dq->defer_bits & (1 << tag)));
1704f140f0f1SKuan Luo 	dq->defer_bits &= ~(1 << tag);
1705f140f0f1SKuan Luo 
1706f140f0f1SKuan Luo 	return ata_qc_from_tag(ap, tag);
1707f140f0f1SKuan Luo }
1708f140f0f1SKuan Luo 
nv_swncq_fis_reinit(struct ata_port * ap)1709f140f0f1SKuan Luo static void nv_swncq_fis_reinit(struct ata_port *ap)
1710f140f0f1SKuan Luo {
1711f140f0f1SKuan Luo 	struct nv_swncq_port_priv *pp = ap->private_data;
1712f140f0f1SKuan Luo 
1713f140f0f1SKuan Luo 	pp->dhfis_bits = 0;
1714f140f0f1SKuan Luo 	pp->dmafis_bits = 0;
1715f140f0f1SKuan Luo 	pp->sdbfis_bits = 0;
1716f140f0f1SKuan Luo 	pp->ncq_flags = 0;
1717f140f0f1SKuan Luo }
1718f140f0f1SKuan Luo 
nv_swncq_pp_reinit(struct ata_port * ap)1719f140f0f1SKuan Luo static void nv_swncq_pp_reinit(struct ata_port *ap)
1720f140f0f1SKuan Luo {
1721f140f0f1SKuan Luo 	struct nv_swncq_port_priv *pp = ap->private_data;
1722f140f0f1SKuan Luo 	struct defer_queue *dq = &pp->defer_queue;
1723f140f0f1SKuan Luo 
1724f140f0f1SKuan Luo 	dq->head = 0;
1725f140f0f1SKuan Luo 	dq->tail = 0;
1726f140f0f1SKuan Luo 	dq->defer_bits = 0;
1727f140f0f1SKuan Luo 	pp->qc_active = 0;
1728f140f0f1SKuan Luo 	pp->last_issue_tag = ATA_TAG_POISON;
1729f140f0f1SKuan Luo 	nv_swncq_fis_reinit(ap);
1730f140f0f1SKuan Luo }
1731f140f0f1SKuan Luo 
/* Ack the given FIS interrupt bits in the port's IRQ block. */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1738f140f0f1SKuan Luo 
/*
 * Stop the port's BMDMA engine when no real qc is at hand.
 * ata_bmdma_stop() takes a qc, so fabricate a throwaway one on the
 * stack with only ->ap filled in.  NOTE(review): this presumes
 * ata_bmdma_stop() reads nothing but qc->ap — the rest of the struct
 * is deliberately left uninitialized.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1746f140f0f1SKuan Luo 
/*
 * Quiesce the SWNCQ engine on entry to error handling.  Dumps the
 * complete SWNCQ bookkeeping state (active/deferred tags, per-tag FIS
 * bits, ATA status/error registers, hardware SActive) to the log for
 * postmortem analysis, then resets the per-port state, clears pending
 * interrupts and stops the BMDMA engine.
 */
static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
		     ap->qc_active, ap->link.sactive);
	ata_port_err(ap,
		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
		     ap->ops->sff_check_status(ap),
		     ioread8(ap->ioaddr.error_addr));

	/* tags the hardware has completed but we still track as active */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		/* log active tags normally; flag completed-but-tracked tags */
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_err(ap,
			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
			     (pp->dhfis_bits >> i) & 0x1,
			     (pp->dmafis_bits >> i) & 0x1,
			     (pp->sdbfis_bits >> i) & 0x1,
			     (sactive >> i) & 0x1,
			     (err ? "error! tag doesn't exit" : " "));
	}

	/* wipe SWNCQ state and silence the hardware */
	nv_swncq_pp_reinit(ap);
	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}
1793f140f0f1SKuan Luo 
/*
 * SWNCQ error handler.  If NCQ commands are still outstanding on the
 * link (sactive non-zero), dump and reset the SWNCQ state and request
 * a port reset before handing off to the generic BMDMA error handler.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}
1805f140f0f1SKuan Luo 
1806f140f0f1SKuan Luo #ifdef CONFIG_PM
/*
 * Power-management suspend hook: acknowledge and mask all MCP55 SATA
 * interrupts, then turn off the SWNCQ enable bits for both ports in
 * the host control register.  Always succeeds.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1825f140f0f1SKuan Luo 
/*
 * Power-management resume hook: clear stale interrupt status, unmask
 * interrupts (same 0x00fd00fd enable mask as nv_swncq_host_init())
 * and re-enable SWNCQ on both ports.  Always succeeds.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1843f140f0f1SKuan Luo #endif
1844f140f0f1SKuan Luo 
/*
 * One-time controller setup for SWNCQ operation: clear the ECO 398
 * bit in PCI config space, set the SWNCQ enable bits for both ports,
 * unmask the SATA interrupt sources and discard any stale interrupt
 * status.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/*  clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1870f140f0f1SKuan Luo 
/*
 * scsi_host slave_config hook.  After generic libata configuration,
 * apply a known quirk: Maxtor drives behind MCP51, or behind MCP55
 * revisions <= 0xa2, get their queue depth forced to 1, effectively
 * disabling SWNCQ for them.  The return value of
 * ata_scsi_slave_config() is propagated unchanged on every path.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	/* quirk only matters for NCQ-capable non-ATAPI devices */
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		/* PCI revision ID lives at config offset 0x8 */
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}
1916f140f0f1SKuan Luo 
/*
 * Per-port initialization for SWNCQ mode.  Sets up the standard BMDMA
 * resources (non-NCQ commands fall back to plain BMDMA), allocates the
 * SWNCQ private data and a DMA-coherent region holding one PRD table
 * per possible NCQ tag, and caches the per-port SActive, IRQ-status
 * and tag register addresses.  Returns 0 or a negative errno.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per tag (ATA_MAX_QUEUE slots) */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	/* per-port blocks are 2 bytes apart in the MCP55 register file */
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1945f140f0f1SKuan Luo 
nv_swncq_qc_prep(struct ata_queued_cmd * qc)194695364f36SJiri Slaby static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1947f140f0f1SKuan Luo {
1948f140f0f1SKuan Luo 	if (qc->tf.protocol != ATA_PROT_NCQ) {
1949f47451c4STejun Heo 		ata_bmdma_qc_prep(qc);
195095364f36SJiri Slaby 		return AC_ERR_OK;
1951f140f0f1SKuan Luo 	}
1952f140f0f1SKuan Luo 
1953f140f0f1SKuan Luo 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
195495364f36SJiri Slaby 		return AC_ERR_OK;
1955f140f0f1SKuan Luo 
1956f140f0f1SKuan Luo 	nv_swncq_fill_sg(qc);
195795364f36SJiri Slaby 
195895364f36SJiri Slaby 	return AC_ERR_OK;
1959f140f0f1SKuan Luo }
1960f140f0f1SKuan Luo 
/*
 * Build the BMDMA PRD table for an NCQ command in the per-tag slot of
 * pp->prd.  Each scatterlist segment is split so no PRD entry crosses
 * a 64K boundary, then the last entry is marked end-of-table.
 * NOTE(review): assumes at least one SG segment is mapped —
 * prd[idx - 1] would underflow on an empty list; the only caller
 * checks ATA_QCFLAG_DMAMAP first.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	/* select this tag's PRD table (ATA_MAX_PRD entries per tag) */
	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each entry so it ends at a 64K boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			/* len of 0x10000 encodes as 0 in the 16-bit field */
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1996f140f0f1SKuan Luo 
/*
 * Issue one NCQ command to the drive through the legacy taskfile
 * interface: set its tag in the shadow SActive block, record it as the
 * last issued tag, clear its D2H/DMA-setup FIS bookkeeping bits, mark
 * it active, then load and execute the taskfile.  A NULL @qc is a
 * no-op.  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	writel((1 << qc->hw_tag), pp->sactive_block);
	pp->last_issue_tag = qc->hw_tag;
	pp->dhfis_bits &= ~(1 << qc->hw_tag);
	pp->dmafis_bits &= ~(1 << qc->hw_tag);
	pp->qc_active |= (0x1 << qc->hw_tag);

	trace_ata_tf_load(ap, &qc->tf);
	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
	ap->ops->sff_exec_command(ap, &qc->tf);

	return 0;
}
2018f140f0f1SKuan Luo 
nv_swncq_qc_issue(struct ata_queued_cmd * qc)2019f140f0f1SKuan Luo static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2020f140f0f1SKuan Luo {
2021f140f0f1SKuan Luo 	struct ata_port *ap = qc->ap;
2022f140f0f1SKuan Luo 	struct nv_swncq_port_priv *pp = ap->private_data;
2023f140f0f1SKuan Luo 
2024f140f0f1SKuan Luo 	if (qc->tf.protocol != ATA_PROT_NCQ)
2025360ff783STejun Heo 		return ata_bmdma_qc_issue(qc);
2026f140f0f1SKuan Luo 
2027f140f0f1SKuan Luo 	if (!pp->qc_active)
2028f140f0f1SKuan Luo 		nv_swncq_issue_atacmd(ap, qc);
2029f140f0f1SKuan Luo 	else
2030f140f0f1SKuan Luo 		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2031f140f0f1SKuan Luo 
2032f140f0f1SKuan Luo 	return 0;
2033f140f0f1SKuan Luo }
2034f140f0f1SKuan Luo 
/*
 * Handle a hotplug/hot-unplug interrupt: clear SError, record a
 * plug/unplug description from the interrupt bits, flag the link as
 * hotplugged and freeze the port so EH takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2059f140f0f1SKuan Luo 
/*
 * Handle a Set Device Bits FIS interrupt (NCQ completion notification).
 * Completes every command whose tag has dropped out of the hardware
 * SActive register, then decides what to issue next: if outstanding
 * commands never received their D2H register FIS (or a backout was
 * seen), the last-issued command is re-issued; otherwise the next
 * deferred command, if any, is sent.  Returns 0 on success or -EINVAL
 * on a BMDMA transfer error (caller then freezes the port).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	trace_ata_bmdma_status(ap, host_stat);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* tags we track as active that the hardware has finished */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

	if (!ap->qc_active) {
		/* everything finished: reset SWNCQ state */
		ata_port_dbg(ap, "over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	/* some outstanding command already got its D2H FIS: nothing to do */
	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	ata_port_dbg(ap, "QC: qc_active 0x%llx,"
		     "SWNCQ:qc_active 0x%X defer_bits %X "
		     "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		     ap->qc_active, pp->qc_active,
		     pp->defer_queue.defer_bits, pp->dhfis_bits,
		     pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* re-issue the command that never got its D2H FIS */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}
2133f140f0f1SKuan Luo 
nv_swncq_tag(struct ata_port * ap)2134f140f0f1SKuan Luo static inline u32 nv_swncq_tag(struct ata_port *ap)
2135f140f0f1SKuan Luo {
2136f140f0f1SKuan Luo 	struct nv_swncq_port_priv *pp = ap->private_data;
2137f140f0f1SKuan Luo 	u32 tag;
2138f140f0f1SKuan Luo 
2139f140f0f1SKuan Luo 	tag = readb(pp->tag_block) >> 2;
2140f140f0f1SKuan Luo 	return (tag & 0x1f);
2141f140f0f1SKuan Luo }
2142f140f0f1SKuan Luo 
/*
 * Respond to a DMA Setup FIS: stop any running BMDMA transfer, look up
 * the qc for the tag the controller reports, point the BMDMA engine at
 * that tag's PRD table, program the transfer direction and start the
 * engine.  Silently returns if no qc exists for the reported tag.
 */
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	/* ATA_DMA_WR = engine writes to memory, i.e. a device read */
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
2174f140f0f1SKuan Luo 
/*
 * Per-port SWNCQ interrupt service.  Acknowledges the FIS interrupt
 * bits, then walks the possible causes in order: hotplug events,
 * device errors (port frozen), backout notifications, SDB FIS
 * completions, D2H register FIS acknowledgements (possibly issuing the
 * next deferred command) and DMA Setup FIS requests (starting BMDMA).
 * On any fatal condition the port is frozen for EH.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* EH owns a frozen port; don't touch it */
	if (ata_port_is_frozen(ap))
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error: freeze and let EH sort it out */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H after SDB/backout violates the protocol */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			/* drive is idle: push the next deferred command */
			if (pp->defer_queue.defer_bits) {
				ata_port_dbg(ap, "send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2273f140f0f1SKuan Luo 
/*
 * Top-level interrupt handler for MCP55 in SWNCQ mode.  Reads the
 * global interrupt status once under the host lock, then dispatches
 * each port: full SWNCQ handling when that port has NCQ commands
 * active, otherwise the legacy nv_host_intr() path (after clearing all
 * non-hotplug status bits).  The status word holds one port per
 * NV_INT_PORT_SHIFT_MCP55-bit field.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			if (irq_stat)	/* reserve Hotplug */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		/* shift the next port's status bits into place */
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
2305f140f0f1SKuan Luo 
/*
 * PCI probe.  Rejects controllers in IDE mode (identified by having
 * fewer than the six BARs a SATA-mode controller exposes), selects
 * ADMA/SWNCQ/legacy operation from the device type and module
 * parameters, maps the control BAR, performs type-specific host
 * initialization, optionally enables MSI, and activates the host.
 * Returns 0 or a negative errno.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
        // it's an IDE controller and we ignore it.
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_notice(&pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	if (msi_enabled) {
		dev_notice(&pdev->dev, "Using MSI\n");
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
2386c6fd2807SJeff Garzik 
#ifdef CONFIG_PM_SLEEP
/*
 * nv_pci_device_resume - restore controller configuration after resume
 * @pdev: PCI device being resumed
 *
 * Performs the generic libata PCI resume first, then — only if the system
 * actually went through a full suspend (PM_EVENT_SUSPEND), where PCI config
 * space contents are lost — reprograms the NV_MCP_SATA_CFG_20 register:
 * re-enables the extended SATA register space on CK804 and later, and
 * re-applies the per-port ADMA enable state on ADMA-class controllers.
 *
 * Returns 0 on success or a negative errno from the generic resume path.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Config space survives lighter PM events; only redo it after suspend */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* re-enable the SATA register space (set at probe) */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/*
			 * enable/disable ADMA on the ports appropriately:
			 * a port that completed ATAPI setup runs in legacy
			 * mode, so its ADMA enable bits are cleared.
			 */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
2436cdf56bcfSRobert Hancock 
nv_ck804_host_stop(struct ata_host * host)2437cca3974eSJeff Garzik static void nv_ck804_host_stop(struct ata_host *host)
2438c6fd2807SJeff Garzik {
2439cca3974eSJeff Garzik 	struct pci_dev *pdev = to_pci_dev(host->dev);
2440c6fd2807SJeff Garzik 	u8 regval;
2441c6fd2807SJeff Garzik 
2442c6fd2807SJeff Garzik 	/* disable SATA space for CK804 */
2443c6fd2807SJeff Garzik 	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2444c6fd2807SJeff Garzik 	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2445c6fd2807SJeff Garzik 	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2446c6fd2807SJeff Garzik }
2447c6fd2807SJeff Garzik 
nv_adma_host_stop(struct ata_host * host)2448fbbb262dSRobert Hancock static void nv_adma_host_stop(struct ata_host *host)
2449fbbb262dSRobert Hancock {
2450fbbb262dSRobert Hancock 	struct pci_dev *pdev = to_pci_dev(host->dev);
2451fbbb262dSRobert Hancock 	u32 tmp32;
2452fbbb262dSRobert Hancock 
2453fbbb262dSRobert Hancock 	/* disable ADMA on the ports */
2454fbbb262dSRobert Hancock 	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2455fbbb262dSRobert Hancock 	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2456fbbb262dSRobert Hancock 		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2457fbbb262dSRobert Hancock 		   NV_MCP_SATA_CFG_20_PORT1_EN |
2458fbbb262dSRobert Hancock 		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2459fbbb262dSRobert Hancock 
2460fbbb262dSRobert Hancock 	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2461fbbb262dSRobert Hancock 
2462fbbb262dSRobert Hancock 	nv_ck804_host_stop(host);
2463fbbb262dSRobert Hancock }
2464fbbb262dSRobert Hancock 
/* Standard PCI driver registration boilerplate (module init/exit). */
module_pci_driver(nv_pci_driver);

/* Module parameters: all read-only after load (sysfs mode 0444). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2473