xref: /openbmc/linux/drivers/ata/sata_nv.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *  sata_nv.c - NVIDIA nForce SATA
4   *
5   *  Copyright 2004 NVIDIA Corp.  All rights reserved.
6   *  Copyright 2004 Andrew Chew
7   *
8   *  libata documentation is available via 'make {ps|pdf}docs',
9   *  as Documentation/driver-api/libata.rst
10   *
11   *  No hardware documentation available outside of NVIDIA.
12   *  This driver programs the NVIDIA SATA controller in a similar
13   *  fashion as with other PCI IDE BMDMA controllers, with a few
14   *  NV-specific details such as register offsets, SATA phy location,
15   *  hotplug info, etc.
16   *
17   *  CK804/MCP04 controllers support an alternate programming interface
18   *  similar to the ADMA specification (with some modifications).
19   *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20   *  sent through the legacy interface.
21   */
22  
23  #include <linux/kernel.h>
24  #include <linux/module.h>
25  #include <linux/gfp.h>
26  #include <linux/pci.h>
27  #include <linux/blkdev.h>
28  #include <linux/delay.h>
29  #include <linux/interrupt.h>
30  #include <linux/device.h>
31  #include <scsi/scsi_host.h>
32  #include <scsi/scsi_device.h>
33  #include <linux/libata.h>
34  #include <trace/events/libata.h>
35  
36  #define DRV_NAME			"sata_nv"
37  #define DRV_VERSION			"3.5"
38  
39  #define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
40  
41  enum {
42  	NV_MMIO_BAR			= 5,
43  
44  	NV_PORTS			= 2,
45  	NV_PIO_MASK			= ATA_PIO4,
46  	NV_MWDMA_MASK			= ATA_MWDMA2,
47  	NV_UDMA_MASK			= ATA_UDMA6,
48  	NV_PORT0_SCR_REG_OFFSET		= 0x00,
49  	NV_PORT1_SCR_REG_OFFSET		= 0x40,
50  
51  	/* INT_STATUS/ENABLE */
52  	NV_INT_STATUS			= 0x10,
53  	NV_INT_ENABLE			= 0x11,
54  	NV_INT_STATUS_CK804		= 0x440,
55  	NV_INT_ENABLE_CK804		= 0x441,
56  
57  	/* INT_STATUS/ENABLE bits */
58  	NV_INT_DEV			= 0x01,
59  	NV_INT_PM			= 0x02,
60  	NV_INT_ADDED			= 0x04,
61  	NV_INT_REMOVED			= 0x08,
62  
63  	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */
64  
65  	NV_INT_ALL			= 0x0f,
66  	NV_INT_MASK			= NV_INT_DEV |
67  					  NV_INT_ADDED | NV_INT_REMOVED,
68  
69  	/* INT_CONFIG */
70  	NV_INT_CONFIG			= 0x12,
71  	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI
72  
73  	// For PCI config register 20
74  	NV_MCP_SATA_CFG_20		= 0x50,
75  	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
76  	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
77  	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
78  	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
79  	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),
80  
81  	NV_ADMA_MAX_CPBS		= 32,
82  	NV_ADMA_CPB_SZ			= 128,
83  	NV_ADMA_APRD_SZ			= 16,
84  	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
85  					   NV_ADMA_APRD_SZ,
86  	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
87  	NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
88  	NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
89  					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
90  
91  	/* BAR5 offset to ADMA general registers */
92  	NV_ADMA_GEN			= 0x400,
93  	NV_ADMA_GEN_CTL			= 0x00,
94  	NV_ADMA_NOTIFIER_CLEAR		= 0x30,
95  
96  	/* BAR5 offset to ADMA ports */
97  	NV_ADMA_PORT			= 0x480,
98  
99  	/* size of ADMA port register space  */
100  	NV_ADMA_PORT_SIZE		= 0x100,
101  
102  	/* ADMA port registers */
103  	NV_ADMA_CTL			= 0x40,
104  	NV_ADMA_CPB_COUNT		= 0x42,
105  	NV_ADMA_NEXT_CPB_IDX		= 0x43,
106  	NV_ADMA_STAT			= 0x44,
107  	NV_ADMA_CPB_BASE_LOW		= 0x48,
108  	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
109  	NV_ADMA_APPEND			= 0x50,
110  	NV_ADMA_NOTIFIER		= 0x68,
111  	NV_ADMA_NOTIFIER_ERROR		= 0x6C,
112  
113  	/* NV_ADMA_CTL register bits */
114  	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
115  	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
116  	NV_ADMA_CTL_GO			= (1 << 7),
117  	NV_ADMA_CTL_AIEN		= (1 << 8),
118  	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
119  	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),
120  
121  	/* CPB response flag bits */
122  	NV_CPB_RESP_DONE		= (1 << 0),
123  	NV_CPB_RESP_ATA_ERR		= (1 << 3),
124  	NV_CPB_RESP_CMD_ERR		= (1 << 4),
125  	NV_CPB_RESP_CPB_ERR		= (1 << 7),
126  
127  	/* CPB control flag bits */
128  	NV_CPB_CTL_CPB_VALID		= (1 << 0),
129  	NV_CPB_CTL_QUEUE		= (1 << 1),
130  	NV_CPB_CTL_APRD_VALID		= (1 << 2),
131  	NV_CPB_CTL_IEN			= (1 << 3),
132  	NV_CPB_CTL_FPDMA		= (1 << 4),
133  
134  	/* APRD flags */
135  	NV_APRD_WRITE			= (1 << 1),
136  	NV_APRD_END			= (1 << 2),
137  	NV_APRD_CONT			= (1 << 3),
138  
139  	/* NV_ADMA_STAT flags */
140  	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
141  	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
142  	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
143  	NV_ADMA_STAT_CPBERR		= (1 << 4),
144  	NV_ADMA_STAT_SERROR		= (1 << 5),
145  	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
146  	NV_ADMA_STAT_IDLE		= (1 << 8),
147  	NV_ADMA_STAT_LEGACY		= (1 << 9),
148  	NV_ADMA_STAT_STOPPED		= (1 << 10),
149  	NV_ADMA_STAT_DONE		= (1 << 12),
150  	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
151  					  NV_ADMA_STAT_TIMEOUT,
152  
153  	/* port flags */
154  	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
155  	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
156  
157  	/* MCP55 reg offset */
158  	NV_CTL_MCP55			= 0x400,
159  	NV_INT_STATUS_MCP55		= 0x440,
160  	NV_INT_ENABLE_MCP55		= 0x444,
161  	NV_NCQ_REG_MCP55		= 0x448,
162  
163  	/* MCP55 */
164  	NV_INT_ALL_MCP55		= 0xffff,
165  	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
166  	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,
167  
168  	/* SWNCQ ENABLE BITS */
169  	NV_CTL_PRI_SWNCQ		= 0x02,
170  	NV_CTL_SEC_SWNCQ		= 0x04,
171  
172  	/* SW NCQ status bits */
173  	NV_SWNCQ_IRQ_DEV		= (1 << 0),
174  	NV_SWNCQ_IRQ_PM			= (1 << 1),
175  	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
176  	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),
177  
178  	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
179  	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
180  	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
181  	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),
182  
183  	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
184  					  NV_SWNCQ_IRQ_REMOVED,
185  
186  };
187  
188  /* ADMA Physical Region Descriptor - one SG segment */
189  struct nv_adma_prd {
190  	__le64			addr;
191  	__le32			len;
192  	u8			flags;
193  	u8			packet_len;
194  	__le16			reserved;
195  };
196  
197  enum nv_adma_regbits {
198  	CMDEND	= (1 << 15),		/* end of command list */
199  	WNB	= (1 << 14),		/* wait-not-BSY */
200  	IGN	= (1 << 13),		/* ignore this entry */
201  	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
202  	DA2	= (1 << (2 + 8)),
203  	DA1	= (1 << (1 + 8)),
204  	DA0	= (1 << (0 + 8)),
205  };
206  
207  /* ADMA Command Parameter Block
208     The first 5 SG segments are stored inside the Command Parameter Block itself.
209     If there are more than 5 segments the remainder are stored in a separate
210     memory area indicated by next_aprd. */
211  struct nv_adma_cpb {
212  	u8			resp_flags;    /* 0 */
213  	u8			reserved1;     /* 1 */
214  	u8			ctl_flags;     /* 2 */
215  	/* len is length of taskfile in 64 bit words */
216  	u8			len;		/* 3  */
217  	u8			tag;           /* 4 */
218  	u8			next_cpb_idx;  /* 5 */
219  	__le16			reserved2;     /* 6-7 */
220  	__le16			tf[12];        /* 8-31 */
221  	struct nv_adma_prd	aprd[5];       /* 32-111 */
222  	__le64			next_aprd;     /* 112-119 */
223  	__le64			reserved3;     /* 120-127 */
224  };
225  
226  
227  struct nv_adma_port_priv {
228  	struct nv_adma_cpb	*cpb;
229  	dma_addr_t		cpb_dma;
230  	struct nv_adma_prd	*aprd;
231  	dma_addr_t		aprd_dma;
232  	void __iomem		*ctl_block;
233  	void __iomem		*gen_block;
234  	void __iomem		*notifier_clear_block;
235  	u64			adma_dma_mask;
236  	u8			flags;
237  	int			last_issue_ncq;
238  };
239  
240  struct nv_host_priv {
241  	unsigned long		type;
242  };
243  
244  struct defer_queue {
245  	u32		defer_bits;
246  	unsigned int	head;
247  	unsigned int	tail;
248  	unsigned int	tag[ATA_MAX_QUEUE];
249  };
250  
251  enum ncq_saw_flag_list {
252  	ncq_saw_d2h	= (1U << 0),
253  	ncq_saw_dmas	= (1U << 1),
254  	ncq_saw_sdb	= (1U << 2),
255  	ncq_saw_backout	= (1U << 3),
256  };
257  
258  struct nv_swncq_port_priv {
259  	struct ata_bmdma_prd *prd;	 /* our SG list */
260  	dma_addr_t	prd_dma; /* and its DMA mapping */
261  	void __iomem	*sactive_block;
262  	void __iomem	*irq_block;
263  	void __iomem	*tag_block;
264  	u32		qc_active;
265  
266  	unsigned int	last_issue_tag;
267  
268  	/* FIFO circular queue to store deferred commands */
269  	struct defer_queue defer_queue;
270  
271  	/* for NCQ interrupt analysis */
272  	u32		dhfis_bits;
273  	u32		dmafis_bits;
274  	u32		sdbfis_bits;
275  
276  	unsigned int	ncq_flags;
277  };
278  
279  
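/* Test the per-port interrupt bit in the ADMA general control register:
   port 0 uses bit 19, port 1 uses bit 31 (19 + 12). */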
280  #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
281  
282  static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
283  #ifdef CONFIG_PM_SLEEP
284  static int nv_pci_device_resume(struct pci_dev *pdev);
285  #endif
286  static void nv_ck804_host_stop(struct ata_host *host);
287  static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
288  static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
289  static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
290  static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
291  static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
292  
293  static int nv_hardreset(struct ata_link *link, unsigned int *class,
294  			unsigned long deadline);
295  static void nv_nf2_freeze(struct ata_port *ap);
296  static void nv_nf2_thaw(struct ata_port *ap);
297  static void nv_ck804_freeze(struct ata_port *ap);
298  static void nv_ck804_thaw(struct ata_port *ap);
299  static int nv_adma_slave_config(struct scsi_device *sdev);
300  static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
301  static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
302  static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
303  static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
304  static void nv_adma_irq_clear(struct ata_port *ap);
305  static int nv_adma_port_start(struct ata_port *ap);
306  static void nv_adma_port_stop(struct ata_port *ap);
307  #ifdef CONFIG_PM
308  static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
309  static int nv_adma_port_resume(struct ata_port *ap);
310  #endif
311  static void nv_adma_freeze(struct ata_port *ap);
312  static void nv_adma_thaw(struct ata_port *ap);
313  static void nv_adma_error_handler(struct ata_port *ap);
314  static void nv_adma_host_stop(struct ata_host *host);
315  static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
316  static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
317  
318  static void nv_mcp55_thaw(struct ata_port *ap);
319  static void nv_mcp55_freeze(struct ata_port *ap);
320  static void nv_swncq_error_handler(struct ata_port *ap);
321  static int nv_swncq_slave_config(struct scsi_device *sdev);
322  static int nv_swncq_port_start(struct ata_port *ap);
323  static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
324  static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
325  static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
326  static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
327  static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
328  #ifdef CONFIG_PM
329  static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
330  static int nv_swncq_port_resume(struct ata_port *ap);
331  #endif
332  
333  enum nv_host_type
334  {
335  	GENERIC,
336  	NFORCE2,
337  	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
338  	CK804,
339  	ADMA,
340  	MCP5x,
341  	SWNCQ,
342  };
343  
344  static const struct pci_device_id nv_pci_tbl[] = {
345  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
346  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
347  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
348  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
349  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
350  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
351  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
352  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
353  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
354  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
355  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
356  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
357  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
358  	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
359  
360  	{ } /* terminate list */
361  };
362  
363  static struct pci_driver nv_pci_driver = {
364  	.name			= DRV_NAME,
365  	.id_table		= nv_pci_tbl,
366  	.probe			= nv_init_one,
367  #ifdef CONFIG_PM_SLEEP
368  	.suspend		= ata_pci_device_suspend,
369  	.resume			= nv_pci_device_resume,
370  #endif
371  	.remove			= ata_pci_remove_one,
372  };
373  
374  static const struct scsi_host_template nv_sht = {
375  	ATA_BMDMA_SHT(DRV_NAME),
376  };
377  
378  static const struct scsi_host_template nv_adma_sht = {
379  	__ATA_BASE_SHT(DRV_NAME),
380  	.can_queue		= NV_ADMA_MAX_CPBS,
381  	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
382  	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
383  	.slave_configure	= nv_adma_slave_config,
384  	.sdev_groups		= ata_ncq_sdev_groups,
385  	.change_queue_depth     = ata_scsi_change_queue_depth,
386  	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
387  };
388  
389  static const struct scsi_host_template nv_swncq_sht = {
390  	__ATA_BASE_SHT(DRV_NAME),
391  	.can_queue		= ATA_MAX_QUEUE - 1,
392  	.sg_tablesize		= LIBATA_MAX_PRD,
393  	.dma_boundary		= ATA_DMA_BOUNDARY,
394  	.slave_configure	= nv_swncq_slave_config,
395  	.sdev_groups		= ata_ncq_sdev_groups,
396  	.change_queue_depth     = ata_scsi_change_queue_depth,
397  	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
398  };
399  
400  /*
401   * NV SATA controllers have various different problems with hardreset
402   * protocol depending on the specific controller and device.
403   *
404   * GENERIC:
405   *
406   *  bko11195 reports that link doesn't come online after hardreset on
407   *  generic nv's and there have been several other similar reports on
408   *  linux-ide.
409   *
410   *  bko12351#c23 reports that warmplug on MCP61 doesn't work with
411   *  softreset.
412   *
413   * NF2/3:
414   *
415   *  bko3352 reports nf2/3 controllers can't determine device signature
416   *  reliably after hardreset.  The following thread reports detection
417   *  failure on cold boot with the standard debouncing timing.
418   *
419   *  http://thread.gmane.org/gmane.linux.ide/34098
420   *
421   *  bko12176 reports that hardreset fails to bring up the link during
422   *  boot on nf2.
423   *
424   * CK804:
425   *
426   *  For initial probing after boot and hot plugging, hardreset mostly
427   *  works fine on CK804 but curiously, reprobing on the initial port
428   *  by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
429   *  FIS in a somewhat nondeterministic way.
430   *
431   * SWNCQ:
432   *
433   *  bko12351 reports that when SWNCQ is enabled, for hotplug to work,
434   *  hardreset should be used and hardreset can't report proper
435   *  signature, which suggests that mcp5x is closer to nf2 as far as
436   *  reset quirkiness is concerned.
437   *
438   *  bko12703 reports that boot probing fails for intel SSD with
439   *  hardreset.  Link fails to come online.  Softreset works fine.
440   *
441   * The failures are varied but the following patterns seem true for
442   * all flavors.
443   *
444   * - Softreset during boot always works.
445   *
446   * - Hardreset during boot sometimes fails to bring up the link on
447   *   certain combinations and device signature acquisition is
448   *   unreliable.
449   *
450   * - Hardreset is often necessary after hotplug.
451   *
452   * So, preferring softreset for boot probing and error handling (as
453   * hardreset might bring down the link) but using hardreset for
454   * post-boot probing should work around the above issues in most
455   * cases.  Define nv_hardreset() which only kicks in for post-boot
456   * probing and use it for all variants.
457   */
458  static struct ata_port_operations nv_generic_ops = {
459  	.inherits		= &ata_bmdma_port_ops,
460  	.lost_interrupt		= ATA_OP_NULL,
461  	.scr_read		= nv_scr_read,
462  	.scr_write		= nv_scr_write,
463  	.hardreset		= nv_hardreset,
464  };
465  
466  static struct ata_port_operations nv_nf2_ops = {
467  	.inherits		= &nv_generic_ops,
468  	.freeze			= nv_nf2_freeze,
469  	.thaw			= nv_nf2_thaw,
470  };
471  
472  static struct ata_port_operations nv_ck804_ops = {
473  	.inherits		= &nv_generic_ops,
474  	.freeze			= nv_ck804_freeze,
475  	.thaw			= nv_ck804_thaw,
476  	.host_stop		= nv_ck804_host_stop,
477  };
478  
479  static struct ata_port_operations nv_adma_ops = {
480  	.inherits		= &nv_ck804_ops,
481  
482  	.check_atapi_dma	= nv_adma_check_atapi_dma,
483  	.sff_tf_read		= nv_adma_tf_read,
484  	.qc_defer		= ata_std_qc_defer,
485  	.qc_prep		= nv_adma_qc_prep,
486  	.qc_issue		= nv_adma_qc_issue,
487  	.sff_irq_clear		= nv_adma_irq_clear,
488  
489  	.freeze			= nv_adma_freeze,
490  	.thaw			= nv_adma_thaw,
491  	.error_handler		= nv_adma_error_handler,
492  	.post_internal_cmd	= nv_adma_post_internal_cmd,
493  
494  	.port_start		= nv_adma_port_start,
495  	.port_stop		= nv_adma_port_stop,
496  #ifdef CONFIG_PM
497  	.port_suspend		= nv_adma_port_suspend,
498  	.port_resume		= nv_adma_port_resume,
499  #endif
500  	.host_stop		= nv_adma_host_stop,
501  };
502  
503  static struct ata_port_operations nv_swncq_ops = {
504  	.inherits		= &nv_generic_ops,
505  
506  	.qc_defer		= ata_std_qc_defer,
507  	.qc_prep		= nv_swncq_qc_prep,
508  	.qc_issue		= nv_swncq_qc_issue,
509  
510  	.freeze			= nv_mcp55_freeze,
511  	.thaw			= nv_mcp55_thaw,
512  	.error_handler		= nv_swncq_error_handler,
513  
514  #ifdef CONFIG_PM
515  	.port_suspend		= nv_swncq_port_suspend,
516  	.port_resume		= nv_swncq_port_resume,
517  #endif
518  	.port_start		= nv_swncq_port_start,
519  };
520  
521  struct nv_pi_priv {
522  	irq_handler_t			irq_handler;
523  	const struct scsi_host_template	*sht;
524  };
525  
526  #define NV_PI_PRIV(_irq_handler, _sht) \
527  	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
528  
529  static const struct ata_port_info nv_port_info[] = {
530  	/* generic */
531  	{
532  		.flags		= ATA_FLAG_SATA,
533  		.pio_mask	= NV_PIO_MASK,
534  		.mwdma_mask	= NV_MWDMA_MASK,
535  		.udma_mask	= NV_UDMA_MASK,
536  		.port_ops	= &nv_generic_ops,
537  		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
538  	},
539  	/* nforce2/3 */
540  	{
541  		.flags		= ATA_FLAG_SATA,
542  		.pio_mask	= NV_PIO_MASK,
543  		.mwdma_mask	= NV_MWDMA_MASK,
544  		.udma_mask	= NV_UDMA_MASK,
545  		.port_ops	= &nv_nf2_ops,
546  		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
547  	},
548  	/* ck804 */
549  	{
550  		.flags		= ATA_FLAG_SATA,
551  		.pio_mask	= NV_PIO_MASK,
552  		.mwdma_mask	= NV_MWDMA_MASK,
553  		.udma_mask	= NV_UDMA_MASK,
554  		.port_ops	= &nv_ck804_ops,
555  		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
556  	},
557  	/* ADMA */
558  	{
559  		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
560  		.pio_mask	= NV_PIO_MASK,
561  		.mwdma_mask	= NV_MWDMA_MASK,
562  		.udma_mask	= NV_UDMA_MASK,
563  		.port_ops	= &nv_adma_ops,
564  		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
565  	},
566  	/* MCP5x */
567  	{
568  		.flags		= ATA_FLAG_SATA,
569  		.pio_mask	= NV_PIO_MASK,
570  		.mwdma_mask	= NV_MWDMA_MASK,
571  		.udma_mask	= NV_UDMA_MASK,
572  		.port_ops	= &nv_generic_ops,
573  		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
574  	},
575  	/* SWNCQ */
576  	{
577  		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
578  		.pio_mask	= NV_PIO_MASK,
579  		.mwdma_mask	= NV_MWDMA_MASK,
580  		.udma_mask	= NV_UDMA_MASK,
581  		.port_ops	= &nv_swncq_ops,
582  		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
583  	},
584  };
585  
586  MODULE_AUTHOR("NVIDIA");
587  MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
588  MODULE_LICENSE("GPL");
589  MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
590  MODULE_VERSION(DRV_VERSION);
591  
592  static bool adma_enabled;
593  static bool swncq_enabled = true;
594  static bool msi_enabled;
595  
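/*
 * Switch a port out of ADMA mode and back to legacy register mode:
 * wait for the ADMA engine to go idle, clear the GO bit, then wait
 * for the engine to report legacy mode.  Each wait polls the status
 * register up to 20 times with a 50 ns delay between reads.
 */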
596  static void nv_adma_register_mode(struct ata_port *ap)
597  {
598  	struct nv_adma_port_priv *pp = ap->private_data;
599  	void __iomem *mmio = pp->ctl_block;
600  	u16 tmp, status;
601  	int count = 0;
602  
603  	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
604  		return;
605  
606  	status = readw(mmio + NV_ADMA_STAT);
607  	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
608  		ndelay(50);
609  		status = readw(mmio + NV_ADMA_STAT);
610  		count++;
611  	}
612  	if (count == 20)
613  		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
614  			      status);
615  
616  	tmp = readw(mmio + NV_ADMA_CTL);
617  	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
618  
619  	count = 0;
620  	status = readw(mmio + NV_ADMA_STAT);
621  	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
622  		ndelay(50);
623  		status = readw(mmio + NV_ADMA_STAT);
624  		count++;
625  	}
626  	if (count == 20)
627  		ata_port_warn(ap,
628  			      "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
629  			      status);
630  
631  	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
632  }
633  
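/*
 * Switch a port from legacy register mode into ADMA mode: set the GO
 * bit and wait for the LEGACY flag to clear and the engine to go idle.
 */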
634  static void nv_adma_mode(struct ata_port *ap)
635  {
636  	struct nv_adma_port_priv *pp = ap->private_data;
637  	void __iomem *mmio = pp->ctl_block;
638  	u16 tmp, status;
639  	int count = 0;
640  
641  	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
642  		return;
643  
644  	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
645  
646  	tmp = readw(mmio + NV_ADMA_CTL);
647  	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
648  
649  	status = readw(mmio + NV_ADMA_STAT);
650  	while (((status & NV_ADMA_STAT_LEGACY) ||
651  	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
652  		ndelay(50);
653  		status = readw(mmio + NV_ADMA_STAT);
654  		count++;
655  	}
656  	if (count == 20)
657  		ata_port_warn(ap,
658  			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
659  			status);
660  
661  	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
662  }
663  
664  static int nv_adma_slave_config(struct scsi_device *sdev)
665  {
666  	struct ata_port *ap = ata_shost_to_port(sdev->host);
667  	struct nv_adma_port_priv *pp = ap->private_data;
668  	struct nv_adma_port_priv *port0, *port1;
669  	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
670  	unsigned long segment_boundary, flags;
671  	unsigned short sg_tablesize;
672  	int rc;
673  	int adma_enable;
674  	u32 current_reg, new_reg, config_mask;
675  
676  	rc = ata_scsi_slave_config(sdev);
677  
678  	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
679  		/* Not a proper libata device, ignore */
680  		return rc;
681  
682  	spin_lock_irqsave(ap->lock, flags);
683  
684  	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
685  		/*
686  		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
687  		 * Therefore ATAPI commands are sent through the legacy interface.
688  		 * However, the legacy interface only supports 32-bit DMA.
689  		 * Restrict DMA parameters as required by the legacy interface
690  		 * when an ATAPI device is connected.
691  		 */
692  		segment_boundary = ATA_DMA_BOUNDARY;
693  		/* Subtract 1 since an extra entry may be needed for padding, see
694  		   libata-scsi.c */
695  		sg_tablesize = LIBATA_MAX_PRD - 1;
696  
697  		/* Since the legacy DMA engine is in use, we need to disable ADMA
698  		   on the port. */
699  		adma_enable = 0;
700  		nv_adma_register_mode(ap);
701  	} else {
702  		segment_boundary = NV_ADMA_DMA_BOUNDARY;
703  		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
704  		adma_enable = 1;
705  	}
706  
707  	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
708  
709  	if (ap->port_no == 1)
710  		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
711  			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
712  	else
713  		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
714  			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
715  
716  	if (adma_enable) {
717  		new_reg = current_reg | config_mask;
718  		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
719  	} else {
720  		new_reg = current_reg & ~config_mask;
721  		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
722  	}
723  
724  	if (current_reg != new_reg)
725  		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
726  
727  	port0 = ap->host->ports[0]->private_data;
728  	port1 = ap->host->ports[1]->private_data;
729  	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
730  	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
731  		/*
732  		 * We have to set the DMA mask to 32-bit if either port is in
733  		 * ATAPI mode, since they are on the same PCI device which is
734  		 * used for DMA mapping.  If either SCSI device is not allocated
735  		 * yet, it's OK since that port will discover its correct
736  		 * setting when it does get allocated.
737  		 */
738  		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
739  	} else {
740  		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
741  	}
742  
743  	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
744  	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
745  	ata_port_info(ap,
746  		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
747  		      (unsigned long long)*ap->host->dev->dma_mask,
748  		      segment_boundary, sg_tablesize);
749  
750  	spin_unlock_irqrestore(ap->lock, flags);
751  
752  	return rc;
753  }
754  
755  static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
756  {
757  	struct nv_adma_port_priv *pp = qc->ap->private_data;
758  	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
759  }
760  
761  static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
762  {
763  	/* Other than when internal or pass-through commands are executed,
764  	   the only time this function will be called in ADMA mode will be
765  	   if a command fails. In the failure case we don't care about going
766  	   into register mode with ADMA commands pending, as the commands will
767  	   all shortly be aborted anyway. We assume that NCQ commands are not
768  	   issued via passthrough, which is the only way that switching into
769  	   ADMA mode could abort outstanding commands. */
770  	nv_adma_register_mode(ap);
771  
772  	ata_sff_tf_read(ap, tf);
773  }
774  
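/*
 * Convert a taskfile into the CPB's array of 16-bit entries.  The high
 * byte of each entry selects the shadow register and the low byte
 * carries the value, with control bits (WNB, CMDEND, IGN) OR'd in.
 * Unused slots are padded with IGN so the array is always 12 entries.
 */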
775  static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
776  {
777  	unsigned int idx = 0;
778  
779  	if (tf->flags & ATA_TFLAG_ISADDR) {
780  		if (tf->flags & ATA_TFLAG_LBA48) {
781  			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
782  			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
783  			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
784  			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
785  			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
786  			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
787  		} else
788  			cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
789  
790  		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
791  		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
792  		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
793  		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
794  	}
795  
796  	if (tf->flags & ATA_TFLAG_DEVICE)
797  		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
798  
799  	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
800  
801  	while (idx < 12)
802  		cpb[idx++] = cpu_to_le16(IGN);
803  
804  	return idx;
805  }
806  
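/*
 * Check the response flags of one CPB.  Returns 1 if the command
 * completed successfully, 0 if it is still outstanding, and -1 if an
 * error was flagged (in which case the port is aborted or frozen and
 * EH takes over).
 */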
807  static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
808  {
809  	struct nv_adma_port_priv *pp = ap->private_data;
810  	u8 flags = pp->cpb[cpb_num].resp_flags;
811  
812  	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
813  
814  	if (unlikely((force_err ||
815  		     flags & (NV_CPB_RESP_ATA_ERR |
816  			      NV_CPB_RESP_CMD_ERR |
817  			      NV_CPB_RESP_CPB_ERR)))) {
818  		struct ata_eh_info *ehi = &ap->link.eh_info;
819  		int freeze = 0;
820  
821  		ata_ehi_clear_desc(ehi);
822  		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
823  		if (flags & NV_CPB_RESP_ATA_ERR) {
824  			ata_ehi_push_desc(ehi, "ATA error");
825  			ehi->err_mask |= AC_ERR_DEV;
826  		} else if (flags & NV_CPB_RESP_CMD_ERR) {
827  			ata_ehi_push_desc(ehi, "CMD error");
828  			ehi->err_mask |= AC_ERR_DEV;
829  		} else if (flags & NV_CPB_RESP_CPB_ERR) {
830  			ata_ehi_push_desc(ehi, "CPB error");
831  			ehi->err_mask |= AC_ERR_SYSTEM;
832  			freeze = 1;
833  		} else {
834  			/* notifier error, but no error in CPB flags? */
835  			ata_ehi_push_desc(ehi, "unknown");
836  			ehi->err_mask |= AC_ERR_OTHER;
837  			freeze = 1;
838  		}
839  		/* Kill all commands. EH will determine what actually failed. */
840  		if (freeze)
841  			ata_port_freeze(ap);
842  		else
843  			ata_port_abort(ap);
844  		return -1;
845  	}
846  
847  	if (likely(flags & NV_CPB_RESP_DONE))
848  		return 1;
849  	return 0;
850  }
851  
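/*
 * Handle a legacy (non-ADMA) interrupt for one port: freeze on hotplug
 * events, ignore interrupts that are not ours, and otherwise hand the
 * active command to the BMDMA interrupt handler.
 */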
852  static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
853  {
854  	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
855  
856  	/* freeze if hotplugged */
857  	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
858  		ata_port_freeze(ap);
859  		return 1;
860  	}
861  
862  	/* bail out if not our interrupt */
863  	if (!(irq_stat & NV_INT_DEV))
864  		return 0;
865  
866  	/* DEV interrupt w/ no active qc? */
867  	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
868  		ata_sff_check_status(ap);
869  		return 1;
870  	}
871  
872  	/* handle interrupt */
873  	return ata_bmdma_port_intr(ap, qc);
874  }
875  
876  static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
877  {
878  	struct ata_host *host = dev_instance;
879  	int i, handled = 0;
880  	u32 notifier_clears[2];
881  
882  	spin_lock(&host->lock);
883  
884  	for (i = 0; i < host->n_ports; i++) {
885  		struct ata_port *ap = host->ports[i];
886  		struct nv_adma_port_priv *pp = ap->private_data;
887  		void __iomem *mmio = pp->ctl_block;
888  		u16 status;
889  		u32 gen_ctl;
890  		u32 notifier, notifier_error;
891  
892  		notifier_clears[i] = 0;
893  
894  		/* if ADMA is disabled, use standard ata interrupt handler */
895  		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
896  			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
897  				>> (NV_INT_PORT_SHIFT * i);
898  			handled += nv_host_intr(ap, irq_stat);
899  			continue;
900  		}
901  
902  		/* if in ATA register mode, check for standard interrupts */
903  		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
904  			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
905  				>> (NV_INT_PORT_SHIFT * i);
906  			if (ata_tag_valid(ap->link.active_tag))
907  				/** NV_INT_DEV indication seems unreliable
908  				    at times at least in ADMA mode. Force it
909  				    on always when a command is active, to
910  				    prevent losing interrupts. */
911  				irq_stat |= NV_INT_DEV;
912  			handled += nv_host_intr(ap, irq_stat);
913  		}
914  
915  		notifier = readl(mmio + NV_ADMA_NOTIFIER);
916  		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
917  		notifier_clears[i] = notifier | notifier_error;
918  
919  		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
920  
921  		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
922  		    !notifier_error)
923  			/* Nothing to do */
924  			continue;
925  
926  		status = readw(mmio + NV_ADMA_STAT);
927  
928  		/*
929  		 * Clear status. Ensure the controller sees the
930  		 * clearing before we start looking at any of the CPB
931  		 * statuses, so that any CPB completions after this
932  		 * point in the handler will raise another interrupt.
933  		 */
934  		writew(status, mmio + NV_ADMA_STAT);
935  		readw(mmio + NV_ADMA_STAT); /* flush posted write */
936  		rmb();
937  
938  		handled++; /* irq handled if we got here */
939  
940  		/* freeze if hotplugged or controller error */
941  		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
942  				       NV_ADMA_STAT_HOTUNPLUG |
943  				       NV_ADMA_STAT_TIMEOUT |
944  				       NV_ADMA_STAT_SERROR))) {
945  			struct ata_eh_info *ehi = &ap->link.eh_info;
946  
947  			ata_ehi_clear_desc(ehi);
948  			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
949  			if (status & NV_ADMA_STAT_TIMEOUT) {
950  				ehi->err_mask |= AC_ERR_SYSTEM;
951  				ata_ehi_push_desc(ehi, "timeout");
952  			} else if (status & NV_ADMA_STAT_HOTPLUG) {
953  				ata_ehi_hotplugged(ehi);
954  				ata_ehi_push_desc(ehi, "hotplug");
955  			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
956  				ata_ehi_hotplugged(ehi);
957  				ata_ehi_push_desc(ehi, "hot unplug");
958  			} else if (status & NV_ADMA_STAT_SERROR) {
959  				/* let EH analyze SError and figure out cause */
960  				ata_ehi_push_desc(ehi, "SError");
961  			} else
962  				ata_ehi_push_desc(ehi, "unknown");
963  			ata_port_freeze(ap);
964  			continue;
965  		}
966  
967  		if (status & (NV_ADMA_STAT_DONE |
968  			      NV_ADMA_STAT_CPBERR |
969  			      NV_ADMA_STAT_CMD_COMPLETE)) {
970  			u32 check_commands = notifier_clears[i];
971  			u32 done_mask = 0;
972  			int pos, rc;
973  
974  			if (status & NV_ADMA_STAT_CPBERR) {
975  				/* check all active commands */
976  				if (ata_tag_valid(ap->link.active_tag))
977  					check_commands = 1 <<
978  						ap->link.active_tag;
979  				else
980  					check_commands = ap->link.sactive;
981  			}
982  
983  			/* check CPBs for completed commands */
984  			while ((pos = ffs(check_commands))) {
985  				pos--;
986  				rc = nv_adma_check_cpb(ap, pos,
987  						notifier_error & (1 << pos));
988  				if (rc > 0)
989  					done_mask |= 1 << pos;
990  				else if (unlikely(rc < 0))
991  					check_commands = 0;
992  				check_commands &= ~(1 << pos);
993  			}
994  			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
995  		}
996  	}
997  
998  	if (notifier_clears[0] || notifier_clears[1]) {
999  		/* Note: Both notifier clear registers must be written
1000  		   if either is set, even if one is zero, according to NVIDIA. */
1001  		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1002  		writel(notifier_clears[0], pp->notifier_clear_block);
1003  		pp = host->ports[1]->private_data;
1004  		writel(notifier_clears[1], pp->notifier_clear_block);
1005  	}
1006  
1007  	spin_unlock(&host->lock);
1008  
1009  	return IRQ_RETVAL(handled);
1010  }
1011  
1012  static void nv_adma_freeze(struct ata_port *ap)
1013  {
1014  	struct nv_adma_port_priv *pp = ap->private_data;
1015  	void __iomem *mmio = pp->ctl_block;
1016  	u16 tmp;
1017  
1018  	nv_ck804_freeze(ap);
1019  
1020  	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1021  		return;
1022  
1023  	/* clear any outstanding CK804 notifications */
1024  	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1025  		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1026  
1027  	/* Disable interrupt */
1028  	tmp = readw(mmio + NV_ADMA_CTL);
1029  	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1030  		mmio + NV_ADMA_CTL);
1031  	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1032  }
1033  
1034  static void nv_adma_thaw(struct ata_port *ap)
1035  {
1036  	struct nv_adma_port_priv *pp = ap->private_data;
1037  	void __iomem *mmio = pp->ctl_block;
1038  	u16 tmp;
1039  
1040  	nv_ck804_thaw(ap);
1041  
1042  	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1043  		return;
1044  
1045  	/* Enable interrupt */
1046  	tmp = readw(mmio + NV_ADMA_CTL);
1047  	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1048  		mmio + NV_ADMA_CTL);
1049  	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1050  }
1051  
1052  static void nv_adma_irq_clear(struct ata_port *ap)
1053  {
1054  	struct nv_adma_port_priv *pp = ap->private_data;
1055  	void __iomem *mmio = pp->ctl_block;
1056  	u32 notifier_clears[2];
1057  
1058  	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1059  		ata_bmdma_irq_clear(ap);
1060  		return;
1061  	}
1062  
1063  	/* clear any outstanding CK804 notifications */
1064  	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1065  		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1066  
1067  	/* clear ADMA status */
1068  	writew(0xffff, mmio + NV_ADMA_STAT);
1069  
1070  	/* clear notifiers - note both ports need to be written with
1071  	   something even though we are only clearing on one */
1072  	if (ap->port_no == 0) {
1073  		notifier_clears[0] = 0xFFFFFFFF;
1074  		notifier_clears[1] = 0;
1075  	} else {
1076  		notifier_clears[0] = 0;
1077  		notifier_clears[1] = 0xFFFFFFFF;
1078  	}
1079  	pp = ap->host->ports[0]->private_data;
1080  	writel(notifier_clears[0], pp->notifier_clear_block);
1081  	pp = ap->host->ports[1]->private_data;
1082  	writel(notifier_clears[1], pp->notifier_clear_block);
1083  }
1084  
1085  static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1086  {
1087  	struct nv_adma_port_priv *pp = qc->ap->private_data;
1088  
1089  	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1090  		ata_bmdma_post_internal_cmd(qc);
1091  }
1092  
1093  static int nv_adma_port_start(struct ata_port *ap)
1094  {
1095  	struct device *dev = ap->host->dev;
1096  	struct nv_adma_port_priv *pp;
1097  	int rc;
1098  	void *mem;
1099  	dma_addr_t mem_dma;
1100  	void __iomem *mmio;
1101  	struct pci_dev *pdev = to_pci_dev(dev);
1102  	u16 tmp;
1103  
1104  	/*
1105  	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1106  	 * pad buffers.
1107  	 */
1108  	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1109  	if (rc)
1110  		return rc;
1111  
1112  	/* we might fallback to bmdma, allocate bmdma resources */
1113  	rc = ata_bmdma_port_start(ap);
1114  	if (rc)
1115  		return rc;
1116  
1117  	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1118  	if (!pp)
1119  		return -ENOMEM;
1120  
1121  	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1122  	       ap->port_no * NV_ADMA_PORT_SIZE;
1123  	pp->ctl_block = mmio;
1124  	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1125  	pp->notifier_clear_block = pp->gen_block +
1126  	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1127  
1128  	/*
1129  	 * Now that the legacy PRD and padding buffer are allocated we can
1130  	 * raise the DMA mask to allocate the CPB/APRD table.
1131  	 */
1132  	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1133  
1134  	pp->adma_dma_mask = *dev->dma_mask;
1135  
1136  	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1137  				  &mem_dma, GFP_KERNEL);
1138  	if (!mem)
1139  		return -ENOMEM;
1140  
1141  	/*
1142  	 * First item in chunk of DMA memory:
1143  	 * 128-byte command parameter block (CPB)
1144  	 * one for each command tag
1145  	 */
1146  	pp->cpb     = mem;
1147  	pp->cpb_dma = mem_dma;
1148  
1149  	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1150  	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1151  
1152  	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1153  	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1154  
1155  	/*
1156  	 * Second item: block of ADMA_SGTBL_LEN s/g entries
1157  	 */
1158  	pp->aprd = mem;
1159  	pp->aprd_dma = mem_dma;
1160  
1161  	ap->private_data = pp;
1162  
1163  	/* clear any outstanding interrupt conditions */
1164  	writew(0xffff, mmio + NV_ADMA_STAT);
1165  
1166  	/* initialize port variables */
1167  	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1168  
1169  	/* clear CPB fetch count */
1170  	writew(0, mmio + NV_ADMA_CPB_COUNT);
1171  
1172  	/* clear GO for register mode, enable interrupt */
1173  	tmp = readw(mmio + NV_ADMA_CTL);
1174  	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1175  		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1176  
1177  	tmp = readw(mmio + NV_ADMA_CTL);
1178  	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1179  	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1180  	udelay(1);
1181  	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1182  	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1183  
1184  	return 0;
1185  }
1186  
1187  static void nv_adma_port_stop(struct ata_port *ap)
1188  {
1189  	struct nv_adma_port_priv *pp = ap->private_data;
1190  	void __iomem *mmio = pp->ctl_block;
1191  
1192  	writew(0, mmio + NV_ADMA_CTL);
1193  }
1194  
1195  #ifdef CONFIG_PM
1196  static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1197  {
1198  	struct nv_adma_port_priv *pp = ap->private_data;
1199  	void __iomem *mmio = pp->ctl_block;
1200  
1201  	/* Go to register mode - clears GO */
1202  	nv_adma_register_mode(ap);
1203  
1204  	/* clear CPB fetch count */
1205  	writew(0, mmio + NV_ADMA_CPB_COUNT);
1206  
1207  	/* disable interrupt, shut down port */
1208  	writew(0, mmio + NV_ADMA_CTL);
1209  
1210  	return 0;
1211  }
1212  
1213  static int nv_adma_port_resume(struct ata_port *ap)
1214  {
1215  	struct nv_adma_port_priv *pp = ap->private_data;
1216  	void __iomem *mmio = pp->ctl_block;
1217  	u16 tmp;
1218  
1219  	/* set CPB block location */
1220  	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
1221  	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
1222  
1223  	/* clear any outstanding interrupt conditions */
1224  	writew(0xffff, mmio + NV_ADMA_STAT);
1225  
1226  	/* initialize port variables */
1227  	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1228  
1229  	/* clear CPB fetch count */
1230  	writew(0, mmio + NV_ADMA_CPB_COUNT);
1231  
1232  	/* clear GO for register mode, enable interrupt */
1233  	tmp = readw(mmio + NV_ADMA_CTL);
1234  	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1235  		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1236  
1237  	tmp = readw(mmio + NV_ADMA_CTL);
1238  	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1239  	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1240  	udelay(1);
1241  	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1242  	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1243  
1244  	return 0;
1245  }
1246  #endif
1247  
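/*
 * Point the legacy taskfile accessors at the ADMA port register space;
 * each shadow register sits at (register index * 4) from the start of
 * the port's MMIO block, with the control/altstatus register at 0x20.
 */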
1248  static void nv_adma_setup_port(struct ata_port *ap)
1249  {
1250  	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1251  	struct ata_ioports *ioport = &ap->ioaddr;
1252  
1253  	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1254  
1255  	ioport->cmd_addr	= mmio;
1256  	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
1257  	ioport->error_addr	=
1258  	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
1259  	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
1260  	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
1261  	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
1262  	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
1263  	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
1264  	ioport->status_addr	=
1265  	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
1266  	ioport->altstatus_addr	=
1267  	ioport->ctl_addr	= mmio + 0x20;
1268  }
1269  
1270  static int nv_adma_host_init(struct ata_host *host)
1271  {
1272  	struct pci_dev *pdev = to_pci_dev(host->dev);
1273  	unsigned int i;
1274  	u32 tmp32;
1275  
1276  	/* enable ADMA on the ports */
1277  	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278  	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279  		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280  		 NV_MCP_SATA_CFG_20_PORT1_EN |
1281  		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1282  
1283  	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1284  
1285  	for (i = 0; i < host->n_ports; i++)
1286  		nv_adma_setup_port(host->ports[i]);
1287  
1288  	return 0;
1289  }
1290  
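/*
 * Fill one ADMA PRD (APRD) entry from a scatterlist segment.  The last
 * segment is flagged NV_APRD_END; all others except the fifth inline
 * entry (index 4) are flagged NV_APRD_CONT.
 */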
1291  static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292  			      struct scatterlist *sg,
1293  			      int idx,
1294  			      struct nv_adma_prd *aprd)
1295  {
1296  	u8 flags = 0;
1297  	if (qc->tf.flags & ATA_TFLAG_WRITE)
1298  		flags |= NV_APRD_WRITE;
1299  	if (idx == qc->n_elem - 1)
1300  		flags |= NV_APRD_END;
1301  	else if (idx != 4)
1302  		flags |= NV_APRD_CONT;
1303  
1304  	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1305  	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1306  	aprd->flags = flags;
1307  	aprd->packet_len = 0;
1308  }
1309  
1310  static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311  {
1312  	struct nv_adma_port_priv *pp = qc->ap->private_data;
1313  	struct nv_adma_prd *aprd;
1314  	struct scatterlist *sg;
1315  	unsigned int si;
1316  
1317  	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1318  		aprd = (si < 5) ? &cpb->aprd[si] :
1319  			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1320  		nv_adma_fill_aprd(qc, sg, si, aprd);
1321  	}
1322  	if (si > 5)
1323  		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1324  	else
1325  		cpb->next_aprd = cpu_to_le64(0);
1326  }
1327  
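/*
 * Decide whether a command must be issued through the legacy register
 * interface instead of the ADMA engine: ATAPI setups, polled commands,
 * and anything that is neither a DMA-mapped nor a no-data command.
 */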
1328  static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1329  {
1330  	struct nv_adma_port_priv *pp = qc->ap->private_data;
1331  
1332  	/* ADMA engine can only be used for non-ATAPI DMA commands,
1333  	   or interrupt-driven no-data commands. */
1334  	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1335  	   (qc->tf.flags & ATA_TFLAG_POLLING))
1336  		return 1;
1337  
1338  	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1339  	   (qc->tf.protocol == ATA_PROT_NODATA))
1340  		return 0;
1341  
1342  	return 1;
1343  }
1344  
1345  static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1346  {
1347  	struct nv_adma_port_priv *pp = qc->ap->private_data;
1348  	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1349  	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1350  		       NV_CPB_CTL_IEN;
1351  
1352  	if (nv_adma_use_reg_mode(qc)) {
1353  		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1354  			(qc->flags & ATA_QCFLAG_DMAMAP));
1355  		nv_adma_register_mode(qc->ap);
1356  		ata_bmdma_qc_prep(qc);
1357  		return AC_ERR_OK;
1358  	}
1359  
1360  	cpb->resp_flags = NV_CPB_RESP_DONE;
1361  	wmb();
1362  	cpb->ctl_flags = 0;
1363  	wmb();
1364  
1365  	cpb->len		= 3;
1366  	cpb->tag		= qc->hw_tag;
1367  	cpb->next_cpb_idx	= 0;
1368  
1369  	/* turn on NCQ flags for NCQ commands */
1370  	if (qc->tf.protocol == ATA_PROT_NCQ)
1371  		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1372  
1373  	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1374  
1375  	if (qc->flags & ATA_QCFLAG_DMAMAP) {
1376  		nv_adma_fill_sg(qc, cpb);
1377  		ctl_flags |= NV_CPB_CTL_APRD_VALID;
1378  	} else
1379  		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1380  
1381  	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1382  	   until we are finished filling in all of the contents */
1383  	wmb();
1384  	cpb->ctl_flags = ctl_flags;
1385  	wmb();
1386  	cpb->resp_flags = 0;
1387  
1388  	return AC_ERR_OK;
1389  }
1390  
1391  static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1392  {
1393  	struct nv_adma_port_priv *pp = qc->ap->private_data;
1394  	void __iomem *mmio = pp->ctl_block;
1395  	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1396  
1397  	/* We can't handle result taskfile with NCQ commands, since
1398  	   retrieving the taskfile switches us out of ADMA mode and would abort
1399  	   existing commands. */
1400  	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1401  		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1402  		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1403  		return AC_ERR_SYSTEM;
1404  	}
1405  
1406  	if (nv_adma_use_reg_mode(qc)) {
1407  		/* use ATA register mode */
1408  		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1409  			(qc->flags & ATA_QCFLAG_DMAMAP));
1410  		nv_adma_register_mode(qc->ap);
1411  		return ata_bmdma_qc_issue(qc);
1412  	} else
1413  		nv_adma_mode(qc->ap);
1414  
1415  	/* write append register, command tag in lower 8 bits
1416  	   and (number of cpbs to append -1) in top 8 bits */
1417  	wmb();
1418  
1419  	if (curr_ncq != pp->last_issue_ncq) {
1420  		/* Seems to need some delay before switching between NCQ and
1421  		   non-NCQ commands, else we get command timeouts and such. */
1422  		udelay(20);
1423  		pp->last_issue_ncq = curr_ncq;
1424  	}
1425  
1426  	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1427  
1428  	return 0;
1429  }
1430  
1431  static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1432  {
1433  	struct ata_host *host = dev_instance;
1434  	unsigned int i;
1435  	unsigned int handled = 0;
1436  	unsigned long flags;
1437  
1438  	spin_lock_irqsave(&host->lock, flags);
1439  
1440  	for (i = 0; i < host->n_ports; i++) {
1441  		struct ata_port *ap = host->ports[i];
1442  		struct ata_queued_cmd *qc;
1443  
1444  		qc = ata_qc_from_tag(ap, ap->link.active_tag);
1445  		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1446  			handled += ata_bmdma_port_intr(ap, qc);
1447  		} else {
1448  			/*
1449  			 * No request pending?  Clear interrupt status
1450  			 * anyway, in case there's one pending.
1451  			 */
1452  			ap->ops->sff_check_status(ap);
1453  		}
1454  	}
1455  
1456  	spin_unlock_irqrestore(&host->lock, flags);
1457  
1458  	return IRQ_RETVAL(handled);
1459  }
1460  
1461  static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1462  {
1463  	int i, handled = 0;
1464  
1465  	for (i = 0; i < host->n_ports; i++) {
1466  		handled += nv_host_intr(host->ports[i], irq_stat);
1467  		irq_stat >>= NV_INT_PORT_SHIFT;
1468  	}
1469  
1470  	return IRQ_RETVAL(handled);
1471  }
1472  
1473  static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1474  {
1475  	struct ata_host *host = dev_instance;
1476  	u8 irq_stat;
1477  	irqreturn_t ret;
1478  
1479  	spin_lock(&host->lock);
1480  	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1481  	ret = nv_do_interrupt(host, irq_stat);
1482  	spin_unlock(&host->lock);
1483  
1484  	return ret;
1485  }
1486  
1487  static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1488  {
1489  	struct ata_host *host = dev_instance;
1490  	u8 irq_stat;
1491  	irqreturn_t ret;
1492  
1493  	spin_lock(&host->lock);
1494  	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1495  	ret = nv_do_interrupt(host, irq_stat);
1496  	spin_unlock(&host->lock);
1497  
1498  	return ret;
1499  }
1500  
1501  static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1502  {
1503  	if (sc_reg > SCR_CONTROL)
1504  		return -EINVAL;
1505  
1506  	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1507  	return 0;
1508  }
1509  
1510  static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1511  {
1512  	if (sc_reg > SCR_CONTROL)
1513  		return -EINVAL;
1514  
1515  	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1516  	return 0;
1517  }
1518  
1519  static int nv_hardreset(struct ata_link *link, unsigned int *class,
1520  			unsigned long deadline)
1521  {
1522  	struct ata_eh_context *ehc = &link->eh_context;
1523  
1524  	/* Do hardreset iff it's post-boot probing, please read the
1525  	 * comment above port ops for details.
1526  	 */
1527  	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1528  	    !ata_dev_enabled(link->device))
1529  		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1530  				    NULL, NULL);
1531  	else {
1532  		const unsigned int *timing = sata_ehc_deb_timing(ehc);
1533  		int rc;
1534  
1535  		if (!(ehc->i.flags & ATA_EHI_QUIET))
1536  			ata_link_info(link,
1537  				      "nv: skipping hardreset on occupied port\n");
1538  
1539  		/* make sure the link is online */
1540  		rc = sata_link_resume(link, timing, deadline);
1541  		/* whine about phy resume failure but proceed */
1542  		if (rc && rc != -EOPNOTSUPP)
1543  			ata_link_warn(link, "failed to resume link (errno=%d)\n",
1544  				      rc);
1545  	}
1546  
1547  	/* device signature acquisition is unreliable */
1548  	return -EAGAIN;
1549  }
1550  
1551  static void nv_nf2_freeze(struct ata_port *ap)
1552  {
1553  	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1554  	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1555  	u8 mask;
1556  
1557  	mask = ioread8(scr_addr + NV_INT_ENABLE);
1558  	mask &= ~(NV_INT_ALL << shift);
1559  	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1560  }
1561  
1562  static void nv_nf2_thaw(struct ata_port *ap)
1563  {
1564  	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1565  	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1566  	u8 mask;
1567  
1568  	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1569  
1570  	mask = ioread8(scr_addr + NV_INT_ENABLE);
1571  	mask |= (NV_INT_MASK << shift);
1572  	iowrite8(mask, scr_addr + NV_INT_ENABLE);
1573  }
1574  
1575  static void nv_ck804_freeze(struct ata_port *ap)
1576  {
1577  	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1578  	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1579  	u8 mask;
1580  
1581  	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1582  	mask &= ~(NV_INT_ALL << shift);
1583  	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1584  }
1585  
1586  static void nv_ck804_thaw(struct ata_port *ap)
1587  {
1588  	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1589  	int shift = ap->port_no * NV_INT_PORT_SHIFT;
1590  	u8 mask;
1591  
1592  	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1593  
1594  	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1595  	mask |= (NV_INT_MASK << shift);
1596  	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1597  }
1598  
1599  static void nv_mcp55_freeze(struct ata_port *ap)
1600  {
1601  	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1602  	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1603  	u32 mask;
1604  
1605  	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1606  
1607  	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1608  	mask &= ~(NV_INT_ALL_MCP55 << shift);
1609  	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1610  }
1611  
1612  static void nv_mcp55_thaw(struct ata_port *ap)
1613  {
1614  	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1615  	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1616  	u32 mask;
1617  
1618  	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1619  
1620  	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1621  	mask |= (NV_INT_MASK_MCP55 << shift);
1622  	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1623  }
1624  
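/* ADMA error handling: if the port is still in ADMA mode, dump the
 * controller and CPB state for the active tags, drop back to register
 * mode, invalidate every CPB, reset the ADMA channel and then let the
 * regular BMDMA error handler take over.
 */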
1625  static void nv_adma_error_handler(struct ata_port *ap)
1626  {
1627  	struct nv_adma_port_priv *pp = ap->private_data;
1628  	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1629  		void __iomem *mmio = pp->ctl_block;
1630  		int i;
1631  		u16 tmp;
1632  
1633  		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1634  			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1635  			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1636  			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1637  			u32 status = readw(mmio + NV_ADMA_STAT);
1638  			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1639  			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1640  
1641  			ata_port_err(ap,
1642  				"EH in ADMA mode, notifier 0x%X "
1643  				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1644  				"next cpb count 0x%X next cpb idx 0x%x\n",
1645  				notifier, notifier_error, gen_ctl, status,
1646  				cpb_count, next_cpb_idx);
1647  
1648  			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1649  				struct nv_adma_cpb *cpb = &pp->cpb[i];
1650  				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1651  				    ap->link.sactive & (1 << i))
1652  					ata_port_err(ap,
1653  						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1654  						i, cpb->ctl_flags, cpb->resp_flags);
1655  			}
1656  		}
1657  
1658  		/* Push us back into port register mode for error handling. */
1659  		nv_adma_register_mode(ap);
1660  
1661  		/* Mark all of the CPBs as invalid to prevent them from
1662  		   being executed */
1663  		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1664  			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1665  
1666  		/* clear CPB fetch count */
1667  		writew(0, mmio + NV_ADMA_CPB_COUNT);
1668  
1669  		/* Reset channel */
1670  		tmp = readw(mmio + NV_ADMA_CTL);
1671  		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1672  		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1673  		udelay(1);
1674  		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1675  		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1676  	}
1677  
1678  	ata_bmdma_error_handler(ap);
1679  }
1680  
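/* SWNCQ defer queue: a small ring of hardware tags plus a bitmask.
 * Commands that cannot be issued immediately are parked here and
 * re-issued one at a time as earlier commands complete.
 */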
1681  static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1682  {
1683  	struct nv_swncq_port_priv *pp = ap->private_data;
1684  	struct defer_queue *dq = &pp->defer_queue;
1685  
1686  	/* queue is full */
1687  	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1688  	dq->defer_bits |= (1 << qc->hw_tag);
1689  	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1690  }
1691  
1692  static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1693  {
1694  	struct nv_swncq_port_priv *pp = ap->private_data;
1695  	struct defer_queue *dq = &pp->defer_queue;
1696  	unsigned int tag;
1697  
1698  	if (dq->head == dq->tail)	/* null queue */
1699  		return NULL;
1700  
1701  	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1702  	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1703  	WARN_ON(!(dq->defer_bits & (1 << tag)));
1704  	dq->defer_bits &= ~(1 << tag);
1705  
1706  	return ata_qc_from_tag(ap, tag);
1707  }
1708  
1709  static void nv_swncq_fis_reinit(struct ata_port *ap)
1710  {
1711  	struct nv_swncq_port_priv *pp = ap->private_data;
1712  
1713  	pp->dhfis_bits = 0;
1714  	pp->dmafis_bits = 0;
1715  	pp->sdbfis_bits = 0;
1716  	pp->ncq_flags = 0;
1717  }
1718  
1719  static void nv_swncq_pp_reinit(struct ata_port *ap)
1720  {
1721  	struct nv_swncq_port_priv *pp = ap->private_data;
1722  	struct defer_queue *dq = &pp->defer_queue;
1723  
1724  	dq->head = 0;
1725  	dq->tail = 0;
1726  	dq->defer_bits = 0;
1727  	pp->qc_active = 0;
1728  	pp->last_issue_tag = ATA_TAG_POISON;
1729  	nv_swncq_fis_reinit(ap);
1730  }
1731  
1732  static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1733  {
1734  	struct nv_swncq_port_priv *pp = ap->private_data;
1735  
1736  	writew(fis, pp->irq_block);
1737  }
1738  
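/* ata_bmdma_stop() only dereferences qc->ap, so a dummy qc on the
 * stack is enough to stop the DMA engine when no real command is
 * at hand.
 */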
1739  static void __ata_bmdma_stop(struct ata_port *ap)
1740  {
1741  	struct ata_queued_cmd qc;
1742  
1743  	qc.ap = ap;
1744  	ata_bmdma_stop(&qc);
1745  }
1746  
1747  static void nv_swncq_ncq_stop(struct ata_port *ap)
1748  {
1749  	struct nv_swncq_port_priv *pp = ap->private_data;
1750  	unsigned int i;
1751  	u32 sactive;
1752  	u32 done_mask;
1753  
1754  	ata_port_err(ap, "EH in SWNCQ mode, QC: qc_active 0x%llX sactive 0x%X\n",
1755  		     ap->qc_active, ap->link.sactive);
1756  	ata_port_err(ap,
1757  		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1758  		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1759  		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1760  		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1761  
1762  	ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1763  		     ap->ops->sff_check_status(ap),
1764  		     ioread8(ap->ioaddr.error_addr));
1765  
1766  	sactive = readl(pp->sactive_block);
1767  	done_mask = pp->qc_active ^ sactive;
1768  
1769  	ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1770  	for (i = 0; i < ATA_MAX_QUEUE; i++) {
1771  		u8 err = 0;
1772  		if (pp->qc_active & (1 << i))
1773  			err = 0;
1774  		else if (done_mask & (1 << i))
1775  			err = 1;
1776  		else
1777  			continue;
1778  
1779  		ata_port_err(ap,
1780  			     "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1781  			     (pp->dhfis_bits >> i) & 0x1,
1782  			     (pp->dmafis_bits >> i) & 0x1,
1783  			     (pp->sdbfis_bits >> i) & 0x1,
1784  			     (sactive >> i) & 0x1,
1785  			     (err ? "error! tag doesn't exist" : " "));
1786  	}
1787  
1788  	nv_swncq_pp_reinit(ap);
1789  	ap->ops->sff_irq_clear(ap);
1790  	__ata_bmdma_stop(ap);
1791  	nv_swncq_irq_clear(ap, 0xffff);
1792  }
1793  
1794  static void nv_swncq_error_handler(struct ata_port *ap)
1795  {
1796  	struct ata_eh_context *ehc = &ap->link.eh_context;
1797  
1798  	if (ap->link.sactive) {
1799  		nv_swncq_ncq_stop(ap);
1800  		ehc->i.action |= ATA_EH_RESET;
1801  	}
1802  
1803  	ata_bmdma_error_handler(ap);
1804  }
1805  
1806  #ifdef CONFIG_PM
1807  static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1808  {
1809  	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1810  	u32 tmp;
1811  
1812  	/* clear irq */
1813  	writel(~0, mmio + NV_INT_STATUS_MCP55);
1814  
1815  	/* disable irq */
1816  	writel(0, mmio + NV_INT_ENABLE_MCP55);
1817  
1818  	/* disable swncq */
1819  	tmp = readl(mmio + NV_CTL_MCP55);
1820  	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1821  	writel(tmp, mmio + NV_CTL_MCP55);
1822  
1823  	return 0;
1824  }
1825  
1826  static int nv_swncq_port_resume(struct ata_port *ap)
1827  {
1828  	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1829  	u32 tmp;
1830  
1831  	/* clear irq */
1832  	writel(~0, mmio + NV_INT_STATUS_MCP55);
1833  
1834  	/* enable irq */
1835  	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1836  
1837  	/* enable swncq */
1838  	tmp = readl(mmio + NV_CTL_MCP55);
1839  	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1840  
1841  	return 0;
1842  }
1843  #endif
1844  
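/* One-time SWNCQ setup: disable ECO 398 via PCI config register 0x7f,
 * enable SWNCQ for both ports in the host control register, unmask the
 * interrupt sources we handle and ack anything already pending.
 */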
1845  static void nv_swncq_host_init(struct ata_host *host)
1846  {
1847  	u32 tmp;
1848  	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1849  	struct pci_dev *pdev = to_pci_dev(host->dev);
1850  	u8 regval;
1851  
1852  	/* disable  ECO 398 */
1853  	pci_read_config_byte(pdev, 0x7f, &regval);
1854  	regval &= ~(1 << 7);
1855  	pci_write_config_byte(pdev, 0x7f, regval);
1856  
1857  	/* enable swncq */
1858  	tmp = readl(mmio + NV_CTL_MCP55);
1859  	dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
1860  	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1861  
1862  	/* enable irq intr */
1863  	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1864  	dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
1865  	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1866  
1867  	/*  clear port irq */
1868  	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1869  }
1870  
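/* On MCP51, and on MCP55 revisions up to A2, NCQ is disabled for
 * Maxtor drives by forcing their queue depth down to 1.
 */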
1871  static int nv_swncq_slave_config(struct scsi_device *sdev)
1872  {
1873  	struct ata_port *ap = ata_shost_to_port(sdev->host);
1874  	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1875  	struct ata_device *dev;
1876  	int rc;
1877  	u8 rev;
1878  	u8 check_maxtor = 0;
1879  	unsigned char model_num[ATA_ID_PROD_LEN + 1];
1880  
1881  	rc = ata_scsi_slave_config(sdev);
1882  	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1883  		/* Not a proper libata device, ignore */
1884  		return rc;
1885  
1886  	dev = &ap->link.device[sdev->id];
1887  	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1888  		return rc;
1889  
1890  	/* if MCP51 and Maxtor, then disable ncq */
1891  	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1892  		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1893  		check_maxtor = 1;
1894  
1895  	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1896  	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1897  		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1898  		pci_read_config_byte(pdev, 0x8, &rev);
1899  		if (rev <= 0xa2)
1900  			check_maxtor = 1;
1901  	}
1902  
1903  	if (!check_maxtor)
1904  		return rc;
1905  
1906  	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1907  
1908  	if (strncmp(model_num, "Maxtor", 6) == 0) {
1909  		ata_scsi_change_queue_depth(sdev, 1);
1910  		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1911  			       sdev->queue_depth);
1912  	}
1913  
1914  	return rc;
1915  }
1916  
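/* Per-port SWNCQ setup: keep the BMDMA resources around as a fallback,
 * allocate one PRD table per NCQ tag, and cache the addresses of the
 * SActive, interrupt status and tag registers for this port.
 */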
1917  static int nv_swncq_port_start(struct ata_port *ap)
1918  {
1919  	struct device *dev = ap->host->dev;
1920  	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1921  	struct nv_swncq_port_priv *pp;
1922  	int rc;
1923  
1924  	/* we might fallback to bmdma, allocate bmdma resources */
1925  	rc = ata_bmdma_port_start(ap);
1926  	if (rc)
1927  		return rc;
1928  
1929  	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1930  	if (!pp)
1931  		return -ENOMEM;
1932  
1933  	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1934  				      &pp->prd_dma, GFP_KERNEL);
1935  	if (!pp->prd)
1936  		return -ENOMEM;
1937  
1938  	ap->private_data = pp;
1939  	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1940  	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1941  	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1942  
1943  	return 0;
1944  }
1945  
1946  static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1947  {
1948  	if (qc->tf.protocol != ATA_PROT_NCQ) {
1949  		ata_bmdma_qc_prep(qc);
1950  		return AC_ERR_OK;
1951  	}
1952  
1953  	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1954  		return AC_ERR_OK;
1955  
1956  	nv_swncq_fill_sg(qc);
1957  
1958  	return AC_ERR_OK;
1959  }
1960  
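/* Build the PRD table for one NCQ command.  Each tag has its own table
 * inside pp->prd, and segments are split so that no entry crosses a
 * 64K boundary, as required by the BMDMA engine.
 */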
1961  static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1962  {
1963  	struct ata_port *ap = qc->ap;
1964  	struct scatterlist *sg;
1965  	struct nv_swncq_port_priv *pp = ap->private_data;
1966  	struct ata_bmdma_prd *prd;
1967  	unsigned int si, idx;
1968  
1969  	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1970  
1971  	idx = 0;
1972  	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1973  		u32 addr, offset;
1974  		u32 sg_len, len;
1975  
1976  		addr = (u32)sg_dma_address(sg);
1977  		sg_len = sg_dma_len(sg);
1978  
1979  		while (sg_len) {
1980  			offset = addr & 0xffff;
1981  			len = sg_len;
1982  			if ((offset + sg_len) > 0x10000)
1983  				len = 0x10000 - offset;
1984  
1985  			prd[idx].addr = cpu_to_le32(addr);
1986  			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1987  
1988  			idx++;
1989  			sg_len -= len;
1990  			addr += len;
1991  		}
1992  	}
1993  
1994  	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1995  }
1996  
1997  static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
1998  					  struct ata_queued_cmd *qc)
1999  {
2000  	struct nv_swncq_port_priv *pp = ap->private_data;
2001  
2002  	if (qc == NULL)
2003  		return 0;
2004  
2005  	writel((1 << qc->hw_tag), pp->sactive_block);
2006  	pp->last_issue_tag = qc->hw_tag;
2007  	pp->dhfis_bits &= ~(1 << qc->hw_tag);
2008  	pp->dmafis_bits &= ~(1 << qc->hw_tag);
2009  	pp->qc_active |= (0x1 << qc->hw_tag);
2010  
2011  	trace_ata_tf_load(ap, &qc->tf);
2012  	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
2013  	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
2014  	ap->ops->sff_exec_command(ap, &qc->tf);
2015  
2016  	return 0;
2017  }
2018  
2019  static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2020  {
2021  	struct ata_port *ap = qc->ap;
2022  	struct nv_swncq_port_priv *pp = ap->private_data;
2023  
2024  	if (qc->tf.protocol != ATA_PROT_NCQ)
2025  		return ata_bmdma_qc_issue(qc);
2026  
2027  	if (!pp->qc_active)
2028  		nv_swncq_issue_atacmd(ap, qc);
2029  	else
2030  		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */
2031  
2032  	return 0;
2033  }
2034  
2035  static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2036  {
2037  	u32 serror;
2038  	struct ata_eh_info *ehi = &ap->link.eh_info;
2039  
2040  	ata_ehi_clear_desc(ehi);
2041  
2042  	/* AHCI needs SError cleared; otherwise, it might lock up */
2043  	sata_scr_read(&ap->link, SCR_ERROR, &serror);
2044  	sata_scr_write(&ap->link, SCR_ERROR, serror);
2045  
2046  	/* analyze @irq_stat */
2047  	if (fis & NV_SWNCQ_IRQ_ADDED)
2048  		ata_ehi_push_desc(ehi, "hot plug");
2049  	else if (fis & NV_SWNCQ_IRQ_REMOVED)
2050  		ata_ehi_push_desc(ehi, "hot unplug");
2051  
2052  	ata_ehi_hotplugged(ehi);
2053  
2054  	/* okay, let's hand over to EH */
2055  	ehi->serror |= serror;
2056  
2057  	ata_port_freeze(ap);
2058  }
2059  
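/* Handle a Set Device Bits FIS: every tag that has dropped out of
 * SActive since issue is now complete.  Finished commands are reported
 * to libata; if a Device to Host Register FIS went missing, the last
 * issued command is resent, otherwise the next deferred command goes out.
 */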
2060  static int nv_swncq_sdbfis(struct ata_port *ap)
2061  {
2062  	struct ata_queued_cmd *qc;
2063  	struct nv_swncq_port_priv *pp = ap->private_data;
2064  	struct ata_eh_info *ehi = &ap->link.eh_info;
2065  	u32 sactive;
2066  	u32 done_mask;
2067  	u8 host_stat;
2068  	u8 lack_dhfis = 0;
2069  
2070  	host_stat = ap->ops->bmdma_status(ap);
2071  	trace_ata_bmdma_status(ap, host_stat);
2072  	if (unlikely(host_stat & ATA_DMA_ERR)) {
2073  		/* error when transferring data to/from memory */
2074  		ata_ehi_clear_desc(ehi);
2075  		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2076  		ehi->err_mask |= AC_ERR_HOST_BUS;
2077  		ehi->action |= ATA_EH_RESET;
2078  		return -EINVAL;
2079  	}
2080  
2081  	ap->ops->sff_irq_clear(ap);
2082  	__ata_bmdma_stop(ap);
2083  
2084  	sactive = readl(pp->sactive_block);
2085  	done_mask = pp->qc_active ^ sactive;
2086  
2087  	pp->qc_active &= ~done_mask;
2088  	pp->dhfis_bits &= ~done_mask;
2089  	pp->dmafis_bits &= ~done_mask;
2090  	pp->sdbfis_bits |= done_mask;
2091  	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2092  
2093  	if (!ap->qc_active) {
2094  		ata_port_dbg(ap, "over\n");
2095  		nv_swncq_pp_reinit(ap);
2096  		return 0;
2097  	}
2098  
2099  	if (pp->qc_active & pp->dhfis_bits)
2100  		return 0;
2101  
2102  	if ((pp->ncq_flags & ncq_saw_backout) ||
2103  	    (pp->qc_active ^ pp->dhfis_bits))
2104  		/* if the controller didn't get a Device to Host Register FIS,
2105  		 * the driver needs to reissue the command.
2106  		 */
2107  		lack_dhfis = 1;
2108  
2109  	ata_port_dbg(ap, "QC: qc_active 0x%llx,"
2110  		     "SWNCQ:qc_active 0x%X defer_bits %X "
2111  		     "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2112  		     ap->qc_active, pp->qc_active,
2113  		     pp->defer_queue.defer_bits, pp->dhfis_bits,
2114  		     pp->dmafis_bits, pp->last_issue_tag);
2115  
2116  	nv_swncq_fis_reinit(ap);
2117  
2118  	if (lack_dhfis) {
2119  		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2120  		nv_swncq_issue_atacmd(ap, qc);
2121  		return 0;
2122  	}
2123  
2124  	if (pp->defer_queue.defer_bits) {
2125  		/* send deferral queue command */
2126  		qc = nv_swncq_qc_from_dq(ap);
2127  		WARN_ON(qc == NULL);
2128  		nv_swncq_issue_atacmd(ap, qc);
2129  	}
2130  
2131  	return 0;
2132  }
2133  
2134  static inline u32 nv_swncq_tag(struct ata_port *ap)
2135  {
2136  	struct nv_swncq_port_priv *pp = ap->private_data;
2137  	u32 tag;
2138  
2139  	tag = readb(pp->tag_block) >> 2;
2140  	return (tag & 0x1f);
2141  }
2142  
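/* A DMA Setup FIS arrived: look up which tag the drive wants to
 * transfer, point the BMDMA engine at that tag's PRD table, set the
 * transfer direction and start the engine.
 */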
2143  static void nv_swncq_dmafis(struct ata_port *ap)
2144  {
2145  	struct ata_queued_cmd *qc;
2146  	unsigned int rw;
2147  	u8 dmactl;
2148  	u32 tag;
2149  	struct nv_swncq_port_priv *pp = ap->private_data;
2150  
2151  	__ata_bmdma_stop(ap);
2152  	tag = nv_swncq_tag(ap);
2153  
2154  	ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
2155  	qc = ata_qc_from_tag(ap, tag);
2156  
2157  	if (unlikely(!qc))
2158  		return;
2159  
2160  	rw = qc->tf.flags & ATA_TFLAG_WRITE;
2161  
2162  	/* load PRD table addr. */
2163  	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2164  		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2165  
2166  	/* specify data direction, triple-check start bit is clear */
2167  	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2168  	dmactl &= ~ATA_DMA_WR;
2169  	if (!rw)
2170  		dmactl |= ATA_DMA_WR;
2171  
2172  	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2173  }
2174  
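/* Per-port SWNCQ interrupt handling: ack the FIS bits first, then deal
 * with hotplug, device errors, SDB completions, Register FISes and DMA
 * Setup FISes in turn, freezing the port whenever something looks wrong.
 */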
2175  static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2176  {
2177  	struct nv_swncq_port_priv *pp = ap->private_data;
2178  	struct ata_queued_cmd *qc;
2179  	struct ata_eh_info *ehi = &ap->link.eh_info;
2180  	u32 serror;
2181  	u8 ata_stat;
2182  
2183  	ata_stat = ap->ops->sff_check_status(ap);
2184  	nv_swncq_irq_clear(ap, fis);
2185  	if (!fis)
2186  		return;
2187  
2188  	if (ata_port_is_frozen(ap))
2189  		return;
2190  
2191  	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2192  		nv_swncq_hotplug(ap, fis);
2193  		return;
2194  	}
2195  
2196  	if (!pp->qc_active)
2197  		return;
2198  
2199  	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2200  		return;
2201  	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2202  
2203  	if (ata_stat & ATA_ERR) {
2204  		ata_ehi_clear_desc(ehi);
2205  		ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2206  		ehi->err_mask |= AC_ERR_DEV;
2207  		ehi->serror |= serror;
2208  		ehi->action |= ATA_EH_RESET;
2209  		ata_port_freeze(ap);
2210  		return;
2211  	}
2212  
2213  	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2214  		/* If the IRQ signals a backout, the driver must issue
2215  		 * the command again some time later.
2216  		 */
2217  		pp->ncq_flags |= ncq_saw_backout;
2218  	}
2219  
2220  	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2221  		pp->ncq_flags |= ncq_saw_sdb;
2222  		ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
2223  			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2224  			pp->qc_active, pp->dhfis_bits,
2225  			pp->dmafis_bits, readl(pp->sactive_block));
2226  		if (nv_swncq_sdbfis(ap) < 0)
2227  			goto irq_error;
2228  	}
2229  
2230  	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2231  		/* The interrupt indicates the new command
2232  		 * was transmitted correctly to the drive.
2233  		 */
2234  		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2235  		pp->ncq_flags |= ncq_saw_d2h;
2236  		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2237  			ata_ehi_push_desc(ehi, "illegal fis transaction");
2238  			ehi->err_mask |= AC_ERR_HSM;
2239  			ehi->action |= ATA_EH_RESET;
2240  			goto irq_error;
2241  		}
2242  
2243  		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2244  		    !(pp->ncq_flags & ncq_saw_dmas)) {
2245  			ata_stat = ap->ops->sff_check_status(ap);
2246  			if (ata_stat & ATA_BUSY)
2247  				goto irq_exit;
2248  
2249  			if (pp->defer_queue.defer_bits) {
2250  				ata_port_dbg(ap, "send next command\n");
2251  				qc = nv_swncq_qc_from_dq(ap);
2252  				nv_swncq_issue_atacmd(ap, qc);
2253  			}
2254  		}
2255  	}
2256  
2257  	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2258  		/* program the dma controller with appropriate PRD buffers
2259  		 * and start the DMA transfer for requested command.
2260  		 */
2261  		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2262  		pp->ncq_flags |= ncq_saw_dmas;
2263  		nv_swncq_dmafis(ap);
2264  	}
2265  
2266  irq_exit:
2267  	return;
2268  irq_error:
2269  	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2270  	ata_port_freeze(ap);
2271  	return;
2272  }
2273  
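/* Top-level MCP55 interrupt handler for SWNCQ mode.  The status word
 * covers both ports; each port takes the NCQ path while NCQ commands
 * are outstanding and the legacy/hotplug path otherwise.
 */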
2274  static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2275  {
2276  	struct ata_host *host = dev_instance;
2277  	unsigned int i;
2278  	unsigned int handled = 0;
2279  	unsigned long flags;
2280  	u32 irq_stat;
2281  
2282  	spin_lock_irqsave(&host->lock, flags);
2283  
2284  	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2285  
2286  	for (i = 0; i < host->n_ports; i++) {
2287  		struct ata_port *ap = host->ports[i];
2288  
2289  		if (ap->link.sactive) {
2290  			nv_swncq_host_interrupt(ap, (u16)irq_stat);
2291  			handled = 1;
2292  		} else {
2293  			if (irq_stat)	/* preserve hotplug bits */
2294  				nv_swncq_irq_clear(ap, 0xfff0);
2295  
2296  			handled += nv_host_intr(ap, (u8)irq_stat);
2297  		}
2298  		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2299  	}
2300  
2301  	spin_unlock_irqrestore(&host->lock, flags);
2302  
2303  	return IRQ_RETVAL(handled);
2304  }
2305  
2306  static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2307  {
2308  	const struct ata_port_info *ppi[] = { NULL, NULL };
2309  	struct nv_pi_priv *ipriv;
2310  	struct ata_host *host;
2311  	struct nv_host_priv *hpriv;
2312  	int rc;
2313  	u32 bar;
2314  	void __iomem *base;
2315  	unsigned long type = ent->driver_data;
2316  
2317          // Make sure this is a SATA controller by counting the number of bars
2318          // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2319          // it's an IDE controller and we ignore it.
2320  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
2321  		if (pci_resource_start(pdev, bar) == 0)
2322  			return -ENODEV;
2323  
2324  	ata_print_version_once(&pdev->dev, DRV_VERSION);
2325  
2326  	rc = pcim_enable_device(pdev);
2327  	if (rc)
2328  		return rc;
2329  
2330  	/* determine type and allocate host */
2331  	if (type == CK804 && adma_enabled) {
2332  		dev_notice(&pdev->dev, "Using ADMA mode\n");
2333  		type = ADMA;
2334  	} else if (type == MCP5x && swncq_enabled) {
2335  		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2336  		type = SWNCQ;
2337  	}
2338  
2339  	ppi[0] = &nv_port_info[type];
2340  	ipriv = ppi[0]->private_data;
2341  	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2342  	if (rc)
2343  		return rc;
2344  
2345  	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2346  	if (!hpriv)
2347  		return -ENOMEM;
2348  	hpriv->type = type;
2349  	host->private_data = hpriv;
2350  
2351  	/* request and iomap NV_MMIO_BAR */
2352  	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2353  	if (rc)
2354  		return rc;
2355  
2356  	/* configure SCR access */
2357  	base = host->iomap[NV_MMIO_BAR];
2358  	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2359  	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2360  
2361  	/* enable SATA space for CK804 */
2362  	if (type >= CK804) {
2363  		u8 regval;
2364  
2365  		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2366  		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2367  		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2368  	}
2369  
2370  	/* init ADMA */
2371  	if (type == ADMA) {
2372  		rc = nv_adma_host_init(host);
2373  		if (rc)
2374  			return rc;
2375  	} else if (type == SWNCQ)
2376  		nv_swncq_host_init(host);
2377  
2378  	if (msi_enabled) {
2379  		dev_notice(&pdev->dev, "Using MSI\n");
2380  		pci_enable_msi(pdev);
2381  	}
2382  
2383  	pci_set_master(pdev);
2384  	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2385  }
2386  
2387  #ifdef CONFIG_PM_SLEEP
2388  static int nv_pci_device_resume(struct pci_dev *pdev)
2389  {
2390  	struct ata_host *host = pci_get_drvdata(pdev);
2391  	struct nv_host_priv *hpriv = host->private_data;
2392  	int rc;
2393  
2394  	rc = ata_pci_device_do_resume(pdev);
2395  	if (rc)
2396  		return rc;
2397  
2398  	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2399  		if (hpriv->type >= CK804) {
2400  			u8 regval;
2401  
2402  			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2403  			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2404  			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2405  		}
2406  		if (hpriv->type == ADMA) {
2407  			u32 tmp32;
2408  			struct nv_adma_port_priv *pp;
2409  			/* enable/disable ADMA on the ports appropriately */
2410  			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2411  
2412  			pp = host->ports[0]->private_data;
2413  			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2414  				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2415  					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2416  			else
2417  				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2418  					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2419  			pp = host->ports[1]->private_data;
2420  			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2421  				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2422  					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2423  			else
2424  				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2425  					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2426  
2427  			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2428  		}
2429  	}
2430  
2431  	ata_host_resume(host);
2432  
2433  	return 0;
2434  }
2435  #endif
2436  
2437  static void nv_ck804_host_stop(struct ata_host *host)
2438  {
2439  	struct pci_dev *pdev = to_pci_dev(host->dev);
2440  	u8 regval;
2441  
2442  	/* disable SATA space for CK804 */
2443  	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2444  	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2445  	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2446  }
2447  
2448  static void nv_adma_host_stop(struct ata_host *host)
2449  {
2450  	struct pci_dev *pdev = to_pci_dev(host->dev);
2451  	u32 tmp32;
2452  
2453  	/* disable ADMA on the ports */
2454  	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2455  	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2456  		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2457  		   NV_MCP_SATA_CFG_20_PORT1_EN |
2458  		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2459  
2460  	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2461  
2462  	nv_ck804_host_stop(host);
2463  }
2464  
2465  module_pci_driver(nv_pci_driver);
2466  
2467  module_param_named(adma, adma_enabled, bool, 0444);
2468  MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2469  module_param_named(swncq, swncq_enabled, bool, 0444);
2470  MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2471  module_param_named(msi, msi_enabled, bool, 0444);
2472  MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
2473