xref: /openbmc/linux/drivers/ata/ahci.c (revision 4bdf0bb7)
1 /*
2  *  ahci.c - AHCI SATA support
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2004-2005 Red Hat, Inc.
9  *
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; either version 2, or (at your option)
14  *  any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *  GNU General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  *
26  * libata documentation is available via 'make {ps|pdf}docs',
27  * as Documentation/DocBook/libata.*
28  *
29  * AHCI hardware documentation:
30  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31  * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 
49 #define DRV_NAME	"ahci"
50 #define DRV_VERSION	"3.0"
51 
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE              0x000f0000
54 
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT           0x0000000f
57 #define EM_MSG_LED_PMP_SLOT           0x0000ff00
58 #define EM_MSG_LED_VALUE              0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY     0x00070000
60 #define EM_MSG_LED_VALUE_OFF          0xfff80000
61 #define EM_MSG_LED_VALUE_ON           0x00010000
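/*
 * An LED message is a single dword: bits 3:0 select the HBA port,
 * bits 15:8 the PMP slot, and bits 31:16 carry the LED value itself
 * (activity/off/on, as encoded by the masks above).
 */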
62 
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65 
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68 
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
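/*
 * Both parameters can also be given on the kernel command line, e.g.
 * "ahci.skip_host_reset=1" or "ahci.ignore_sss=1".
 */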
71 
72 static int ahci_enable_alpm(struct ata_port *ap,
73 		enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 			      size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 					ssize_t size);
80 
81 enum {
82 	AHCI_PCI_BAR		= 5,
83 	AHCI_MAX_PORTS		= 32,
84 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
85 	AHCI_DMA_BOUNDARY	= 0xffffffff,
86 	AHCI_MAX_CMDS		= 32,
87 	AHCI_CMD_SZ		= 32,
88 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
89 	AHCI_RX_FIS_SZ		= 256,
90 	AHCI_CMD_TBL_CDB	= 0x40,
91 	AHCI_CMD_TBL_HDR_SZ	= 0x80,
92 	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
95 				  AHCI_RX_FIS_SZ,
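	/*
	 * AHCI_PORT_PRIV_DMA_SZ covers everything one port needs in DMA
	 * memory: a 32-entry command list (32 * 32 bytes), 32 command
	 * tables (0x80-byte CFIS/ACMD header plus 168 16-byte PRD entries
	 * each) and a 256-byte received-FIS area.
	 */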
96 	AHCI_IRQ_ON_SG		= (1 << 31),
97 	AHCI_CMD_ATAPI		= (1 << 5),
98 	AHCI_CMD_WRITE		= (1 << 6),
99 	AHCI_CMD_PREFETCH	= (1 << 7),
100 	AHCI_CMD_RESET		= (1 << 8),
101 	AHCI_CMD_CLR_BUSY	= (1 << 10),
102 
103 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
104 	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
105 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
106 
107 	board_ahci		= 0,
108 	board_ahci_vt8251	= 1,
109 	board_ahci_ign_iferr	= 2,
110 	board_ahci_sb600	= 3,
111 	board_ahci_mv		= 4,
112 	board_ahci_sb700	= 5, /* for SB700 and SB800 */
113 	board_ahci_mcp65	= 6,
114 	board_ahci_nopmp	= 7,
115 	board_ahci_yesncq	= 8,
116 
117 	/* global controller registers */
118 	HOST_CAP		= 0x00, /* host capabilities */
119 	HOST_CTL		= 0x04, /* global host control */
120 	HOST_IRQ_STAT		= 0x08, /* interrupt status */
121 	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
122 	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
123 	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
124 	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
125 	HOST_CAP2		= 0x24, /* host capabilities, extended */
126 
127 	/* HOST_CTL bits */
128 	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
129 	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
130 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
131 
132 	/* HOST_CAP bits */
133 	HOST_CAP_SXS		= (1 << 5),  /* Supports External SATA */
134 	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
135 	HOST_CAP_CCC		= (1 << 7),  /* Command Completion Coalescing */
136 	HOST_CAP_PART		= (1 << 13), /* Partial state capable */
137 	HOST_CAP_SSC		= (1 << 14), /* Slumber state capable */
138 	HOST_CAP_PIO_MULTI	= (1 << 15), /* PIO multiple DRQ support */
139 	HOST_CAP_FBS		= (1 << 16), /* FIS-based switching support */
140 	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
141 	HOST_CAP_ONLY		= (1 << 18), /* Supports AHCI mode only */
142 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
143 	HOST_CAP_LED		= (1 << 25), /* Supports activity LED */
144 	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
145 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
146 	HOST_CAP_MPS		= (1 << 28), /* Mechanical presence switch */
147 	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
148 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
149 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
150 
151 	/* HOST_CAP2 bits */
152 	HOST_CAP2_BOH		= (1 << 0),  /* BIOS/OS handoff supported */
153 	HOST_CAP2_NVMHCI	= (1 << 1),  /* NVMHCI supported */
154 	HOST_CAP2_APST		= (1 << 2),  /* Automatic partial to slumber */
155 
156 	/* registers for each SATA port */
157 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
158 	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
159 	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
160 	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
161 	PORT_IRQ_STAT		= 0x10, /* interrupt status */
162 	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
163 	PORT_CMD		= 0x18, /* port command */
164 	PORT_TFDATA		= 0x20,	/* taskfile data */
165 	PORT_SIG		= 0x24,	/* device TF signature */
166 	PORT_CMD_ISSUE		= 0x38, /* command issue */
167 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
168 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
169 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
170 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
171 	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
172 
173 	/* PORT_IRQ_{STAT,MASK} bits */
174 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
175 	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
176 	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
177 	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
178 	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
179 	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
180 	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
181 	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
182 
183 	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
184 	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
185 	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
186 	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
187 	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
188 	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
189 	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
190 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
191 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
192 
193 	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
194 				  PORT_IRQ_IF_ERR |
195 				  PORT_IRQ_CONNECT |
196 				  PORT_IRQ_PHYRDY |
197 				  PORT_IRQ_UNK_FIS |
198 				  PORT_IRQ_BAD_PMP,
199 	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
200 				  PORT_IRQ_TF_ERR |
201 				  PORT_IRQ_HBUS_DATA_ERR,
202 	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
203 				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
204 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
205 
206 	/* PORT_CMD bits */
207 	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
208 	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
209 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
210 	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
211 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
212 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
213 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
214 	PORT_CMD_CLO		= (1 << 3), /* Command list override */
215 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
216 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
217 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
218 
219 	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
220 	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
221 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
222 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
223 
224 	/* hpriv->flags bits */
225 	AHCI_HFLAG_NO_NCQ		= (1 << 0),
226 	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
227 	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
228 	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
229 	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
230 	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
231 	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
232 	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
233 	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
234 	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
235 	AHCI_HFLAG_NO_SUSPEND		= (1 << 10), /* don't suspend */
236 	AHCI_HFLAG_SRST_TOUT_IS_OFFLINE	= (1 << 11), /* treat SRST timeout as
237 							link offline */
238 
239 	/* ap->flags bits */
240 
241 	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
242 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
243 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
244 					  ATA_FLAG_IPM,
245 
246 	ICH_MAP				= 0x90, /* ICH MAP register */
247 
248 	/* em constants */
249 	EM_MAX_SLOTS			= 8,
250 	EM_MAX_RETRY			= 5,
251 
252 	/* em_ctl bits */
253 	EM_CTL_RST			= (1 << 9), /* Reset */
254 	EM_CTL_TM			= (1 << 8), /* Transmit Message */
255 	EM_CTL_ALHD			= (1 << 26), /* Activity LED */
256 };
257 
258 struct ahci_cmd_hdr {
259 	__le32			opts;
260 	__le32			status;
261 	__le32			tbl_addr;
262 	__le32			tbl_addr_hi;
263 	__le32			reserved[4];
264 };
265 
266 struct ahci_sg {
267 	__le32			addr;
268 	__le32			addr_hi;
269 	__le32			reserved;
270 	__le32			flags_size;
271 };
272 
273 struct ahci_em_priv {
274 	enum sw_activity blink_policy;
275 	struct timer_list timer;
276 	unsigned long saved_activity;
277 	unsigned long activity;
278 	unsigned long led_state;
279 };
280 
281 struct ahci_host_priv {
282 	unsigned int		flags;		/* AHCI_HFLAG_* */
283 	u32			cap;		/* cap to use */
284 	u32			cap2;		/* cap2 to use */
285 	u32			port_map;	/* port map to use */
286 	u32			saved_cap;	/* saved initial cap */
287 	u32			saved_cap2;	/* saved initial cap2 */
288 	u32			saved_port_map;	/* saved initial port_map */
289 	u32 			em_loc; /* enclosure management location */
290 };
291 
292 struct ahci_port_priv {
293 	struct ata_link		*active_link;
294 	struct ahci_cmd_hdr	*cmd_slot;
295 	dma_addr_t		cmd_slot_dma;
296 	void			*cmd_tbl;
297 	dma_addr_t		cmd_tbl_dma;
298 	void			*rx_fis;
299 	dma_addr_t		rx_fis_dma;
300 	/* for NCQ spurious interrupt analysis */
301 	unsigned int		ncq_saw_d2h:1;
302 	unsigned int		ncq_saw_dmas:1;
303 	unsigned int		ncq_saw_sdb:1;
304 	u32 			intr_mask;	/* interrupts to enable */
305 	/* enclosure management info per PM slot */
306 	struct ahci_em_priv	em_priv[EM_MAX_SLOTS];
307 };
308 
309 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
310 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
311 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
312 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
313 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
314 static int ahci_port_start(struct ata_port *ap);
315 static void ahci_port_stop(struct ata_port *ap);
316 static void ahci_qc_prep(struct ata_queued_cmd *qc);
317 static void ahci_freeze(struct ata_port *ap);
318 static void ahci_thaw(struct ata_port *ap);
319 static void ahci_pmp_attach(struct ata_port *ap);
320 static void ahci_pmp_detach(struct ata_port *ap);
321 static int ahci_softreset(struct ata_link *link, unsigned int *class,
322 			  unsigned long deadline);
323 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
324 			  unsigned long deadline);
325 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
326 			  unsigned long deadline);
327 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
328 				 unsigned long deadline);
329 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
330 				unsigned long deadline);
331 static void ahci_postreset(struct ata_link *link, unsigned int *class);
332 static void ahci_error_handler(struct ata_port *ap);
333 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
334 static int ahci_port_resume(struct ata_port *ap);
335 static void ahci_dev_config(struct ata_device *dev);
336 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
337 			       u32 opts);
338 #ifdef CONFIG_PM
339 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
340 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
341 static int ahci_pci_device_resume(struct pci_dev *pdev);
342 #endif
343 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
344 static ssize_t ahci_activity_store(struct ata_device *dev,
345 				   enum sw_activity val);
346 static void ahci_init_sw_activity(struct ata_link *link);
347 
348 static ssize_t ahci_show_host_caps(struct device *dev,
349 				   struct device_attribute *attr, char *buf);
350 static ssize_t ahci_show_host_cap2(struct device *dev,
351 				   struct device_attribute *attr, char *buf);
352 static ssize_t ahci_show_host_version(struct device *dev,
353 				      struct device_attribute *attr, char *buf);
354 static ssize_t ahci_show_port_cmd(struct device *dev,
355 				  struct device_attribute *attr, char *buf);
356 
357 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
358 DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
359 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
360 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
361 
362 static struct device_attribute *ahci_shost_attrs[] = {
363 	&dev_attr_link_power_management_policy,
364 	&dev_attr_em_message_type,
365 	&dev_attr_em_message,
366 	&dev_attr_ahci_host_caps,
367 	&dev_attr_ahci_host_cap2,
368 	&dev_attr_ahci_host_version,
369 	&dev_attr_ahci_port_cmd,
370 	NULL
371 };
372 
373 static struct device_attribute *ahci_sdev_attrs[] = {
374 	&dev_attr_sw_activity,
375 	&dev_attr_unload_heads,
376 	NULL
377 };
378 
379 static struct scsi_host_template ahci_sht = {
380 	ATA_NCQ_SHT(DRV_NAME),
381 	.can_queue		= AHCI_MAX_CMDS - 1,
382 	.sg_tablesize		= AHCI_MAX_SG,
383 	.dma_boundary		= AHCI_DMA_BOUNDARY,
384 	.shost_attrs		= ahci_shost_attrs,
385 	.sdev_attrs		= ahci_sdev_attrs,
386 };
387 
388 static struct ata_port_operations ahci_ops = {
389 	.inherits		= &sata_pmp_port_ops,
390 
391 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
392 	.qc_prep		= ahci_qc_prep,
393 	.qc_issue		= ahci_qc_issue,
394 	.qc_fill_rtf		= ahci_qc_fill_rtf,
395 
396 	.freeze			= ahci_freeze,
397 	.thaw			= ahci_thaw,
398 	.softreset		= ahci_softreset,
399 	.hardreset		= ahci_hardreset,
400 	.postreset		= ahci_postreset,
401 	.pmp_softreset		= ahci_softreset,
402 	.error_handler		= ahci_error_handler,
403 	.post_internal_cmd	= ahci_post_internal_cmd,
404 	.dev_config		= ahci_dev_config,
405 
406 	.scr_read		= ahci_scr_read,
407 	.scr_write		= ahci_scr_write,
408 	.pmp_attach		= ahci_pmp_attach,
409 	.pmp_detach		= ahci_pmp_detach,
410 
411 	.enable_pm		= ahci_enable_alpm,
412 	.disable_pm		= ahci_disable_alpm,
413 	.em_show		= ahci_led_show,
414 	.em_store		= ahci_led_store,
415 	.sw_activity_show	= ahci_activity_show,
416 	.sw_activity_store	= ahci_activity_store,
417 #ifdef CONFIG_PM
418 	.port_suspend		= ahci_port_suspend,
419 	.port_resume		= ahci_port_resume,
420 #endif
421 	.port_start		= ahci_port_start,
422 	.port_stop		= ahci_port_stop,
423 };
424 
425 static struct ata_port_operations ahci_vt8251_ops = {
426 	.inherits		= &ahci_ops,
427 	.hardreset		= ahci_vt8251_hardreset,
428 };
429 
430 static struct ata_port_operations ahci_p5wdh_ops = {
431 	.inherits		= &ahci_ops,
432 	.hardreset		= ahci_p5wdh_hardreset,
433 };
434 
435 static struct ata_port_operations ahci_sb600_ops = {
436 	.inherits		= &ahci_ops,
437 	.softreset		= ahci_sb600_softreset,
438 	.pmp_softreset		= ahci_sb600_softreset,
439 };
440 
441 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
442 
443 static const struct ata_port_info ahci_port_info[] = {
444 	[board_ahci] =
445 	{
446 		.flags		= AHCI_FLAG_COMMON,
447 		.pio_mask	= ATA_PIO4,
448 		.udma_mask	= ATA_UDMA6,
449 		.port_ops	= &ahci_ops,
450 	},
451 	[board_ahci_vt8251] =
452 	{
453 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
454 		.flags		= AHCI_FLAG_COMMON,
455 		.pio_mask	= ATA_PIO4,
456 		.udma_mask	= ATA_UDMA6,
457 		.port_ops	= &ahci_vt8251_ops,
458 	},
459 	[board_ahci_ign_iferr] =
460 	{
461 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
462 		.flags		= AHCI_FLAG_COMMON,
463 		.pio_mask	= ATA_PIO4,
464 		.udma_mask	= ATA_UDMA6,
465 		.port_ops	= &ahci_ops,
466 	},
467 	[board_ahci_sb600] =
468 	{
469 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
470 				 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
471 				 AHCI_HFLAG_32BIT_ONLY),
472 		.flags		= AHCI_FLAG_COMMON,
473 		.pio_mask	= ATA_PIO4,
474 		.udma_mask	= ATA_UDMA6,
475 		.port_ops	= &ahci_sb600_ops,
476 	},
477 	[board_ahci_mv] =
478 	{
479 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
480 				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
481 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
482 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
483 		.pio_mask	= ATA_PIO4,
484 		.udma_mask	= ATA_UDMA6,
485 		.port_ops	= &ahci_ops,
486 	},
487 	[board_ahci_sb700] =	/* for SB700 and SB800 */
488 	{
489 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
490 		.flags		= AHCI_FLAG_COMMON,
491 		.pio_mask	= ATA_PIO4,
492 		.udma_mask	= ATA_UDMA6,
493 		.port_ops	= &ahci_sb600_ops,
494 	},
495 	[board_ahci_mcp65] =
496 	{
497 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
498 		.flags		= AHCI_FLAG_COMMON,
499 		.pio_mask	= ATA_PIO4,
500 		.udma_mask	= ATA_UDMA6,
501 		.port_ops	= &ahci_ops,
502 	},
503 	[board_ahci_nopmp] =
504 	{
505 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
506 		.flags		= AHCI_FLAG_COMMON,
507 		.pio_mask	= ATA_PIO4,
508 		.udma_mask	= ATA_UDMA6,
509 		.port_ops	= &ahci_ops,
510 	},
511 	[board_ahci_yesncq] =
512 	{
513 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
514 		.flags		= AHCI_FLAG_COMMON,
515 		.pio_mask	= ATA_PIO4,
516 		.udma_mask	= ATA_UDMA6,
517 		.port_ops	= &ahci_ops,
518 	},
519 };
520 
521 static const struct pci_device_id ahci_pci_tbl[] = {
522 	/* Intel */
523 	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
524 	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
525 	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
526 	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
527 	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
528 	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
529 	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
530 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
531 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
532 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
533 	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
534 	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
535 	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
536 	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
537 	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
538 	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
539 	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
540 	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
541 	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
542 	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
543 	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
544 	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
545 	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
546 	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
547 	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
548 	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
549 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
550 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
551 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
552 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
553 	{ PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
554 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
555 	{ PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
556 	{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
557 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
558 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
559 	{ PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
560 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
561 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
562 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
563 
564 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
565 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
566 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
567 
568 	/* ATI */
569 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
570 	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
571 	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
572 	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
573 	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
574 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
575 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
576 
577 	/* AMD */
578 	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
579 	/* AMD uses the RAID class only for AHCI controllers */
580 	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
581 	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
582 
583 	/* VIA */
584 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
585 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
586 
587 	/* NVIDIA */
588 	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },	/* MCP65 */
589 	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },	/* MCP65 */
590 	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },	/* MCP65 */
591 	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },	/* MCP65 */
592 	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },	/* MCP65 */
593 	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
594 	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
595 	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
596 	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq },	/* MCP67 */
597 	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq },	/* MCP67 */
598 	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq },	/* MCP67 */
599 	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq },	/* MCP67 */
600 	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq },	/* MCP67 */
601 	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq },	/* MCP67 */
602 	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq },	/* MCP67 */
603 	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq },	/* MCP67 */
604 	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq },	/* MCP67 */
605 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq },	/* MCP67 */
606 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq },	/* MCP67 */
607 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq },	/* MCP67 */
608 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq },	/* MCP73 */
609 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq },	/* MCP73 */
610 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq },	/* MCP73 */
611 	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq },	/* MCP73 */
612 	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq },	/* MCP73 */
613 	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq },	/* MCP73 */
614 	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq },	/* MCP73 */
615 	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq },	/* MCP73 */
616 	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq },	/* MCP73 */
617 	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq },	/* MCP73 */
618 	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq },	/* MCP73 */
619 	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq },	/* MCP73 */
620 	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
621 	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
622 	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
623 	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci },		/* MCP77 */
624 	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci },		/* MCP77 */
625 	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci },		/* MCP77 */
626 	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci },		/* MCP77 */
627 	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci },		/* MCP77 */
628 	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci },		/* MCP77 */
629 	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
630 	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
631 	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
632 	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
633 	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
634 	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
635 	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
636 	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
637 	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
638 	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
639 	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci },		/* MCP79 */
640 	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci },		/* MCP79 */
641 	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
642 	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
643 	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
644 	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci },		/* MCP89 */
645 	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci },		/* MCP89 */
646 	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci },		/* MCP89 */
647 	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci },		/* MCP89 */
648 	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci },		/* MCP89 */
649 	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci },		/* MCP89 */
650 	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci },		/* MCP89 */
651 	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci },		/* MCP89 */
652 	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci },		/* MCP89 */
653 	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci },		/* MCP89 */
654 	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci },		/* MCP89 */
655 	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci },		/* MCP89 */
656 
657 	/* SiS */
658 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
659 	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
660 	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
661 
662 	/* Marvell */
663 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
664 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
665 
666 	/* Promise */
667 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
668 
669 	/* Generic, PCI class code for AHCI */
670 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
671 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
672 
673 	{ }	/* terminate list */
674 };
675 
676 
677 static struct pci_driver ahci_pci_driver = {
678 	.name			= DRV_NAME,
679 	.id_table		= ahci_pci_tbl,
680 	.probe			= ahci_init_one,
681 	.remove			= ata_pci_remove_one,
682 #ifdef CONFIG_PM
683 	.suspend		= ahci_pci_device_suspend,
684 	.resume			= ahci_pci_device_resume,
685 #endif
686 };
687 
688 static int ahci_em_messages = 1;
689 module_param(ahci_em_messages, int, 0444);
690 /* add other LED protocol types when they become supported */
691 MODULE_PARM_DESC(ahci_em_messages,
692 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");
693 
694 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
695 static int marvell_enable;
696 #else
697 static int marvell_enable = 1;
698 #endif
699 module_param(marvell_enable, int, 0644);
700 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
701 
702 
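/*
 * CAP.NP (bits 4:0) holds the number of ports minus one, hence the +1
 * below.
 */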
703 static inline int ahci_nr_ports(u32 cap)
704 {
705 	return (cap & 0x1f) + 1;
706 }
707 
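/*
 * Port registers start 0x100 bytes into the AHCI BAR, after the global
 * registers; each port owns a 0x80-byte register block.
 */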
708 static inline void __iomem *__ahci_port_base(struct ata_host *host,
709 					     unsigned int port_no)
710 {
711 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
712 
713 	return mmio + 0x100 + (port_no * 0x80);
714 }
715 
716 static inline void __iomem *ahci_port_base(struct ata_port *ap)
717 {
718 	return __ahci_port_base(ap->host, ap->port_no);
719 }
720 
721 static void ahci_enable_ahci(void __iomem *mmio)
722 {
723 	int i;
724 	u32 tmp;
725 
726 	/* turn on AHCI_EN */
727 	tmp = readl(mmio + HOST_CTL);
728 	if (tmp & HOST_AHCI_EN)
729 		return;
730 
731 	/* Some controllers need AHCI_EN to be written multiple times.
732 	 * Try a few times before giving up.
733 	 */
734 	for (i = 0; i < 5; i++) {
735 		tmp |= HOST_AHCI_EN;
736 		writel(tmp, mmio + HOST_CTL);
737 		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
738 		if (tmp & HOST_AHCI_EN)
739 			return;
740 		msleep(10);
741 	}
742 
743 	WARN_ON(1);
744 }
745 
746 static ssize_t ahci_show_host_caps(struct device *dev,
747 				   struct device_attribute *attr, char *buf)
748 {
749 	struct Scsi_Host *shost = class_to_shost(dev);
750 	struct ata_port *ap = ata_shost_to_port(shost);
751 	struct ahci_host_priv *hpriv = ap->host->private_data;
752 
753 	return sprintf(buf, "%x\n", hpriv->cap);
754 }
755 
756 static ssize_t ahci_show_host_cap2(struct device *dev,
757 				   struct device_attribute *attr, char *buf)
758 {
759 	struct Scsi_Host *shost = class_to_shost(dev);
760 	struct ata_port *ap = ata_shost_to_port(shost);
761 	struct ahci_host_priv *hpriv = ap->host->private_data;
762 
763 	return sprintf(buf, "%x\n", hpriv->cap2);
764 }
765 
766 static ssize_t ahci_show_host_version(struct device *dev,
767 				   struct device_attribute *attr, char *buf)
768 {
769 	struct Scsi_Host *shost = class_to_shost(dev);
770 	struct ata_port *ap = ata_shost_to_port(shost);
771 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
772 
773 	return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
774 }
775 
776 static ssize_t ahci_show_port_cmd(struct device *dev,
777 				  struct device_attribute *attr, char *buf)
778 {
779 	struct Scsi_Host *shost = class_to_shost(dev);
780 	struct ata_port *ap = ata_shost_to_port(shost);
781 	void __iomem *port_mmio = ahci_port_base(ap);
782 
783 	return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
784 }
785 
786 /**
787  *	ahci_save_initial_config - Save and fixup initial config values
788  *	@pdev: target PCI device
789  *	@hpriv: host private area to store config values
790  *
791  *	Some registers containing configuration info might be setup by
792  *	BIOS and might be cleared on reset.  This function saves the
793  *	initial values of those registers into @hpriv such that they
794  *	can be restored after controller reset.
795  *
796  *	If inconsistent, config values are fixed up by this function.
797  *
798  *	LOCKING:
799  *	None.
800  */
801 static void ahci_save_initial_config(struct pci_dev *pdev,
802 				     struct ahci_host_priv *hpriv)
803 {
804 	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
805 	u32 cap, cap2, vers, port_map;
806 	int i;
807 	int mv;
808 
809 	/* make sure AHCI mode is enabled before accessing CAP */
810 	ahci_enable_ahci(mmio);
811 
812 	/* Values prefixed with saved_ are written back to host after
813 	 * reset.  Values without the prefix are used for driver operation.
814 	 */
815 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
816 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
817 
818 	/* CAP2 register is only defined for AHCI 1.2 and later */
819 	vers = readl(mmio + HOST_VERSION);
820 	if ((vers >> 16) > 1 ||
821 	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
822 		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
823 	else
824 		hpriv->saved_cap2 = cap2 = 0;
825 
826 	/* some chips have errata preventing 64bit use */
827 	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
828 		dev_printk(KERN_INFO, &pdev->dev,
829 			   "controller can't do 64bit DMA, forcing 32bit\n");
830 		cap &= ~HOST_CAP_64;
831 	}
832 
833 	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
834 		dev_printk(KERN_INFO, &pdev->dev,
835 			   "controller can't do NCQ, turning off CAP_NCQ\n");
836 		cap &= ~HOST_CAP_NCQ;
837 	}
838 
839 	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
840 		dev_printk(KERN_INFO, &pdev->dev,
841 			   "controller can do NCQ, turning on CAP_NCQ\n");
842 		cap |= HOST_CAP_NCQ;
843 	}
844 
845 	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
846 		dev_printk(KERN_INFO, &pdev->dev,
847 			   "controller can't do PMP, turning off CAP_PMP\n");
848 		cap &= ~HOST_CAP_PMP;
849 	}
850 
851 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
852 	    port_map != 1) {
853 		dev_printk(KERN_INFO, &pdev->dev,
854 			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
855 			   port_map, 1);
856 		port_map = 1;
857 	}
858 
859 	/*
860 	 * Temporary Marvell 6145 hack: PATA port presence
861 	 * is asserted through the standard AHCI port
862 	 * presence register, as bit 4 (counting from 0)
863 	 */
864 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
865 		if (pdev->device == 0x6121)
866 			mv = 0x3;
867 		else
868 			mv = 0xf;
869 		dev_printk(KERN_ERR, &pdev->dev,
870 			   "MV_AHCI HACK: port_map %x -> %x\n",
871 			   port_map,
872 			   port_map & mv);
873 		dev_printk(KERN_ERR, &pdev->dev,
874 			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
875 
876 		port_map &= mv;
877 	}
878 
879 	/* cross check port_map and cap.n_ports */
880 	if (port_map) {
881 		int map_ports = 0;
882 
883 		for (i = 0; i < AHCI_MAX_PORTS; i++)
884 			if (port_map & (1 << i))
885 				map_ports++;
886 
887 		/* If PI has more ports than n_ports, whine, clear
888 		 * port_map and let it be generated from n_ports.
889 		 */
890 		if (map_ports > ahci_nr_ports(cap)) {
891 			dev_printk(KERN_WARNING, &pdev->dev,
892 				   "implemented port map (0x%x) contains more "
893 				   "ports than nr_ports (%u), using nr_ports\n",
894 				   port_map, ahci_nr_ports(cap));
895 			port_map = 0;
896 		}
897 	}
898 
899 	/* fabricate port_map from cap.nr_ports */
900 	if (!port_map) {
901 		port_map = (1 << ahci_nr_ports(cap)) - 1;
902 		dev_printk(KERN_WARNING, &pdev->dev,
903 			   "forcing PORTS_IMPL to 0x%x\n", port_map);
904 
905 		/* write the fixed up value to the PI register */
906 		hpriv->saved_port_map = port_map;
907 	}
908 
909 	/* record values to use during operation */
910 	hpriv->cap = cap;
911 	hpriv->cap2 = cap2;
912 	hpriv->port_map = port_map;
913 }
914 
915 /**
916  *	ahci_restore_initial_config - Restore initial config
917  *	@host: target ATA host
918  *
919  *	Restore initial config stored by ahci_save_initial_config().
920  *
921  *	LOCKING:
922  *	None.
923  */
924 static void ahci_restore_initial_config(struct ata_host *host)
925 {
926 	struct ahci_host_priv *hpriv = host->private_data;
927 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
928 
929 	writel(hpriv->saved_cap, mmio + HOST_CAP);
930 	if (hpriv->saved_cap2)
931 		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
932 	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
933 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
934 }
935 
936 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
937 {
938 	static const int offset[] = {
939 		[SCR_STATUS]		= PORT_SCR_STAT,
940 		[SCR_CONTROL]		= PORT_SCR_CTL,
941 		[SCR_ERROR]		= PORT_SCR_ERR,
942 		[SCR_ACTIVE]		= PORT_SCR_ACT,
943 		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
944 	};
945 	struct ahci_host_priv *hpriv = ap->host->private_data;
946 
947 	if (sc_reg < ARRAY_SIZE(offset) &&
948 	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
949 		return offset[sc_reg];
950 	return 0;
951 }
952 
953 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
954 {
955 	void __iomem *port_mmio = ahci_port_base(link->ap);
956 	int offset = ahci_scr_offset(link->ap, sc_reg);
957 
958 	if (offset) {
959 		*val = readl(port_mmio + offset);
960 		return 0;
961 	}
962 	return -EINVAL;
963 }
964 
965 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
966 {
967 	void __iomem *port_mmio = ahci_port_base(link->ap);
968 	int offset = ahci_scr_offset(link->ap, sc_reg);
969 
970 	if (offset) {
971 		writel(val, port_mmio + offset);
972 		return 0;
973 	}
974 	return -EINVAL;
975 }
976 
977 static void ahci_start_engine(struct ata_port *ap)
978 {
979 	void __iomem *port_mmio = ahci_port_base(ap);
980 	u32 tmp;
981 
982 	/* start DMA */
983 	tmp = readl(port_mmio + PORT_CMD);
984 	tmp |= PORT_CMD_START;
985 	writel(tmp, port_mmio + PORT_CMD);
986 	readl(port_mmio + PORT_CMD); /* flush */
987 }
988 
989 static int ahci_stop_engine(struct ata_port *ap)
990 {
991 	void __iomem *port_mmio = ahci_port_base(ap);
992 	u32 tmp;
993 
994 	tmp = readl(port_mmio + PORT_CMD);
995 
996 	/* check if the HBA is idle */
997 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
998 		return 0;
999 
1000 	/* setting HBA to idle */
1001 	tmp &= ~PORT_CMD_START;
1002 	writel(tmp, port_mmio + PORT_CMD);
1003 
1004 	/* wait for engine to stop. This could be as long as 500 msec */
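	/* ata_wait_register() polls PORT_CMD every 1 ms, for at most
	 * 500 ms, until PxCMD.CR (PORT_CMD_LIST_ON) reads back clear.
	 */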
1005 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1006 				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1007 	if (tmp & PORT_CMD_LIST_ON)
1008 		return -EIO;
1009 
1010 	return 0;
1011 }
1012 
1013 static void ahci_start_fis_rx(struct ata_port *ap)
1014 {
1015 	void __iomem *port_mmio = ahci_port_base(ap);
1016 	struct ahci_host_priv *hpriv = ap->host->private_data;
1017 	struct ahci_port_priv *pp = ap->private_data;
1018 	u32 tmp;
1019 
1020 	/* program command list and FIS receive buffer addresses */
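	/*
	 * The upper 32 bits are programmed only when the HBA advertises
	 * 64-bit addressing (CAP.S64A); the double 16-bit shift keeps the
	 * expression well-defined even when dma_addr_t is 32 bits wide.
	 */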
1021 	if (hpriv->cap & HOST_CAP_64)
1022 		writel((pp->cmd_slot_dma >> 16) >> 16,
1023 		       port_mmio + PORT_LST_ADDR_HI);
1024 	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1025 
1026 	if (hpriv->cap & HOST_CAP_64)
1027 		writel((pp->rx_fis_dma >> 16) >> 16,
1028 		       port_mmio + PORT_FIS_ADDR_HI);
1029 	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1030 
1031 	/* enable FIS reception */
1032 	tmp = readl(port_mmio + PORT_CMD);
1033 	tmp |= PORT_CMD_FIS_RX;
1034 	writel(tmp, port_mmio + PORT_CMD);
1035 
1036 	/* flush */
1037 	readl(port_mmio + PORT_CMD);
1038 }
1039 
1040 static int ahci_stop_fis_rx(struct ata_port *ap)
1041 {
1042 	void __iomem *port_mmio = ahci_port_base(ap);
1043 	u32 tmp;
1044 
1045 	/* disable FIS reception */
1046 	tmp = readl(port_mmio + PORT_CMD);
1047 	tmp &= ~PORT_CMD_FIS_RX;
1048 	writel(tmp, port_mmio + PORT_CMD);
1049 
1050 	/* wait for completion, spec says 500ms, give it 1000 */
1051 	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1052 				PORT_CMD_FIS_ON, 10, 1000);
1053 	if (tmp & PORT_CMD_FIS_ON)
1054 		return -EBUSY;
1055 
1056 	return 0;
1057 }
1058 
1059 static void ahci_power_up(struct ata_port *ap)
1060 {
1061 	struct ahci_host_priv *hpriv = ap->host->private_data;
1062 	void __iomem *port_mmio = ahci_port_base(ap);
1063 	u32 cmd;
1064 
1065 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1066 
1067 	/* spin up device */
1068 	if (hpriv->cap & HOST_CAP_SSS) {
1069 		cmd |= PORT_CMD_SPIN_UP;
1070 		writel(cmd, port_mmio + PORT_CMD);
1071 	}
1072 
1073 	/* wake up link */
1074 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
1075 }
1076 
1077 static void ahci_disable_alpm(struct ata_port *ap)
1078 {
1079 	struct ahci_host_priv *hpriv = ap->host->private_data;
1080 	void __iomem *port_mmio = ahci_port_base(ap);
1081 	u32 cmd;
1082 	struct ahci_port_priv *pp = ap->private_data;
1083 
1084 	/* IPM bits should be disabled by libata-core */
1085 	/* get the existing command bits */
1086 	cmd = readl(port_mmio + PORT_CMD);
1087 
1088 	/* disable ALPM and ASP */
1089 	cmd &= ~PORT_CMD_ASP;
1090 	cmd &= ~PORT_CMD_ALPE;
1091 
1092 	/* force the interface back to active */
1093 	cmd |= PORT_CMD_ICC_ACTIVE;
1094 
1095 	/* write out new cmd value */
1096 	writel(cmd, port_mmio + PORT_CMD);
1097 	cmd = readl(port_mmio + PORT_CMD);
1098 
1099 	/* wait 10ms to be sure we've come out of any low power state */
1100 	msleep(10);
1101 
1102 	/* clear out any PhyRdy stuff from interrupt status */
1103 	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1104 
1105 	/* go ahead and clean out PhyRdy Change from Serror too */
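	/* bit 16 is SERR_PHYRDY_CHG, bit 18 is SERR_COMM_WAKE */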
1106 	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1107 
1108 	/*
1109 	 * Clear the flag so PhyRdy state changes are no longer ignored
1110 	 * now that ALPM is disabled.
1111 	 */
1112 	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1113 
1114 	/*
1115 	 * Enable interrupts on Phy Ready.
1116 	 */
1117 	pp->intr_mask |= PORT_IRQ_PHYRDY;
1118 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1119 
1120 	/*
1121 	 * Don't change the link pm policy - we can be called
1122 	 * just to turn off link pm temporarily.
1123 	 */
1124 }
1125 
1126 static int ahci_enable_alpm(struct ata_port *ap,
1127 	enum link_pm policy)
1128 {
1129 	struct ahci_host_priv *hpriv = ap->host->private_data;
1130 	void __iomem *port_mmio = ahci_port_base(ap);
1131 	u32 cmd;
1132 	struct ahci_port_priv *pp = ap->private_data;
1133 	u32 asp;
1134 
1135 	/* Make sure the host is capable of link power management */
1136 	if (!(hpriv->cap & HOST_CAP_ALPM))
1137 		return -EINVAL;
1138 
1139 	switch (policy) {
1140 	case MAX_PERFORMANCE:
1141 	case NOT_AVAILABLE:
1142 		/*
1143  		 * if we came here with NOT_AVAILABLE,
1144  		 * it just means this is the first time we
1145  		 * have tried to enable - default to max performance,
1146  		 * and let the user go to lower power modes on request.
1147  		 */
1148 		ahci_disable_alpm(ap);
1149 		return 0;
1150 	case MIN_POWER:
1151 		/* configure HBA to enter SLUMBER */
1152 		asp = PORT_CMD_ASP;
1153 		break;
1154 	case MEDIUM_POWER:
1155 		/* configure HBA to enter PARTIAL */
1156 		asp = 0;
1157 		break;
1158 	default:
1159 		return -EINVAL;
1160 	}
1161 
1162 	/*
1163  	 * Disable interrupts on Phy Ready. This keeps us from
1164  	 * getting woken up due to spurious phy ready interrupts
1165 	 * TBD - Hot plug should be done via polling now, is
1166 	 * that even supported?
1167  	 */
1168 	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1169 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1170 
1171 	/*
1172  	 * Set a flag to indicate that we should ignore all PhyRdy
1173  	 * state changes since these can happen now whenever we
1174  	 * change link state
1175  	 */
1176 	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1177 
1178 	/* get the existing command bits */
1179 	cmd = readl(port_mmio + PORT_CMD);
1180 
1181 	/*
1182  	 * Set ASP based on Policy
1183  	 */
1184 	cmd |= asp;
1185 
1186 	/*
1187  	 * Setting this bit will instruct the HBA to aggressively
1188  	 * enter a lower power link state when it's appropriate and
1189  	 * based on the value set above for ASP
1190  	 */
1191 	cmd |= PORT_CMD_ALPE;
1192 
1193 	/* write out new cmd value */
1194 	writel(cmd, port_mmio + PORT_CMD);
1195 	cmd = readl(port_mmio + PORT_CMD);
1196 
1197 	/* IPM bits should be set by libata-core */
1198 	return 0;
1199 }
1200 
1201 #ifdef CONFIG_PM
1202 static void ahci_power_down(struct ata_port *ap)
1203 {
1204 	struct ahci_host_priv *hpriv = ap->host->private_data;
1205 	void __iomem *port_mmio = ahci_port_base(ap);
1206 	u32 cmd, scontrol;
1207 
1208 	if (!(hpriv->cap & HOST_CAP_SSS))
1209 		return;
1210 
1211 	/* put device into listen mode, first set PxSCTL.DET to 0 */
1212 	scontrol = readl(port_mmio + PORT_SCR_CTL);
1213 	scontrol &= ~0xf;
1214 	writel(scontrol, port_mmio + PORT_SCR_CTL);
1215 
1216 	/* then set PxCMD.SUD to 0 */
1217 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1218 	cmd &= ~PORT_CMD_SPIN_UP;
1219 	writel(cmd, port_mmio + PORT_CMD);
1220 }
1221 #endif
1222 
1223 static void ahci_start_port(struct ata_port *ap)
1224 {
1225 	struct ahci_port_priv *pp = ap->private_data;
1226 	struct ata_link *link;
1227 	struct ahci_em_priv *emp;
1228 	ssize_t rc;
1229 	int i;
1230 
1231 	/* enable FIS reception */
1232 	ahci_start_fis_rx(ap);
1233 
1234 	/* enable DMA */
1235 	ahci_start_engine(ap);
1236 
1237 	/* turn on LEDs */
1238 	if (ap->flags & ATA_FLAG_EM) {
1239 		ata_for_each_link(link, ap, EDGE) {
1240 			emp = &pp->em_priv[link->pmp];
1241 
1242 			/* EM Transmit bit may be busy during init */
1243 			for (i = 0; i < EM_MAX_RETRY; i++) {
1244 				rc = ahci_transmit_led_message(ap,
1245 							       emp->led_state,
1246 							       4);
1247 				if (rc == -EBUSY)
1248 					msleep(1);
1249 				else
1250 					break;
1251 			}
1252 		}
1253 	}
1254 
1255 	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1256 		ata_for_each_link(link, ap, EDGE)
1257 			ahci_init_sw_activity(link);
1258 
1259 }
1260 
1261 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1262 {
1263 	int rc;
1264 
1265 	/* disable DMA */
1266 	rc = ahci_stop_engine(ap);
1267 	if (rc) {
1268 		*emsg = "failed to stop engine";
1269 		return rc;
1270 	}
1271 
1272 	/* disable FIS reception */
1273 	rc = ahci_stop_fis_rx(ap);
1274 	if (rc) {
1275 		*emsg = "failed to stop FIS RX";
1276 		return rc;
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 static int ahci_reset_controller(struct ata_host *host)
1283 {
1284 	struct pci_dev *pdev = to_pci_dev(host->dev);
1285 	struct ahci_host_priv *hpriv = host->private_data;
1286 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1287 	u32 tmp;
1288 
1289 	/* we must be in AHCI mode before using anything
1290 	 * AHCI-specific, such as HOST_RESET.
1291 	 */
1292 	ahci_enable_ahci(mmio);
1293 
1294 	/* global controller reset */
1295 	if (!ahci_skip_host_reset) {
1296 		tmp = readl(mmio + HOST_CTL);
1297 		if ((tmp & HOST_RESET) == 0) {
1298 			writel(tmp | HOST_RESET, mmio + HOST_CTL);
1299 			readl(mmio + HOST_CTL); /* flush */
1300 		}
1301 
1302 		/*
1303 		 * To perform a host reset, the OS sets HOST_RESET and
1304 		 * polls until the bit reads back as zero.  The reset must
1305 		 * complete within 1 second, or the hardware should be
1306 		 * considered fried.
1307 		 */
1308 		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1309 					HOST_RESET, 10, 1000);
1310 
1311 		if (tmp & HOST_RESET) {
1312 			dev_printk(KERN_ERR, host->dev,
1313 				   "controller reset failed (0x%x)\n", tmp);
1314 			return -EIO;
1315 		}
1316 
1317 		/* turn on AHCI mode */
1318 		ahci_enable_ahci(mmio);
1319 
1320 		/* Some registers might be cleared on reset.  Restore
1321 		 * initial values.
1322 		 */
1323 		ahci_restore_initial_config(host);
1324 	} else
1325 		dev_printk(KERN_INFO, host->dev,
1326 			   "skipping global host reset\n");
1327 
1328 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1329 		u16 tmp16;
1330 
1331 		/* configure PCS */
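		/*
		 * On these Intel chipsets the low bits of the PCS register
		 * (PCI config offset 0x92) act as per-port enables; make
		 * sure every implemented port is switched on.
		 */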
1332 		pci_read_config_word(pdev, 0x92, &tmp16);
1333 		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1334 			tmp16 |= hpriv->port_map;
1335 			pci_write_config_word(pdev, 0x92, tmp16);
1336 		}
1337 	}
1338 
1339 	return 0;
1340 }
1341 
1342 static void ahci_sw_activity(struct ata_link *link)
1343 {
1344 	struct ata_port *ap = link->ap;
1345 	struct ahci_port_priv *pp = ap->private_data;
1346 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1347 
1348 	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1349 		return;
1350 
1351 	emp->activity++;
1352 	if (!timer_pending(&emp->timer))
1353 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1354 }
1355 
1356 static void ahci_sw_activity_blink(unsigned long arg)
1357 {
1358 	struct ata_link *link = (struct ata_link *)arg;
1359 	struct ata_port *ap = link->ap;
1360 	struct ahci_port_priv *pp = ap->private_data;
1361 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1362 	unsigned long led_message = emp->led_state;
1363 	u32 activity_led_state;
1364 	unsigned long flags;
1365 
1366 	led_message &= EM_MSG_LED_VALUE;
1367 	led_message |= ap->port_no | (link->pmp << 8);
1368 
1369 	/* check to see if we've had activity.  If so,
1370 	 * toggle state of LED and reset timer.  If not,
1371 	 * turn LED to desired idle state.
1372 	 */
1373 	spin_lock_irqsave(ap->lock, flags);
1374 	if (emp->saved_activity != emp->activity) {
1375 		emp->saved_activity = emp->activity;
1376 		/* get the current LED state */
1377 		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1378 
1379 		if (activity_led_state)
1380 			activity_led_state = 0;
1381 		else
1382 			activity_led_state = 1;
1383 
1384 		/* clear old state */
1385 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1386 
1387 		/* toggle state */
1388 		led_message |= (activity_led_state << 16);
1389 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1390 	} else {
1391 		/* switch to idle */
1392 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1393 		if (emp->blink_policy == BLINK_OFF)
1394 			led_message |= (1 << 16);
1395 	}
1396 	spin_unlock_irqrestore(ap->lock, flags);
1397 	ahci_transmit_led_message(ap, led_message, 4);
1398 }
1399 
1400 static void ahci_init_sw_activity(struct ata_link *link)
1401 {
1402 	struct ata_port *ap = link->ap;
1403 	struct ahci_port_priv *pp = ap->private_data;
1404 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1405 
1406 	/* init activity stats, setup timer */
1407 	emp->saved_activity = emp->activity = 0;
1408 	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1409 
1410 	/* check our blink policy and set flag for link if it's enabled */
1411 	if (emp->blink_policy)
1412 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1413 }
1414 
1415 static int ahci_reset_em(struct ata_host *host)
1416 {
1417 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1418 	u32 em_ctl;
1419 
1420 	em_ctl = readl(mmio + HOST_EM_CTL);
1421 	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1422 		return -EINVAL;
1423 
1424 	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1425 	return 0;
1426 }
1427 
1428 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1429 					ssize_t size)
1430 {
1431 	struct ahci_host_priv *hpriv = ap->host->private_data;
1432 	struct ahci_port_priv *pp = ap->private_data;
1433 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1434 	u32 em_ctl;
1435 	u32 message[] = {0, 0};
1436 	unsigned long flags;
1437 	int pmp;
1438 	struct ahci_em_priv *emp;
1439 
1440 	/* get the slot number from the message */
1441 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1442 	if (pmp < EM_MAX_SLOTS)
1443 		emp = &pp->em_priv[pmp];
1444 	else
1445 		return -EINVAL;
1446 
1447 	spin_lock_irqsave(ap->lock, flags);
1448 
1449 	/*
1450 	 * if we are still busy transmitting a previous message,
1451 	 * do not allow another one to be started.
1452 	 */
1453 	em_ctl = readl(mmio + HOST_EM_CTL);
1454 	if (em_ctl & EM_CTL_TM) {
1455 		spin_unlock_irqrestore(ap->lock, flags);
1456 		return -EBUSY;
1457 	}
1458 
1459 	/*
1460 	 * create message header - this is all zero except for
1461 	 * the message size, which is 4 bytes.
1462 	 */
1463 	message[0] |= (4 << 8);
1464 
1465 	/* ignore bits 3:0 of byte zero (HBA port), fill in the port number ourselves */
1466 	message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1467 
1468 	/* write message to EM_LOC */
1469 	writel(message[0], mmio + hpriv->em_loc);
1470 	writel(message[1], mmio + hpriv->em_loc+4);
1471 
1472 	/* save off new led state for port/slot */
1473 	emp->led_state = state;
1474 
1475 	/*
1476 	 * tell hardware to transmit the message
1477 	 */
1478 	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1479 
1480 	spin_unlock_irqrestore(ap->lock, flags);
1481 	return size;
1482 }
1483 
1484 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1485 {
1486 	struct ahci_port_priv *pp = ap->private_data;
1487 	struct ata_link *link;
1488 	struct ahci_em_priv *emp;
1489 	int rc = 0;
1490 
1491 	ata_for_each_link(link, ap, EDGE) {
1492 		emp = &pp->em_priv[link->pmp];
1493 		rc += sprintf(buf + rc, "%lx\n", emp->led_state);
1494 	}
1495 	return rc;
1496 }
1497 
1498 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1499 				size_t size)
1500 {
1501 	int state;
1502 	int pmp;
1503 	struct ahci_port_priv *pp = ap->private_data;
1504 	struct ahci_em_priv *emp;
1505 
1506 	state = simple_strtoul(buf, NULL, 0);
1507 
1508 	/* get the slot number from the message */
1509 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1510 	if (pmp < EM_MAX_SLOTS)
1511 		emp = &pp->em_priv[pmp];
1512 	else
1513 		return -EINVAL;
1514 
1515 	/* mask off the activity bits if we are in sw_activity
1516 	 * mode; the user should turn off sw_activity before setting
1517 	 * the activity LED through em_message
1518 	 */
1519 	if (emp->blink_policy)
1520 		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1521 
1522 	return ahci_transmit_led_message(ap, state, size);
1523 }
1524 
1525 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1526 {
1527 	struct ata_link *link = dev->link;
1528 	struct ata_port *ap = link->ap;
1529 	struct ahci_port_priv *pp = ap->private_data;
1530 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1531 	u32 port_led_state = emp->led_state;
1532 
1533 	/* save the desired Activity LED behavior */
1534 	if (val == OFF) {
1535 		/* clear LFLAG */
1536 		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1537 
1538 		/* set the LED to OFF */
1539 		port_led_state &= EM_MSG_LED_VALUE_OFF;
1540 		port_led_state |= (ap->port_no | (link->pmp << 8));
1541 		ahci_transmit_led_message(ap, port_led_state, 4);
1542 	} else {
1543 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1544 		if (val == BLINK_OFF) {
1545 			/* set LED to ON for idle */
1546 			port_led_state &= EM_MSG_LED_VALUE_OFF;
1547 			port_led_state |= (ap->port_no | (link->pmp << 8));
1548 			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1549 			ahci_transmit_led_message(ap, port_led_state, 4);
1550 		}
1551 	}
1552 	emp->blink_policy = val;
1553 	return 0;
1554 }
1555 
1556 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1557 {
1558 	struct ata_link *link = dev->link;
1559 	struct ata_port *ap = link->ap;
1560 	struct ahci_port_priv *pp = ap->private_data;
1561 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1562 
1563 	/* display the saved value of activity behavior for this
1564 	 * disk.
1565 	 */
1566 	return sprintf(buf, "%d\n", emp->blink_policy);
1567 }
1568 
1569 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1570 			   int port_no, void __iomem *mmio,
1571 			   void __iomem *port_mmio)
1572 {
1573 	const char *emsg = NULL;
1574 	int rc;
1575 	u32 tmp;
1576 
1577 	/* make sure port is not active */
1578 	rc = ahci_deinit_port(ap, &emsg);
1579 	if (rc)
1580 		dev_printk(KERN_WARNING, &pdev->dev,
1581 			   "%s (%d)\n", emsg, rc);
1582 
1583 	/* clear SError */
1584 	tmp = readl(port_mmio + PORT_SCR_ERR);
1585 	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1586 	writel(tmp, port_mmio + PORT_SCR_ERR);
1587 
1588 	/* clear port IRQ */
1589 	tmp = readl(port_mmio + PORT_IRQ_STAT);
1590 	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1591 	if (tmp)
1592 		writel(tmp, port_mmio + PORT_IRQ_STAT);
1593 
1594 	writel(1 << port_no, mmio + HOST_IRQ_STAT);
1595 }
1596 
1597 static void ahci_init_controller(struct ata_host *host)
1598 {
1599 	struct ahci_host_priv *hpriv = host->private_data;
1600 	struct pci_dev *pdev = to_pci_dev(host->dev);
1601 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1602 	int i;
1603 	void __iomem *port_mmio;
1604 	u32 tmp;
1605 	int mv;
1606 
1607 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1608 		if (pdev->device == 0x6121)
1609 			mv = 2;
1610 		else
1611 			mv = 4;
1612 		port_mmio = __ahci_port_base(host, mv);
1613 
1614 		writel(0, port_mmio + PORT_IRQ_MASK);
1615 
1616 		/* clear port IRQ */
1617 		tmp = readl(port_mmio + PORT_IRQ_STAT);
1618 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1619 		if (tmp)
1620 			writel(tmp, port_mmio + PORT_IRQ_STAT);
1621 	}
1622 
1623 	for (i = 0; i < host->n_ports; i++) {
1624 		struct ata_port *ap = host->ports[i];
1625 
1626 		port_mmio = ahci_port_base(ap);
1627 		if (ata_port_is_dummy(ap))
1628 			continue;
1629 
1630 		ahci_port_init(pdev, ap, i, mmio, port_mmio);
1631 	}
1632 
1633 	tmp = readl(mmio + HOST_CTL);
1634 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1635 	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1636 	tmp = readl(mmio + HOST_CTL);
1637 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1638 }
1639 
1640 static void ahci_dev_config(struct ata_device *dev)
1641 {
1642 	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1643 
1644 	if (hpriv->flags & AHCI_HFLAG_SECT255) {
1645 		dev->max_sectors = 255;
1646 		ata_dev_printk(dev, KERN_INFO,
1647 			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
1648 	}
1649 }
1650 
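/*
 * Classify the attached device from the port signature register.
 * PORT_SIG holds the values from the initial D2H Register FIS; unpack
 * them into a taskfile and let ata_dev_classify() decide between ATA,
 * ATAPI, port multiplier, etc.
 */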
1651 static unsigned int ahci_dev_classify(struct ata_port *ap)
1652 {
1653 	void __iomem *port_mmio = ahci_port_base(ap);
1654 	struct ata_taskfile tf;
1655 	u32 tmp;
1656 
1657 	tmp = readl(port_mmio + PORT_SIG);
1658 	tf.lbah		= (tmp >> 24)	& 0xff;
1659 	tf.lbam		= (tmp >> 16)	& 0xff;
1660 	tf.lbal		= (tmp >> 8)	& 0xff;
1661 	tf.nsect	= (tmp)		& 0xff;
1662 
1663 	return ata_dev_classify(&tf);
1664 }
1665 
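/*
 * Fill one entry of the 32-slot command list.  Each slot records the
 * option flags (FIS length, PMP port, write/ATAPI bits, etc.) and the
 * 64-bit bus address of the slot's command table.  The address is
 * split with "(x >> 16) >> 16" instead of ">> 32" so the expression
 * stays valid when dma_addr_t is only 32 bits wide.
 */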
1666 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1667 			       u32 opts)
1668 {
1669 	dma_addr_t cmd_tbl_dma;
1670 
1671 	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1672 
1673 	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1674 	pp->cmd_slot[tag].status = 0;
1675 	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1676 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1677 }
1678 
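/*
 * Kick the port's DMA engine back to life after a failed or aborted
 * command: stop the engine, issue a Command List Override (CLO) if
 * the device still reports BSY/DRQ or a port multiplier is attached
 * (provided the HBA supports CLO), then restart the engine.
 */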
1679 static int ahci_kick_engine(struct ata_port *ap)
1680 {
1681 	void __iomem *port_mmio = ahci_port_base(ap);
1682 	struct ahci_host_priv *hpriv = ap->host->private_data;
1683 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1684 	u32 tmp;
1685 	int busy, rc;
1686 
1687 	/* stop engine */
1688 	rc = ahci_stop_engine(ap);
1689 	if (rc)
1690 		goto out_restart;
1691 
1692 	/* need to do CLO?
1693 	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1694 	 */
1695 	busy = status & (ATA_BUSY | ATA_DRQ);
1696 	if (!busy && !sata_pmp_attached(ap)) {
1697 		rc = 0;
1698 		goto out_restart;
1699 	}
1700 
1701 	if (!(hpriv->cap & HOST_CAP_CLO)) {
1702 		rc = -EOPNOTSUPP;
1703 		goto out_restart;
1704 	}
1705 
1706 	/* perform CLO */
1707 	tmp = readl(port_mmio + PORT_CMD);
1708 	tmp |= PORT_CMD_CLO;
1709 	writel(tmp, port_mmio + PORT_CMD);
1710 
1711 	rc = 0;
1712 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1713 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1714 	if (tmp & PORT_CMD_CLO)
1715 		rc = -EIO;
1716 
1717 	/* restart engine */
1718  out_restart:
1719 	ahci_start_engine(ap);
1720 	return rc;
1721 }
1722 
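/*
 * Issue a single polled command through command slot 0: build the FIS
 * in the command table, program the slot, write bit 0 of
 * PORT_CMD_ISSUE and, if a timeout is given, poll until the bit
 * clears.  On timeout the engine is kicked and -EBUSY is returned;
 * with no timeout the write is simply flushed.
 */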
1723 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1724 				struct ata_taskfile *tf, int is_cmd, u16 flags,
1725 				unsigned long timeout_msec)
1726 {
1727 	const u32 cmd_fis_len = 5; /* five dwords */
1728 	struct ahci_port_priv *pp = ap->private_data;
1729 	void __iomem *port_mmio = ahci_port_base(ap);
1730 	u8 *fis = pp->cmd_tbl;
1731 	u32 tmp;
1732 
1733 	/* prep the command */
1734 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
1735 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1736 
1737 	/* issue & wait */
1738 	writel(1, port_mmio + PORT_CMD_ISSUE);
1739 
1740 	if (timeout_msec) {
1741 		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1742 					1, timeout_msec);
1743 		if (tmp & 0x1) {
1744 			ahci_kick_engine(ap);
1745 			return -EBUSY;
1746 		}
1747 	} else
1748 		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
1749 
1750 	return 0;
1751 }
1752 
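/*
 * Common softreset implementation: kick the engine so the port can
 * accept the reset FISes, send a Register H2D FIS with SRST set, wait
 * briefly, send a second FIS with SRST cleared, then wait for the
 * link to become ready and classify the device from its signature.
 */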
1753 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1754 			     int pmp, unsigned long deadline,
1755 			     int (*check_ready)(struct ata_link *link))
1756 {
1757 	struct ata_port *ap = link->ap;
1758 	struct ahci_host_priv *hpriv = ap->host->private_data;
1759 	const char *reason = NULL;
1760 	unsigned long now, msecs;
1761 	struct ata_taskfile tf;
1762 	int rc;
1763 
1764 	DPRINTK("ENTER\n");
1765 
1766 	/* prepare for SRST (AHCI-1.1 10.4.1) */
1767 	rc = ahci_kick_engine(ap);
1768 	if (rc && rc != -EOPNOTSUPP)
1769 		ata_link_printk(link, KERN_WARNING,
1770 				"failed to reset engine (errno=%d)\n", rc);
1771 
1772 	ata_tf_init(link->device, &tf);
1773 
1774 	/* issue the first H2D Register FIS */
1775 	msecs = 0;
1776 	now = jiffies;
1777 	if (time_after(deadline, now))
1778 		msecs = jiffies_to_msecs(deadline - now);
1779 
1780 	tf.ctl |= ATA_SRST;
1781 	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1782 				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1783 		rc = -EIO;
1784 		reason = "1st FIS failed";
1785 		goto fail;
1786 	}
1787 
1788 	/* spec says at least 5us, but be generous and sleep for 1ms */
1789 	msleep(1);
1790 
1791 	/* issue the second H2D Register FIS */
1792 	tf.ctl &= ~ATA_SRST;
1793 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1794 
1795 	/* wait for link to become ready */
1796 	rc = ata_wait_after_reset(link, deadline, check_ready);
1797 	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1798 		/*
1799 		 * Workaround for cases where link online status can't
1800 		 * be trusted.  Treat device readiness timeout as link
1801 		 * offline.
1802 		 */
1803 		ata_link_printk(link, KERN_INFO,
1804 				"device not ready, treating as offline\n");
1805 		*class = ATA_DEV_NONE;
1806 	} else if (rc) {
1807 		/* link occupied, -ENODEV too is an error */
1808 		reason = "device not ready";
1809 		goto fail;
1810 	} else
1811 		*class = ahci_dev_classify(ap);
1812 
1813 	DPRINTK("EXIT, class=%u\n", *class);
1814 	return 0;
1815 
1816  fail:
1817 	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1818 	return rc;
1819 }
1820 
1821 static int ahci_check_ready(struct ata_link *link)
1822 {
1823 	void __iomem *port_mmio = ahci_port_base(link->ap);
1824 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1825 
1826 	return ata_check_ready(status);
1827 }
1828 
1829 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1830 			  unsigned long deadline)
1831 {
1832 	int pmp = sata_srst_pmp(link);
1833 
1834 	DPRINTK("ENTER\n");
1835 
1836 	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1837 }
1838 
1839 static int ahci_sb600_check_ready(struct ata_link *link)
1840 {
1841 	void __iomem *port_mmio = ahci_port_base(link->ap);
1842 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1843 	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1844 
1845 	/*
1846 	 * If BAD PMP is flagged due to the HW bug, there is no need to check
1847 	 * TFDATA; bailing out early saves the timeout delay.
1848 	 */
1849 	if (irq_status & PORT_IRQ_BAD_PMP)
1850 		return -EIO;
1851 
1852 	return ata_check_ready(status);
1853 }
1854 
1855 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1856 				unsigned long deadline)
1857 {
1858 	struct ata_port *ap = link->ap;
1859 	void __iomem *port_mmio = ahci_port_base(ap);
1860 	int pmp = sata_srst_pmp(link);
1861 	int rc;
1862 	u32 irq_sts;
1863 
1864 	DPRINTK("ENTER\n");
1865 
1866 	rc = ahci_do_softreset(link, class, pmp, deadline,
1867 			       ahci_sb600_check_ready);
1868 
1869 	/*
1870 	 * Soft reset fails with IPMS set on some ATI chips when PMP
1871 	 * support is enabled but a SATA HDD/ODD is connected to the
1872 	 * port; retry the soft reset against PMP port 0.
1873 	 */
1874 	if (rc == -EIO) {
1875 		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1876 		if (irq_sts & PORT_IRQ_BAD_PMP) {
1877 			ata_link_printk(link, KERN_WARNING,
1878 					"applying SB600 PMP SRST workaround "
1879 					"and retrying\n");
1880 			rc = ahci_do_softreset(link, class, 0, deadline,
1881 					       ahci_check_ready);
1882 		}
1883 	}
1884 
1885 	return rc;
1886 }
1887 
1888 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1889 			  unsigned long deadline)
1890 {
1891 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1892 	struct ata_port *ap = link->ap;
1893 	struct ahci_port_priv *pp = ap->private_data;
1894 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1895 	struct ata_taskfile tf;
1896 	bool online;
1897 	int rc;
1898 
1899 	DPRINTK("ENTER\n");
1900 
1901 	ahci_stop_engine(ap);
1902 
1903 	/* clear D2H reception area to properly wait for D2H FIS */
1904 	ata_tf_init(link->device, &tf);
1905 	tf.command = 0x80;
1906 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1907 
1908 	rc = sata_link_hardreset(link, timing, deadline, &online,
1909 				 ahci_check_ready);
1910 
1911 	ahci_start_engine(ap);
1912 
1913 	if (online)
1914 		*class = ahci_dev_classify(ap);
1915 
1916 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1917 	return rc;
1918 }
1919 
1920 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1921 				 unsigned long deadline)
1922 {
1923 	struct ata_port *ap = link->ap;
1924 	bool online;
1925 	int rc;
1926 
1927 	DPRINTK("ENTER\n");
1928 
1929 	ahci_stop_engine(ap);
1930 
1931 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1932 				 deadline, &online, NULL);
1933 
1934 	ahci_start_engine(ap);
1935 
1936 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1937 
1938 	/* vt8251 doesn't clear BSY on signature FIS reception,
1939 	 * request follow-up softreset.
1940 	 */
1941 	return online ? -EAGAIN : rc;
1942 }
1943 
1944 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1945 				unsigned long deadline)
1946 {
1947 	struct ata_port *ap = link->ap;
1948 	struct ahci_port_priv *pp = ap->private_data;
1949 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1950 	struct ata_taskfile tf;
1951 	bool online;
1952 	int rc;
1953 
1954 	ahci_stop_engine(ap);
1955 
1956 	/* clear D2H reception area to properly wait for D2H FIS */
1957 	ata_tf_init(link->device, &tf);
1958 	tf.command = 0x80;
1959 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1960 
1961 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1962 				 deadline, &online, NULL);
1963 
1964 	ahci_start_engine(ap);
1965 
1966 	/* The pseudo configuration device on SIMG4726 attached to
1967 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1968 	 * hardreset if no device is attached to the first downstream
1969 	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
1970 	 * work around this, wait for !BSY only briefly.  If BSY isn't
1971 	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1972 	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1973 	 *
1974 	 * Wait for two seconds.  Devices attached to downstream port
1975 	 * which can't process the following IDENTIFY after this will
1976 	 * have to be reset again.  For most cases, this should
1977 	 * suffice while keeping probing reasonably snappy.
1978 	 */
1979 	if (online) {
1980 		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1981 					  ahci_check_ready);
1982 		if (rc)
1983 			ahci_kick_engine(ap);
1984 	}
1985 	return rc;
1986 }
1987 
1988 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1989 {
1990 	struct ata_port *ap = link->ap;
1991 	void __iomem *port_mmio = ahci_port_base(ap);
1992 	u32 new_tmp, tmp;
1993 
1994 	ata_std_postreset(link, class);
1995 
1996 	/* Make sure port's ATAPI bit is set appropriately */
1997 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
1998 	if (*class == ATA_DEV_ATAPI)
1999 		new_tmp |= PORT_CMD_ATAPI;
2000 	else
2001 		new_tmp &= ~PORT_CMD_ATAPI;
2002 	if (new_tmp != tmp) {
2003 		writel(new_tmp, port_mmio + PORT_CMD);
2004 		readl(port_mmio + PORT_CMD); /* flush */
2005 	}
2006 }
2007 
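/*
 * Build the PRD table that follows the command FIS in the command
 * table: one entry per scatterlist element, holding the split 64-bit
 * buffer address and the byte count minus one (the hardware field is
 * zero-based).  Returns the number of entries written.
 */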
2008 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2009 {
2010 	struct scatterlist *sg;
2011 	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2012 	unsigned int si;
2013 
2014 	VPRINTK("ENTER\n");
2015 
2016 	/*
2017 	 * Next, the S/G list.
2018 	 */
2019 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
2020 		dma_addr_t addr = sg_dma_address(sg);
2021 		u32 sg_len = sg_dma_len(sg);
2022 
2023 		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2024 		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2025 		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
2026 	}
2027 
2028 	return si;
2029 }
2030 
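/*
 * Per-command setup before issue: write the H2D command FIS (and, for
 * ATAPI, the CDB) into this tag's command table, build the S/G table
 * if the command carries data, and program the command slot with the
 * FIS length, element count, PMP port and write/ATAPI flags.
 */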
2031 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2032 {
2033 	struct ata_port *ap = qc->ap;
2034 	struct ahci_port_priv *pp = ap->private_data;
2035 	int is_atapi = ata_is_atapi(qc->tf.protocol);
2036 	void *cmd_tbl;
2037 	u32 opts;
2038 	const u32 cmd_fis_len = 5; /* five dwords */
2039 	unsigned int n_elem;
2040 
2041 	/*
2042 	 * Fill in command table information.  First, the header,
2043 	 * a SATA Register - Host to Device command FIS.
2044 	 */
2045 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2046 
2047 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2048 	if (is_atapi) {
2049 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2050 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2051 	}
2052 
2053 	n_elem = 0;
2054 	if (qc->flags & ATA_QCFLAG_DMAMAP)
2055 		n_elem = ahci_fill_sg(qc, cmd_tbl);
2056 
2057 	/*
2058 	 * Fill in command slot information.
2059 	 */
2060 	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2061 	if (qc->tf.flags & ATA_TFLAG_WRITE)
2062 		opts |= AHCI_CMD_WRITE;
2063 	if (is_atapi)
2064 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2065 
2066 	ahci_fill_cmd_slot(pp, qc->tag, opts);
2067 }
2068 
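/*
 * Translate an error interrupt into libata error-handling state:
 * record the IRQ status and SError (which must be cleared or the
 * controller might lock up), charge device errors to the active qc or
 * link, note unknown-FIS / bad-PMP / host-bus / interface errors and
 * hotplug events, then either freeze or abort the port.
 */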
2069 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2070 {
2071 	struct ahci_host_priv *hpriv = ap->host->private_data;
2072 	struct ahci_port_priv *pp = ap->private_data;
2073 	struct ata_eh_info *host_ehi = &ap->link.eh_info;
2074 	struct ata_link *link = NULL;
2075 	struct ata_queued_cmd *active_qc;
2076 	struct ata_eh_info *active_ehi;
2077 	u32 serror;
2078 
2079 	/* determine active link */
2080 	ata_for_each_link(link, ap, EDGE)
2081 		if (ata_link_active(link))
2082 			break;
2083 	if (!link)
2084 		link = &ap->link;
2085 
2086 	active_qc = ata_qc_from_tag(ap, link->active_tag);
2087 	active_ehi = &link->eh_info;
2088 
2089 	/* record irq stat */
2090 	ata_ehi_clear_desc(host_ehi);
2091 	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2092 
2093 	/* AHCI needs SError cleared; otherwise, it might lock up */
2094 	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2095 	ahci_scr_write(&ap->link, SCR_ERROR, serror);
2096 	host_ehi->serror |= serror;
2097 
2098 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
2099 	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2100 		irq_stat &= ~PORT_IRQ_IF_ERR;
2101 
2102 	if (irq_stat & PORT_IRQ_TF_ERR) {
2103 		/* If qc is active, charge it; otherwise, the active
2104 		 * link.  There's no active qc on NCQ errors.  It will
2105 		 * be determined by EH by reading log page 10h.
2106 		 */
2107 		if (active_qc)
2108 			active_qc->err_mask |= AC_ERR_DEV;
2109 		else
2110 			active_ehi->err_mask |= AC_ERR_DEV;
2111 
2112 		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2113 			host_ehi->serror &= ~SERR_INTERNAL;
2114 	}
2115 
2116 	if (irq_stat & PORT_IRQ_UNK_FIS) {
2117 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2118 
2119 		active_ehi->err_mask |= AC_ERR_HSM;
2120 		active_ehi->action |= ATA_EH_RESET;
2121 		ata_ehi_push_desc(active_ehi,
2122 				  "unknown FIS %08x %08x %08x %08x",
2123 				  unk[0], unk[1], unk[2], unk[3]);
2124 	}
2125 
2126 	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2127 		active_ehi->err_mask |= AC_ERR_HSM;
2128 		active_ehi->action |= ATA_EH_RESET;
2129 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
2130 	}
2131 
2132 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2133 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
2134 		host_ehi->action |= ATA_EH_RESET;
2135 		ata_ehi_push_desc(host_ehi, "host bus error");
2136 	}
2137 
2138 	if (irq_stat & PORT_IRQ_IF_ERR) {
2139 		host_ehi->err_mask |= AC_ERR_ATA_BUS;
2140 		host_ehi->action |= ATA_EH_RESET;
2141 		ata_ehi_push_desc(host_ehi, "interface fatal error");
2142 	}
2143 
2144 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2145 		ata_ehi_hotplugged(host_ehi);
2146 		ata_ehi_push_desc(host_ehi, "%s",
2147 			irq_stat & PORT_IRQ_CONNECT ?
2148 			"connection status changed" : "PHY RDY changed");
2149 	}
2150 
2151 	/* okay, let's hand over to EH */
2152 
2153 	if (irq_stat & PORT_IRQ_FREEZE)
2154 		ata_port_freeze(ap);
2155 	else
2156 		ata_port_abort(ap);
2157 }
2158 
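/*
 * Per-port interrupt service: acknowledge PORT_IRQ_STAT, hand fatal
 * conditions to ahci_error_intr(), process SDB-FIS based asynchronous
 * notification, and complete finished commands by comparing
 * PORT_SCR_ACT (NCQ) or PORT_CMD_ISSUE (non-NCQ) against the set of
 * active tags.
 */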
2159 static void ahci_port_intr(struct ata_port *ap)
2160 {
2161 	void __iomem *port_mmio = ahci_port_base(ap);
2162 	struct ata_eh_info *ehi = &ap->link.eh_info;
2163 	struct ahci_port_priv *pp = ap->private_data;
2164 	struct ahci_host_priv *hpriv = ap->host->private_data;
2165 	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2166 	u32 status, qc_active;
2167 	int rc;
2168 
2169 	status = readl(port_mmio + PORT_IRQ_STAT);
2170 	writel(status, port_mmio + PORT_IRQ_STAT);
2171 
2172 	/* ignore BAD_PMP while resetting */
2173 	if (unlikely(resetting))
2174 		status &= ~PORT_IRQ_BAD_PMP;
2175 
2176 	/* If we are getting PhyRdy, this is
2177 	 * just a power state change; clear
2178 	 * it, along with the PhyRdy/Comm
2179 	 * Wake bits, from SError.
2180 	 */
2181 	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2182 		(status & PORT_IRQ_PHYRDY)) {
2183 		status &= ~PORT_IRQ_PHYRDY;
2184 		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2185 	}
2186 
2187 	if (unlikely(status & PORT_IRQ_ERROR)) {
2188 		ahci_error_intr(ap, status);
2189 		return;
2190 	}
2191 
2192 	if (status & PORT_IRQ_SDB_FIS) {
2193 		/* If SNotification is available, leave notification
2194 		 * handling to sata_async_notification().  If not,
2195 		 * emulate it by snooping SDB FIS RX area.
2196 		 *
2197 		 * Snooping FIS RX area is probably cheaper than
2198 		 * poking SNotification but some controllers which
2199 		 * implement SNotification, ICH9 for example, don't
2200 		 * store the AN SDB FIS into the receive area.
2201 		 */
2202 		if (hpriv->cap & HOST_CAP_SNTF)
2203 			sata_async_notification(ap);
2204 		else {
2205 			/* If the 'N' bit in word 0 of the FIS is set,
2206 			 * we just received asynchronous notification.
2207 			 * Tell libata about it.
2208 			 */
2209 			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2210 			u32 f0 = le32_to_cpu(f[0]);
2211 
2212 			if (f0 & (1 << 15))
2213 				sata_async_notification(ap);
2214 		}
2215 	}
2216 
2217 	/* pp->active_link is valid iff any command is in flight */
2218 	if (ap->qc_active && pp->active_link->sactive)
2219 		qc_active = readl(port_mmio + PORT_SCR_ACT);
2220 	else
2221 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2222 
2223 	rc = ata_qc_complete_multiple(ap, qc_active);
2224 
2225 	/* while resetting, invalid completions are expected */
2226 	if (unlikely(rc < 0 && !resetting)) {
2227 		ehi->err_mask |= AC_ERR_HSM;
2228 		ehi->action |= ATA_EH_RESET;
2229 		ata_port_freeze(ap);
2230 	}
2231 }
2232 
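/*
 * Top-level interrupt handler: read HOST_IRQ_STAT, service every
 * implemented port whose bit is set, and only then write the status
 * back to clear it, since clearing before the port events are handled
 * would re-raise a spurious interrupt (see the comment below).
 */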
2233 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2234 {
2235 	struct ata_host *host = dev_instance;
2236 	struct ahci_host_priv *hpriv;
2237 	unsigned int i, handled = 0;
2238 	void __iomem *mmio;
2239 	u32 irq_stat, irq_masked;
2240 
2241 	VPRINTK("ENTER\n");
2242 
2243 	hpriv = host->private_data;
2244 	mmio = host->iomap[AHCI_PCI_BAR];
2245 
2246 	/* sigh.  0xffffffff is a valid return from h/w */
2247 	irq_stat = readl(mmio + HOST_IRQ_STAT);
2248 	if (!irq_stat)
2249 		return IRQ_NONE;
2250 
2251 	irq_masked = irq_stat & hpriv->port_map;
2252 
2253 	spin_lock(&host->lock);
2254 
2255 	for (i = 0; i < host->n_ports; i++) {
2256 		struct ata_port *ap;
2257 
2258 		if (!(irq_masked & (1 << i)))
2259 			continue;
2260 
2261 		ap = host->ports[i];
2262 		if (ap) {
2263 			ahci_port_intr(ap);
2264 			VPRINTK("port %u\n", i);
2265 		} else {
2266 			VPRINTK("port %u (no irq)\n", i);
2267 			if (ata_ratelimit())
2268 				dev_printk(KERN_WARNING, host->dev,
2269 					"interrupt on disabled port %u\n", i);
2270 		}
2271 
2272 		handled = 1;
2273 	}
2274 
2275 	/* HOST_IRQ_STAT behaves as a level triggered latch, meaning that
2276 	 * it should be cleared after all the port events are cleared;
2277 	 * otherwise, it will raise a spurious interrupt after each
2278 	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
2279 	 * information.
2280 	 *
2281 	 * Also, use the unmasked value to clear interrupt as spurious
2282 	 * pending event on a dummy port might cause screaming IRQ.
2283 	 */
2284 	writel(irq_stat, mmio + HOST_IRQ_STAT);
2285 
2286 	spin_unlock(&host->lock);
2287 
2288 	VPRINTK("EXIT\n");
2289 
2290 	return IRQ_RETVAL(handled);
2291 }
2292 
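/*
 * Issue a prepared command.  For NCQ commands the tag's bit must be
 * set in PORT_SCR_ACT before the same bit is written to
 * PORT_CMD_ISSUE; the active link is remembered so the completion
 * path knows whether an NCQ phase is in progress.
 */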
2293 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2294 {
2295 	struct ata_port *ap = qc->ap;
2296 	void __iomem *port_mmio = ahci_port_base(ap);
2297 	struct ahci_port_priv *pp = ap->private_data;
2298 
2299 	/* Keep track of the currently active link.  It will be used
2300 	 * in completion path to determine whether NCQ phase is in
2301 	 * progress.
2302 	 */
2303 	pp->active_link = qc->dev->link;
2304 
2305 	if (qc->tf.protocol == ATA_PROT_NCQ)
2306 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2307 	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2308 
2309 	ahci_sw_activity(qc->dev->link);
2310 
2311 	return 0;
2312 }
2313 
2314 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2315 {
2316 	struct ahci_port_priv *pp = qc->ap->private_data;
2317 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2318 
2319 	ata_tf_from_fis(d2h_fis, &qc->result_tf);
2320 	return true;
2321 }
2322 
2323 static void ahci_freeze(struct ata_port *ap)
2324 {
2325 	void __iomem *port_mmio = ahci_port_base(ap);
2326 
2327 	/* turn IRQ off */
2328 	writel(0, port_mmio + PORT_IRQ_MASK);
2329 }
2330 
2331 static void ahci_thaw(struct ata_port *ap)
2332 {
2333 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2334 	void __iomem *port_mmio = ahci_port_base(ap);
2335 	u32 tmp;
2336 	struct ahci_port_priv *pp = ap->private_data;
2337 
2338 	/* clear IRQ */
2339 	tmp = readl(port_mmio + PORT_IRQ_STAT);
2340 	writel(tmp, port_mmio + PORT_IRQ_STAT);
2341 	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2342 
2343 	/* turn IRQ back on */
2344 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2345 }
2346 
2347 static void ahci_error_handler(struct ata_port *ap)
2348 {
2349 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2350 		/* restart engine */
2351 		ahci_stop_engine(ap);
2352 		ahci_start_engine(ap);
2353 	}
2354 
2355 	sata_pmp_error_handler(ap);
2356 }
2357 
2358 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2359 {
2360 	struct ata_port *ap = qc->ap;
2361 
2362 	/* make DMA engine forget about the failed command */
2363 	if (qc->flags & ATA_QCFLAG_FAILED)
2364 		ahci_kick_engine(ap);
2365 }
2366 
2367 static void ahci_pmp_attach(struct ata_port *ap)
2368 {
2369 	void __iomem *port_mmio = ahci_port_base(ap);
2370 	struct ahci_port_priv *pp = ap->private_data;
2371 	u32 cmd;
2372 
2373 	cmd = readl(port_mmio + PORT_CMD);
2374 	cmd |= PORT_CMD_PMP;
2375 	writel(cmd, port_mmio + PORT_CMD);
2376 
2377 	pp->intr_mask |= PORT_IRQ_BAD_PMP;
2378 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2379 }
2380 
2381 static void ahci_pmp_detach(struct ata_port *ap)
2382 {
2383 	void __iomem *port_mmio = ahci_port_base(ap);
2384 	struct ahci_port_priv *pp = ap->private_data;
2385 	u32 cmd;
2386 
2387 	cmd = readl(port_mmio + PORT_CMD);
2388 	cmd &= ~PORT_CMD_PMP;
2389 	writel(cmd, port_mmio + PORT_CMD);
2390 
2391 	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2392 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2393 }
2394 
2395 static int ahci_port_resume(struct ata_port *ap)
2396 {
2397 	ahci_power_up(ap);
2398 	ahci_start_port(ap);
2399 
2400 	if (sata_pmp_attached(ap))
2401 		ahci_pmp_attach(ap);
2402 	else
2403 		ahci_pmp_detach(ap);
2404 
2405 	return 0;
2406 }
2407 
2408 #ifdef CONFIG_PM
2409 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2410 {
2411 	const char *emsg = NULL;
2412 	int rc;
2413 
2414 	rc = ahci_deinit_port(ap, &emsg);
2415 	if (rc == 0)
2416 		ahci_power_down(ap);
2417 	else {
2418 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2419 		ahci_start_port(ap);
2420 	}
2421 
2422 	return rc;
2423 }
2424 
2425 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2426 {
2427 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2428 	struct ahci_host_priv *hpriv = host->private_data;
2429 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2430 	u32 ctl;
2431 
2432 	if (mesg.event & PM_EVENT_SUSPEND &&
2433 	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2434 		dev_printk(KERN_ERR, &pdev->dev,
2435 			   "BIOS update required for suspend/resume\n");
2436 		return -EIO;
2437 	}
2438 
2439 	if (mesg.event & PM_EVENT_SLEEP) {
2440 		/* AHCI spec rev1.1 section 8.3.3:
2441 		 * Software must disable interrupts prior to requesting a
2442 		 * transition of the HBA to D3 state.
2443 		 */
2444 		ctl = readl(mmio + HOST_CTL);
2445 		ctl &= ~HOST_IRQ_EN;
2446 		writel(ctl, mmio + HOST_CTL);
2447 		readl(mmio + HOST_CTL); /* flush */
2448 	}
2449 
2450 	return ata_pci_device_suspend(pdev, mesg);
2451 }
2452 
2453 static int ahci_pci_device_resume(struct pci_dev *pdev)
2454 {
2455 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2456 	int rc;
2457 
2458 	rc = ata_pci_device_do_resume(pdev);
2459 	if (rc)
2460 		return rc;
2461 
2462 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2463 		rc = ahci_reset_controller(host);
2464 		if (rc)
2465 			return rc;
2466 
2467 		ahci_init_controller(host);
2468 	}
2469 
2470 	ata_host_resume(host);
2471 
2472 	return 0;
2473 }
2474 #endif
2475 
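/*
 * Per-port allocation: one DMA-coherent chunk, sized by
 * AHCI_PORT_PRIV_DMA_SZ, laid out as the 32-slot command list, the
 * received-FIS area and the per-tag command tables.  The default
 * interrupt mask is recorded and the port is powered up and started
 * via ahci_port_resume().
 */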
2476 static int ahci_port_start(struct ata_port *ap)
2477 {
2478 	struct device *dev = ap->host->dev;
2479 	struct ahci_port_priv *pp;
2480 	void *mem;
2481 	dma_addr_t mem_dma;
2482 
2483 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2484 	if (!pp)
2485 		return -ENOMEM;
2486 
2487 	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2488 				  GFP_KERNEL);
2489 	if (!mem)
2490 		return -ENOMEM;
2491 	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2492 
2493 	/*
2494 	 * First item in chunk of DMA memory: 32-slot command table,
2495 	 * 32 bytes each in size
2496 	 */
2497 	pp->cmd_slot = mem;
2498 	pp->cmd_slot_dma = mem_dma;
2499 
2500 	mem += AHCI_CMD_SLOT_SZ;
2501 	mem_dma += AHCI_CMD_SLOT_SZ;
2502 
2503 	/*
2504 	 * Second item: Received-FIS area
2505 	 */
2506 	pp->rx_fis = mem;
2507 	pp->rx_fis_dma = mem_dma;
2508 
2509 	mem += AHCI_RX_FIS_SZ;
2510 	mem_dma += AHCI_RX_FIS_SZ;
2511 
2512 	/*
2513 	 * Third item: command tables, one per command slot; each holds
2514 	 * the command FIS, the ATAPI CDB and the scatter-gather table
2515 	 */
2516 	pp->cmd_tbl = mem;
2517 	pp->cmd_tbl_dma = mem_dma;
2518 
2519 	/*
2520 	 * Save off initial list of interrupts to be enabled.
2521 	 * This could be changed later
2522 	 */
2523 	pp->intr_mask = DEF_PORT_IRQ;
2524 
2525 	ap->private_data = pp;
2526 
2527 	/* engage engines, captain */
2528 	return ahci_port_resume(ap);
2529 }
2530 
2531 static void ahci_port_stop(struct ata_port *ap)
2532 {
2533 	const char *emsg = NULL;
2534 	int rc;
2535 
2536 	/* de-initialize port */
2537 	rc = ahci_deinit_port(ap, &emsg);
2538 	if (rc)
2539 		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2540 }
2541 
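/*
 * Configure the PCI DMA masks: when the caller indicates 64-bit
 * addressing is usable, try a 64-bit streaming mask and fall back
 * from a 64-bit to a 32-bit consistent mask if needed; otherwise use
 * 32-bit masks for both.
 */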
2542 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2543 {
2544 	int rc;
2545 
2546 	if (using_dac &&
2547 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2548 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2549 		if (rc) {
2550 			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2551 			if (rc) {
2552 				dev_printk(KERN_ERR, &pdev->dev,
2553 					   "64-bit DMA enable failed\n");
2554 				return rc;
2555 			}
2556 		}
2557 	} else {
2558 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2559 		if (rc) {
2560 			dev_printk(KERN_ERR, &pdev->dev,
2561 				   "32-bit DMA enable failed\n");
2562 			return rc;
2563 		}
2564 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2565 		if (rc) {
2566 			dev_printk(KERN_ERR, &pdev->dev,
2567 				   "32-bit consistent DMA enable failed\n");
2568 			return rc;
2569 		}
2570 	}
2571 	return 0;
2572 }
2573 
2574 static void ahci_print_info(struct ata_host *host)
2575 {
2576 	struct ahci_host_priv *hpriv = host->private_data;
2577 	struct pci_dev *pdev = to_pci_dev(host->dev);
2578 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2579 	u32 vers, cap, cap2, impl, speed;
2580 	const char *speed_s;
2581 	u16 cc;
2582 	const char *scc_s;
2583 
2584 	vers = readl(mmio + HOST_VERSION);
2585 	cap = hpriv->cap;
2586 	cap2 = hpriv->cap2;
2587 	impl = hpriv->port_map;
2588 
2589 	speed = (cap >> 20) & 0xf;
2590 	if (speed == 1)
2591 		speed_s = "1.5";
2592 	else if (speed == 2)
2593 		speed_s = "3";
2594 	else if (speed == 3)
2595 		speed_s = "6";
2596 	else
2597 		speed_s = "?";
2598 
2599 	pci_read_config_word(pdev, 0x0a, &cc);
2600 	if (cc == PCI_CLASS_STORAGE_IDE)
2601 		scc_s = "IDE";
2602 	else if (cc == PCI_CLASS_STORAGE_SATA)
2603 		scc_s = "SATA";
2604 	else if (cc == PCI_CLASS_STORAGE_RAID)
2605 		scc_s = "RAID";
2606 	else
2607 		scc_s = "unknown";
2608 
2609 	dev_printk(KERN_INFO, &pdev->dev,
2610 		"AHCI %02x%02x.%02x%02x "
2611 		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2612 		,
2613 
2614 		(vers >> 24) & 0xff,
2615 		(vers >> 16) & 0xff,
2616 		(vers >> 8) & 0xff,
2617 		vers & 0xff,
2618 
2619 		((cap >> 8) & 0x1f) + 1,
2620 		(cap & 0x1f) + 1,
2621 		speed_s,
2622 		impl,
2623 		scc_s);
2624 
2625 	dev_printk(KERN_INFO, &pdev->dev,
2626 		"flags: "
2627 		"%s%s%s%s%s%s%s"
2628 		"%s%s%s%s%s%s%s"
2629 		"%s%s%s%s%s%s\n"
2630 		,
2631 
2632 		cap & HOST_CAP_64 ? "64bit " : "",
2633 		cap & HOST_CAP_NCQ ? "ncq " : "",
2634 		cap & HOST_CAP_SNTF ? "sntf " : "",
2635 		cap & HOST_CAP_MPS ? "ilck " : "",
2636 		cap & HOST_CAP_SSS ? "stag " : "",
2637 		cap & HOST_CAP_ALPM ? "pm " : "",
2638 		cap & HOST_CAP_LED ? "led " : "",
2639 		cap & HOST_CAP_CLO ? "clo " : "",
2640 		cap & HOST_CAP_ONLY ? "only " : "",
2641 		cap & HOST_CAP_PMP ? "pmp " : "",
2642 		cap & HOST_CAP_FBS ? "fbs " : "",
2643 		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2644 		cap & HOST_CAP_SSC ? "slum " : "",
2645 		cap & HOST_CAP_PART ? "part " : "",
2646 		cap & HOST_CAP_CCC ? "ccc " : "",
2647 		cap & HOST_CAP_EMS ? "ems " : "",
2648 		cap & HOST_CAP_SXS ? "sxs " : "",
2649 		cap2 & HOST_CAP2_APST ? "apst " : "",
2650 		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2651 		cap2 & HOST_CAP2_BOH ? "boh " : ""
2652 		);
2653 }
2654 
2655 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2656  * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
2657  * support PMP and the 4726 either directly exports the device
2658  * attached to the first downstream port or acts as a hardware storage
2659  * controller and emulates a single ATA device (can be RAID 0/1 or some
2660  * other configuration).
2661  *
2662  * When there's no device attached to the first downstream port of the
2663  * 4726, "Config Disk" appears, which is a pseudo ATA device to
2664  * configure the 4726.  However, ATA emulation of the device is very
2665  * lame.  It doesn't send signature D2H Reg FIS after the initial
2666  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2667  *
2668  * The following function works around the problem by always using
2669  * hardreset on the port and not depending on receiving signature FIS
2670  * afterward.  If signature FIS isn't received soon, ATA class is
2671  * assumed without follow-up softreset.
2672  */
2673 static void ahci_p5wdh_workaround(struct ata_host *host)
2674 {
2675 	static struct dmi_system_id sysids[] = {
2676 		{
2677 			.ident = "P5W DH Deluxe",
2678 			.matches = {
2679 				DMI_MATCH(DMI_SYS_VENDOR,
2680 					  "ASUSTEK COMPUTER INC"),
2681 				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2682 			},
2683 		},
2684 		{ }
2685 	};
2686 	struct pci_dev *pdev = to_pci_dev(host->dev);
2687 
2688 	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2689 	    dmi_check_system(sysids)) {
2690 		struct ata_port *ap = host->ports[1];
2691 
2692 		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2693 			   "Deluxe on-board SIMG4726 workaround\n");
2694 
2695 		ap->ops = &ahci_p5wdh_ops;
2696 		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2697 	}
2698 }
2699 
2700 /* only some SB600 ahci controllers can do 64bit DMA */
2701 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2702 {
2703 	static const struct dmi_system_id sysids[] = {
2704 		/*
2705 		 * The oldest version known to be broken is 0901 and
2706 		 * working is 1501 which was released on 2007-10-26.
2707 		 * Enable 64bit DMA on 1501 and anything newer.
2708 		 *
2709 		 * Please read bko#9412 for more info.
2710 		 */
2711 		{
2712 			.ident = "ASUS M2A-VM",
2713 			.matches = {
2714 				DMI_MATCH(DMI_BOARD_VENDOR,
2715 					  "ASUSTeK Computer INC."),
2716 				DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2717 			},
2718 			.driver_data = "20071026",	/* yyyymmdd */
2719 		},
2720 		{ }
2721 	};
2722 	const struct dmi_system_id *match;
2723 	int year, month, date;
2724 	char buf[9];
2725 
2726 	match = dmi_first_match(sysids);
2727 	if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2728 	    !match)
2729 		return false;
2730 
2731 	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2732 	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2733 
2734 	if (strcmp(buf, match->driver_data) >= 0) {
2735 		dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
2736 			   match->ident);
2737 		return true;
2738 	} else {
2739 		dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2740 			   "forcing 32bit DMA, update BIOS\n", match->ident);
2741 		return false;
2742 	}
2743 }
2744 
2745 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2746 {
2747 	static const struct dmi_system_id broken_systems[] = {
2748 		{
2749 			.ident = "HP Compaq nx6310",
2750 			.matches = {
2751 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2752 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2753 			},
2754 			/* PCI slot number of the controller */
2755 			.driver_data = (void *)0x1FUL,
2756 		},
2757 		{
2758 			.ident = "HP Compaq 6720s",
2759 			.matches = {
2760 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2761 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2762 			},
2763 			/* PCI slot number of the controller */
2764 			.driver_data = (void *)0x1FUL,
2765 		},
2766 
2767 		{ }	/* terminate list */
2768 	};
2769 	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2770 
2771 	if (dmi) {
2772 		unsigned long slot = (unsigned long)dmi->driver_data;
2773 		/* apply the quirk only to on-board controllers */
2774 		return slot == PCI_SLOT(pdev->devfn);
2775 	}
2776 
2777 	return false;
2778 }
2779 
2780 static bool ahci_broken_suspend(struct pci_dev *pdev)
2781 {
2782 	static const struct dmi_system_id sysids[] = {
2783 		/*
2784 		 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2785 		 * to the harddisk doesn't become online after
2786 		 * resuming from STR.  Warn and fail suspend.
2787 		 */
2788 		{
2789 			.ident = "dv4",
2790 			.matches = {
2791 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2792 				DMI_MATCH(DMI_PRODUCT_NAME,
2793 					  "HP Pavilion dv4 Notebook PC"),
2794 			},
2795 			.driver_data = "F.30", /* cutoff BIOS version */
2796 		},
2797 		{
2798 			.ident = "dv5",
2799 			.matches = {
2800 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2801 				DMI_MATCH(DMI_PRODUCT_NAME,
2802 					  "HP Pavilion dv5 Notebook PC"),
2803 			},
2804 			.driver_data = "F.16", /* cutoff BIOS version */
2805 		},
2806 		{
2807 			.ident = "dv6",
2808 			.matches = {
2809 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2810 				DMI_MATCH(DMI_PRODUCT_NAME,
2811 					  "HP Pavilion dv6 Notebook PC"),
2812 			},
2813 			.driver_data = "F.21",	/* cutoff BIOS version */
2814 		},
2815 		{
2816 			.ident = "HDX18",
2817 			.matches = {
2818 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2819 				DMI_MATCH(DMI_PRODUCT_NAME,
2820 					  "HP HDX18 Notebook PC"),
2821 			},
2822 			.driver_data = "F.23",	/* cutoff BIOS version */
2823 		},
2824 		{ }	/* terminate list */
2825 	};
2826 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
2827 	const char *ver;
2828 
2829 	if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
2830 		return false;
2831 
2832 	ver = dmi_get_system_info(DMI_BIOS_VERSION);
2833 
2834 	return !ver || strcmp(ver, dmi->driver_data) < 0;
2835 }
2836 
2837 static bool ahci_broken_online(struct pci_dev *pdev)
2838 {
2839 #define ENCODE_BUSDEVFN(bus, slot, func)			\
2840 	(void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
2841 	static const struct dmi_system_id sysids[] = {
2842 		/*
2843 		 * There are several Gigabyte boards which use
2844 		 * SIMG5723s configured as hardware RAID.  Certain
2845 		 * 5723 firmware revisions shipped on them keep the link
2846 		 * online but fail to answer properly to SRST or
2847 		 * IDENTIFY when no device is attached downstream
2848 		 * causing libata to retry quite a few times leading
2849 		 * to excessive detection delay.
2850 		 *
2851 		 * As these firmwares respond to the second reset try
2852 		 * with invalid device signature, considering unknown
2853 		 * sig as offline works around the problem acceptably.
2854 		 */
2855 		{
2856 			.ident = "EP45-DQ6",
2857 			.matches = {
2858 				DMI_MATCH(DMI_BOARD_VENDOR,
2859 					  "Gigabyte Technology Co., Ltd."),
2860 				DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2861 			},
2862 			.driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2863 		},
2864 		{
2865 			.ident = "EP45-DS5",
2866 			.matches = {
2867 				DMI_MATCH(DMI_BOARD_VENDOR,
2868 					  "Gigabyte Technology Co., Ltd."),
2869 				DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2870 			},
2871 			.driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2872 		},
2873 		{ }	/* terminate list */
2874 	};
2875 #undef ENCODE_BUSDEVFN
2876 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
2877 	unsigned int val;
2878 
2879 	if (!dmi)
2880 		return false;
2881 
2882 	val = (unsigned long)dmi->driver_data;
2883 
2884 	return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
2885 }
2886 
2887 #ifdef CONFIG_ATA_ACPI
2888 static void ahci_gtf_filter_workaround(struct ata_host *host)
2889 {
2890 	static const struct dmi_system_id sysids[] = {
2891 		/*
2892 		 * Aspire 3810T issues a bunch of SATA enable commands
2893 		 * via _GTF including an invalid one and one which is
2894 		 * rejected by the device.  Among the successful ones
2895 		 * is FPDMA non-zero offset enable which when enabled
2896 		 * only on the drive side leads to NCQ command
2897 		 * failures.  Filter it out.
2898 		 */
2899 		{
2900 			.ident = "Aspire 3810T",
2901 			.matches = {
2902 				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2903 				DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
2904 			},
2905 			.driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
2906 		},
2907 		{ }
2908 	};
2909 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
2910 	unsigned int filter;
2911 	int i;
2912 
2913 	if (!dmi)
2914 		return;
2915 
2916 	filter = (unsigned long)dmi->driver_data;
2917 	dev_printk(KERN_INFO, host->dev,
2918 		   "applying extra ACPI _GTF filter 0x%x for %s\n",
2919 		   filter, dmi->ident);
2920 
2921 	for (i = 0; i < host->n_ports; i++) {
2922 		struct ata_port *ap = host->ports[i];
2923 		struct ata_link *link;
2924 		struct ata_device *dev;
2925 
2926 		ata_for_each_link(link, ap, EDGE)
2927 			ata_for_each_dev(dev, link, ALL)
2928 				dev->gtf_filter |= filter;
2929 	}
2930 }
2931 #else
2932 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
2933 {}
2934 #endif
2935 
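/*
 * PCI probe: enable the device, map the AHCI BAR, apply board/BIOS
 * specific quirks, read the HBA capabilities, allocate one ata_port
 * per possible port, set up the DMA masks, reset and initialize the
 * controller, and finally register the host with a shared interrupt
 * handler.
 */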
2936 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2937 {
2938 	static int printed_version;
2939 	unsigned int board_id = ent->driver_data;
2940 	struct ata_port_info pi = ahci_port_info[board_id];
2941 	const struct ata_port_info *ppi[] = { &pi, NULL };
2942 	struct device *dev = &pdev->dev;
2943 	struct ahci_host_priv *hpriv;
2944 	struct ata_host *host;
2945 	int n_ports, i, rc;
2946 
2947 	VPRINTK("ENTER\n");
2948 
2949 	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2950 
2951 	if (!printed_version++)
2952 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2953 
2954 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2955 	   can drive them all, so if both drivers are selected make sure
2956 	   AHCI stays out of the way */
2957 	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2958 		return -ENODEV;
2959 
2960 	/* acquire resources */
2961 	rc = pcim_enable_device(pdev);
2962 	if (rc)
2963 		return rc;
2964 
2965 	/* AHCI controllers often implement an SFF compatible interface.
2966 	 * Grab all PCI BARs just in case.
2967 	 */
2968 	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2969 	if (rc == -EBUSY)
2970 		pcim_pin_device(pdev);
2971 	if (rc)
2972 		return rc;
2973 
2974 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2975 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2976 		u8 map;
2977 
2978 		/* ICH6s share the same PCI ID for both piix and ahci
2979 		 * modes.  Enabling ahci mode while MAP indicates
2980 		 * combined mode is a bad idea.  Yield to ata_piix.
2981 		 */
2982 		pci_read_config_byte(pdev, ICH_MAP, &map);
2983 		if (map & 0x3) {
2984 			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2985 				   "combined mode, can't enable AHCI mode\n");
2986 			return -ENODEV;
2987 		}
2988 	}
2989 
2990 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2991 	if (!hpriv)
2992 		return -ENOMEM;
2993 	hpriv->flags |= (unsigned long)pi.private_data;
2994 
2995 	/* MCP65 revision A1 and A2 can't do MSI */
2996 	if (board_id == board_ahci_mcp65 &&
2997 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2998 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
2999 
3000 	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3001 	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3002 		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3003 
3004 	/* only some SB600s can do 64bit DMA */
3005 	if (ahci_sb600_enable_64bit(pdev))
3006 		hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3007 
3008 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3009 		pci_intx(pdev, 1);
3010 
3011 	/* save initial config */
3012 	ahci_save_initial_config(pdev, hpriv);
3013 
3014 	/* prepare host */
3015 	if (hpriv->cap & HOST_CAP_NCQ)
3016 		pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
3017 
3018 	if (hpriv->cap & HOST_CAP_PMP)
3019 		pi.flags |= ATA_FLAG_PMP;
3020 
3021 	if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3022 		u8 messages;
3023 		void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3024 		u32 em_loc = readl(mmio + HOST_EM_LOC);
3025 		u32 em_ctl = readl(mmio + HOST_EM_CTL);
3026 
3027 		messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3028 
3029 		/* we only support LED message type right now */
3030 		if ((messages & 0x01) && (ahci_em_messages == 1)) {
3031 			/* store em_loc */
3032 			hpriv->em_loc = ((em_loc >> 16) * 4);
3033 			pi.flags |= ATA_FLAG_EM;
3034 			if (!(em_ctl & EM_CTL_ALHD))
3035 				pi.flags |= ATA_FLAG_SW_ACTIVITY;
3036 		}
3037 	}
3038 
3039 	if (ahci_broken_system_poweroff(pdev)) {
3040 		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3041 		dev_info(&pdev->dev,
3042 			"quirky BIOS, skipping spindown on poweroff\n");
3043 	}
3044 
3045 	if (ahci_broken_suspend(pdev)) {
3046 		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3047 		dev_printk(KERN_WARNING, &pdev->dev,
3048 			   "BIOS update required for suspend/resume\n");
3049 	}
3050 
3051 	if (ahci_broken_online(pdev)) {
3052 		hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3053 		dev_info(&pdev->dev,
3054 			 "online status unreliable, applying workaround\n");
3055 	}
3056 
3057 	/* CAP.NP sometimes indicates the index of the last enabled
3058 	 * port and at other times that of the last possible port, so
3059 	 * determining the maximum port number requires looking at
3060 	 * both CAP.NP and port_map.
3061 	 */
3062 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3063 
3064 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3065 	if (!host)
3066 		return -ENOMEM;
3067 	host->iomap = pcim_iomap_table(pdev);
3068 	host->private_data = hpriv;
3069 
3070 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3071 		host->flags |= ATA_HOST_PARALLEL_SCAN;
3072 	else
3073 		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3074 
3075 	if (pi.flags & ATA_FLAG_EM)
3076 		ahci_reset_em(host);
3077 
3078 	for (i = 0; i < host->n_ports; i++) {
3079 		struct ata_port *ap = host->ports[i];
3080 
3081 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3082 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3083 				   0x100 + ap->port_no * 0x80, "port");
3084 
3085 		/* set initial link pm policy */
3086 		ap->pm_policy = NOT_AVAILABLE;
3087 
3088 		/* set enclosure management message type */
3089 		if (ap->flags & ATA_FLAG_EM)
3090 			ap->em_message_type = ahci_em_messages;
3091 
3092 
3093 		/* disabled/not-implemented port */
3094 		if (!(hpriv->port_map & (1 << i)))
3095 			ap->ops = &ata_dummy_port_ops;
3096 	}
3097 
3098 	/* apply workaround for ASUS P5W DH Deluxe mainboard */
3099 	ahci_p5wdh_workaround(host);
3100 
3101 	/* apply gtf filter quirk */
3102 	ahci_gtf_filter_workaround(host);
3103 
3104 	/* initialize adapter */
3105 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3106 	if (rc)
3107 		return rc;
3108 
3109 	rc = ahci_reset_controller(host);
3110 	if (rc)
3111 		return rc;
3112 
3113 	ahci_init_controller(host);
3114 	ahci_print_info(host);
3115 
3116 	pci_set_master(pdev);
3117 	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3118 				 &ahci_sht);
3119 }
3120 
3121 static int __init ahci_init(void)
3122 {
3123 	return pci_register_driver(&ahci_pci_driver);
3124 }
3125 
3126 static void __exit ahci_exit(void)
3127 {
3128 	pci_unregister_driver(&ahci_pci_driver);
3129 }
3130 
3131 
3132 MODULE_AUTHOR("Jeff Garzik");
3133 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3134 MODULE_LICENSE("GPL");
3135 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3136 MODULE_VERSION(DRV_VERSION);
3137 
3138 module_init(ahci_init);
3139 module_exit(ahci_exit);
3140