xref: /openbmc/linux/drivers/ata/ahci.c (revision b627b4ed)
1 /*
2  *  ahci.c - AHCI SATA support
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2004-2005 Red Hat, Inc.
9  *
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License as published by
13  *  the Free Software Foundation; either version 2, or (at your option)
14  *  any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  *  GNU General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24  *
25  *
26  * libata documentation is available via 'make {ps|pdf}docs',
27  * as Documentation/DocBook/libata.*
28  *
29  * AHCI hardware documentation:
30  * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31  * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 
49 #define DRV_NAME	"ahci"
50 #define DRV_VERSION	"3.0"
51 
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE              0x000f0000
54 
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT           0x0000000f
57 #define EM_MSG_LED_PMP_SLOT           0x0000ff00
58 #define EM_MSG_LED_VALUE              0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY     0x00070000
60 #define EM_MSG_LED_VALUE_OFF          0xfff80000
61 #define EM_MSG_LED_VALUE_ON           0x00010000
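/*
 * An LED message is a single 32-bit word: bits 3:0 select the HBA port,
 * bits 15:8 the PMP slot, and bits 31:16 carry the LED value (activity,
 * on and off states) per the EM_MSG_LED_VALUE_* masks above.
 */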
62 
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
65 
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
68 
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
71 
72 static int ahci_enable_alpm(struct ata_port *ap,
73 		enum link_pm policy);
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
77 			      size_t size);
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
79 					ssize_t size);
80 #define MAX_SLOTS 8
81 #define MAX_RETRY 15
82 
83 enum {
84 	AHCI_PCI_BAR		= 5,
85 	AHCI_MAX_PORTS		= 32,
86 	AHCI_MAX_SG		= 168, /* hardware max is 64K */
87 	AHCI_DMA_BOUNDARY	= 0xffffffff,
88 	AHCI_MAX_CMDS		= 32,
89 	AHCI_CMD_SZ		= 32,
90 	AHCI_CMD_SLOT_SZ	= AHCI_MAX_CMDS * AHCI_CMD_SZ,
91 	AHCI_RX_FIS_SZ		= 256,
92 	AHCI_CMD_TBL_CDB	= 0x40,
93 	AHCI_CMD_TBL_HDR_SZ	= 0x80,
94 	AHCI_CMD_TBL_SZ		= AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
95 	AHCI_CMD_TBL_AR_SZ	= AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
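	/*
	 * Per-port DMA memory: a 32-slot command list (32-byte headers),
	 * one command table per slot (0x80-byte header plus up to 168
	 * 16-byte PRD entries) and the 256-byte received-FIS area.
	 */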
96 	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
97 				  AHCI_RX_FIS_SZ,
98 	AHCI_IRQ_ON_SG		= (1 << 31),
99 	AHCI_CMD_ATAPI		= (1 << 5),
100 	AHCI_CMD_WRITE		= (1 << 6),
101 	AHCI_CMD_PREFETCH	= (1 << 7),
102 	AHCI_CMD_RESET		= (1 << 8),
103 	AHCI_CMD_CLR_BUSY	= (1 << 10),
104 
105 	RX_FIS_D2H_REG		= 0x40,	/* offset of D2H Register FIS data */
106 	RX_FIS_SDB		= 0x58, /* offset of SDB FIS data */
107 	RX_FIS_UNK		= 0x60, /* offset of Unknown FIS data */
108 
109 	board_ahci		= 0,
110 	board_ahci_vt8251	= 1,
111 	board_ahci_ign_iferr	= 2,
112 	board_ahci_sb600	= 3,
113 	board_ahci_mv		= 4,
114 	board_ahci_sb700	= 5, /* for SB700 and SB800 */
115 	board_ahci_mcp65	= 6,
116 	board_ahci_nopmp	= 7,
117 	board_ahci_yesncq	= 8,
118 
119 	/* global controller registers */
120 	HOST_CAP		= 0x00, /* host capabilities */
121 	HOST_CTL		= 0x04, /* global host control */
122 	HOST_IRQ_STAT		= 0x08, /* interrupt status */
123 	HOST_PORTS_IMPL		= 0x0c, /* bitmap of implemented ports */
124 	HOST_VERSION		= 0x10, /* AHCI spec. version compliancy */
125 	HOST_EM_LOC		= 0x1c, /* Enclosure Management location */
126 	HOST_EM_CTL		= 0x20, /* Enclosure Management Control */
127 
128 	/* HOST_CTL bits */
129 	HOST_RESET		= (1 << 0),  /* reset controller; self-clear */
130 	HOST_IRQ_EN		= (1 << 1),  /* global IRQ enable */
131 	HOST_AHCI_EN		= (1 << 31), /* AHCI enabled */
132 
133 	/* HOST_CAP bits */
134 	HOST_CAP_EMS		= (1 << 6),  /* Enclosure Management support */
135 	HOST_CAP_SSC		= (1 << 14), /* Slumber capable */
136 	HOST_CAP_PMP		= (1 << 17), /* Port Multiplier support */
137 	HOST_CAP_CLO		= (1 << 24), /* Command List Override support */
138 	HOST_CAP_ALPM		= (1 << 26), /* Aggressive Link PM support */
139 	HOST_CAP_SSS		= (1 << 27), /* Staggered Spin-up */
140 	HOST_CAP_SNTF		= (1 << 29), /* SNotification register */
141 	HOST_CAP_NCQ		= (1 << 30), /* Native Command Queueing */
142 	HOST_CAP_64		= (1 << 31), /* PCI DAC (64-bit DMA) support */
143 
144 	/* registers for each SATA port */
145 	PORT_LST_ADDR		= 0x00, /* command list DMA addr */
146 	PORT_LST_ADDR_HI	= 0x04, /* command list DMA addr hi */
147 	PORT_FIS_ADDR		= 0x08, /* FIS rx buf addr */
148 	PORT_FIS_ADDR_HI	= 0x0c, /* FIS rx buf addr hi */
149 	PORT_IRQ_STAT		= 0x10, /* interrupt status */
150 	PORT_IRQ_MASK		= 0x14, /* interrupt enable/disable mask */
151 	PORT_CMD		= 0x18, /* port command */
152 	PORT_TFDATA		= 0x20,	/* taskfile data */
153 	PORT_SIG		= 0x24,	/* device TF signature */
154 	PORT_CMD_ISSUE		= 0x38, /* command issue */
155 	PORT_SCR_STAT		= 0x28, /* SATA phy register: SStatus */
156 	PORT_SCR_CTL		= 0x2c, /* SATA phy register: SControl */
157 	PORT_SCR_ERR		= 0x30, /* SATA phy register: SError */
158 	PORT_SCR_ACT		= 0x34, /* SATA phy register: SActive */
159 	PORT_SCR_NTF		= 0x3c, /* SATA phy register: SNotification */
160 
161 	/* PORT_IRQ_{STAT,MASK} bits */
162 	PORT_IRQ_COLD_PRES	= (1 << 31), /* cold presence detect */
163 	PORT_IRQ_TF_ERR		= (1 << 30), /* task file error */
164 	PORT_IRQ_HBUS_ERR	= (1 << 29), /* host bus fatal error */
165 	PORT_IRQ_HBUS_DATA_ERR	= (1 << 28), /* host bus data error */
166 	PORT_IRQ_IF_ERR		= (1 << 27), /* interface fatal error */
167 	PORT_IRQ_IF_NONFATAL	= (1 << 26), /* interface non-fatal error */
168 	PORT_IRQ_OVERFLOW	= (1 << 24), /* xfer exhausted available S/G */
169 	PORT_IRQ_BAD_PMP	= (1 << 23), /* incorrect port multiplier */
170 
171 	PORT_IRQ_PHYRDY		= (1 << 22), /* PhyRdy changed */
172 	PORT_IRQ_DEV_ILCK	= (1 << 7), /* device interlock */
173 	PORT_IRQ_CONNECT	= (1 << 6), /* port connect change status */
174 	PORT_IRQ_SG_DONE	= (1 << 5), /* descriptor processed */
175 	PORT_IRQ_UNK_FIS	= (1 << 4), /* unknown FIS rx'd */
176 	PORT_IRQ_SDB_FIS	= (1 << 3), /* Set Device Bits FIS rx'd */
177 	PORT_IRQ_DMAS_FIS	= (1 << 2), /* DMA Setup FIS rx'd */
178 	PORT_IRQ_PIOS_FIS	= (1 << 1), /* PIO Setup FIS rx'd */
179 	PORT_IRQ_D2H_REG_FIS	= (1 << 0), /* D2H Register FIS rx'd */
180 
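	/*
	 * PORT_IRQ_FREEZE covers conditions that freeze the port for error
	 * handling, PORT_IRQ_ERROR adds per-command failures, and
	 * DEF_PORT_IRQ is the default interrupt enable mask.
	 */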
181 	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR |
182 				  PORT_IRQ_IF_ERR |
183 				  PORT_IRQ_CONNECT |
184 				  PORT_IRQ_PHYRDY |
185 				  PORT_IRQ_UNK_FIS |
186 				  PORT_IRQ_BAD_PMP,
187 	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE |
188 				  PORT_IRQ_TF_ERR |
189 				  PORT_IRQ_HBUS_DATA_ERR,
190 	DEF_PORT_IRQ		= PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
191 				  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
192 				  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
193 
194 	/* PORT_CMD bits */
195 	PORT_CMD_ASP		= (1 << 27), /* Aggressive Slumber/Partial */
196 	PORT_CMD_ALPE		= (1 << 26), /* Aggressive Link PM enable */
197 	PORT_CMD_ATAPI		= (1 << 24), /* Device is ATAPI */
198 	PORT_CMD_PMP		= (1 << 17), /* PMP attached */
199 	PORT_CMD_LIST_ON	= (1 << 15), /* cmd list DMA engine running */
200 	PORT_CMD_FIS_ON		= (1 << 14), /* FIS DMA engine running */
201 	PORT_CMD_FIS_RX		= (1 << 4), /* Enable FIS receive DMA engine */
202 	PORT_CMD_CLO		= (1 << 3), /* Command list override */
203 	PORT_CMD_POWER_ON	= (1 << 2), /* Power up device */
204 	PORT_CMD_SPIN_UP	= (1 << 1), /* Spin up device */
205 	PORT_CMD_START		= (1 << 0), /* Enable port DMA engine */
206 
207 	PORT_CMD_ICC_MASK	= (0xf << 28), /* i/f ICC state mask */
208 	PORT_CMD_ICC_ACTIVE	= (0x1 << 28), /* Put i/f in active state */
209 	PORT_CMD_ICC_PARTIAL	= (0x2 << 28), /* Put i/f in partial state */
210 	PORT_CMD_ICC_SLUMBER	= (0x6 << 28), /* Put i/f in slumber state */
211 
212 	/* hpriv->flags bits */
213 	AHCI_HFLAG_NO_NCQ		= (1 << 0),
214 	AHCI_HFLAG_IGN_IRQ_IF_ERR	= (1 << 1), /* ignore IRQ_IF_ERR */
215 	AHCI_HFLAG_IGN_SERR_INTERNAL	= (1 << 2), /* ignore SERR_INTERNAL */
216 	AHCI_HFLAG_32BIT_ONLY		= (1 << 3), /* force 32bit */
217 	AHCI_HFLAG_MV_PATA		= (1 << 4), /* PATA port */
218 	AHCI_HFLAG_NO_MSI		= (1 << 5), /* no PCI MSI */
219 	AHCI_HFLAG_NO_PMP		= (1 << 6), /* no PMP */
220 	AHCI_HFLAG_NO_HOTPLUG		= (1 << 7), /* ignore PxSERR.DIAG.N */
221 	AHCI_HFLAG_SECT255		= (1 << 8), /* max 255 sectors */
222 	AHCI_HFLAG_YES_NCQ		= (1 << 9), /* force NCQ cap on */
223 
224 	/* ap->flags bits */
225 
226 	AHCI_FLAG_COMMON		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
227 					  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
228 					  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
229 					  ATA_FLAG_IPM,
230 
231 	ICH_MAP				= 0x90, /* ICH MAP register */
232 
233 	/* em_ctl bits */
234 	EM_CTL_RST			= (1 << 9), /* Reset */
235 	EM_CTL_TM			= (1 << 8), /* Transmit Message */
236 	EM_CTL_ALHD			= (1 << 26), /* Activity LED */
237 };
238 
239 struct ahci_cmd_hdr {
240 	__le32			opts;
241 	__le32			status;
242 	__le32			tbl_addr;
243 	__le32			tbl_addr_hi;
244 	__le32			reserved[4];
245 };
246 
247 struct ahci_sg {
248 	__le32			addr;
249 	__le32			addr_hi;
250 	__le32			reserved;
251 	__le32			flags_size;
252 };
253 
254 struct ahci_em_priv {
255 	enum sw_activity blink_policy;
256 	struct timer_list timer;
257 	unsigned long saved_activity;
258 	unsigned long activity;
259 	unsigned long led_state;
260 };
261 
262 struct ahci_host_priv {
263 	unsigned int		flags;		/* AHCI_HFLAG_* */
264 	u32			cap;		/* cap to use */
265 	u32			port_map;	/* port map to use */
266 	u32			saved_cap;	/* saved initial cap */
267 	u32			saved_port_map;	/* saved initial port_map */
268 	u32 			em_loc; /* enclosure management location */
269 };
270 
271 struct ahci_port_priv {
272 	struct ata_link		*active_link;
273 	struct ahci_cmd_hdr	*cmd_slot;
274 	dma_addr_t		cmd_slot_dma;
275 	void			*cmd_tbl;
276 	dma_addr_t		cmd_tbl_dma;
277 	void			*rx_fis;
278 	dma_addr_t		rx_fis_dma;
279 	/* for NCQ spurious interrupt analysis */
280 	unsigned int		ncq_saw_d2h:1;
281 	unsigned int		ncq_saw_dmas:1;
282 	unsigned int		ncq_saw_sdb:1;
283 	u32 			intr_mask;	/* interrupts to enable */
284 	struct ahci_em_priv	em_priv[MAX_SLOTS];/* enclosure management info
285 					 	 * per PM slot */
286 };
287 
288 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
289 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
290 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
291 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
292 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
293 static int ahci_port_start(struct ata_port *ap);
294 static void ahci_port_stop(struct ata_port *ap);
295 static void ahci_qc_prep(struct ata_queued_cmd *qc);
296 static void ahci_freeze(struct ata_port *ap);
297 static void ahci_thaw(struct ata_port *ap);
298 static void ahci_pmp_attach(struct ata_port *ap);
299 static void ahci_pmp_detach(struct ata_port *ap);
300 static int ahci_softreset(struct ata_link *link, unsigned int *class,
301 			  unsigned long deadline);
302 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
303 			  unsigned long deadline);
304 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
305 			  unsigned long deadline);
306 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
307 				 unsigned long deadline);
308 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
309 				unsigned long deadline);
310 static void ahci_postreset(struct ata_link *link, unsigned int *class);
311 static void ahci_error_handler(struct ata_port *ap);
312 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
313 static int ahci_port_resume(struct ata_port *ap);
314 static void ahci_dev_config(struct ata_device *dev);
315 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
316 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
317 			       u32 opts);
318 #ifdef CONFIG_PM
319 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
320 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
321 static int ahci_pci_device_resume(struct pci_dev *pdev);
322 #endif
323 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
324 static ssize_t ahci_activity_store(struct ata_device *dev,
325 				   enum sw_activity val);
326 static void ahci_init_sw_activity(struct ata_link *link);
327 
328 static struct device_attribute *ahci_shost_attrs[] = {
329 	&dev_attr_link_power_management_policy,
330 	&dev_attr_em_message_type,
331 	&dev_attr_em_message,
332 	NULL
333 };
334 
335 static struct device_attribute *ahci_sdev_attrs[] = {
336 	&dev_attr_sw_activity,
337 	&dev_attr_unload_heads,
338 	NULL
339 };
340 
341 static struct scsi_host_template ahci_sht = {
342 	ATA_NCQ_SHT(DRV_NAME),
343 	.can_queue		= AHCI_MAX_CMDS - 1,
344 	.sg_tablesize		= AHCI_MAX_SG,
345 	.dma_boundary		= AHCI_DMA_BOUNDARY,
346 	.shost_attrs		= ahci_shost_attrs,
347 	.sdev_attrs		= ahci_sdev_attrs,
348 };
349 
350 static struct ata_port_operations ahci_ops = {
351 	.inherits		= &sata_pmp_port_ops,
352 
353 	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
354 	.qc_prep		= ahci_qc_prep,
355 	.qc_issue		= ahci_qc_issue,
356 	.qc_fill_rtf		= ahci_qc_fill_rtf,
357 
358 	.freeze			= ahci_freeze,
359 	.thaw			= ahci_thaw,
360 	.softreset		= ahci_softreset,
361 	.hardreset		= ahci_hardreset,
362 	.postreset		= ahci_postreset,
363 	.pmp_softreset		= ahci_softreset,
364 	.error_handler		= ahci_error_handler,
365 	.post_internal_cmd	= ahci_post_internal_cmd,
366 	.dev_config		= ahci_dev_config,
367 
368 	.scr_read		= ahci_scr_read,
369 	.scr_write		= ahci_scr_write,
370 	.pmp_attach		= ahci_pmp_attach,
371 	.pmp_detach		= ahci_pmp_detach,
372 
373 	.enable_pm		= ahci_enable_alpm,
374 	.disable_pm		= ahci_disable_alpm,
375 	.em_show		= ahci_led_show,
376 	.em_store		= ahci_led_store,
377 	.sw_activity_show	= ahci_activity_show,
378 	.sw_activity_store	= ahci_activity_store,
379 #ifdef CONFIG_PM
380 	.port_suspend		= ahci_port_suspend,
381 	.port_resume		= ahci_port_resume,
382 #endif
383 	.port_start		= ahci_port_start,
384 	.port_stop		= ahci_port_stop,
385 };
386 
387 static struct ata_port_operations ahci_vt8251_ops = {
388 	.inherits		= &ahci_ops,
389 	.hardreset		= ahci_vt8251_hardreset,
390 };
391 
392 static struct ata_port_operations ahci_p5wdh_ops = {
393 	.inherits		= &ahci_ops,
394 	.hardreset		= ahci_p5wdh_hardreset,
395 };
396 
397 static struct ata_port_operations ahci_sb600_ops = {
398 	.inherits		= &ahci_ops,
399 	.softreset		= ahci_sb600_softreset,
400 	.pmp_softreset		= ahci_sb600_softreset,
401 };
402 
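/*
 * Board entries below hand their AHCI_HFLAG_* bits to the probe code
 * through ata_port_info.private_data.
 */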
403 #define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)
404 
405 static const struct ata_port_info ahci_port_info[] = {
406 	/* board_ahci */
407 	{
408 		.flags		= AHCI_FLAG_COMMON,
409 		.pio_mask	= ATA_PIO4,
410 		.udma_mask	= ATA_UDMA6,
411 		.port_ops	= &ahci_ops,
412 	},
413 	/* board_ahci_vt8251 */
414 	{
415 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
416 		.flags		= AHCI_FLAG_COMMON,
417 		.pio_mask	= ATA_PIO4,
418 		.udma_mask	= ATA_UDMA6,
419 		.port_ops	= &ahci_vt8251_ops,
420 	},
421 	/* board_ahci_ign_iferr */
422 	{
423 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_IRQ_IF_ERR),
424 		.flags		= AHCI_FLAG_COMMON,
425 		.pio_mask	= ATA_PIO4,
426 		.udma_mask	= ATA_UDMA6,
427 		.port_ops	= &ahci_ops,
428 	},
429 	/* board_ahci_sb600 */
430 	{
431 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
432 				 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
433 				 AHCI_HFLAG_SECT255),
434 		.flags		= AHCI_FLAG_COMMON,
435 		.pio_mask	= ATA_PIO4,
436 		.udma_mask	= ATA_UDMA6,
437 		.port_ops	= &ahci_sb600_ops,
438 	},
439 	/* board_ahci_mv */
440 	{
441 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
442 				 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
443 		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
444 				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
445 		.pio_mask	= ATA_PIO4,
446 		.udma_mask	= ATA_UDMA6,
447 		.port_ops	= &ahci_ops,
448 	},
449 	/* board_ahci_sb700, for SB700 and SB800 */
450 	{
451 		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL),
452 		.flags		= AHCI_FLAG_COMMON,
453 		.pio_mask	= ATA_PIO4,
454 		.udma_mask	= ATA_UDMA6,
455 		.port_ops	= &ahci_sb600_ops,
456 	},
457 	/* board_ahci_mcp65 */
458 	{
459 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
460 		.flags		= AHCI_FLAG_COMMON,
461 		.pio_mask	= ATA_PIO4,
462 		.udma_mask	= ATA_UDMA6,
463 		.port_ops	= &ahci_ops,
464 	},
465 	/* board_ahci_nopmp */
466 	{
467 		AHCI_HFLAGS	(AHCI_HFLAG_NO_PMP),
468 		.flags		= AHCI_FLAG_COMMON,
469 		.pio_mask	= ATA_PIO4,
470 		.udma_mask	= ATA_UDMA6,
471 		.port_ops	= &ahci_ops,
472 	},
473 	/* board_ahci_yesncq */
474 	{
475 		AHCI_HFLAGS	(AHCI_HFLAG_YES_NCQ),
476 		.flags		= AHCI_FLAG_COMMON,
477 		.pio_mask	= ATA_PIO4,
478 		.udma_mask	= ATA_UDMA6,
479 		.port_ops	= &ahci_ops,
480 	},
481 };
482 
483 static const struct pci_device_id ahci_pci_tbl[] = {
484 	/* Intel */
485 	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
486 	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
487 	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
488 	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
489 	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
490 	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
491 	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
492 	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
493 	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
494 	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
495 	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
496 	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
497 	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
498 	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
499 	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
500 	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
501 	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
502 	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
503 	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
504 	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
505 	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
506 	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
507 	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
508 	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
509 	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
510 	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
511 	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
512 	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
513 	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
514 	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
515 	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
516 	{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
517 	{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
518 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
519 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
520 
521 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
522 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
523 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
524 
525 	/* ATI */
526 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
527 	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
528 	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
529 	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
530 	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
531 	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
532 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
533 
534 	/* VIA */
535 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
536 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
537 
538 	/* NVIDIA */
539 	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },	/* MCP65 */
540 	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },	/* MCP65 */
541 	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },	/* MCP65 */
542 	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },	/* MCP65 */
543 	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },	/* MCP65 */
544 	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },	/* MCP65 */
545 	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },	/* MCP65 */
546 	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },	/* MCP65 */
547 	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq },	/* MCP67 */
548 	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq },	/* MCP67 */
549 	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq },	/* MCP67 */
550 	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq },	/* MCP67 */
551 	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq },	/* MCP67 */
552 	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq },	/* MCP67 */
553 	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq },	/* MCP67 */
554 	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq },	/* MCP67 */
555 	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq },	/* MCP67 */
556 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq },	/* MCP67 */
557 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq },	/* MCP67 */
558 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq },	/* MCP67 */
559 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq },	/* MCP73 */
560 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq },	/* MCP73 */
561 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq },	/* MCP73 */
562 	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq },	/* MCP73 */
563 	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq },	/* MCP73 */
564 	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq },	/* MCP73 */
565 	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq },	/* MCP73 */
566 	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq },	/* MCP73 */
567 	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq },	/* MCP73 */
568 	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq },	/* MCP73 */
569 	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq },	/* MCP73 */
570 	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq },	/* MCP73 */
571 	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci },		/* MCP77 */
572 	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci },		/* MCP77 */
573 	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci },		/* MCP77 */
574 	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci },		/* MCP77 */
575 	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci },		/* MCP77 */
576 	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci },		/* MCP77 */
577 	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci },		/* MCP77 */
578 	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci },		/* MCP77 */
579 	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci },		/* MCP77 */
580 	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci },		/* MCP77 */
581 	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci },		/* MCP77 */
582 	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci },		/* MCP77 */
583 	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci },		/* MCP79 */
584 	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci },		/* MCP79 */
585 	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci },		/* MCP79 */
586 	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci },		/* MCP79 */
587 	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci },		/* MCP79 */
588 	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci },		/* MCP79 */
589 	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci },		/* MCP79 */
590 	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci },		/* MCP79 */
591 	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci },		/* MCP79 */
592 	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci },		/* MCP79 */
593 	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci },		/* MCP79 */
594 	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci },		/* MCP79 */
595 	{ PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci },		/* MCP89 */
596 	{ PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci },		/* MCP89 */
597 	{ PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci },		/* MCP89 */
598 	{ PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci },		/* MCP89 */
599 	{ PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci },		/* MCP89 */
600 	{ PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci },		/* MCP89 */
601 	{ PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci },		/* MCP89 */
602 	{ PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci },		/* MCP89 */
603 	{ PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci },		/* MCP89 */
604 	{ PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci },		/* MCP89 */
605 	{ PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci },		/* MCP89 */
606 	{ PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci },		/* MCP89 */
607 
608 	/* SiS */
609 	{ PCI_VDEVICE(SI, 0x1184), board_ahci },		/* SiS 966 */
610 	{ PCI_VDEVICE(SI, 0x1185), board_ahci },		/* SiS 968 */
611 	{ PCI_VDEVICE(SI, 0x0186), board_ahci },		/* SiS 968 */
612 
613 	/* Marvell */
614 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
615 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
616 
617 	/* Promise */
618 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
619 
620 	/* Generic, PCI class code for AHCI */
621 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
622 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
623 
624 	{ }	/* terminate list */
625 };
626 
627 
628 static struct pci_driver ahci_pci_driver = {
629 	.name			= DRV_NAME,
630 	.id_table		= ahci_pci_tbl,
631 	.probe			= ahci_init_one,
632 	.remove			= ata_pci_remove_one,
633 #ifdef CONFIG_PM
634 	.suspend		= ahci_pci_device_suspend,
635 	.resume			= ahci_pci_device_resume,
636 #endif
637 };
638 
639 static int ahci_em_messages = 1;
640 module_param(ahci_em_messages, int, 0444);
641 /* add other LED protocol types when they become supported */
642 MODULE_PARM_DESC(ahci_em_messages,
643 	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
644 
645 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
646 static int marvell_enable;
647 #else
648 static int marvell_enable = 1;
649 #endif
650 module_param(marvell_enable, int, 0644);
651 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
652 
653 
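/* CAP.NP (bits 4:0) holds the number of implemented ports minus one */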
654 static inline int ahci_nr_ports(u32 cap)
655 {
656 	return (cap & 0x1f) + 1;
657 }
658 
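/*
 * Per-port registers start at ABAR offset 0x100; each port owns a
 * 0x80-byte block.
 */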
659 static inline void __iomem *__ahci_port_base(struct ata_host *host,
660 					     unsigned int port_no)
661 {
662 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
663 
664 	return mmio + 0x100 + (port_no * 0x80);
665 }
666 
667 static inline void __iomem *ahci_port_base(struct ata_port *ap)
668 {
669 	return __ahci_port_base(ap->host, ap->port_no);
670 }
671 
672 static void ahci_enable_ahci(void __iomem *mmio)
673 {
674 	int i;
675 	u32 tmp;
676 
677 	/* turn on AHCI_EN */
678 	tmp = readl(mmio + HOST_CTL);
679 	if (tmp & HOST_AHCI_EN)
680 		return;
681 
682 	/* Some controllers need AHCI_EN to be written multiple times.
683 	 * Try a few times before giving up.
684 	 */
685 	for (i = 0; i < 5; i++) {
686 		tmp |= HOST_AHCI_EN;
687 		writel(tmp, mmio + HOST_CTL);
688 		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
689 		if (tmp & HOST_AHCI_EN)
690 			return;
691 		msleep(10);
692 	}
693 
694 	WARN_ON(1);
695 }
696 
697 /**
698  *	ahci_save_initial_config - Save and fixup initial config values
699  *	@pdev: target PCI device
700  *	@hpriv: host private area to store config values
701  *
702  *	Some registers containing configuration info might be setup by
703  *	BIOS and might be cleared on reset.  This function saves the
704  *	initial values of those registers into @hpriv such that they
705  *	can be restored after controller reset.
706  *
707  *	If inconsistent, config values are fixed up by this function.
708  *
709  *	LOCKING:
710  *	None.
711  */
712 static void ahci_save_initial_config(struct pci_dev *pdev,
713 				     struct ahci_host_priv *hpriv)
714 {
715 	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
716 	u32 cap, port_map;
717 	int i;
718 	int mv;
719 
720 	/* make sure AHCI mode is enabled before accessing CAP */
721 	ahci_enable_ahci(mmio);
722 
723 	/* Values prefixed with saved_ are written back to host after
724 	 * reset.  Values without the prefix are used during driver operation.
725 	 */
726 	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
727 	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
728 
729 	/* some chips have errata preventing 64bit use */
730 	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
731 		dev_printk(KERN_INFO, &pdev->dev,
732 			   "controller can't do 64bit DMA, forcing 32bit\n");
733 		cap &= ~HOST_CAP_64;
734 	}
735 
736 	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
737 		dev_printk(KERN_INFO, &pdev->dev,
738 			   "controller can't do NCQ, turning off CAP_NCQ\n");
739 		cap &= ~HOST_CAP_NCQ;
740 	}
741 
742 	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
743 		dev_printk(KERN_INFO, &pdev->dev,
744 			   "controller can do NCQ, turning on CAP_NCQ\n");
745 		cap |= HOST_CAP_NCQ;
746 	}
747 
748 	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
749 		dev_printk(KERN_INFO, &pdev->dev,
750 			   "controller can't do PMP, turning off CAP_PMP\n");
751 		cap &= ~HOST_CAP_PMP;
752 	}
753 
754 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
755 	    port_map != 1) {
756 		dev_printk(KERN_INFO, &pdev->dev,
757 			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
758 			   port_map, 1);
759 		port_map = 1;
760 	}
761 
762 	/*
763 	 * Temporary Marvell 6145 hack: PATA port presence
764 	 * is asserted through the standard AHCI port
765 	 * presence register, as bit 4 (counting from 0)
766 	 */
767 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
768 		if (pdev->device == 0x6121)
769 			mv = 0x3;
770 		else
771 			mv = 0xf;
772 		dev_printk(KERN_ERR, &pdev->dev,
773 			   "MV_AHCI HACK: port_map %x -> %x\n",
774 			   port_map,
775 			   port_map & mv);
776 		dev_printk(KERN_ERR, &pdev->dev,
777 			  "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
778 
779 		port_map &= mv;
780 	}
781 
782 	/* cross check port_map and cap.n_ports */
783 	if (port_map) {
784 		int map_ports = 0;
785 
786 		for (i = 0; i < AHCI_MAX_PORTS; i++)
787 			if (port_map & (1 << i))
788 				map_ports++;
789 
790 		/* If PI has more ports than n_ports, whine, clear
791 		 * port_map and let it be generated from n_ports.
792 		 */
793 		if (map_ports > ahci_nr_ports(cap)) {
794 			dev_printk(KERN_WARNING, &pdev->dev,
795 				   "implemented port map (0x%x) contains more "
796 				   "ports than nr_ports (%u), using nr_ports\n",
797 				   port_map, ahci_nr_ports(cap));
798 			port_map = 0;
799 		}
800 	}
801 
802 	/* fabricate port_map from cap.nr_ports */
803 	if (!port_map) {
804 		port_map = (1 << ahci_nr_ports(cap)) - 1;
805 		dev_printk(KERN_WARNING, &pdev->dev,
806 			   "forcing PORTS_IMPL to 0x%x\n", port_map);
807 
808 		/* write the fixed up value to the PI register */
809 		hpriv->saved_port_map = port_map;
810 	}
811 
812 	/* record values to use during operation */
813 	hpriv->cap = cap;
814 	hpriv->port_map = port_map;
815 }
816 
817 /**
818  *	ahci_restore_initial_config - Restore initial config
819  *	@host: target ATA host
820  *
821  *	Restore initial config stored by ahci_save_initial_config().
822  *
823  *	LOCKING:
824  *	None.
825  */
826 static void ahci_restore_initial_config(struct ata_host *host)
827 {
828 	struct ahci_host_priv *hpriv = host->private_data;
829 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
830 
831 	writel(hpriv->saved_cap, mmio + HOST_CAP);
832 	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
833 	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
834 }
835 
836 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
837 {
838 	static const int offset[] = {
839 		[SCR_STATUS]		= PORT_SCR_STAT,
840 		[SCR_CONTROL]		= PORT_SCR_CTL,
841 		[SCR_ERROR]		= PORT_SCR_ERR,
842 		[SCR_ACTIVE]		= PORT_SCR_ACT,
843 		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
844 	};
845 	struct ahci_host_priv *hpriv = ap->host->private_data;
846 
847 	if (sc_reg < ARRAY_SIZE(offset) &&
848 	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
849 		return offset[sc_reg];
850 	return 0;
851 }
852 
853 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
854 {
855 	void __iomem *port_mmio = ahci_port_base(link->ap);
856 	int offset = ahci_scr_offset(link->ap, sc_reg);
857 
858 	if (offset) {
859 		*val = readl(port_mmio + offset);
860 		return 0;
861 	}
862 	return -EINVAL;
863 }
864 
865 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
866 {
867 	void __iomem *port_mmio = ahci_port_base(link->ap);
868 	int offset = ahci_scr_offset(link->ap, sc_reg);
869 
870 	if (offset) {
871 		writel(val, port_mmio + offset);
872 		return 0;
873 	}
874 	return -EINVAL;
875 }
876 
877 static void ahci_start_engine(struct ata_port *ap)
878 {
879 	void __iomem *port_mmio = ahci_port_base(ap);
880 	u32 tmp;
881 
882 	/* start DMA */
883 	tmp = readl(port_mmio + PORT_CMD);
884 	tmp |= PORT_CMD_START;
885 	writel(tmp, port_mmio + PORT_CMD);
886 	readl(port_mmio + PORT_CMD); /* flush */
887 }
888 
889 static int ahci_stop_engine(struct ata_port *ap)
890 {
891 	void __iomem *port_mmio = ahci_port_base(ap);
892 	u32 tmp;
893 
894 	tmp = readl(port_mmio + PORT_CMD);
895 
896 	/* check if the HBA is idle */
897 	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
898 		return 0;
899 
900 	/* setting HBA to idle */
901 	tmp &= ~PORT_CMD_START;
902 	writel(tmp, port_mmio + PORT_CMD);
903 
904 	/* wait for engine to stop. This could be as long as 500 msec */
905 	tmp = ata_wait_register(port_mmio + PORT_CMD,
906 				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
907 	if (tmp & PORT_CMD_LIST_ON)
908 		return -EIO;
909 
910 	return 0;
911 }
912 
913 static void ahci_start_fis_rx(struct ata_port *ap)
914 {
915 	void __iomem *port_mmio = ahci_port_base(ap);
916 	struct ahci_host_priv *hpriv = ap->host->private_data;
917 	struct ahci_port_priv *pp = ap->private_data;
918 	u32 tmp;
919 
920 	/* set FIS registers */
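	/*
	 * The double 16-bit shift yields the upper 32 bits of the DMA
	 * address without a shift-count warning when dma_addr_t is only
	 * 32 bits wide.
	 */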
921 	if (hpriv->cap & HOST_CAP_64)
922 		writel((pp->cmd_slot_dma >> 16) >> 16,
923 		       port_mmio + PORT_LST_ADDR_HI);
924 	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
925 
926 	if (hpriv->cap & HOST_CAP_64)
927 		writel((pp->rx_fis_dma >> 16) >> 16,
928 		       port_mmio + PORT_FIS_ADDR_HI);
929 	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
930 
931 	/* enable FIS reception */
932 	tmp = readl(port_mmio + PORT_CMD);
933 	tmp |= PORT_CMD_FIS_RX;
934 	writel(tmp, port_mmio + PORT_CMD);
935 
936 	/* flush */
937 	readl(port_mmio + PORT_CMD);
938 }
939 
940 static int ahci_stop_fis_rx(struct ata_port *ap)
941 {
942 	void __iomem *port_mmio = ahci_port_base(ap);
943 	u32 tmp;
944 
945 	/* disable FIS reception */
946 	tmp = readl(port_mmio + PORT_CMD);
947 	tmp &= ~PORT_CMD_FIS_RX;
948 	writel(tmp, port_mmio + PORT_CMD);
949 
950 	/* wait for completion, spec says 500ms, give it 1000 */
951 	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
952 				PORT_CMD_FIS_ON, 10, 1000);
953 	if (tmp & PORT_CMD_FIS_ON)
954 		return -EBUSY;
955 
956 	return 0;
957 }
958 
959 static void ahci_power_up(struct ata_port *ap)
960 {
961 	struct ahci_host_priv *hpriv = ap->host->private_data;
962 	void __iomem *port_mmio = ahci_port_base(ap);
963 	u32 cmd;
964 
965 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
966 
967 	/* spin up device */
968 	if (hpriv->cap & HOST_CAP_SSS) {
969 		cmd |= PORT_CMD_SPIN_UP;
970 		writel(cmd, port_mmio + PORT_CMD);
971 	}
972 
973 	/* wake up link */
974 	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
975 }
976 
977 static void ahci_disable_alpm(struct ata_port *ap)
978 {
979 	struct ahci_host_priv *hpriv = ap->host->private_data;
980 	void __iomem *port_mmio = ahci_port_base(ap);
981 	u32 cmd;
982 	struct ahci_port_priv *pp = ap->private_data;
983 
984 	/* IPM bits should be disabled by libata-core */
985 	/* get the existing command bits */
986 	cmd = readl(port_mmio + PORT_CMD);
987 
988 	/* disable ALPM and ASP */
989 	cmd &= ~PORT_CMD_ASP;
990 	cmd &= ~PORT_CMD_ALPE;
991 
992 	/* force the interface back to active */
993 	cmd |= PORT_CMD_ICC_ACTIVE;
994 
995 	/* write out new cmd value */
996 	writel(cmd, port_mmio + PORT_CMD);
997 	cmd = readl(port_mmio + PORT_CMD);
998 
999 	/* wait 10ms to be sure we've come out of any low power state */
1000 	msleep(10);
1001 
1002 	/* clear out any PhyRdy stuff from interrupt status */
1003 	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1004 
1005 	/* go ahead and clean out PhyRdy Change from Serror too */
1006 	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1007 
1008 	/*
1009  	 * Clear the flag so that PhyRdy state changes are no longer
1010  	 * ignored
1011  	 */
1012 	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1013 
1014 	/*
1015  	 * Enable interrupts on Phy Ready.
1016  	 */
1017 	pp->intr_mask |= PORT_IRQ_PHYRDY;
1018 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1019 
1020 	/*
1021  	 * don't change the link pm policy - we can be called
1022  	 * just to turn off link pm temporarily
1023  	 */
1024 }
1025 
1026 static int ahci_enable_alpm(struct ata_port *ap,
1027 	enum link_pm policy)
1028 {
1029 	struct ahci_host_priv *hpriv = ap->host->private_data;
1030 	void __iomem *port_mmio = ahci_port_base(ap);
1031 	u32 cmd;
1032 	struct ahci_port_priv *pp = ap->private_data;
1033 	u32 asp;
1034 
1035 	/* Make sure the host is capable of link power management */
1036 	if (!(hpriv->cap & HOST_CAP_ALPM))
1037 		return -EINVAL;
1038 
1039 	switch (policy) {
1040 	case MAX_PERFORMANCE:
1041 	case NOT_AVAILABLE:
1042 		/*
1043  		 * if we came here with NOT_AVAILABLE,
1044  		 * it just means this is the first time we
1045  		 * have tried to enable - default to max performance,
1046  		 * and let the user go to lower power modes on request.
1047  		 */
1048 		ahci_disable_alpm(ap);
1049 		return 0;
1050 	case MIN_POWER:
1051 		/* configure HBA to enter SLUMBER */
1052 		asp = PORT_CMD_ASP;
1053 		break;
1054 	case MEDIUM_POWER:
1055 		/* configure HBA to enter PARTIAL */
1056 		asp = 0;
1057 		break;
1058 	default:
1059 		return -EINVAL;
1060 	}
1061 
1062 	/*
1063  	 * Disable interrupts on Phy Ready. This keeps us from
1064  	 * getting woken up due to spurious phy ready interrupts
1065 	 * TBD - Hot plug should be done via polling now, is
1066 	 * that even supported?
1067  	 */
1068 	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1069 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1070 
1071 	/*
1072  	 * Set a flag to indicate that we should ignore all PhyRdy
1073  	 * state changes since these can happen now whenever we
1074  	 * change link state
1075  	 */
1076 	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1077 
1078 	/* get the existing command bits */
1079 	cmd = readl(port_mmio + PORT_CMD);
1080 
1081 	/*
1082  	 * Set ASP based on Policy
1083  	 */
1084 	cmd |= asp;
1085 
1086 	/*
1087  	 * Setting this bit will instruct the HBA to aggressively
1088  	 * enter a lower power link state when it's appropriate and
1089  	 * based on the value set above for ASP
1090  	 */
1091 	cmd |= PORT_CMD_ALPE;
1092 
1093 	/* write out new cmd value */
1094 	writel(cmd, port_mmio + PORT_CMD);
1095 	cmd = readl(port_mmio + PORT_CMD);
1096 
1097 	/* IPM bits should be set by libata-core */
1098 	return 0;
1099 }
1100 
1101 #ifdef CONFIG_PM
1102 static void ahci_power_down(struct ata_port *ap)
1103 {
1104 	struct ahci_host_priv *hpriv = ap->host->private_data;
1105 	void __iomem *port_mmio = ahci_port_base(ap);
1106 	u32 cmd, scontrol;
1107 
1108 	if (!(hpriv->cap & HOST_CAP_SSS))
1109 		return;
1110 
1111 	/* put device into listen mode, first set PxSCTL.DET to 0 */
1112 	scontrol = readl(port_mmio + PORT_SCR_CTL);
1113 	scontrol &= ~0xf;
1114 	writel(scontrol, port_mmio + PORT_SCR_CTL);
1115 
1116 	/* then set PxCMD.SUD to 0 */
1117 	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1118 	cmd &= ~PORT_CMD_SPIN_UP;
1119 	writel(cmd, port_mmio + PORT_CMD);
1120 }
1121 #endif
1122 
1123 static void ahci_start_port(struct ata_port *ap)
1124 {
1125 	struct ahci_port_priv *pp = ap->private_data;
1126 	struct ata_link *link;
1127 	struct ahci_em_priv *emp;
1128 	ssize_t rc;
1129 	int i;
1130 
1131 	/* enable FIS reception */
1132 	ahci_start_fis_rx(ap);
1133 
1134 	/* enable DMA */
1135 	ahci_start_engine(ap);
1136 
1137 	/* turn on LEDs */
1138 	if (ap->flags & ATA_FLAG_EM) {
1139 		ata_for_each_link(link, ap, EDGE) {
1140 			emp = &pp->em_priv[link->pmp];
1141 
1142 			/* EM Transmit bit may be busy during init */
1143 			for (i = 0; i < MAX_RETRY; i++) {
1144 				rc = ahci_transmit_led_message(ap,
1145 							       emp->led_state,
1146 							       4);
1147 				if (rc == -EBUSY)
1148 					udelay(100);
1149 				else
1150 					break;
1151 			}
1152 		}
1153 	}
1154 
1155 	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1156 		ata_for_each_link(link, ap, EDGE)
1157 			ahci_init_sw_activity(link);
1158 
1159 }
1160 
1161 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1162 {
1163 	int rc;
1164 
1165 	/* disable DMA */
1166 	rc = ahci_stop_engine(ap);
1167 	if (rc) {
1168 		*emsg = "failed to stop engine";
1169 		return rc;
1170 	}
1171 
1172 	/* disable FIS reception */
1173 	rc = ahci_stop_fis_rx(ap);
1174 	if (rc) {
1175 		*emsg = "failed to stop FIS RX";
1176 		return rc;
1177 	}
1178 
1179 	return 0;
1180 }
1181 
1182 static int ahci_reset_controller(struct ata_host *host)
1183 {
1184 	struct pci_dev *pdev = to_pci_dev(host->dev);
1185 	struct ahci_host_priv *hpriv = host->private_data;
1186 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1187 	u32 tmp;
1188 
1189 	/* we must be in AHCI mode before using anything
1190 	 * AHCI-specific, such as HOST_RESET.
1191 	 */
1192 	ahci_enable_ahci(mmio);
1193 
1194 	/* global controller reset */
1195 	if (!ahci_skip_host_reset) {
1196 		tmp = readl(mmio + HOST_CTL);
1197 		if ((tmp & HOST_RESET) == 0) {
1198 			writel(tmp | HOST_RESET, mmio + HOST_CTL);
1199 			readl(mmio + HOST_CTL); /* flush */
1200 		}
1201 
1202 		/*
1203 		 * to perform host reset, OS should set HOST_RESET
1204 		 * and poll until the bit reads back as "0".
1205 		 * reset must complete within 1 second, or
1206 		 * the hardware should be considered fried.
1207 		 */
1208 		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1209 					HOST_RESET, 10, 1000);
1210 
1211 		if (tmp & HOST_RESET) {
1212 			dev_printk(KERN_ERR, host->dev,
1213 				   "controller reset failed (0x%x)\n", tmp);
1214 			return -EIO;
1215 		}
1216 
1217 		/* turn on AHCI mode */
1218 		ahci_enable_ahci(mmio);
1219 
1220 		/* Some registers might be cleared on reset.  Restore
1221 		 * initial values.
1222 		 */
1223 		ahci_restore_initial_config(host);
1224 	} else
1225 		dev_printk(KERN_INFO, host->dev,
1226 			   "skipping global host reset\n");
1227 
1228 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1229 		u16 tmp16;
1230 
1231 		/* configure PCS */
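		/*
		 * PCS (Port Control and Status) at config offset 0x92 gates
		 * the SATA ports on Intel AHCI; make sure every implemented
		 * port has its enable bit set.
		 */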
1232 		pci_read_config_word(pdev, 0x92, &tmp16);
1233 		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1234 			tmp16 |= hpriv->port_map;
1235 			pci_write_config_word(pdev, 0x92, tmp16);
1236 		}
1237 	}
1238 
1239 	return 0;
1240 }
1241 
1242 static void ahci_sw_activity(struct ata_link *link)
1243 {
1244 	struct ata_port *ap = link->ap;
1245 	struct ahci_port_priv *pp = ap->private_data;
1246 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1247 
1248 	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1249 		return;
1250 
1251 	emp->activity++;
1252 	if (!timer_pending(&emp->timer))
1253 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1254 }
1255 
1256 static void ahci_sw_activity_blink(unsigned long arg)
1257 {
1258 	struct ata_link *link = (struct ata_link *)arg;
1259 	struct ata_port *ap = link->ap;
1260 	struct ahci_port_priv *pp = ap->private_data;
1261 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1262 	unsigned long led_message = emp->led_state;
1263 	u32 activity_led_state;
1264 	unsigned long flags;
1265 
1266 	led_message &= EM_MSG_LED_VALUE;
1267 	led_message |= ap->port_no | (link->pmp << 8);
1268 
1269 	/* check to see if we've had activity.  If so,
1270 	 * toggle state of LED and reset timer.  If not,
1271 	 * turn LED to desired idle state.
1272 	 */
1273 	spin_lock_irqsave(ap->lock, flags);
1274 	if (emp->saved_activity != emp->activity) {
1275 		emp->saved_activity = emp->activity;
1276 		/* get the current LED state */
1277 		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1278 
1279 		if (activity_led_state)
1280 			activity_led_state = 0;
1281 		else
1282 			activity_led_state = 1;
1283 
1284 		/* clear old state */
1285 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1286 
1287 		/* toggle state */
1288 		led_message |= (activity_led_state << 16);
1289 		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1290 	} else {
1291 		/* switch to idle */
1292 		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1293 		if (emp->blink_policy == BLINK_OFF)
1294 			led_message |= (1 << 16);
1295 	}
1296 	spin_unlock_irqrestore(ap->lock, flags);
1297 	ahci_transmit_led_message(ap, led_message, 4);
1298 }
1299 
1300 static void ahci_init_sw_activity(struct ata_link *link)
1301 {
1302 	struct ata_port *ap = link->ap;
1303 	struct ahci_port_priv *pp = ap->private_data;
1304 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1305 
1306 	/* init activity stats, setup timer */
1307 	emp->saved_activity = emp->activity = 0;
1308 	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1309 
1310 	/* check our blink policy and set flag for link if it's enabled */
1311 	if (emp->blink_policy)
1312 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1313 }
1314 
1315 static int ahci_reset_em(struct ata_host *host)
1316 {
1317 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1318 	u32 em_ctl;
1319 
1320 	em_ctl = readl(mmio + HOST_EM_CTL);
1321 	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1322 		return -EINVAL;
1323 
1324 	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1325 	return 0;
1326 }
1327 
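/*
 * Write a two-dword LED message into the EM transmit buffer at em_loc and
 * set EM_CTL.TM to send it; returns -EBUSY while a previous transmission
 * is still in flight.
 */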
1328 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1329 					ssize_t size)
1330 {
1331 	struct ahci_host_priv *hpriv = ap->host->private_data;
1332 	struct ahci_port_priv *pp = ap->private_data;
1333 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1334 	u32 em_ctl;
1335 	u32 message[] = {0, 0};
1336 	unsigned long flags;
1337 	int pmp;
1338 	struct ahci_em_priv *emp;
1339 
1340 	/* get the slot number from the message */
1341 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1342 	if (pmp < MAX_SLOTS)
1343 		emp = &pp->em_priv[pmp];
1344 	else
1345 		return -EINVAL;
1346 
1347 	spin_lock_irqsave(ap->lock, flags);
1348 
1349 	/*
1350 	 * if we are still busy transmitting a previous message,
1351 	 * do not allow a new one to be sent
1352 	 */
1353 	em_ctl = readl(mmio + HOST_EM_CTL);
1354 	if (em_ctl & EM_CTL_TM) {
1355 		spin_unlock_irqrestore(ap->lock, flags);
1356 		return -EBUSY;
1357 	}
1358 
1359 	/*
1360 	 * create message header - this is all zero except for
1361 	 * the message size, which is 4 bytes.
1362 	 */
1363 	message[0] |= (4 << 8);
1364 
1365 	/* ignore bits 0-3 of byte zero, fill in port info yourself */
1366 	message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1367 
1368 	/* write message to EM_LOC */
1369 	writel(message[0], mmio + hpriv->em_loc);
1370 	writel(message[1], mmio + hpriv->em_loc+4);
1371 
1372 	/* save off new led state for port/slot */
1373 	emp->led_state = state;
1374 
1375 	/*
1376 	 * tell hardware to transmit the message
1377 	 */
1378 	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1379 
1380 	spin_unlock_irqrestore(ap->lock, flags);
1381 	return size;
1382 }
1383 
1384 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1385 {
1386 	struct ahci_port_priv *pp = ap->private_data;
1387 	struct ata_link *link;
1388 	struct ahci_em_priv *emp;
1389 	int rc = 0;
1390 
1391 	ata_for_each_link(link, ap, EDGE) {
1392 		emp = &pp->em_priv[link->pmp];
1393 		rc += sprintf(buf, "%lx\n", emp->led_state);
1394 	}
1395 	return rc;
1396 }
1397 
1398 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1399 				size_t size)
1400 {
1401 	int state;
1402 	int pmp;
1403 	struct ahci_port_priv *pp = ap->private_data;
1404 	struct ahci_em_priv *emp;
1405 
1406 	state = simple_strtoul(buf, NULL, 0);
1407 
1408 	/* get the slot number from the message */
1409 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1410 	if (pmp < MAX_SLOTS)
1411 		emp = &pp->em_priv[pmp];
1412 	else
1413 		return -EINVAL;
1414 
1415 	/* mask off the activity bits if we are in sw_activity
1416 	 * mode, user should turn off sw_activity before setting
1417 	 * activity led through em_message
1418 	 */
1419 	if (emp->blink_policy)
1420 		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1421 
1422 	return ahci_transmit_led_message(ap, state, size);
1423 }
1424 
1425 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1426 {
1427 	struct ata_link *link = dev->link;
1428 	struct ata_port *ap = link->ap;
1429 	struct ahci_port_priv *pp = ap->private_data;
1430 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1431 	u32 port_led_state = emp->led_state;
1432 
1433 	/* save the desired Activity LED behavior */
1434 	if (val == OFF) {
1435 		/* clear LFLAG */
1436 		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1437 
1438 		/* set the LED to OFF */
1439 		port_led_state &= EM_MSG_LED_VALUE_OFF;
1440 		port_led_state |= (ap->port_no | (link->pmp << 8));
1441 		ahci_transmit_led_message(ap, port_led_state, 4);
1442 	} else {
1443 		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1444 		if (val == BLINK_OFF) {
1445 			/* set LED to ON for idle */
1446 			port_led_state &= EM_MSG_LED_VALUE_OFF;
1447 			port_led_state |= (ap->port_no | (link->pmp << 8));
1448 			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1449 			ahci_transmit_led_message(ap, port_led_state, 4);
1450 		}
1451 	}
1452 	emp->blink_policy = val;
1453 	return 0;
1454 }
1455 
1456 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1457 {
1458 	struct ata_link *link = dev->link;
1459 	struct ata_port *ap = link->ap;
1460 	struct ahci_port_priv *pp = ap->private_data;
1461 	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1462 
1463 	/* display the saved value of activity behavior for this
1464 	 * disk.
1465 	 */
1466 	return sprintf(buf, "%d\n", emp->blink_policy);
1467 }
1468 
1469 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1470 			   int port_no, void __iomem *mmio,
1471 			   void __iomem *port_mmio)
1472 {
1473 	const char *emsg = NULL;
1474 	int rc;
1475 	u32 tmp;
1476 
1477 	/* make sure port is not active */
1478 	rc = ahci_deinit_port(ap, &emsg);
1479 	if (rc)
1480 		dev_printk(KERN_WARNING, &pdev->dev,
1481 			   "%s (%d)\n", emsg, rc);
1482 
1483 	/* clear SError */
1484 	tmp = readl(port_mmio + PORT_SCR_ERR);
1485 	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1486 	writel(tmp, port_mmio + PORT_SCR_ERR);
1487 
1488 	/* clear port IRQ */
1489 	tmp = readl(port_mmio + PORT_IRQ_STAT);
1490 	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1491 	if (tmp)
1492 		writel(tmp, port_mmio + PORT_IRQ_STAT);
1493 
1494 	writel(1 << port_no, mmio + HOST_IRQ_STAT);
1495 }
1496 
1497 static void ahci_init_controller(struct ata_host *host)
1498 {
1499 	struct ahci_host_priv *hpriv = host->private_data;
1500 	struct pci_dev *pdev = to_pci_dev(host->dev);
1501 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1502 	int i;
1503 	void __iomem *port_mmio;
1504 	u32 tmp;
1505 	int mv;
1506 
1507 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1508 		if (pdev->device == 0x6121)
1509 			mv = 2;
1510 		else
1511 			mv = 4;
1512 		port_mmio = __ahci_port_base(host, mv);
1513 
1514 		writel(0, port_mmio + PORT_IRQ_MASK);
1515 
1516 		/* clear port IRQ */
1517 		tmp = readl(port_mmio + PORT_IRQ_STAT);
1518 		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1519 		if (tmp)
1520 			writel(tmp, port_mmio + PORT_IRQ_STAT);
1521 	}
1522 
1523 	for (i = 0; i < host->n_ports; i++) {
1524 		struct ata_port *ap = host->ports[i];
1525 
1526 		port_mmio = ahci_port_base(ap);
1527 		if (ata_port_is_dummy(ap))
1528 			continue;
1529 
1530 		ahci_port_init(pdev, ap, i, mmio, port_mmio);
1531 	}
1532 
1533 	tmp = readl(mmio + HOST_CTL);
1534 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1535 	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1536 	tmp = readl(mmio + HOST_CTL);
1537 	VPRINTK("HOST_CTL 0x%x\n", tmp);
1538 }
1539 
1540 static void ahci_dev_config(struct ata_device *dev)
1541 {
1542 	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1543 
1544 	if (hpriv->flags & AHCI_HFLAG_SECT255) {
1545 		dev->max_sectors = 255;
1546 		ata_dev_printk(dev, KERN_INFO,
1547 			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
1548 	}
1549 }
1550 
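/* PxSIG latches the signature from the device's initial D2H Register FIS */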
1551 static unsigned int ahci_dev_classify(struct ata_port *ap)
1552 {
1553 	void __iomem *port_mmio = ahci_port_base(ap);
1554 	struct ata_taskfile tf;
1555 	u32 tmp;
1556 
1557 	tmp = readl(port_mmio + PORT_SIG);
1558 	tf.lbah		= (tmp >> 24)	& 0xff;
1559 	tf.lbam		= (tmp >> 16)	& 0xff;
1560 	tf.lbal		= (tmp >> 8)	& 0xff;
1561 	tf.nsect	= (tmp)		& 0xff;
1562 
1563 	return ata_dev_classify(&tf);
1564 }
1565 
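/*
 * opts fills command header DW0: FIS length in DWORDs in bits 4:0, the
 * AHCI_CMD_* flags, the target PMP port in bits 15:12 and the PRD table
 * length in bits 31:16.
 */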
1566 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1567 			       u32 opts)
1568 {
1569 	dma_addr_t cmd_tbl_dma;
1570 
1571 	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1572 
1573 	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1574 	pp->cmd_slot[tag].status = 0;
1575 	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1576 	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1577 }
1578 
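/*
 * Stop the port engine and, if the device still shows BSY/DRQ, issue a
 * Command List Override (requires CAP.SCLO) to clear them before
 * restarting the engine.
 */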
1579 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1580 {
1581 	void __iomem *port_mmio = ahci_port_base(ap);
1582 	struct ahci_host_priv *hpriv = ap->host->private_data;
1583 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1584 	u32 tmp;
1585 	int busy, rc;
1586 
1587 	/* do we need to kick the port? */
1588 	busy = status & (ATA_BUSY | ATA_DRQ);
1589 	if (!busy && !force_restart)
1590 		return 0;
1591 
1592 	/* stop engine */
1593 	rc = ahci_stop_engine(ap);
1594 	if (rc)
1595 		goto out_restart;
1596 
1597 	/* need to do CLO? */
1598 	if (!busy) {
1599 		rc = 0;
1600 		goto out_restart;
1601 	}
1602 
1603 	if (!(hpriv->cap & HOST_CAP_CLO)) {
1604 		rc = -EOPNOTSUPP;
1605 		goto out_restart;
1606 	}
1607 
1608 	/* perform CLO */
1609 	tmp = readl(port_mmio + PORT_CMD);
1610 	tmp |= PORT_CMD_CLO;
1611 	writel(tmp, port_mmio + PORT_CMD);
1612 
1613 	rc = 0;
1614 	tmp = ata_wait_register(port_mmio + PORT_CMD,
1615 				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1616 	if (tmp & PORT_CMD_CLO)
1617 		rc = -EIO;
1618 
1619 	/* restart engine */
1620  out_restart:
1621 	ahci_start_engine(ap);
1622 	return rc;
1623 }
1624 
1625 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1626 				struct ata_taskfile *tf, int is_cmd, u16 flags,
1627 				unsigned long timeout_msec)
1628 {
1629 	const u32 cmd_fis_len = 5; /* five dwords */
1630 	struct ahci_port_priv *pp = ap->private_data;
1631 	void __iomem *port_mmio = ahci_port_base(ap);
1632 	u8 *fis = pp->cmd_tbl;
1633 	u32 tmp;
1634 
1635 	/* prep the command */
1636 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
1637 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1638 
1639 	/* issue & wait */
1640 	writel(1, port_mmio + PORT_CMD_ISSUE);
1641 
1642 	if (timeout_msec) {
1643 		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1644 					1, timeout_msec);
1645 		if (tmp & 0x1) {
1646 			ahci_kick_engine(ap, 1);
1647 			return -EBUSY;
1648 		}
1649 	} else
1650 		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
1651 
1652 	return 0;
1653 }
1654 
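/*
 * Software reset per AHCI 1.1 section 10.4.1: make sure the port is
 * idle (kicking the engine, with CLO if the device is left busy),
 * send a Register H2D FIS with SRST set, pause briefly, send a second
 * FIS with SRST cleared, then wait for the link to become ready and
 * classify the device from its signature.
 */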
1655 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1656 			     int pmp, unsigned long deadline,
1657 			     int (*check_ready)(struct ata_link *link))
1658 {
1659 	struct ata_port *ap = link->ap;
1660 	const char *reason = NULL;
1661 	unsigned long now, msecs;
1662 	struct ata_taskfile tf;
1663 	int rc;
1664 
1665 	DPRINTK("ENTER\n");
1666 
1667 	/* prepare for SRST (AHCI-1.1 10.4.1) */
1668 	rc = ahci_kick_engine(ap, 1);
1669 	if (rc && rc != -EOPNOTSUPP)
1670 		ata_link_printk(link, KERN_WARNING,
1671 				"failed to reset engine (errno=%d)\n", rc);
1672 
1673 	ata_tf_init(link->device, &tf);
1674 
1675 	/* issue the first H2D Register FIS */
1676 	msecs = 0;
1677 	now = jiffies;
1678 	if (time_after(deadline, now))
1679 		msecs = jiffies_to_msecs(deadline - now);
1680 
1681 	tf.ctl |= ATA_SRST;
1682 	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1683 				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1684 		rc = -EIO;
1685 		reason = "1st FIS failed";
1686 		goto fail;
1687 	}
1688 
1689 	/* spec says at least 5us, but be generous and sleep for 1ms */
1690 	msleep(1);
1691 
1692 	/* issue the second H2D Register FIS */
1693 	tf.ctl &= ~ATA_SRST;
1694 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1695 
1696 	/* wait for link to become ready */
1697 	rc = ata_wait_after_reset(link, deadline, check_ready);
1698 	/* link occupied, -ENODEV too is an error */
1699 	if (rc) {
1700 		reason = "device not ready";
1701 		goto fail;
1702 	}
1703 	*class = ahci_dev_classify(ap);
1704 
1705 	DPRINTK("EXIT, class=%u\n", *class);
1706 	return 0;
1707 
1708  fail:
1709 	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1710 	return rc;
1711 }
1712 
1713 static int ahci_check_ready(struct ata_link *link)
1714 {
1715 	void __iomem *port_mmio = ahci_port_base(link->ap);
1716 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1717 
1718 	return ata_check_ready(status);
1719 }
1720 
1721 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1722 			  unsigned long deadline)
1723 {
1724 	int pmp = sata_srst_pmp(link);
1725 
1726 	DPRINTK("ENTER\n");
1727 
1728 	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1729 }
1730 
1731 static int ahci_sb600_check_ready(struct ata_link *link)
1732 {
1733 	void __iomem *port_mmio = ahci_port_base(link->ap);
1734 	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1735 	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1736 
1737 	/*
1738 	 * If BAD PMP is flagged (a HW bug on these chips), there is no
1739 	 * need to check TFDATA; bailing out early saves the timeout delay.
1740 	 */
1741 	if (irq_status & PORT_IRQ_BAD_PMP)
1742 		return -EIO;
1743 
1744 	return ata_check_ready(status);
1745 }
1746 
1747 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1748 				unsigned long deadline)
1749 {
1750 	struct ata_port *ap = link->ap;
1751 	void __iomem *port_mmio = ahci_port_base(ap);
1752 	int pmp = sata_srst_pmp(link);
1753 	int rc;
1754 	u32 irq_sts;
1755 
1756 	DPRINTK("ENTER\n");
1757 
1758 	rc = ahci_do_softreset(link, class, pmp, deadline,
1759 			       ahci_sb600_check_ready);
1760 
1761 	/*
1762 	 * Soft reset fails on some ATI chips with IPMS set when PMP
1763 	 * is enabled but a SATA HDD/ODD is connected to the SATA port;
1764 	 * retry the soft reset against PMP port 0.
1765 	 */
1766 	if (rc == -EIO) {
1767 		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1768 		if (irq_sts & PORT_IRQ_BAD_PMP) {
1769 			ata_link_printk(link, KERN_WARNING,
1770 					"failed due to HW bug, retry pmp=0\n");
1771 			rc = ahci_do_softreset(link, class, 0, deadline,
1772 					       ahci_check_ready);
1773 		}
1774 	}
1775 
1776 	return rc;
1777 }
1778 
1779 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1780 			  unsigned long deadline)
1781 {
1782 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1783 	struct ata_port *ap = link->ap;
1784 	struct ahci_port_priv *pp = ap->private_data;
1785 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1786 	struct ata_taskfile tf;
1787 	bool online;
1788 	int rc;
1789 
1790 	DPRINTK("ENTER\n");
1791 
1792 	ahci_stop_engine(ap);
1793 
1794 	/* clear D2H reception area to properly wait for D2H FIS */
1795 	ata_tf_init(link->device, &tf);
1796 	tf.command = 0x80;
1797 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1798 
1799 	rc = sata_link_hardreset(link, timing, deadline, &online,
1800 				 ahci_check_ready);
1801 
1802 	ahci_start_engine(ap);
1803 
1804 	if (online)
1805 		*class = ahci_dev_classify(ap);
1806 
1807 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1808 	return rc;
1809 }
1810 
1811 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1812 				 unsigned long deadline)
1813 {
1814 	struct ata_port *ap = link->ap;
1815 	bool online;
1816 	int rc;
1817 
1818 	DPRINTK("ENTER\n");
1819 
1820 	ahci_stop_engine(ap);
1821 
1822 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1823 				 deadline, &online, NULL);
1824 
1825 	ahci_start_engine(ap);
1826 
1827 	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1828 
1829 	/* vt8251 doesn't clear BSY on signature FIS reception;
1830 	 * request follow-up softreset.
1831 	 */
1832 	return online ? -EAGAIN : rc;
1833 }
1834 
1835 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1836 				unsigned long deadline)
1837 {
1838 	struct ata_port *ap = link->ap;
1839 	struct ahci_port_priv *pp = ap->private_data;
1840 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1841 	struct ata_taskfile tf;
1842 	bool online;
1843 	int rc;
1844 
1845 	ahci_stop_engine(ap);
1846 
1847 	/* clear D2H reception area to properly wait for D2H FIS */
1848 	ata_tf_init(link->device, &tf);
1849 	tf.command = 0x80;
1850 	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1851 
1852 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1853 				 deadline, &online, NULL);
1854 
1855 	ahci_start_engine(ap);
1856 
1857 	/* The pseudo configuration device on SIMG4726 attached to
1858 	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1859 	 * hardreset if no device is attached to the first downstream
1860 	 * port, and the pseudo device locks up on SRST w/ PMP==0.  To
1861 	 * work around this, wait for !BSY only briefly.  If BSY isn't
1862 	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1863 	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1864 	 *
1865 	 * Wait for two seconds.  Devices attached to downstream port
1866 	 * which can't process the following IDENTIFY after this will
1867 	 * have to be reset again.  For most cases, this should
1868 	 * suffice while making probing snappish enough.
1869 	 * suffice while making probing snappy enough.
1870 	if (online) {
1871 		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1872 					  ahci_check_ready);
1873 		if (rc)
1874 			ahci_kick_engine(ap, 0);
1875 	}
1876 	return rc;
1877 }
1878 
1879 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1880 {
1881 	struct ata_port *ap = link->ap;
1882 	void __iomem *port_mmio = ahci_port_base(ap);
1883 	u32 new_tmp, tmp;
1884 
1885 	ata_std_postreset(link, class);
1886 
1887 	/* Make sure port's ATAPI bit is set appropriately */
1888 	new_tmp = tmp = readl(port_mmio + PORT_CMD);
1889 	if (*class == ATA_DEV_ATAPI)
1890 		new_tmp |= PORT_CMD_ATAPI;
1891 	else
1892 		new_tmp &= ~PORT_CMD_ATAPI;
1893 	if (new_tmp != tmp) {
1894 		writel(new_tmp, port_mmio + PORT_CMD);
1895 		readl(port_mmio + PORT_CMD); /* flush */
1896 	}
1897 }
1898 
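/*
 * Convert the qc's DMA-mapped scatterlist into PRDT entries placed
 * after the command table header.  Each entry carries a 64-bit buffer
 * address and the byte count minus one; the number of entries written
 * is returned so the caller can record it in the command slot.
 */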
1899 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1900 {
1901 	struct scatterlist *sg;
1902 	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1903 	unsigned int si;
1904 
1905 	VPRINTK("ENTER\n");
1906 
1907 	/*
1908 	 * Next, the S/G list.
1909 	 */
1910 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1911 		dma_addr_t addr = sg_dma_address(sg);
1912 		u32 sg_len = sg_dma_len(sg);
1913 
1914 		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1915 		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1916 		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1917 	}
1918 
1919 	return si;
1920 }
1921 
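/*
 * Per-command table layout filled in below: the Register H2D command
 * FIS at offset 0, the ATAPI CDB at AHCI_CMD_TBL_CDB and the PRDT
 * starting at AHCI_CMD_TBL_HDR_SZ (built by ahci_fill_sg()).
 */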
1922 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1923 {
1924 	struct ata_port *ap = qc->ap;
1925 	struct ahci_port_priv *pp = ap->private_data;
1926 	int is_atapi = ata_is_atapi(qc->tf.protocol);
1927 	void *cmd_tbl;
1928 	u32 opts;
1929 	const u32 cmd_fis_len = 5; /* five dwords */
1930 	unsigned int n_elem;
1931 
1932 	/*
1933 	 * Fill in command table information.  First, the header,
1934 	 * a SATA Register - Host to Device command FIS.
1935 	 */
1936 	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1937 
1938 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1939 	if (is_atapi) {
1940 		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1941 		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1942 	}
1943 
1944 	n_elem = 0;
1945 	if (qc->flags & ATA_QCFLAG_DMAMAP)
1946 		n_elem = ahci_fill_sg(qc, cmd_tbl);
1947 
1948 	/*
1949 	 * Fill in command slot information.
1950 	 */
1951 	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1952 	if (qc->tf.flags & ATA_TFLAG_WRITE)
1953 		opts |= AHCI_CMD_WRITE;
1954 	if (is_atapi)
1955 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1956 
1957 	ahci_fill_cmd_slot(pp, qc->tag, opts);
1958 }
1959 
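/*
 * Error interrupt handling: determine the active link, record the raw
 * irq_stat and SError for EH, translate the individual PORT_IRQ_*
 * error bits into err_mask/action on the host or link EH info, and
 * finally freeze the port (fatal errors) or abort outstanding commands.
 */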
1960 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1961 {
1962 	struct ahci_host_priv *hpriv = ap->host->private_data;
1963 	struct ahci_port_priv *pp = ap->private_data;
1964 	struct ata_eh_info *host_ehi = &ap->link.eh_info;
1965 	struct ata_link *link = NULL;
1966 	struct ata_queued_cmd *active_qc;
1967 	struct ata_eh_info *active_ehi;
1968 	u32 serror;
1969 
1970 	/* determine active link */
1971 	ata_for_each_link(link, ap, EDGE)
1972 		if (ata_link_active(link))
1973 			break;
1974 	if (!link)
1975 		link = &ap->link;
1976 
1977 	active_qc = ata_qc_from_tag(ap, link->active_tag);
1978 	active_ehi = &link->eh_info;
1979 
1980 	/* record irq stat */
1981 	ata_ehi_clear_desc(host_ehi);
1982 	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1983 
1984 	/* AHCI needs SError cleared; otherwise, it might lock up */
1985 	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1986 	ahci_scr_write(&ap->link, SCR_ERROR, serror);
1987 	host_ehi->serror |= serror;
1988 
1989 	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
1990 	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1991 		irq_stat &= ~PORT_IRQ_IF_ERR;
1992 
1993 	if (irq_stat & PORT_IRQ_TF_ERR) {
1994 		/* If qc is active, charge it; otherwise, the active
1995 		 * link.  There's no active qc on NCQ errors.  It will
1996 		 * be determined by EH by reading log page 10h.
1997 		 */
1998 		if (active_qc)
1999 			active_qc->err_mask |= AC_ERR_DEV;
2000 		else
2001 			active_ehi->err_mask |= AC_ERR_DEV;
2002 
2003 		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2004 			host_ehi->serror &= ~SERR_INTERNAL;
2005 	}
2006 
2007 	if (irq_stat & PORT_IRQ_UNK_FIS) {
2008 		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2009 
2010 		active_ehi->err_mask |= AC_ERR_HSM;
2011 		active_ehi->action |= ATA_EH_RESET;
2012 		ata_ehi_push_desc(active_ehi,
2013 				  "unknown FIS %08x %08x %08x %08x" ,
2014 				  unk[0], unk[1], unk[2], unk[3]);
2015 	}
2016 
2017 	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2018 		active_ehi->err_mask |= AC_ERR_HSM;
2019 		active_ehi->action |= ATA_EH_RESET;
2020 		ata_ehi_push_desc(active_ehi, "incorrect PMP");
2021 	}
2022 
2023 	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2024 		host_ehi->err_mask |= AC_ERR_HOST_BUS;
2025 		host_ehi->action |= ATA_EH_RESET;
2026 		ata_ehi_push_desc(host_ehi, "host bus error");
2027 	}
2028 
2029 	if (irq_stat & PORT_IRQ_IF_ERR) {
2030 		host_ehi->err_mask |= AC_ERR_ATA_BUS;
2031 		host_ehi->action |= ATA_EH_RESET;
2032 		ata_ehi_push_desc(host_ehi, "interface fatal error");
2033 	}
2034 
2035 	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2036 		ata_ehi_hotplugged(host_ehi);
2037 		ata_ehi_push_desc(host_ehi, "%s",
2038 			irq_stat & PORT_IRQ_CONNECT ?
2039 			"connection status changed" : "PHY RDY changed");
2040 	}
2041 
2042 	/* okay, let's hand over to EH */
2043 
2044 	if (irq_stat & PORT_IRQ_FREEZE)
2045 		ata_port_freeze(ap);
2046 	else
2047 		ata_port_abort(ap);
2048 }
2049 
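/*
 * Per-port interrupt handler, called under the host lock.  Port status
 * is read and acked first; errors are handed off to ahci_error_intr(),
 * SDB FISes may carry asynchronous notification, and completed
 * commands are derived from PORT_SCR_ACT (NCQ) or PORT_CMD_ISSUE
 * (non-NCQ) and retired via ata_qc_complete_multiple().
 */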
2050 static void ahci_port_intr(struct ata_port *ap)
2051 {
2052 	void __iomem *port_mmio = ahci_port_base(ap);
2053 	struct ata_eh_info *ehi = &ap->link.eh_info;
2054 	struct ahci_port_priv *pp = ap->private_data;
2055 	struct ahci_host_priv *hpriv = ap->host->private_data;
2056 	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2057 	u32 status, qc_active;
2058 	int rc;
2059 
2060 	status = readl(port_mmio + PORT_IRQ_STAT);
2061 	writel(status, port_mmio + PORT_IRQ_STAT);
2062 
2063 	/* ignore BAD_PMP while resetting */
2064 	if (unlikely(resetting))
2065 		status &= ~PORT_IRQ_BAD_PMP;
2066 
2067 	/* If we are getting PhyRdy, this is
2068 	 * just a power state change; clear
2069 	 * it, along with the PhyRdy/Comm
2070 	 * Wake bits, from SError.
2071 	 */
2072 	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2073 		(status & PORT_IRQ_PHYRDY)) {
2074 		status &= ~PORT_IRQ_PHYRDY;
2075 		ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2076 	}
2077 
2078 	if (unlikely(status & PORT_IRQ_ERROR)) {
2079 		ahci_error_intr(ap, status);
2080 		return;
2081 	}
2082 
2083 	if (status & PORT_IRQ_SDB_FIS) {
2084 		/* If SNotification is available, leave notification
2085 		 * handling to sata_async_notification().  If not,
2086 		 * emulate it by snooping SDB FIS RX area.
2087 		 *
2088 		 * Snooping FIS RX area is probably cheaper than
2089 		 * poking SNotification, but some controllers which
2090 		 * implement SNotification, ICH9 for example, don't
2091 		 * store the AN SDB FIS into the receive area.
2092 		 */
2093 		if (hpriv->cap & HOST_CAP_SNTF)
2094 			sata_async_notification(ap);
2095 		else {
2096 			/* If the 'N' bit in word 0 of the FIS is set,
2097 			 * we just received asynchronous notification.
2098 			 * Tell libata about it.
2099 			 */
2100 			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2101 			u32 f0 = le32_to_cpu(f[0]);
2102 
2103 			if (f0 & (1 << 15))
2104 				sata_async_notification(ap);
2105 		}
2106 	}
2107 
2108 	/* pp->active_link is valid iff any command is in flight */
2109 	if (ap->qc_active && pp->active_link->sactive)
2110 		qc_active = readl(port_mmio + PORT_SCR_ACT);
2111 	else
2112 		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2113 
2114 	rc = ata_qc_complete_multiple(ap, qc_active);
2115 
2116 	/* while resetting, invalid completions are expected */
2117 	if (unlikely(rc < 0 && !resetting)) {
2118 		ehi->err_mask |= AC_ERR_HSM;
2119 		ehi->action |= ATA_EH_RESET;
2120 		ata_port_freeze(ap);
2121 	}
2122 }
2123 
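/*
 * Top-level shared interrupt handler: read HOST_IRQ_STAT, service every
 * implemented port whose bit is set, then ack HOST_IRQ_STAT last as the
 * comment below (and AHCI 1.1 section 10.6.2) requires.
 */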
2124 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2125 {
2126 	struct ata_host *host = dev_instance;
2127 	struct ahci_host_priv *hpriv;
2128 	unsigned int i, handled = 0;
2129 	void __iomem *mmio;
2130 	u32 irq_stat, irq_masked;
2131 
2132 	VPRINTK("ENTER\n");
2133 
2134 	hpriv = host->private_data;
2135 	mmio = host->iomap[AHCI_PCI_BAR];
2136 
2137 	/* sigh.  0xffffffff is a valid return from h/w */
2138 	irq_stat = readl(mmio + HOST_IRQ_STAT);
2139 	if (!irq_stat)
2140 		return IRQ_NONE;
2141 
2142 	irq_masked = irq_stat & hpriv->port_map;
2143 
2144 	spin_lock(&host->lock);
2145 
2146 	for (i = 0; i < host->n_ports; i++) {
2147 		struct ata_port *ap;
2148 
2149 		if (!(irq_masked & (1 << i)))
2150 			continue;
2151 
2152 		ap = host->ports[i];
2153 		if (ap) {
2154 			ahci_port_intr(ap);
2155 			VPRINTK("port %u\n", i);
2156 		} else {
2157 			VPRINTK("port %u (no irq)\n", i);
2158 			if (ata_ratelimit())
2159 				dev_printk(KERN_WARNING, host->dev,
2160 					"interrupt on disabled port %u\n", i);
2161 		}
2162 
2163 		handled = 1;
2164 	}
2165 
2166 	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
2167 	 * it should be cleared after all the port events are cleared;
2168 	 * otherwise, it will raise a spurious interrupt after each
2169 	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
2170 	 * information.
2171 	 *
2172 	 * Also, use the unmasked value to clear interrupt as spurious
2173 	 * pending event on a dummy port might cause screaming IRQ.
2174 	 */
2175 	writel(irq_stat, mmio + HOST_IRQ_STAT);
2176 
2177 	spin_unlock(&host->lock);
2178 
2179 	VPRINTK("EXIT\n");
2180 
2181 	return IRQ_RETVAL(handled);
2182 }
2183 
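/*
 * Issue a prepared command.  For NCQ the tag must be set in
 * PORT_SCR_ACT before the matching bit is written to PORT_CMD_ISSUE;
 * non-NCQ commands only need the CI bit.
 */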
2184 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2185 {
2186 	struct ata_port *ap = qc->ap;
2187 	void __iomem *port_mmio = ahci_port_base(ap);
2188 	struct ahci_port_priv *pp = ap->private_data;
2189 
2190 	/* Keep track of the currently active link.  It will be used
2191 	 * in completion path to determine whether NCQ phase is in
2192 	 * progress.
2193 	 */
2194 	pp->active_link = qc->dev->link;
2195 
2196 	if (qc->tf.protocol == ATA_PROT_NCQ)
2197 		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2198 	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2199 
2200 	ahci_sw_activity(qc->dev->link);
2201 
2202 	return 0;
2203 }
2204 
2205 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2206 {
2207 	struct ahci_port_priv *pp = qc->ap->private_data;
2208 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2209 
2210 	ata_tf_from_fis(d2h_fis, &qc->result_tf);
2211 	return true;
2212 }
2213 
2214 static void ahci_freeze(struct ata_port *ap)
2215 {
2216 	void __iomem *port_mmio = ahci_port_base(ap);
2217 
2218 	/* turn IRQ off */
2219 	writel(0, port_mmio + PORT_IRQ_MASK);
2220 }
2221 
2222 static void ahci_thaw(struct ata_port *ap)
2223 {
2224 	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2225 	void __iomem *port_mmio = ahci_port_base(ap);
2226 	u32 tmp;
2227 	struct ahci_port_priv *pp = ap->private_data;
2228 
2229 	/* clear IRQ */
2230 	tmp = readl(port_mmio + PORT_IRQ_STAT);
2231 	writel(tmp, port_mmio + PORT_IRQ_STAT);
2232 	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2233 
2234 	/* turn IRQ back on */
2235 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2236 }
2237 
2238 static void ahci_error_handler(struct ata_port *ap)
2239 {
2240 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2241 		/* restart engine */
2242 		ahci_stop_engine(ap);
2243 		ahci_start_engine(ap);
2244 	}
2245 
2246 	sata_pmp_error_handler(ap);
2247 }
2248 
2249 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2250 {
2251 	struct ata_port *ap = qc->ap;
2252 
2253 	/* make DMA engine forget about the failed command */
2254 	if (qc->flags & ATA_QCFLAG_FAILED)
2255 		ahci_kick_engine(ap, 1);
2256 }
2257 
2258 static void ahci_pmp_attach(struct ata_port *ap)
2259 {
2260 	void __iomem *port_mmio = ahci_port_base(ap);
2261 	struct ahci_port_priv *pp = ap->private_data;
2262 	u32 cmd;
2263 
2264 	cmd = readl(port_mmio + PORT_CMD);
2265 	cmd |= PORT_CMD_PMP;
2266 	writel(cmd, port_mmio + PORT_CMD);
2267 
2268 	pp->intr_mask |= PORT_IRQ_BAD_PMP;
2269 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2270 }
2271 
2272 static void ahci_pmp_detach(struct ata_port *ap)
2273 {
2274 	void __iomem *port_mmio = ahci_port_base(ap);
2275 	struct ahci_port_priv *pp = ap->private_data;
2276 	u32 cmd;
2277 
2278 	cmd = readl(port_mmio + PORT_CMD);
2279 	cmd &= ~PORT_CMD_PMP;
2280 	writel(cmd, port_mmio + PORT_CMD);
2281 
2282 	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2283 	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2284 }
2285 
2286 static int ahci_port_resume(struct ata_port *ap)
2287 {
2288 	ahci_power_up(ap);
2289 	ahci_start_port(ap);
2290 
2291 	if (sata_pmp_attached(ap))
2292 		ahci_pmp_attach(ap);
2293 	else
2294 		ahci_pmp_detach(ap);
2295 
2296 	return 0;
2297 }
2298 
2299 #ifdef CONFIG_PM
2300 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2301 {
2302 	const char *emsg = NULL;
2303 	int rc;
2304 
2305 	rc = ahci_deinit_port(ap, &emsg);
2306 	if (rc == 0)
2307 		ahci_power_down(ap);
2308 	else {
2309 		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2310 		ahci_start_port(ap);
2311 	}
2312 
2313 	return rc;
2314 }
2315 
2316 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2317 {
2318 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2319 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2320 	u32 ctl;
2321 
2322 	if (mesg.event & PM_EVENT_SLEEP) {
2323 		/* AHCI spec rev1.1 section 8.3.3:
2324 		 * Software must disable interrupts prior to requesting a
2325 		 * transition of the HBA to D3 state.
2326 		 */
2327 		ctl = readl(mmio + HOST_CTL);
2328 		ctl &= ~HOST_IRQ_EN;
2329 		writel(ctl, mmio + HOST_CTL);
2330 		readl(mmio + HOST_CTL); /* flush */
2331 	}
2332 
2333 	return ata_pci_device_suspend(pdev, mesg);
2334 }
2335 
2336 static int ahci_pci_device_resume(struct pci_dev *pdev)
2337 {
2338 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
2339 	int rc;
2340 
2341 	rc = ata_pci_device_do_resume(pdev);
2342 	if (rc)
2343 		return rc;
2344 
2345 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2346 		rc = ahci_reset_controller(host);
2347 		if (rc)
2348 			return rc;
2349 
2350 		ahci_init_controller(host);
2351 	}
2352 
2353 	ata_host_resume(host);
2354 
2355 	return 0;
2356 }
2357 #endif
2358 
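/*
 * Allocate the per-port DMA area as one coherent chunk and carve it
 * up: the 32-entry command list first, then the received-FIS area,
 * then the per-tag command tables used by ahci_qc_prep().
 */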
2359 static int ahci_port_start(struct ata_port *ap)
2360 {
2361 	struct device *dev = ap->host->dev;
2362 	struct ahci_port_priv *pp;
2363 	void *mem;
2364 	dma_addr_t mem_dma;
2365 
2366 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2367 	if (!pp)
2368 		return -ENOMEM;
2369 
2370 	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2371 				  GFP_KERNEL);
2372 	if (!mem)
2373 		return -ENOMEM;
2374 	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2375 
2376 	/*
2377 	 * First item in chunk of DMA memory: the 32-slot command list,
2378 	 * each slot (command header) 32 bytes in size
2379 	 */
2380 	pp->cmd_slot = mem;
2381 	pp->cmd_slot_dma = mem_dma;
2382 
2383 	mem += AHCI_CMD_SLOT_SZ;
2384 	mem_dma += AHCI_CMD_SLOT_SZ;
2385 
2386 	/*
2387 	 * Second item: Received-FIS area
2388 	 */
2389 	pp->rx_fis = mem;
2390 	pp->rx_fis_dma = mem_dma;
2391 
2392 	mem += AHCI_RX_FIS_SZ;
2393 	mem_dma += AHCI_RX_FIS_SZ;
2394 
2395 	/*
2396 	 * Third item: data area for the per-tag command tables, each
2397 	 * holding a command FIS and its scatter-gather table
2398 	 */
2399 	pp->cmd_tbl = mem;
2400 	pp->cmd_tbl_dma = mem_dma;
2401 
2402 	/*
2403 	 * Save off initial list of interrupts to be enabled.
2404 	 * This could be changed later.
2405 	 */
2406 	pp->intr_mask = DEF_PORT_IRQ;
2407 
2408 	ap->private_data = pp;
2409 
2410 	/* engage engines, captain */
2411 	return ahci_port_resume(ap);
2412 }
2413 
2414 static void ahci_port_stop(struct ata_port *ap)
2415 {
2416 	const char *emsg = NULL;
2417 	int rc;
2418 
2419 	/* de-initialize port */
2420 	rc = ahci_deinit_port(ap, &emsg);
2421 	if (rc)
2422 		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2423 }
2424 
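/*
 * Set the streaming and coherent DMA masks: try 64-bit masks when the
 * caller indicates 64-bit addressing support (HOST_CAP_64), falling
 * back to 32-bit masks otherwise.
 */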
2425 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2426 {
2427 	int rc;
2428 
2429 	if (using_dac &&
2430 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2431 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2432 		if (rc) {
2433 			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2434 			if (rc) {
2435 				dev_printk(KERN_ERR, &pdev->dev,
2436 					   "64-bit DMA enable failed\n");
2437 				return rc;
2438 			}
2439 		}
2440 	} else {
2441 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2442 		if (rc) {
2443 			dev_printk(KERN_ERR, &pdev->dev,
2444 				   "32-bit DMA enable failed\n");
2445 			return rc;
2446 		}
2447 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2448 		if (rc) {
2449 			dev_printk(KERN_ERR, &pdev->dev,
2450 				   "32-bit consistent DMA enable failed\n");
2451 			return rc;
2452 		}
2453 	}
2454 	return 0;
2455 }
2456 
2457 static void ahci_print_info(struct ata_host *host)
2458 {
2459 	struct ahci_host_priv *hpriv = host->private_data;
2460 	struct pci_dev *pdev = to_pci_dev(host->dev);
2461 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2462 	u32 vers, cap, impl, speed;
2463 	const char *speed_s;
2464 	u16 cc;
2465 	const char *scc_s;
2466 
2467 	vers = readl(mmio + HOST_VERSION);
2468 	cap = hpriv->cap;
2469 	impl = hpriv->port_map;
2470 
2471 	speed = (cap >> 20) & 0xf;
2472 	if (speed == 1)
2473 		speed_s = "1.5";
2474 	else if (speed == 2)
2475 		speed_s = "3";
2476 	else if (speed == 3)
2477 		speed_s = "6";
2478 	else
2479 		speed_s = "?";
2480 
2481 	pci_read_config_word(pdev, 0x0a, &cc);
2482 	if (cc == PCI_CLASS_STORAGE_IDE)
2483 		scc_s = "IDE";
2484 	else if (cc == PCI_CLASS_STORAGE_SATA)
2485 		scc_s = "SATA";
2486 	else if (cc == PCI_CLASS_STORAGE_RAID)
2487 		scc_s = "RAID";
2488 	else
2489 		scc_s = "unknown";
2490 
2491 	dev_printk(KERN_INFO, &pdev->dev,
2492 		"AHCI %02x%02x.%02x%02x "
2493 		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2494 		,
2495 
2496 		(vers >> 24) & 0xff,
2497 		(vers >> 16) & 0xff,
2498 		(vers >> 8) & 0xff,
2499 		vers & 0xff,
2500 
2501 		((cap >> 8) & 0x1f) + 1,
2502 		(cap & 0x1f) + 1,
2503 		speed_s,
2504 		impl,
2505 		scc_s);
2506 
2507 	dev_printk(KERN_INFO, &pdev->dev,
2508 		"flags: "
2509 		"%s%s%s%s%s%s%s"
2510 		"%s%s%s%s%s%s%s"
2511 		"%s\n"
2512 		,
2513 
2514 		cap & (1 << 31) ? "64bit " : "",
2515 		cap & (1 << 30) ? "ncq " : "",
2516 		cap & (1 << 29) ? "sntf " : "",
2517 		cap & (1 << 28) ? "ilck " : "",
2518 		cap & (1 << 27) ? "stag " : "",
2519 		cap & (1 << 26) ? "pm " : "",
2520 		cap & (1 << 25) ? "led " : "",
2521 
2522 		cap & (1 << 24) ? "clo " : "",
2523 		cap & (1 << 19) ? "nz " : "",
2524 		cap & (1 << 18) ? "only " : "",
2525 		cap & (1 << 17) ? "pmp " : "",
2526 		cap & (1 << 15) ? "pio " : "",
2527 		cap & (1 << 14) ? "slum " : "",
2528 		cap & (1 << 13) ? "part " : "",
2529 		cap & (1 << 6) ? "ems ": ""
2530 		);
2531 }
2532 
2533 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2534  * hardwired to the on-board SIMG 4726.  The chipset is ICH8 and doesn't
2535  * support PMP, and the 4726 either directly exports the device
2536  * attached to the first downstream port or acts as a hardware storage
2537  * controller and emulates a single ATA device (can be RAID 0/1 or some
2538  * other configuration).
2539  *
2540  * When there's no device attached to the first downstream port of the
2541  * 4726, "Config Disk" appears, which is a pseudo ATA device to
2542  * configure the 4726.  However, ATA emulation of the device is very
2543  * lame.  It doesn't send signature D2H Reg FIS after the initial
2544  * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2545  *
2546  * The following function works around the problem by always using
2547  * hardreset on the port and not depending on receiving signature FIS
2548  * afterward.  If signature FIS isn't received soon, ATA class is
2549  * assumed without follow-up softreset.
2550  */
2551 static void ahci_p5wdh_workaround(struct ata_host *host)
2552 {
2553 	static struct dmi_system_id sysids[] = {
2554 		{
2555 			.ident = "P5W DH Deluxe",
2556 			.matches = {
2557 				DMI_MATCH(DMI_SYS_VENDOR,
2558 					  "ASUSTEK COMPUTER INC"),
2559 				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2560 			},
2561 		},
2562 		{ }
2563 	};
2564 	struct pci_dev *pdev = to_pci_dev(host->dev);
2565 
2566 	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2567 	    dmi_check_system(sysids)) {
2568 		struct ata_port *ap = host->ports[1];
2569 
2570 		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2571 			   "Deluxe on-board SIMG4726 workaround\n");
2572 
2573 		ap->ops = &ahci_p5wdh_ops;
2574 		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2575 	}
2576 }
2577 
2578 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2579 {
2580 	static const struct dmi_system_id broken_systems[] = {
2581 		{
2582 			.ident = "HP Compaq nx6310",
2583 			.matches = {
2584 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2585 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2586 			},
2587 			/* PCI slot number of the controller */
2588 			.driver_data = (void *)0x1FUL,
2589 		},
2590 		{
2591 			.ident = "HP Compaq 6720s",
2592 			.matches = {
2593 				DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2594 				DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2595 			},
2596 			/* PCI slot number of the controller */
2597 			.driver_data = (void *)0x1FUL,
2598 		},
2599 
2600 		{ }	/* terminate list */
2601 	};
2602 	const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2603 
2604 	if (dmi) {
2605 		unsigned long slot = (unsigned long)dmi->driver_data;
2606 		/* apply the quirk only to on-board controllers */
2607 		return slot == PCI_SLOT(pdev->devfn);
2608 	}
2609 
2610 	return false;
2611 }
2612 
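/*
 * PCI probe: enable the device, map BAR 5 (the ABAR), apply
 * chip-specific quirks, snapshot CAP/port_map, allocate the host and
 * ports, reset and initialize the controller, then hand the IRQ to
 * ata_host_activate().
 */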
2613 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2614 {
2615 	static int printed_version;
2616 	unsigned int board_id = ent->driver_data;
2617 	struct ata_port_info pi = ahci_port_info[board_id];
2618 	const struct ata_port_info *ppi[] = { &pi, NULL };
2619 	struct device *dev = &pdev->dev;
2620 	struct ahci_host_priv *hpriv;
2621 	struct ata_host *host;
2622 	int n_ports, i, rc;
2623 
2624 	VPRINTK("ENTER\n");
2625 
2626 	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2627 
2628 	if (!printed_version++)
2629 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2630 
2631 	/* The AHCI driver can only drive the SATA ports; the PATA driver
2632 	   can drive them all, so if both drivers are selected make sure
2633 	   AHCI stays out of the way */
2634 	if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2635 		return -ENODEV;
2636 
2637 	/* acquire resources */
2638 	rc = pcim_enable_device(pdev);
2639 	if (rc)
2640 		return rc;
2641 
2642 	/* AHCI controllers often implement an SFF-compatible interface.
2643 	 * Grab all PCI BARs just in case.
2644 	 */
2645 	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2646 	if (rc == -EBUSY)
2647 		pcim_pin_device(pdev);
2648 	if (rc)
2649 		return rc;
2650 
2651 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2652 	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2653 		u8 map;
2654 
2655 		/* ICH6s share the same PCI ID for both piix and ahci
2656 		 * modes.  Enabling ahci mode while MAP indicates
2657 		 * combined mode is a bad idea.  Yield to ata_piix.
2658 		 */
2659 		pci_read_config_byte(pdev, ICH_MAP, &map);
2660 		if (map & 0x3) {
2661 			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2662 				   "combined mode, can't enable AHCI mode\n");
2663 			return -ENODEV;
2664 		}
2665 	}
2666 
2667 	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2668 	if (!hpriv)
2669 		return -ENOMEM;
2670 	hpriv->flags |= (unsigned long)pi.private_data;
2671 
2672 	/* MCP65 revision A1 and A2 can't do MSI */
2673 	if (board_id == board_ahci_mcp65 &&
2674 	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2675 		hpriv->flags |= AHCI_HFLAG_NO_MSI;
2676 
2677 	/* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2678 	if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2679 		hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2680 
2681 	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
2682 		pci_enable_msi(pdev);
2683 
2684 	/* save initial config */
2685 	ahci_save_initial_config(pdev, hpriv);
2686 
2687 	/* prepare host */
2688 	if (hpriv->cap & HOST_CAP_NCQ)
2689 		pi.flags |= ATA_FLAG_NCQ;
2690 
2691 	if (hpriv->cap & HOST_CAP_PMP)
2692 		pi.flags |= ATA_FLAG_PMP;
2693 
2694 	if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2695 		u8 messages;
2696 		void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2697 		u32 em_loc = readl(mmio + HOST_EM_LOC);
2698 		u32 em_ctl = readl(mmio + HOST_EM_CTL);
2699 
2700 		messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2701 
2702 		/* we only support LED message type right now */
2703 		if ((messages & 0x01) && (ahci_em_messages == 1)) {
2704 			/* store em_loc */
2705 			hpriv->em_loc = ((em_loc >> 16) * 4);
2706 			pi.flags |= ATA_FLAG_EM;
2707 			if (!(em_ctl & EM_CTL_ALHD))
2708 				pi.flags |= ATA_FLAG_SW_ACTIVITY;
2709 		}
2710 	}
2711 
2712 	if (ahci_broken_system_poweroff(pdev)) {
2713 		pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
2714 		dev_info(&pdev->dev,
2715 			"quirky BIOS, skipping spindown on poweroff\n");
2716 	}
2717 
2718 	/* CAP.NP sometimes indicates the index of the last enabled
2719 	 * port and at other times that of the last possible port, so
2720 	 * determining the maximum port number requires looking at
2721 	 * both CAP.NP and port_map.
2722 	 */
2723 	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2724 
2725 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2726 	if (!host)
2727 		return -ENOMEM;
2728 	host->iomap = pcim_iomap_table(pdev);
2729 	host->private_data = hpriv;
2730 
2731 	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
2732 		host->flags |= ATA_HOST_PARALLEL_SCAN;
2733 	else
2734 		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
2735 
2736 	if (pi.flags & ATA_FLAG_EM)
2737 		ahci_reset_em(host);
2738 
2739 	for (i = 0; i < host->n_ports; i++) {
2740 		struct ata_port *ap = host->ports[i];
2741 
2742 		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2743 		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2744 				   0x100 + ap->port_no * 0x80, "port");
2745 
2746 		/* set initial link pm policy */
2747 		ap->pm_policy = NOT_AVAILABLE;
2748 
2749 		/* set enclosure management message type */
2750 		if (ap->flags & ATA_FLAG_EM)
2751 			ap->em_message_type = ahci_em_messages;
2752 
2753 
2754 		/* disabled/not-implemented port */
2755 		if (!(hpriv->port_map & (1 << i)))
2756 			ap->ops = &ata_dummy_port_ops;
2757 	}
2758 
2759 	/* apply workaround for ASUS P5W DH Deluxe mainboard */
2760 	ahci_p5wdh_workaround(host);
2761 
2762 	/* initialize adapter */
2763 	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2764 	if (rc)
2765 		return rc;
2766 
2767 	rc = ahci_reset_controller(host);
2768 	if (rc)
2769 		return rc;
2770 
2771 	ahci_init_controller(host);
2772 	ahci_print_info(host);
2773 
2774 	pci_set_master(pdev);
2775 	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2776 				 &ahci_sht);
2777 }
2778 
2779 static int __init ahci_init(void)
2780 {
2781 	return pci_register_driver(&ahci_pci_driver);
2782 }
2783 
2784 static void __exit ahci_exit(void)
2785 {
2786 	pci_unregister_driver(&ahci_pci_driver);
2787 }
2788 
2789 
2790 MODULE_AUTHOR("Jeff Garzik");
2791 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2792 MODULE_LICENSE("GPL");
2793 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2794 MODULE_VERSION(DRV_VERSION);
2795 
2796 module_init(ahci_init);
2797 module_exit(ahci_exit);
2798