xref: /openbmc/linux/drivers/ata/libata-eh.c (revision a09bf4cd53b8ab000197ef81f15d50f29ecf973c)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9c6fd2807SJeff Garzik  *
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or
12c6fd2807SJeff Garzik  *  modify it under the terms of the GNU General Public License as
13c6fd2807SJeff Garzik  *  published by the Free Software Foundation; either version 2, or
14c6fd2807SJeff Garzik  *  (at your option) any later version.
15c6fd2807SJeff Garzik  *
16c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
17c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19c6fd2807SJeff Garzik  *  General Public License for more details.
20c6fd2807SJeff Garzik  *
21c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
22c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
23c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24c6fd2807SJeff Garzik  *  USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36242f9dcbSJens Axboe #include <linux/blkdev.h>
372855568bSJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <scsi/scsi.h>
39c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
40c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
41c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
42c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
436521148cSRobert Hancock #include <scsi/scsi_dbg.h>
44c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
45c6fd2807SJeff Garzik 
46c6fd2807SJeff Garzik #include <linux/libata.h>
47c6fd2807SJeff Garzik 
48c6fd2807SJeff Garzik #include "libata.h"
49c6fd2807SJeff Garzik 
507d47e8d4STejun Heo enum {
513884f7b0STejun Heo 	/* speed down verdicts */
527d47e8d4STejun Heo 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
537d47e8d4STejun Heo 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
547d47e8d4STejun Heo 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
5576326ac1STejun Heo 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
563884f7b0STejun Heo 
573884f7b0STejun Heo 	/* error flags */
583884f7b0STejun Heo 	ATA_EFLAG_IS_IO			= (1 << 0),
5976326ac1STejun Heo 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
603884f7b0STejun Heo 
613884f7b0STejun Heo 	/* error categories */
623884f7b0STejun Heo 	ATA_ECAT_NONE			= 0,
633884f7b0STejun Heo 	ATA_ECAT_ATA_BUS		= 1,
643884f7b0STejun Heo 	ATA_ECAT_TOUT_HSM		= 2,
653884f7b0STejun Heo 	ATA_ECAT_UNK_DEV		= 3,
6675f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_NONE		= 4,
6775f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
6875f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
6975f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
7075f9cafcSTejun Heo 	ATA_ECAT_NR			= 8,
717d47e8d4STejun Heo 
7287fbc5a0STejun Heo 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
7387fbc5a0STejun Heo 
740a2c0f56STejun Heo 	/* always put at least this amount of time between resets */
750a2c0f56STejun Heo 	ATA_EH_RESET_COOL_DOWN		=  5000,
760a2c0f56STejun Heo 
77341c2c95STejun Heo 	/* Waiting in ->prereset can never be reliable.  It's
78341c2c95STejun Heo 	 * sometimes nice to wait there but it can't be depended upon;
79341c2c95STejun Heo 	 * otherwise, we wouldn't be resetting.  Just give it enough
80341c2c95STejun Heo 	 * time for most drives to spin up.
8131daabdaSTejun Heo 	 */
82341c2c95STejun Heo 	ATA_EH_PRERESET_TIMEOUT		= 10000,
83341c2c95STejun Heo 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
8411fc33daSTejun Heo 
8511fc33daSTejun Heo 	ATA_EH_UA_TRIES			= 5,
86c2c7a89cSTejun Heo 
87c2c7a89cSTejun Heo 	/* probe speed down parameters, see ata_eh_schedule_probe() */
88c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
89c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIALS		= 2,
9031daabdaSTejun Heo };
9131daabdaSTejun Heo 
9231daabdaSTejun Heo /* The following table determines how we sequence resets.  Each entry
9331daabdaSTejun Heo  * represents timeout for that try.  The first try can be soft or
9431daabdaSTejun Heo  * hardreset.  All others are hardreset if available.  In most cases
9531daabdaSTejun Heo  * the first reset w/ 10sec timeout should succeed.  Following entries
9631daabdaSTejun Heo  * are mostly for error handling, hotplug and retarded devices.
9731daabdaSTejun Heo  */
9831daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = {
99341c2c95STejun Heo 	10000,	/* most drives spin up by 10sec */
100341c2c95STejun Heo 	10000,	/* > 99% working drives spin up before 20sec */
101341c2c95STejun Heo 	35000,	/* give > 30 secs of idleness for retarded devices */
102341c2c95STejun Heo 	 5000,	/* and sweet one last chance */
103d8af0eb6STejun Heo 	ULONG_MAX, /* > 1 min has elapsed, give up */
10431daabdaSTejun Heo };
10531daabdaSTejun Heo 
10687fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = {
10787fbc5a0STejun Heo 	 5000,	/* covers > 99% of successes and not too boring on failures */
10887fbc5a0STejun Heo 	10000,  /* combined time till here is enough even for media access */
10987fbc5a0STejun Heo 	30000,	/* for true idiots */
11087fbc5a0STejun Heo 	ULONG_MAX,
11187fbc5a0STejun Heo };
11287fbc5a0STejun Heo 
1136013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = {
1146013efd8STejun Heo 	15000,	/* be generous with flush */
1156013efd8STejun Heo 	15000,  /* ditto */
1166013efd8STejun Heo 	30000,	/* and even more generous */
1176013efd8STejun Heo 	ULONG_MAX,
1186013efd8STejun Heo };
1196013efd8STejun Heo 
12087fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = {
12187fbc5a0STejun Heo 	 5000,	/* same rationale as identify timeout */
12287fbc5a0STejun Heo 	10000,	/* ditto */
12387fbc5a0STejun Heo 	/* but no merciful 30sec for other commands, it just isn't worth it */
12487fbc5a0STejun Heo 	ULONG_MAX,
12587fbc5a0STejun Heo };
12687fbc5a0STejun Heo 
12787fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent {
12887fbc5a0STejun Heo 	const u8		*commands;
12987fbc5a0STejun Heo 	const unsigned long	*timeouts;
13087fbc5a0STejun Heo };
13187fbc5a0STejun Heo 
13287fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal
13387fbc5a0STejun Heo  * commands.  Each table entry is a command class and matches the
13487fbc5a0STejun Heo  * commands the entry applies to and the timeout table to use.
13587fbc5a0STejun Heo  *
13687fbc5a0STejun Heo  * On the retry after a command timed out, the next timeout value from
13787fbc5a0STejun Heo  * the table is used.  If the table doesn't contain further entries,
13887fbc5a0STejun Heo  * the last value is used.
13987fbc5a0STejun Heo  *
14087fbc5a0STejun Heo  * ehc->cmd_timeout_idx keeps track of which timeout to use per
14187fbc5a0STejun Heo  * command class, so if SET_FEATURES times out on the first try, the
14287fbc5a0STejun Heo  * next try will use the second timeout value only for that class.
14387fbc5a0STejun Heo  */
14487fbc5a0STejun Heo #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
14587fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent
14687fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
14787fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
14887fbc5a0STejun Heo 	  .timeouts = ata_eh_identify_timeouts, },
14987fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
15087fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15187fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
15287fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15387fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
15487fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15587fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
15687fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
1576013efd8STejun Heo 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
1586013efd8STejun Heo 	  .timeouts = ata_eh_flush_timeouts },
15987fbc5a0STejun Heo };
16087fbc5a0STejun Heo #undef CMDS
16187fbc5a0STejun Heo 
162c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
1636ffa01d8STejun Heo #ifdef CONFIG_PM
164c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
165c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
1666ffa01d8STejun Heo #else /* CONFIG_PM */
1676ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap)
1686ffa01d8STejun Heo { }
1696ffa01d8STejun Heo 
1706ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap)
1716ffa01d8STejun Heo { }
1726ffa01d8STejun Heo #endif /* CONFIG_PM */
173c6fd2807SJeff Garzik 
174b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
175b64bbc39STejun Heo 				 va_list args)
176b64bbc39STejun Heo {
177b64bbc39STejun Heo 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
178b64bbc39STejun Heo 				     ATA_EH_DESC_LEN - ehi->desc_len,
179b64bbc39STejun Heo 				     fmt, args);
180b64bbc39STejun Heo }
181b64bbc39STejun Heo 
182b64bbc39STejun Heo /**
183b64bbc39STejun Heo  *	__ata_ehi_push_desc - push error description without adding separator
184b64bbc39STejun Heo  *	@ehi: target EHI
185b64bbc39STejun Heo  *	@fmt: printf format string
186b64bbc39STejun Heo  *
187b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
188b64bbc39STejun Heo  *
189b64bbc39STejun Heo  *	LOCKING:
190b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
191b64bbc39STejun Heo  */
192b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
193b64bbc39STejun Heo {
194b64bbc39STejun Heo 	va_list args;
195b64bbc39STejun Heo 
196b64bbc39STejun Heo 	va_start(args, fmt);
197b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
198b64bbc39STejun Heo 	va_end(args);
199b64bbc39STejun Heo }
200b64bbc39STejun Heo 
201b64bbc39STejun Heo /**
202b64bbc39STejun Heo  *	ata_ehi_push_desc - push error description with separator
203b64bbc39STejun Heo  *	@ehi: target EHI
204b64bbc39STejun Heo  *	@fmt: printf format string
205b64bbc39STejun Heo  *
206b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
207b64bbc39STejun Heo  *	If @ehi->desc is not empty, ", " is added in-between.
208b64bbc39STejun Heo  *
209b64bbc39STejun Heo  *	LOCKING:
210b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
211b64bbc39STejun Heo  */
212b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
213b64bbc39STejun Heo {
214b64bbc39STejun Heo 	va_list args;
215b64bbc39STejun Heo 
216b64bbc39STejun Heo 	if (ehi->desc_len)
217b64bbc39STejun Heo 		__ata_ehi_push_desc(ehi, ", ");
218b64bbc39STejun Heo 
219b64bbc39STejun Heo 	va_start(args, fmt);
220b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
221b64bbc39STejun Heo 	va_end(args);
222b64bbc39STejun Heo }
223b64bbc39STejun Heo 
224b64bbc39STejun Heo /**
225b64bbc39STejun Heo  *	ata_ehi_clear_desc - clean error description
226b64bbc39STejun Heo  *	@ehi: target EHI
227b64bbc39STejun Heo  *
228b64bbc39STejun Heo  *	Clear @ehi->desc.
229b64bbc39STejun Heo  *
230b64bbc39STejun Heo  *	LOCKING:
231b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
232b64bbc39STejun Heo  */
233b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi)
234b64bbc39STejun Heo {
235b64bbc39STejun Heo 	ehi->desc[0] = '\0';
236b64bbc39STejun Heo 	ehi->desc_len = 0;
237b64bbc39STejun Heo }
238b64bbc39STejun Heo 
239cbcdd875STejun Heo /**
240cbcdd875STejun Heo  *	ata_port_desc - append port description
241cbcdd875STejun Heo  *	@ap: target ATA port
242cbcdd875STejun Heo  *	@fmt: printf format string
243cbcdd875STejun Heo  *
244cbcdd875STejun Heo  *	Format string according to @fmt and append it to port
245cbcdd875STejun Heo  *	description.  If port description is not empty, " " is added
246cbcdd875STejun Heo  *	in-between.  This function is to be used while initializing
247cbcdd875STejun Heo  *	ata_host.  The description is printed on host registration.
248cbcdd875STejun Heo  *
249cbcdd875STejun Heo  *	LOCKING:
250cbcdd875STejun Heo  *	None.
251cbcdd875STejun Heo  */
252cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
253cbcdd875STejun Heo {
254cbcdd875STejun Heo 	va_list args;
255cbcdd875STejun Heo 
256cbcdd875STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
257cbcdd875STejun Heo 
258cbcdd875STejun Heo 	if (ap->link.eh_info.desc_len)
259cbcdd875STejun Heo 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
260cbcdd875STejun Heo 
261cbcdd875STejun Heo 	va_start(args, fmt);
262cbcdd875STejun Heo 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
263cbcdd875STejun Heo 	va_end(args);
264cbcdd875STejun Heo }
265cbcdd875STejun Heo 
266cbcdd875STejun Heo #ifdef CONFIG_PCI
267cbcdd875STejun Heo 
268cbcdd875STejun Heo /**
269cbcdd875STejun Heo  *	ata_port_pbar_desc - append PCI BAR description
270cbcdd875STejun Heo  *	@ap: target ATA port
271cbcdd875STejun Heo  *	@bar: target PCI BAR
272cbcdd875STejun Heo  *	@offset: offset into PCI BAR
273cbcdd875STejun Heo  *	@name: name of the area
274cbcdd875STejun Heo  *
275cbcdd875STejun Heo  *	If @offset is negative, this function formats a string which
276cbcdd875STejun Heo  *	contains the name, address, size and type of the BAR and
277cbcdd875STejun Heo  *	appends it to the port description.  If @offset is zero or
278cbcdd875STejun Heo  *	positive, only name and offsetted address is appended.
279cbcdd875STejun Heo  *
280cbcdd875STejun Heo  *	LOCKING:
281cbcdd875STejun Heo  *	None.
282cbcdd875STejun Heo  */
283cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
284cbcdd875STejun Heo 			const char *name)
285cbcdd875STejun Heo {
286cbcdd875STejun Heo 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
287cbcdd875STejun Heo 	char *type = "";
288cbcdd875STejun Heo 	unsigned long long start, len;
289cbcdd875STejun Heo 
290cbcdd875STejun Heo 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
291cbcdd875STejun Heo 		type = "m";
292cbcdd875STejun Heo 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
293cbcdd875STejun Heo 		type = "i";
294cbcdd875STejun Heo 
295cbcdd875STejun Heo 	start = (unsigned long long)pci_resource_start(pdev, bar);
296cbcdd875STejun Heo 	len = (unsigned long long)pci_resource_len(pdev, bar);
297cbcdd875STejun Heo 
298cbcdd875STejun Heo 	if (offset < 0)
299cbcdd875STejun Heo 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
300cbcdd875STejun Heo 	else
301e6a73ab1SAndrew Morton 		ata_port_desc(ap, "%s 0x%llx", name,
302e6a73ab1SAndrew Morton 				start + (unsigned long long)offset);
303cbcdd875STejun Heo }
304cbcdd875STejun Heo 
305cbcdd875STejun Heo #endif /* CONFIG_PCI */
306cbcdd875STejun Heo 
30787fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd)
30887fbc5a0STejun Heo {
30987fbc5a0STejun Heo 	int i;
31087fbc5a0STejun Heo 
31187fbc5a0STejun Heo 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
31287fbc5a0STejun Heo 		const u8 *cur;
31387fbc5a0STejun Heo 
31487fbc5a0STejun Heo 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
31587fbc5a0STejun Heo 			if (*cur == cmd)
31687fbc5a0STejun Heo 				return i;
31787fbc5a0STejun Heo 	}
31887fbc5a0STejun Heo 
31987fbc5a0STejun Heo 	return -1;
32087fbc5a0STejun Heo }
32187fbc5a0STejun Heo 
32287fbc5a0STejun Heo /**
32387fbc5a0STejun Heo  *	ata_internal_cmd_timeout - determine timeout for an internal command
32487fbc5a0STejun Heo  *	@dev: target device
32587fbc5a0STejun Heo  *	@cmd: internal command to be issued
32687fbc5a0STejun Heo  *
32787fbc5a0STejun Heo  *	Determine timeout for internal command @cmd for @dev.
32887fbc5a0STejun Heo  *
32987fbc5a0STejun Heo  *	LOCKING:
33087fbc5a0STejun Heo  *	EH context.
33187fbc5a0STejun Heo  *
33287fbc5a0STejun Heo  *	RETURNS:
33387fbc5a0STejun Heo  *	Determined timeout.
33487fbc5a0STejun Heo  */
33587fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
33687fbc5a0STejun Heo {
33787fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
33887fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
33987fbc5a0STejun Heo 	int idx;
34087fbc5a0STejun Heo 
34187fbc5a0STejun Heo 	if (ent < 0)
34287fbc5a0STejun Heo 		return ATA_EH_CMD_DFL_TIMEOUT;
34387fbc5a0STejun Heo 
34487fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
34587fbc5a0STejun Heo 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
34687fbc5a0STejun Heo }
34787fbc5a0STejun Heo 
34887fbc5a0STejun Heo /**
34987fbc5a0STejun Heo  *	ata_internal_cmd_timed_out - notification for internal command timeout
35087fbc5a0STejun Heo  *	@dev: target device
35187fbc5a0STejun Heo  *	@cmd: internal command which timed out
35287fbc5a0STejun Heo  *
35387fbc5a0STejun Heo  *	Notify EH that internal command @cmd for @dev timed out.  This
35487fbc5a0STejun Heo  *	function should be called only for commands whose timeouts are
35587fbc5a0STejun Heo  *	determined using ata_internal_cmd_timeout().
35687fbc5a0STejun Heo  *
35787fbc5a0STejun Heo  *	LOCKING:
35887fbc5a0STejun Heo  *	EH context.
35987fbc5a0STejun Heo  */
36087fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
36187fbc5a0STejun Heo {
36287fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
36387fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
36487fbc5a0STejun Heo 	int idx;
36587fbc5a0STejun Heo 
36687fbc5a0STejun Heo 	if (ent < 0)
36787fbc5a0STejun Heo 		return;
36887fbc5a0STejun Heo 
36987fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
37087fbc5a0STejun Heo 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
37187fbc5a0STejun Heo 		ehc->cmd_timeout_idx[dev->devno][ent]++;
37287fbc5a0STejun Heo }
37387fbc5a0STejun Heo 
3743884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
375c6fd2807SJeff Garzik 			     unsigned int err_mask)
376c6fd2807SJeff Garzik {
377c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
378c6fd2807SJeff Garzik 
379c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
380c6fd2807SJeff Garzik 
381c6fd2807SJeff Garzik 	ering->cursor++;
382c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
383c6fd2807SJeff Garzik 
384c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
3853884f7b0STejun Heo 	ent->eflags = eflags;
386c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
387c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
388c6fd2807SJeff Garzik }
389c6fd2807SJeff Garzik 
39076326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
39176326ac1STejun Heo {
39276326ac1STejun Heo 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
39376326ac1STejun Heo 
39476326ac1STejun Heo 	if (ent->err_mask)
39576326ac1STejun Heo 		return ent;
39676326ac1STejun Heo 	return NULL;
39776326ac1STejun Heo }
39876326ac1STejun Heo 
3997d47e8d4STejun Heo static void ata_ering_clear(struct ata_ering *ering)
400c6fd2807SJeff Garzik {
4017d47e8d4STejun Heo 	memset(ering, 0, sizeof(*ering));
402c6fd2807SJeff Garzik }
403c6fd2807SJeff Garzik 
404c6fd2807SJeff Garzik static int ata_ering_map(struct ata_ering *ering,
405c6fd2807SJeff Garzik 			 int (*map_fn)(struct ata_ering_entry *, void *),
406c6fd2807SJeff Garzik 			 void *arg)
407c6fd2807SJeff Garzik {
408c6fd2807SJeff Garzik 	int idx, rc = 0;
409c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
410c6fd2807SJeff Garzik 
411c6fd2807SJeff Garzik 	idx = ering->cursor;
412c6fd2807SJeff Garzik 	do {
413c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
414c6fd2807SJeff Garzik 		if (!ent->err_mask)
415c6fd2807SJeff Garzik 			break;
416c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
417c6fd2807SJeff Garzik 		if (rc)
418c6fd2807SJeff Garzik 			break;
419c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
420c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
421c6fd2807SJeff Garzik 
422c6fd2807SJeff Garzik 	return rc;
423c6fd2807SJeff Garzik }
424c6fd2807SJeff Garzik 
425c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
426c6fd2807SJeff Garzik {
4279af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
428c6fd2807SJeff Garzik 
429c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
430c6fd2807SJeff Garzik }
431c6fd2807SJeff Garzik 
432f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
433c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
434c6fd2807SJeff Garzik {
435f58229f8STejun Heo 	struct ata_device *tdev;
436c6fd2807SJeff Garzik 
437c6fd2807SJeff Garzik 	if (!dev) {
438c6fd2807SJeff Garzik 		ehi->action &= ~action;
4391eca4365STejun Heo 		ata_for_each_dev(tdev, link, ALL)
440f58229f8STejun Heo 			ehi->dev_action[tdev->devno] &= ~action;
441c6fd2807SJeff Garzik 	} else {
442c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
443c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
444c6fd2807SJeff Garzik 
445c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
446c6fd2807SJeff Garzik 		if (ehi->action & action) {
4471eca4365STejun Heo 			ata_for_each_dev(tdev, link, ALL)
448f58229f8STejun Heo 				ehi->dev_action[tdev->devno] |=
449f58229f8STejun Heo 					ehi->action & action;
450c6fd2807SJeff Garzik 			ehi->action &= ~action;
451c6fd2807SJeff Garzik 		}
452c6fd2807SJeff Garzik 
453c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
454c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
455c6fd2807SJeff Garzik 	}
456c6fd2807SJeff Garzik }
457c6fd2807SJeff Garzik 
458c6fd2807SJeff Garzik /**
459c6fd2807SJeff Garzik  *	ata_scsi_timed_out - SCSI layer time out callback
460c6fd2807SJeff Garzik  *	@cmd: timed out SCSI command
461c6fd2807SJeff Garzik  *
462c6fd2807SJeff Garzik  *	Handles SCSI layer timeout.  We race with normal completion of
463c6fd2807SJeff Garzik  *	the qc for @cmd.  If the qc is already gone, we lose and let
464c6fd2807SJeff Garzik  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
465c6fd2807SJeff Garzik  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
466c6fd2807SJeff Garzik  *	from finishing it by setting EH_SCHEDULED and return
467c6fd2807SJeff Garzik  *	EH_NOT_HANDLED.
468c6fd2807SJeff Garzik  *
469c6fd2807SJeff Garzik  *	TODO: kill this function once old EH is gone.
470c6fd2807SJeff Garzik  *
471c6fd2807SJeff Garzik  *	LOCKING:
472c6fd2807SJeff Garzik  *	Called from timer context
473c6fd2807SJeff Garzik  *
474c6fd2807SJeff Garzik  *	RETURNS:
475c6fd2807SJeff Garzik  *	EH_HANDLED or EH_NOT_HANDLED
476c6fd2807SJeff Garzik  */
477242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
478c6fd2807SJeff Garzik {
479c6fd2807SJeff Garzik 	struct Scsi_Host *host = cmd->device->host;
480c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
481c6fd2807SJeff Garzik 	unsigned long flags;
482c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
483242f9dcbSJens Axboe 	enum blk_eh_timer_return ret;
484c6fd2807SJeff Garzik 
485c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
486c6fd2807SJeff Garzik 
487c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
488242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
489c6fd2807SJeff Garzik 		goto out;
490c6fd2807SJeff Garzik 	}
491c6fd2807SJeff Garzik 
492242f9dcbSJens Axboe 	ret = BLK_EH_HANDLED;
493c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4949af5c9c9STejun Heo 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
495c6fd2807SJeff Garzik 	if (qc) {
496c6fd2807SJeff Garzik 		WARN_ON(qc->scsicmd != cmd);
497c6fd2807SJeff Garzik 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
498c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_TIMEOUT;
499242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
500c6fd2807SJeff Garzik 	}
501c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
502c6fd2807SJeff Garzik 
503c6fd2807SJeff Garzik  out:
504c6fd2807SJeff Garzik 	DPRINTK("EXIT, ret=%d\n", ret);
505c6fd2807SJeff Garzik 	return ret;
506c6fd2807SJeff Garzik }
507c6fd2807SJeff Garzik 
508ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap)
509ece180d1STejun Heo {
510ece180d1STejun Heo 	struct ata_link *link;
511ece180d1STejun Heo 	struct ata_device *dev;
512ece180d1STejun Heo 	unsigned long flags;
513ece180d1STejun Heo 
514ece180d1STejun Heo 	/* Restore SControl IPM and SPD for the next driver and
515ece180d1STejun Heo 	 * disable attached devices.
516ece180d1STejun Heo 	 */
517ece180d1STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
518ece180d1STejun Heo 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
519ece180d1STejun Heo 		ata_for_each_dev(dev, link, ALL)
520ece180d1STejun Heo 			ata_dev_disable(dev);
521ece180d1STejun Heo 	}
522ece180d1STejun Heo 
523ece180d1STejun Heo 	/* freeze and set UNLOADED */
524ece180d1STejun Heo 	spin_lock_irqsave(ap->lock, flags);
525ece180d1STejun Heo 
526ece180d1STejun Heo 	ata_port_freeze(ap);			/* won't be thawed */
527ece180d1STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
528ece180d1STejun Heo 	ap->pflags |= ATA_PFLAG_UNLOADED;
529ece180d1STejun Heo 
530ece180d1STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
531ece180d1STejun Heo }
532ece180d1STejun Heo 
533c6fd2807SJeff Garzik /**
534c6fd2807SJeff Garzik  *	ata_scsi_error - SCSI layer error handler callback
535c6fd2807SJeff Garzik  *	@host: SCSI host on which error occurred
536c6fd2807SJeff Garzik  *
537c6fd2807SJeff Garzik  *	Handles SCSI-layer-thrown error events.
538c6fd2807SJeff Garzik  *
539c6fd2807SJeff Garzik  *	LOCKING:
540c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
541c6fd2807SJeff Garzik  *
542c6fd2807SJeff Garzik  *	RETURNS:
543c6fd2807SJeff Garzik  *	Zero.
544c6fd2807SJeff Garzik  */
545c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
546c6fd2807SJeff Garzik {
547c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
548a1e10f7eSTejun Heo 	int i;
549c6fd2807SJeff Garzik 	unsigned long flags;
550c6fd2807SJeff Garzik 
551c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
552c6fd2807SJeff Garzik 
553c6fd2807SJeff Garzik 	/* synchronize with port task */
554c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
555c6fd2807SJeff Garzik 
556cca3974eSJeff Garzik 	/* synchronize with host lock and sort out timeouts */
557c6fd2807SJeff Garzik 
558c6fd2807SJeff Garzik 	/* For new EH, all qcs are finished in one of three ways -
559c6fd2807SJeff Garzik 	 * normal completion, error completion, and SCSI timeout.
560c96f1732SAlan Cox 	 * Both completions can race against SCSI timeout.  When normal
561c6fd2807SJeff Garzik 	 * completion wins, the qc never reaches EH.  When error
562c6fd2807SJeff Garzik 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
563c6fd2807SJeff Garzik 	 *
564c6fd2807SJeff Garzik 	 * When SCSI timeout wins, things are a bit more complex.
565c6fd2807SJeff Garzik 	 * Normal or error completion can occur after the timeout but
566c6fd2807SJeff Garzik 	 * before this point.  In such cases, both types of
567c6fd2807SJeff Garzik 	 * completions are honored.  A scmd is determined to have
568c6fd2807SJeff Garzik 	 * timed out iff its associated qc is active and not failed.
569c6fd2807SJeff Garzik 	 */
570c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
571c6fd2807SJeff Garzik 		struct scsi_cmnd *scmd, *tmp;
572c6fd2807SJeff Garzik 		int nr_timedout = 0;
573c6fd2807SJeff Garzik 
574c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
575c6fd2807SJeff Garzik 
576c96f1732SAlan Cox 		/* This must occur under the ap->lock as we don't want
577c96f1732SAlan Cox 		   a polled recovery to race the real interrupt handler
578c96f1732SAlan Cox 
579c96f1732SAlan Cox 		   The lost_interrupt handler checks for any completed but
580c96f1732SAlan Cox 		   non-notified command and completes much like an IRQ handler.
581c96f1732SAlan Cox 
582c96f1732SAlan Cox 		   We then fall into the error recovery code which will treat
583c96f1732SAlan Cox 		   this as if normal completion won the race */
584c96f1732SAlan Cox 
585c96f1732SAlan Cox 		if (ap->ops->lost_interrupt)
586c96f1732SAlan Cox 			ap->ops->lost_interrupt(ap);
587c96f1732SAlan Cox 
588c6fd2807SJeff Garzik 		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
589c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
590c6fd2807SJeff Garzik 
591c6fd2807SJeff Garzik 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
592c6fd2807SJeff Garzik 				qc = __ata_qc_from_tag(ap, i);
593c6fd2807SJeff Garzik 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
594c6fd2807SJeff Garzik 				    qc->scsicmd == scmd)
595c6fd2807SJeff Garzik 					break;
596c6fd2807SJeff Garzik 			}
597c6fd2807SJeff Garzik 
598c6fd2807SJeff Garzik 			if (i < ATA_MAX_QUEUE) {
599c6fd2807SJeff Garzik 				/* the scmd has an associated qc */
600c6fd2807SJeff Garzik 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
601c6fd2807SJeff Garzik 					/* which hasn't failed yet, timeout */
602c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_TIMEOUT;
603c6fd2807SJeff Garzik 					qc->flags |= ATA_QCFLAG_FAILED;
604c6fd2807SJeff Garzik 					nr_timedout++;
605c6fd2807SJeff Garzik 				}
606c6fd2807SJeff Garzik 			} else {
607c6fd2807SJeff Garzik 				/* Normal completion occurred after
608c6fd2807SJeff Garzik 				 * SCSI timeout but before this point.
609c6fd2807SJeff Garzik 				 * Successfully complete it.
610c6fd2807SJeff Garzik 				 */
611c6fd2807SJeff Garzik 				scmd->retries = scmd->allowed;
612c6fd2807SJeff Garzik 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
613c6fd2807SJeff Garzik 			}
614c6fd2807SJeff Garzik 		}
615c6fd2807SJeff Garzik 
616c6fd2807SJeff Garzik 		/* If we have timed out qcs.  They belong to EH from
617c6fd2807SJeff Garzik 		 * this point but the state of the controller is
618c6fd2807SJeff Garzik 		 * unknown.  Freeze the port to make sure the IRQ
619c6fd2807SJeff Garzik 		 * handler doesn't diddle with those qcs.  This must
620c6fd2807SJeff Garzik 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
621c6fd2807SJeff Garzik 		 */
622c6fd2807SJeff Garzik 		if (nr_timedout)
623c6fd2807SJeff Garzik 			__ata_port_freeze(ap);
624c6fd2807SJeff Garzik 
625c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
626a1e10f7eSTejun Heo 
627a1e10f7eSTejun Heo 		/* initialize eh_tries */
628a1e10f7eSTejun Heo 		ap->eh_tries = ATA_EH_MAX_TRIES;
629c6fd2807SJeff Garzik 	} else
630c6fd2807SJeff Garzik 		spin_unlock_wait(ap->lock);
631c6fd2807SJeff Garzik 
632c96f1732SAlan Cox 	/* If we timed raced normal completion and there is nothing to
633c96f1732SAlan Cox 	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
634c96f1732SAlan Cox 
635c6fd2807SJeff Garzik  repeat:
636c6fd2807SJeff Garzik 	/* invoke error handler */
637c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
638cf1b86c8STejun Heo 		struct ata_link *link;
639cf1b86c8STejun Heo 
6405ddf24c5STejun Heo 		/* kill fast drain timer */
6415ddf24c5STejun Heo 		del_timer_sync(&ap->fastdrain_timer);
6425ddf24c5STejun Heo 
643c6fd2807SJeff Garzik 		/* process port resume request */
644c6fd2807SJeff Garzik 		ata_eh_handle_port_resume(ap);
645c6fd2807SJeff Garzik 
646c6fd2807SJeff Garzik 		/* fetch & clear EH info */
647c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
648c6fd2807SJeff Garzik 
6491eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST) {
65000115e0fSTejun Heo 			struct ata_eh_context *ehc = &link->eh_context;
65100115e0fSTejun Heo 			struct ata_device *dev;
65200115e0fSTejun Heo 
653cf1b86c8STejun Heo 			memset(&link->eh_context, 0, sizeof(link->eh_context));
654cf1b86c8STejun Heo 			link->eh_context.i = link->eh_info;
655cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
65600115e0fSTejun Heo 
6571eca4365STejun Heo 			ata_for_each_dev(dev, link, ENABLED) {
65800115e0fSTejun Heo 				int devno = dev->devno;
65900115e0fSTejun Heo 
66000115e0fSTejun Heo 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
66100115e0fSTejun Heo 				if (ata_ncq_enabled(dev))
66200115e0fSTejun Heo 					ehc->saved_ncq_enabled |= 1 << devno;
66300115e0fSTejun Heo 			}
664cf1b86c8STejun Heo 		}
665c6fd2807SJeff Garzik 
666c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
667c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
668da917d69STejun Heo 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
669c6fd2807SJeff Garzik 
670c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
671c6fd2807SJeff Garzik 
672c6fd2807SJeff Garzik 		/* invoke EH, skip if unloading or suspended */
673c6fd2807SJeff Garzik 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
674c6fd2807SJeff Garzik 			ap->ops->error_handler(ap);
675ece180d1STejun Heo 		else {
676ece180d1STejun Heo 			/* if unloading, commence suicide */
677ece180d1STejun Heo 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
678ece180d1STejun Heo 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
679ece180d1STejun Heo 				ata_eh_unload(ap);
680c6fd2807SJeff Garzik 			ata_eh_finish(ap);
681ece180d1STejun Heo 		}
682c6fd2807SJeff Garzik 
683c6fd2807SJeff Garzik 		/* process port suspend request */
684c6fd2807SJeff Garzik 		ata_eh_handle_port_suspend(ap);
685c6fd2807SJeff Garzik 
686c6fd2807SJeff Garzik 		/* Exception might have happend after ->error_handler
687c6fd2807SJeff Garzik 		 * recovered the port but before this point.  Repeat
688c6fd2807SJeff Garzik 		 * EH in such case.
689c6fd2807SJeff Garzik 		 */
690c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
691c6fd2807SJeff Garzik 
692c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
693a1e10f7eSTejun Heo 			if (--ap->eh_tries) {
694c6fd2807SJeff Garzik 				spin_unlock_irqrestore(ap->lock, flags);
695c6fd2807SJeff Garzik 				goto repeat;
696c6fd2807SJeff Garzik 			}
697c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
698a1e10f7eSTejun Heo 					"tries, giving up\n", ATA_EH_MAX_TRIES);
699914616a3STejun Heo 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
700c6fd2807SJeff Garzik 		}
701c6fd2807SJeff Garzik 
702c6fd2807SJeff Garzik 		/* this run is complete, make sure EH info is clear */
7031eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST)
704cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
705c6fd2807SJeff Garzik 
706c6fd2807SJeff Garzik 		/* Clear host_eh_scheduled while holding ap->lock such
707c6fd2807SJeff Garzik 		 * that if exception occurs after this point but
708c6fd2807SJeff Garzik 		 * before EH completion, SCSI midlayer will
709c6fd2807SJeff Garzik 		 * re-initiate EH.
710c6fd2807SJeff Garzik 		 */
711c6fd2807SJeff Garzik 		host->host_eh_scheduled = 0;
712c6fd2807SJeff Garzik 
713c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
714c6fd2807SJeff Garzik 	} else {
7159af5c9c9STejun Heo 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
716c6fd2807SJeff Garzik 		ap->ops->eng_timeout(ap);
717c6fd2807SJeff Garzik 	}
718c6fd2807SJeff Garzik 
719c6fd2807SJeff Garzik 	/* finish or retry handled scmd's and clean up */
720c6fd2807SJeff Garzik 	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
721c6fd2807SJeff Garzik 
722c6fd2807SJeff Garzik 	scsi_eh_flush_done_q(&ap->eh_done_q);
723c6fd2807SJeff Garzik 
724c6fd2807SJeff Garzik 	/* clean up */
725c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
726c6fd2807SJeff Garzik 
727c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_LOADING)
728c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_LOADING;
729c6fd2807SJeff Garzik 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
73052bad64dSDavid Howells 		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
731c6fd2807SJeff Garzik 
732c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_RECOVERED)
733c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
734c6fd2807SJeff Garzik 
735c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
736c6fd2807SJeff Garzik 
737c6fd2807SJeff Garzik 	/* tell wait_eh that we're done */
738c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
739c6fd2807SJeff Garzik 	wake_up_all(&ap->eh_wait_q);
740c6fd2807SJeff Garzik 
741c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
742c6fd2807SJeff Garzik 
743c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
744c6fd2807SJeff Garzik }
745c6fd2807SJeff Garzik 
746c6fd2807SJeff Garzik /**
747c6fd2807SJeff Garzik  *	ata_port_wait_eh - Wait for the currently pending EH to complete
748c6fd2807SJeff Garzik  *	@ap: Port to wait EH for
749c6fd2807SJeff Garzik  *
750c6fd2807SJeff Garzik  *	Wait until the currently pending EH is complete.
751c6fd2807SJeff Garzik  *
752c6fd2807SJeff Garzik  *	LOCKING:
753c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
754c6fd2807SJeff Garzik  */
755c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap)
756c6fd2807SJeff Garzik {
757c6fd2807SJeff Garzik 	unsigned long flags;
758c6fd2807SJeff Garzik 	DEFINE_WAIT(wait);
759c6fd2807SJeff Garzik 
760c6fd2807SJeff Garzik  retry:
761c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
762c6fd2807SJeff Garzik 
763c6fd2807SJeff Garzik 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
764c6fd2807SJeff Garzik 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
765c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
766c6fd2807SJeff Garzik 		schedule();
767c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
768c6fd2807SJeff Garzik 	}
769c6fd2807SJeff Garzik 	finish_wait(&ap->eh_wait_q, &wait);
770c6fd2807SJeff Garzik 
771c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
772c6fd2807SJeff Garzik 
773c6fd2807SJeff Garzik 	/* make sure SCSI EH is complete */
774cca3974eSJeff Garzik 	if (scsi_host_in_recovery(ap->scsi_host)) {
775c6fd2807SJeff Garzik 		msleep(10);
776c6fd2807SJeff Garzik 		goto retry;
777c6fd2807SJeff Garzik 	}
778c6fd2807SJeff Garzik }
779c6fd2807SJeff Garzik 
7805ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap)
7815ddf24c5STejun Heo {
7825ddf24c5STejun Heo 	unsigned int tag;
7835ddf24c5STejun Heo 	int nr = 0;
7845ddf24c5STejun Heo 
7855ddf24c5STejun Heo 	/* count only non-internal commands */
7865ddf24c5STejun Heo 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
7875ddf24c5STejun Heo 		if (ata_qc_from_tag(ap, tag))
7885ddf24c5STejun Heo 			nr++;
7895ddf24c5STejun Heo 
7905ddf24c5STejun Heo 	return nr;
7915ddf24c5STejun Heo }
7925ddf24c5STejun Heo 
7935ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg)
7945ddf24c5STejun Heo {
7955ddf24c5STejun Heo 	struct ata_port *ap = (void *)arg;
7965ddf24c5STejun Heo 	unsigned long flags;
7975ddf24c5STejun Heo 	int cnt;
7985ddf24c5STejun Heo 
7995ddf24c5STejun Heo 	spin_lock_irqsave(ap->lock, flags);
8005ddf24c5STejun Heo 
8015ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8025ddf24c5STejun Heo 
8035ddf24c5STejun Heo 	/* are we done? */
8045ddf24c5STejun Heo 	if (!cnt)
8055ddf24c5STejun Heo 		goto out_unlock;
8065ddf24c5STejun Heo 
8075ddf24c5STejun Heo 	if (cnt == ap->fastdrain_cnt) {
8085ddf24c5STejun Heo 		unsigned int tag;
8095ddf24c5STejun Heo 
8105ddf24c5STejun Heo 		/* No progress during the last interval, tag all
8115ddf24c5STejun Heo 		 * in-flight qcs as timed out and freeze the port.
8125ddf24c5STejun Heo 		 */
8135ddf24c5STejun Heo 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
8145ddf24c5STejun Heo 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
8155ddf24c5STejun Heo 			if (qc)
8165ddf24c5STejun Heo 				qc->err_mask |= AC_ERR_TIMEOUT;
8175ddf24c5STejun Heo 		}
8185ddf24c5STejun Heo 
8195ddf24c5STejun Heo 		ata_port_freeze(ap);
8205ddf24c5STejun Heo 	} else {
8215ddf24c5STejun Heo 		/* some qcs have finished, give it another chance */
8225ddf24c5STejun Heo 		ap->fastdrain_cnt = cnt;
8235ddf24c5STejun Heo 		ap->fastdrain_timer.expires =
824341c2c95STejun Heo 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
8255ddf24c5STejun Heo 		add_timer(&ap->fastdrain_timer);
8265ddf24c5STejun Heo 	}
8275ddf24c5STejun Heo 
8285ddf24c5STejun Heo  out_unlock:
8295ddf24c5STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
8305ddf24c5STejun Heo }
8315ddf24c5STejun Heo 
8325ddf24c5STejun Heo /**
8335ddf24c5STejun Heo  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
8345ddf24c5STejun Heo  *	@ap: target ATA port
8355ddf24c5STejun Heo  *	@fastdrain: activate fast drain
8365ddf24c5STejun Heo  *
8375ddf24c5STejun Heo  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
8385ddf24c5STejun Heo  *	is non-zero and EH wasn't pending before.  Fast drain ensures
8395ddf24c5STejun Heo  *	that EH kicks in in timely manner.
8405ddf24c5STejun Heo  *
8415ddf24c5STejun Heo  *	LOCKING:
8425ddf24c5STejun Heo  *	spin_lock_irqsave(host lock)
8435ddf24c5STejun Heo  */
8445ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
8455ddf24c5STejun Heo {
8465ddf24c5STejun Heo 	int cnt;
8475ddf24c5STejun Heo 
8485ddf24c5STejun Heo 	/* already scheduled? */
8495ddf24c5STejun Heo 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
8505ddf24c5STejun Heo 		return;
8515ddf24c5STejun Heo 
8525ddf24c5STejun Heo 	ap->pflags |= ATA_PFLAG_EH_PENDING;
8535ddf24c5STejun Heo 
8545ddf24c5STejun Heo 	if (!fastdrain)
8555ddf24c5STejun Heo 		return;
8565ddf24c5STejun Heo 
8575ddf24c5STejun Heo 	/* do we have in-flight qcs? */
8585ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8595ddf24c5STejun Heo 	if (!cnt)
8605ddf24c5STejun Heo 		return;
8615ddf24c5STejun Heo 
8625ddf24c5STejun Heo 	/* activate fast drain */
8635ddf24c5STejun Heo 	ap->fastdrain_cnt = cnt;
864341c2c95STejun Heo 	ap->fastdrain_timer.expires =
865341c2c95STejun Heo 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
8665ddf24c5STejun Heo 	add_timer(&ap->fastdrain_timer);
8675ddf24c5STejun Heo }
8685ddf24c5STejun Heo 
869c6fd2807SJeff Garzik /**
870c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
871c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
872c6fd2807SJeff Garzik  *
873c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
874c6fd2807SJeff Garzik  *	other commands are drained.
875c6fd2807SJeff Garzik  *
876c6fd2807SJeff Garzik  *	LOCKING:
877cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
878c6fd2807SJeff Garzik  */
879c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
880c6fd2807SJeff Garzik {
881c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
882fa41efdaSTejun Heo 	struct request_queue *q = qc->scsicmd->device->request_queue;
883fa41efdaSTejun Heo 	unsigned long flags;
884c6fd2807SJeff Garzik 
885c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
886c6fd2807SJeff Garzik 
887c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_FAILED;
8885ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
889c6fd2807SJeff Garzik 
890c6fd2807SJeff Garzik 	/* The following will fail if timeout has already expired.
891c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
892c6fd2807SJeff Garzik 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
893c6fd2807SJeff Garzik 	 * this function completes.
894c6fd2807SJeff Garzik 	 */
895fa41efdaSTejun Heo 	spin_lock_irqsave(q->queue_lock, flags);
896242f9dcbSJens Axboe 	blk_abort_request(qc->scsicmd->request);
897fa41efdaSTejun Heo 	spin_unlock_irqrestore(q->queue_lock, flags);
898c6fd2807SJeff Garzik }
899c6fd2807SJeff Garzik 
900c6fd2807SJeff Garzik /**
901c6fd2807SJeff Garzik  *	ata_port_schedule_eh - schedule error handling without a qc
902c6fd2807SJeff Garzik  *	@ap: ATA port to schedule EH for
903c6fd2807SJeff Garzik  *
904c6fd2807SJeff Garzik  *	Schedule error handling for @ap.  EH will kick in as soon as
905c6fd2807SJeff Garzik  *	all commands are drained.
906c6fd2807SJeff Garzik  *
907c6fd2807SJeff Garzik  *	LOCKING:
908cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
909c6fd2807SJeff Garzik  */
910c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap)
911c6fd2807SJeff Garzik {
912c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
913c6fd2807SJeff Garzik 
914f4d6d004STejun Heo 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
915f4d6d004STejun Heo 		return;
916f4d6d004STejun Heo 
9175ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
918cca3974eSJeff Garzik 	scsi_schedule_eh(ap->scsi_host);
919c6fd2807SJeff Garzik 
920c6fd2807SJeff Garzik 	DPRINTK("port EH scheduled\n");
921c6fd2807SJeff Garzik }
922c6fd2807SJeff Garzik 
923dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
924c6fd2807SJeff Garzik {
925c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
926c6fd2807SJeff Garzik 
927c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
928c6fd2807SJeff Garzik 
9295ddf24c5STejun Heo 	/* we're gonna abort all commands, no need for fast drain */
9305ddf24c5STejun Heo 	ata_eh_set_pending(ap, 0);
9315ddf24c5STejun Heo 
932c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
933c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
934c6fd2807SJeff Garzik 
935dbd82616STejun Heo 		if (qc && (!link || qc->dev->link == link)) {
936c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
937c6fd2807SJeff Garzik 			ata_qc_complete(qc);
938c6fd2807SJeff Garzik 			nr_aborted++;
939c6fd2807SJeff Garzik 		}
940c6fd2807SJeff Garzik 	}
941c6fd2807SJeff Garzik 
942c6fd2807SJeff Garzik 	if (!nr_aborted)
943c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
944c6fd2807SJeff Garzik 
945c6fd2807SJeff Garzik 	return nr_aborted;
946c6fd2807SJeff Garzik }
947c6fd2807SJeff Garzik 
948c6fd2807SJeff Garzik /**
949dbd82616STejun Heo  *	ata_link_abort - abort all qc's on the link
950dbd82616STejun Heo  *	@link: ATA link to abort qc's for
951dbd82616STejun Heo  *
952dbd82616STejun Heo  *	Abort all active qc's active on @link and schedule EH.
953dbd82616STejun Heo  *
954dbd82616STejun Heo  *	LOCKING:
955dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
956dbd82616STejun Heo  *
957dbd82616STejun Heo  *	RETURNS:
958dbd82616STejun Heo  *	Number of aborted qc's.
959dbd82616STejun Heo  */
960dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
961dbd82616STejun Heo {
962dbd82616STejun Heo 	return ata_do_link_abort(link->ap, link);
963dbd82616STejun Heo }
964dbd82616STejun Heo 
965dbd82616STejun Heo /**
966dbd82616STejun Heo  *	ata_port_abort - abort all qc's on the port
967dbd82616STejun Heo  *	@ap: ATA port to abort qc's for
968dbd82616STejun Heo  *
969dbd82616STejun Heo  *	Abort all active qc's of @ap and schedule EH.
970dbd82616STejun Heo  *
971dbd82616STejun Heo  *	LOCKING:
972dbd82616STejun Heo  *	spin_lock_irqsave(host_set lock)
973dbd82616STejun Heo  *
974dbd82616STejun Heo  *	RETURNS:
975dbd82616STejun Heo  *	Number of aborted qc's.
976dbd82616STejun Heo  */
977dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
978dbd82616STejun Heo {
979dbd82616STejun Heo 	return ata_do_link_abort(ap, NULL);
980dbd82616STejun Heo }
981dbd82616STejun Heo 
982dbd82616STejun Heo /**
983c6fd2807SJeff Garzik  *	__ata_port_freeze - freeze port
984c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
985c6fd2807SJeff Garzik  *
986c6fd2807SJeff Garzik  *	This function is called when HSM violation or some other
987c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  Frozen port
988c6fd2807SJeff Garzik  *	is not allowed to perform any operation until the port is
989c6fd2807SJeff Garzik  *	thawed, which usually follows a successful reset.
990c6fd2807SJeff Garzik  *
991c6fd2807SJeff Garzik  *	ap->ops->freeze() callback can be used for freezing the port
992c6fd2807SJeff Garzik  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
993c6fd2807SJeff Garzik  *	port cannot be frozen hardware-wise, the interrupt handler
994c6fd2807SJeff Garzik  *	must ack and clear interrupts unconditionally while the port
995c6fd2807SJeff Garzik  *	is frozen.
996c6fd2807SJeff Garzik  *
997c6fd2807SJeff Garzik  *	LOCKING:
998cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
999c6fd2807SJeff Garzik  */
1000c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
1001c6fd2807SJeff Garzik {
1002c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1003c6fd2807SJeff Garzik 
1004c6fd2807SJeff Garzik 	if (ap->ops->freeze)
1005c6fd2807SJeff Garzik 		ap->ops->freeze(ap);
1006c6fd2807SJeff Garzik 
1007c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FROZEN;
1008c6fd2807SJeff Garzik 
100944877b4eSTejun Heo 	DPRINTK("ata%u port frozen\n", ap->print_id);
1010c6fd2807SJeff Garzik }
1011c6fd2807SJeff Garzik 
1012c6fd2807SJeff Garzik /**
1013c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
1014c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1015c6fd2807SJeff Garzik  *
101654c38444SJeff Garzik  *	Abort and freeze @ap.  The freeze operation must be called
101754c38444SJeff Garzik  *	first, because some hardware requires special operations
101854c38444SJeff Garzik  *	before the taskfile registers are accessible.
1019c6fd2807SJeff Garzik  *
1020c6fd2807SJeff Garzik  *	LOCKING:
1021cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1022c6fd2807SJeff Garzik  *
1023c6fd2807SJeff Garzik  *	RETURNS:
1024c6fd2807SJeff Garzik  *	Number of aborted commands.
1025c6fd2807SJeff Garzik  */
1026c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1027c6fd2807SJeff Garzik {
1028c6fd2807SJeff Garzik 	int nr_aborted;
1029c6fd2807SJeff Garzik 
1030c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1031c6fd2807SJeff Garzik 
1032c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
103354c38444SJeff Garzik 	nr_aborted = ata_port_abort(ap);
1034c6fd2807SJeff Garzik 
1035c6fd2807SJeff Garzik 	return nr_aborted;
1036c6fd2807SJeff Garzik }
1037c6fd2807SJeff Garzik 
1038c6fd2807SJeff Garzik /**
10397d77b247STejun Heo  *	sata_async_notification - SATA async notification handler
10407d77b247STejun Heo  *	@ap: ATA port where async notification is received
10417d77b247STejun Heo  *
10427d77b247STejun Heo  *	Handler to be called when async notification via SDB FIS is
10437d77b247STejun Heo  *	received.  This function schedules EH if necessary.
10447d77b247STejun Heo  *
10457d77b247STejun Heo  *	LOCKING:
10467d77b247STejun Heo  *	spin_lock_irqsave(host lock)
10477d77b247STejun Heo  *
10487d77b247STejun Heo  *	RETURNS:
10497d77b247STejun Heo  *	1 if EH is scheduled, 0 otherwise.
10507d77b247STejun Heo  */
10517d77b247STejun Heo int sata_async_notification(struct ata_port *ap)
10527d77b247STejun Heo {
10537d77b247STejun Heo 	u32 sntf;
10547d77b247STejun Heo 	int rc;
10557d77b247STejun Heo 
10567d77b247STejun Heo 	if (!(ap->flags & ATA_FLAG_AN))
10577d77b247STejun Heo 		return 0;
10587d77b247STejun Heo 
10597d77b247STejun Heo 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
10607d77b247STejun Heo 	if (rc == 0)
10617d77b247STejun Heo 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
10627d77b247STejun Heo 
1063071f44b1STejun Heo 	if (!sata_pmp_attached(ap) || rc) {
10647d77b247STejun Heo 		/* PMP is not attached or SNTF is not available */
1065071f44b1STejun Heo 		if (!sata_pmp_attached(ap)) {
10667d77b247STejun Heo 			/* PMP is not attached.  Check whether ATAPI
10677d77b247STejun Heo 			 * AN is configured.  If so, notify media
10687d77b247STejun Heo 			 * change.
10697d77b247STejun Heo 			 */
10707d77b247STejun Heo 			struct ata_device *dev = ap->link.device;
10717d77b247STejun Heo 
10727d77b247STejun Heo 			if ((dev->class == ATA_DEV_ATAPI) &&
10737d77b247STejun Heo 			    (dev->flags & ATA_DFLAG_AN))
10747d77b247STejun Heo 				ata_scsi_media_change_notify(dev);
10757d77b247STejun Heo 			return 0;
10767d77b247STejun Heo 		} else {
10777d77b247STejun Heo 			/* PMP is attached but SNTF is not available.
10787d77b247STejun Heo 			 * ATAPI async media change notification is
10797d77b247STejun Heo 			 * not used.  The PMP must be reporting PHY
10807d77b247STejun Heo 			 * status change, schedule EH.
10817d77b247STejun Heo 			 */
10827d77b247STejun Heo 			ata_port_schedule_eh(ap);
10837d77b247STejun Heo 			return 1;
10847d77b247STejun Heo 		}
10857d77b247STejun Heo 	} else {
10867d77b247STejun Heo 		/* PMP is attached and SNTF is available */
10877d77b247STejun Heo 		struct ata_link *link;
10887d77b247STejun Heo 
10897d77b247STejun Heo 		/* check and notify ATAPI AN */
10901eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
10917d77b247STejun Heo 			if (!(sntf & (1 << link->pmp)))
10927d77b247STejun Heo 				continue;
10937d77b247STejun Heo 
10947d77b247STejun Heo 			if ((link->device->class == ATA_DEV_ATAPI) &&
10957d77b247STejun Heo 			    (link->device->flags & ATA_DFLAG_AN))
10967d77b247STejun Heo 				ata_scsi_media_change_notify(link->device);
10977d77b247STejun Heo 		}
10987d77b247STejun Heo 
10997d77b247STejun Heo 		/* If PMP is reporting that PHY status of some
11007d77b247STejun Heo 		 * downstream ports has changed, schedule EH.
11017d77b247STejun Heo 		 */
11027d77b247STejun Heo 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
11037d77b247STejun Heo 			ata_port_schedule_eh(ap);
11047d77b247STejun Heo 			return 1;
11057d77b247STejun Heo 		}
11067d77b247STejun Heo 
11077d77b247STejun Heo 		return 0;
11087d77b247STejun Heo 	}
11097d77b247STejun Heo }
11107d77b247STejun Heo 
11117d77b247STejun Heo /**
1112c6fd2807SJeff Garzik  *	ata_eh_freeze_port - EH helper to freeze port
1113c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1114c6fd2807SJeff Garzik  *
1115c6fd2807SJeff Garzik  *	Freeze @ap.
1116c6fd2807SJeff Garzik  *
1117c6fd2807SJeff Garzik  *	LOCKING:
1118c6fd2807SJeff Garzik  *	None.
1119c6fd2807SJeff Garzik  */
1120c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1121c6fd2807SJeff Garzik {
1122c6fd2807SJeff Garzik 	unsigned long flags;
1123c6fd2807SJeff Garzik 
1124c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1125c6fd2807SJeff Garzik 		return;
1126c6fd2807SJeff Garzik 
1127c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1128c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1129c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1130c6fd2807SJeff Garzik }
1131c6fd2807SJeff Garzik 
1132c6fd2807SJeff Garzik /**
1133c6fd2807SJeff Garzik  *	ata_port_thaw_port - EH helper to thaw port
1134c6fd2807SJeff Garzik  *	@ap: ATA port to thaw
1135c6fd2807SJeff Garzik  *
1136c6fd2807SJeff Garzik  *	Thaw frozen port @ap.
1137c6fd2807SJeff Garzik  *
1138c6fd2807SJeff Garzik  *	LOCKING:
1139c6fd2807SJeff Garzik  *	None.
1140c6fd2807SJeff Garzik  */
1141c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1142c6fd2807SJeff Garzik {
1143c6fd2807SJeff Garzik 	unsigned long flags;
1144c6fd2807SJeff Garzik 
1145c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1146c6fd2807SJeff Garzik 		return;
1147c6fd2807SJeff Garzik 
1148c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1149c6fd2807SJeff Garzik 
1150c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1151c6fd2807SJeff Garzik 
1152c6fd2807SJeff Garzik 	if (ap->ops->thaw)
1153c6fd2807SJeff Garzik 		ap->ops->thaw(ap);
1154c6fd2807SJeff Garzik 
1155c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1156c6fd2807SJeff Garzik 
115744877b4eSTejun Heo 	DPRINTK("ata%u port thawed\n", ap->print_id);
1158c6fd2807SJeff Garzik }
1159c6fd2807SJeff Garzik 
1160c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1161c6fd2807SJeff Garzik {
1162c6fd2807SJeff Garzik 	/* nada */
1163c6fd2807SJeff Garzik }
1164c6fd2807SJeff Garzik 
1165c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1166c6fd2807SJeff Garzik {
1167c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1168c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1169c6fd2807SJeff Garzik 	unsigned long flags;
1170c6fd2807SJeff Garzik 
1171c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1172c6fd2807SJeff Garzik 	qc->scsidone = ata_eh_scsidone;
1173c6fd2807SJeff Garzik 	__ata_qc_complete(qc);
1174c6fd2807SJeff Garzik 	WARN_ON(ata_tag_valid(qc->tag));
1175c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1176c6fd2807SJeff Garzik 
1177c6fd2807SJeff Garzik 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1178c6fd2807SJeff Garzik }
1179c6fd2807SJeff Garzik 
1180c6fd2807SJeff Garzik /**
1181c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
1182c6fd2807SJeff Garzik  *	@qc: Command to complete
1183c6fd2807SJeff Garzik  *
1184c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
1185c6fd2807SJeff Garzik  *	completed.  To be used from EH.
1186c6fd2807SJeff Garzik  */
1187c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1188c6fd2807SJeff Garzik {
1189c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1190c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
1191c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1192c6fd2807SJeff Garzik }
1193c6fd2807SJeff Garzik 
1194c6fd2807SJeff Garzik /**
1195c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1196c6fd2807SJeff Garzik  *	@qc: Command to retry
1197c6fd2807SJeff Garzik  *
1198c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
1199c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
1200c6fd2807SJeff Garzik  *
1201c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
1202c6fd2807SJeff Garzik  *	scmd->retries is decremented for commands which get retried
1203c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
1204c6fd2807SJeff Garzik  */
1205c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1206c6fd2807SJeff Garzik {
1207c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1208c6fd2807SJeff Garzik 	if (!qc->err_mask && scmd->retries)
1209c6fd2807SJeff Garzik 		scmd->retries--;
1210c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1211c6fd2807SJeff Garzik }
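
/*
 * A minimal sketch of how these two helpers are typically paired once
 * recovery is over: commands that failed in their own right are completed,
 * commands that were only caught up in another command's failure (their
 * err_mask is clear) are retried.  example_*() is a made-up name; the
 * real version, ata_eh_finish() later in this file, handles a few more
 * details such as valid sense data.
 */
#if 0
static void example_finish_failed_qcs(struct ata_port *ap)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			ata_eh_qc_complete(qc);	/* report the failure */
		else
			ata_eh_qc_retry(qc);	/* innocent bystander */
	}
}
#endif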
1212c6fd2807SJeff Garzik 
1213c6fd2807SJeff Garzik /**
1214678afac6STejun Heo  *	ata_dev_disable - disable ATA device
1215678afac6STejun Heo  *	@dev: ATA device to disable
1216678afac6STejun Heo  *
1217678afac6STejun Heo  *	Disable @dev.
1218678afac6STejun Heo  *
1219678afac6STejun Heo  *	Locking:
1220678afac6STejun Heo  *	EH context.
1221678afac6STejun Heo  */
1222678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1223678afac6STejun Heo {
1224678afac6STejun Heo 	if (!ata_dev_enabled(dev))
1225678afac6STejun Heo 		return;
1226678afac6STejun Heo 
1227678afac6STejun Heo 	if (ata_msg_drv(dev->link->ap))
1228678afac6STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
1229678afac6STejun Heo 	ata_acpi_on_disable(dev);
1230678afac6STejun Heo 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1231678afac6STejun Heo 	dev->class++;
123299cf610aSTejun Heo 
123399cf610aSTejun Heo 	/* From now till the next successful probe, ering is used to
123499cf610aSTejun Heo 	 * track probe failures.  Clear accumulated device error info.
123599cf610aSTejun Heo 	 */
123699cf610aSTejun Heo 	ata_ering_clear(&dev->ering);
1237678afac6STejun Heo }
1238678afac6STejun Heo 
1239678afac6STejun Heo /**
1240c6fd2807SJeff Garzik  *	ata_eh_detach_dev - detach ATA device
1241c6fd2807SJeff Garzik  *	@dev: ATA device to detach
1242c6fd2807SJeff Garzik  *
1243c6fd2807SJeff Garzik  *	Detach @dev.
1244c6fd2807SJeff Garzik  *
1245c6fd2807SJeff Garzik  *	LOCKING:
1246c6fd2807SJeff Garzik  *	None.
1247c6fd2807SJeff Garzik  */
1248fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1249c6fd2807SJeff Garzik {
1250f58229f8STejun Heo 	struct ata_link *link = dev->link;
1251f58229f8STejun Heo 	struct ata_port *ap = link->ap;
125290484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1253c6fd2807SJeff Garzik 	unsigned long flags;
1254c6fd2807SJeff Garzik 
1255c6fd2807SJeff Garzik 	ata_dev_disable(dev);
1256c6fd2807SJeff Garzik 
1257c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1258c6fd2807SJeff Garzik 
1259c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1260c6fd2807SJeff Garzik 
1261c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1262c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1263c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1264c6fd2807SJeff Garzik 	}
1265c6fd2807SJeff Garzik 
126690484ebfSTejun Heo 	/* clear per-dev EH info */
1267f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1268f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
126990484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
127090484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1271c6fd2807SJeff Garzik 
1272c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1273c6fd2807SJeff Garzik }
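
/*
 * A minimal sketch of how a device gets marked for detach in the first
 * place: a hotplug or platform path sets ATA_DFLAG_DETACH under ap->lock
 * and schedules EH, and EH later calls ata_eh_detach_dev() above while
 * recovering.  example_*() is a made-up name for illustration.
 */
#if 0
static void example_request_detach(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	dev->flags |= ATA_DFLAG_DETACH;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_schedule_eh(ap);
}
#endif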
1274c6fd2807SJeff Garzik 
1275c6fd2807SJeff Garzik /**
1276c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1277955e57dfSTejun Heo  *	@link: target ATA link
1278c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1279c6fd2807SJeff Garzik  *	@action: action about to be performed
1280c6fd2807SJeff Garzik  *
1281c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1282955e57dfSTejun Heo  *	in @link->eh_info such that eh actions are not unnecessarily
1283955e57dfSTejun Heo  *	repeated.
1284c6fd2807SJeff Garzik  *
1285c6fd2807SJeff Garzik  *	LOCKING:
1286c6fd2807SJeff Garzik  *	None.
1287c6fd2807SJeff Garzik  */
1288fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1289c6fd2807SJeff Garzik 			unsigned int action)
1290c6fd2807SJeff Garzik {
1291955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1292955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1293955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1294c6fd2807SJeff Garzik 	unsigned long flags;
1295c6fd2807SJeff Garzik 
1296c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1297c6fd2807SJeff Garzik 
1298955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1299c6fd2807SJeff Garzik 
1300a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED.  Ignore actions on
1301a568d1d2STejun Heo 	 * slave links as master will do them again.
1302a568d1d2STejun Heo 	 */
1303a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1304c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1305c6fd2807SJeff Garzik 
1306c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1307c6fd2807SJeff Garzik }
1308c6fd2807SJeff Garzik 
1309c6fd2807SJeff Garzik /**
1310c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
1311c6fd2807SJeff Garzik  *	@link: target ATA link
1312c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1313c6fd2807SJeff Garzik  *	@action: action just completed
1314c6fd2807SJeff Garzik  *
1315c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1316955e57dfSTejun Heo  *	in @link->eh_context.
1317c6fd2807SJeff Garzik  *
1318c6fd2807SJeff Garzik  *	LOCKING:
1319c6fd2807SJeff Garzik  *	None.
1320c6fd2807SJeff Garzik  */
1321fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1322c6fd2807SJeff Garzik 		 unsigned int action)
1323c6fd2807SJeff Garzik {
1324955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
13259af5c9c9STejun Heo 
1326955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, &ehc->i, action);
1327c6fd2807SJeff Garzik }
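
/*
 * A minimal sketch of how ata_eh_about_to_do() and ata_eh_done() bracket
 * an EH action: the first clears the pending action from @link->eh_info
 * so it is not scheduled again behind EH's back, the second clears it
 * from the eh_context once it has succeeded.  example_*() is a made-up
 * name and the reset itself is elided.
 */
#if 0
static int example_perform_reset(struct ata_link *link)
{
	int rc = 0;

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	/* ... issue the actual reset here and set rc accordingly ... */

	if (rc == 0)
		ata_eh_done(link, NULL, ATA_EH_RESET);
	return rc;
}
#endif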
1328c6fd2807SJeff Garzik 
1329c6fd2807SJeff Garzik /**
1330c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
1331c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
1332c6fd2807SJeff Garzik  *
1333c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
1334c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
1335c6fd2807SJeff Garzik  *	error is reported.
1336c6fd2807SJeff Garzik  *
1337c6fd2807SJeff Garzik  *	LOCKING:
1338c6fd2807SJeff Garzik  *	None.
1339c6fd2807SJeff Garzik  *
1340c6fd2807SJeff Garzik  *	RETURNS:
1341c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
1342c6fd2807SJeff Garzik  */
1343c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1344c6fd2807SJeff Garzik {
1345c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
1346c6fd2807SJeff Garzik 		return "host bus error";
1347c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
1348c6fd2807SJeff Garzik 		return "ATA bus error";
1349c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
1350c6fd2807SJeff Garzik 		return "timeout";
1351c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
1352c6fd2807SJeff Garzik 		return "HSM violation";
1353c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
1354c6fd2807SJeff Garzik 		return "internal error";
1355c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
1356c6fd2807SJeff Garzik 		return "media error";
1357c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
1358c6fd2807SJeff Garzik 		return "invalid argument";
1359c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
1360c6fd2807SJeff Garzik 		return "device error";
1361c6fd2807SJeff Garzik 	return "unknown error";
1362c6fd2807SJeff Garzik }
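
/*
 * A minimal sketch of the typical consumer: error reporting code turning
 * a failed qc's err_mask into a one-word summary for the log.
 * example_*() is a made-up name for illustration.
 */
#if 0
static void example_log_failed_qc(struct ata_queued_cmd *qc)
{
	ata_dev_printk(qc->dev, KERN_ERR, "command 0x%02x failed (%s)\n",
		       qc->tf.command, ata_err_string(qc->err_mask));
}
#endif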
1363c6fd2807SJeff Garzik 
1364c6fd2807SJeff Garzik /**
1365c6fd2807SJeff Garzik  *	ata_read_log_page - read a specific log page
1366c6fd2807SJeff Garzik  *	@dev: target device
1367c6fd2807SJeff Garzik  *	@page: page to read
1368c6fd2807SJeff Garzik  *	@buf: buffer to store read page
1369c6fd2807SJeff Garzik  *	@sectors: number of sectors to read
1370c6fd2807SJeff Garzik  *
1371c6fd2807SJeff Garzik  *	Read log page using READ_LOG_EXT command.
1372c6fd2807SJeff Garzik  *
1373c6fd2807SJeff Garzik  *	LOCKING:
1374c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1375c6fd2807SJeff Garzik  *
1376c6fd2807SJeff Garzik  *	RETURNS:
1377c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
1378c6fd2807SJeff Garzik  */
1379c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev,
1380c6fd2807SJeff Garzik 				      u8 page, void *buf, unsigned int sectors)
1381c6fd2807SJeff Garzik {
1382c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1383c6fd2807SJeff Garzik 	unsigned int err_mask;
1384c6fd2807SJeff Garzik 
1385c6fd2807SJeff Garzik 	DPRINTK("read log page - page %d\n", page);
1386c6fd2807SJeff Garzik 
1387c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1388c6fd2807SJeff Garzik 	tf.command = ATA_CMD_READ_LOG_EXT;
1389c6fd2807SJeff Garzik 	tf.lbal = page;
1390c6fd2807SJeff Garzik 	tf.nsect = sectors;
1391c6fd2807SJeff Garzik 	tf.hob_nsect = sectors >> 8;
1392c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1393c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
1394c6fd2807SJeff Garzik 
1395c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
13962b789108STejun Heo 				     buf, sectors * ATA_SECT_SIZE, 0);
1397c6fd2807SJeff Garzik 
1398c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1399c6fd2807SJeff Garzik 	return err_mask;
1400c6fd2807SJeff Garzik }
1401c6fd2807SJeff Garzik 
1402c6fd2807SJeff Garzik /**
1403c6fd2807SJeff Garzik  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1404c6fd2807SJeff Garzik  *	@dev: Device to read log page 10h from
1405c6fd2807SJeff Garzik  *	@tag: Resulting tag of the failed command
1406c6fd2807SJeff Garzik  *	@tf: Resulting taskfile registers of the failed command
1407c6fd2807SJeff Garzik  *
1408c6fd2807SJeff Garzik  *	Read log page 10h to obtain NCQ error details and clear error
1409c6fd2807SJeff Garzik  *	condition.
1410c6fd2807SJeff Garzik  *
1411c6fd2807SJeff Garzik  *	LOCKING:
1412c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1413c6fd2807SJeff Garzik  *
1414c6fd2807SJeff Garzik  *	RETURNS:
1415c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1416c6fd2807SJeff Garzik  */
1417c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev,
1418c6fd2807SJeff Garzik 			       int *tag, struct ata_taskfile *tf)
1419c6fd2807SJeff Garzik {
14209af5c9c9STejun Heo 	u8 *buf = dev->link->ap->sector_buf;
1421c6fd2807SJeff Garzik 	unsigned int err_mask;
1422c6fd2807SJeff Garzik 	u8 csum;
1423c6fd2807SJeff Garzik 	int i;
1424c6fd2807SJeff Garzik 
1425c6fd2807SJeff Garzik 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
1426c6fd2807SJeff Garzik 	if (err_mask)
1427c6fd2807SJeff Garzik 		return -EIO;
1428c6fd2807SJeff Garzik 
1429c6fd2807SJeff Garzik 	csum = 0;
1430c6fd2807SJeff Garzik 	for (i = 0; i < ATA_SECT_SIZE; i++)
1431c6fd2807SJeff Garzik 		csum += buf[i];
1432c6fd2807SJeff Garzik 	if (csum)
1433c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
1434c6fd2807SJeff Garzik 			       "invalid checksum 0x%x on log page 10h\n", csum);
1435c6fd2807SJeff Garzik 
1436c6fd2807SJeff Garzik 	if (buf[0] & 0x80)
1437c6fd2807SJeff Garzik 		return -ENOENT;
1438c6fd2807SJeff Garzik 
1439c6fd2807SJeff Garzik 	*tag = buf[0] & 0x1f;
1440c6fd2807SJeff Garzik 
1441c6fd2807SJeff Garzik 	tf->command = buf[2];
1442c6fd2807SJeff Garzik 	tf->feature = buf[3];
1443c6fd2807SJeff Garzik 	tf->lbal = buf[4];
1444c6fd2807SJeff Garzik 	tf->lbam = buf[5];
1445c6fd2807SJeff Garzik 	tf->lbah = buf[6];
1446c6fd2807SJeff Garzik 	tf->device = buf[7];
1447c6fd2807SJeff Garzik 	tf->hob_lbal = buf[8];
1448c6fd2807SJeff Garzik 	tf->hob_lbam = buf[9];
1449c6fd2807SJeff Garzik 	tf->hob_lbah = buf[10];
1450c6fd2807SJeff Garzik 	tf->nsect = buf[12];
1451c6fd2807SJeff Garzik 	tf->hob_nsect = buf[13];
1452c6fd2807SJeff Garzik 
1453c6fd2807SJeff Garzik 	return 0;
1454c6fd2807SJeff Garzik }
1455c6fd2807SJeff Garzik 
1456c6fd2807SJeff Garzik /**
145711fc33daSTejun Heo  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
145811fc33daSTejun Heo  *	@dev: target ATAPI device
145911fc33daSTejun Heo  *	@r_sense_key: out parameter for sense_key
146011fc33daSTejun Heo  *
146111fc33daSTejun Heo  *	Perform ATAPI TEST_UNIT_READY.
146211fc33daSTejun Heo  *
146311fc33daSTejun Heo  *	LOCKING:
146411fc33daSTejun Heo  *	EH context (may sleep).
146511fc33daSTejun Heo  *
146611fc33daSTejun Heo  *	RETURNS:
146711fc33daSTejun Heo  *	0 on success, AC_ERR_* mask on failure.
146811fc33daSTejun Heo  */
146911fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
147011fc33daSTejun Heo {
147111fc33daSTejun Heo 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
147211fc33daSTejun Heo 	struct ata_taskfile tf;
147311fc33daSTejun Heo 	unsigned int err_mask;
147411fc33daSTejun Heo 
147511fc33daSTejun Heo 	ata_tf_init(dev, &tf);
147611fc33daSTejun Heo 
147711fc33daSTejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
147811fc33daSTejun Heo 	tf.command = ATA_CMD_PACKET;
147911fc33daSTejun Heo 	tf.protocol = ATAPI_PROT_NODATA;
148011fc33daSTejun Heo 
148111fc33daSTejun Heo 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
148211fc33daSTejun Heo 	if (err_mask == AC_ERR_DEV)
148311fc33daSTejun Heo 		*r_sense_key = tf.feature >> 4;
148411fc33daSTejun Heo 	return err_mask;
148511fc33daSTejun Heo }
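
/*
 * A minimal sketch, assuming a caller that wants to wait for an ATAPI
 * device to leave the NOT READY state by polling TEST_UNIT_READY.
 * example_*() and the retry count are made up; msleep() would need
 * <linux/delay.h>.
 */
#if 0
static int example_wait_until_ready(struct ata_device *dev, int tries)
{
	while (tries--) {
		u8 sense_key = 0;
		unsigned int err_mask = atapi_eh_tur(dev, &sense_key);

		if (!err_mask)
			return 0;		/* device is ready */
		if (err_mask != AC_ERR_DEV || sense_key != NOT_READY)
			return -EIO;		/* some other failure */
		msleep(1000);
	}
	return -EBUSY;
}
#endif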
148611fc33daSTejun Heo 
148711fc33daSTejun Heo /**
1488c6fd2807SJeff Garzik  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1489c6fd2807SJeff Garzik  *	@dev: device to perform REQUEST_SENSE to
1490c6fd2807SJeff Garzik  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
14913eabddb8STejun Heo  *	@dfl_sense_key: default sense key to use
1492c6fd2807SJeff Garzik  *
1493c6fd2807SJeff Garzik  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1494c6fd2807SJeff Garzik  *	SENSE.  This function is an EH helper.
1495c6fd2807SJeff Garzik  *
1496c6fd2807SJeff Garzik  *	LOCKING:
1497c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1498c6fd2807SJeff Garzik  *
1499c6fd2807SJeff Garzik  *	RETURNS:
1500c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask on failure
1501c6fd2807SJeff Garzik  */
15023eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev,
15033eabddb8STejun Heo 					   u8 *sense_buf, u8 dfl_sense_key)
1504c6fd2807SJeff Garzik {
15053eabddb8STejun Heo 	u8 cdb[ATAPI_CDB_LEN] =
15063eabddb8STejun Heo 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
15079af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1508c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1509c6fd2807SJeff Garzik 
1510c6fd2807SJeff Garzik 	DPRINTK("ATAPI request sense\n");
1511c6fd2807SJeff Garzik 
1512c6fd2807SJeff Garzik 	/* FIXME: is this needed? */
1513c6fd2807SJeff Garzik 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1514c6fd2807SJeff Garzik 
151556287768SAlbert Lee 	/* initialize sense_buf with the error register,
151756287768SAlbert Lee 	 * for the case where it is -not- overwritten
151756287768SAlbert Lee 	 */
1518c6fd2807SJeff Garzik 	sense_buf[0] = 0x70;
15193eabddb8STejun Heo 	sense_buf[2] = dfl_sense_key;
152056287768SAlbert Lee 
152156287768SAlbert Lee 	/* some devices time out if garbage left in tf */
152256287768SAlbert Lee 	ata_tf_init(dev, &tf);
1523c6fd2807SJeff Garzik 
1524c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1525c6fd2807SJeff Garzik 	tf.command = ATA_CMD_PACKET;
1526c6fd2807SJeff Garzik 
1527c6fd2807SJeff Garzik 	/* is it pointless to prefer PIO for "safety reasons"? */
1528c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_DMA) {
15290dc36888STejun Heo 		tf.protocol = ATAPI_PROT_DMA;
1530c6fd2807SJeff Garzik 		tf.feature |= ATAPI_PKT_DMA;
1531c6fd2807SJeff Garzik 	} else {
15320dc36888STejun Heo 		tf.protocol = ATAPI_PROT_PIO;
1533f2dfc1a1STejun Heo 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1534f2dfc1a1STejun Heo 		tf.lbah = 0;
1535c6fd2807SJeff Garzik 	}
1536c6fd2807SJeff Garzik 
1537c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
15382b789108STejun Heo 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1539c6fd2807SJeff Garzik }
1540c6fd2807SJeff Garzik 
1541c6fd2807SJeff Garzik /**
1542c6fd2807SJeff Garzik  *	ata_eh_analyze_serror - analyze SError for a failed port
15430260731fSTejun Heo  *	@link: ATA link to analyze SError for
1544c6fd2807SJeff Garzik  *
1545c6fd2807SJeff Garzik  *	Analyze SError if available and further determine cause of
1546c6fd2807SJeff Garzik  *	failure.
1547c6fd2807SJeff Garzik  *
1548c6fd2807SJeff Garzik  *	LOCKING:
1549c6fd2807SJeff Garzik  *	None.
1550c6fd2807SJeff Garzik  */
15510260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1552c6fd2807SJeff Garzik {
15530260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1554c6fd2807SJeff Garzik 	u32 serror = ehc->i.serror;
1555c6fd2807SJeff Garzik 	unsigned int err_mask = 0, action = 0;
1556f9df58cbSTejun Heo 	u32 hotplug_mask;
1557c6fd2807SJeff Garzik 
1558e0614db2STejun Heo 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1559c6fd2807SJeff Garzik 		err_mask |= AC_ERR_ATA_BUS;
1560cf480626STejun Heo 		action |= ATA_EH_RESET;
1561c6fd2807SJeff Garzik 	}
1562c6fd2807SJeff Garzik 	if (serror & SERR_PROTOCOL) {
1563c6fd2807SJeff Garzik 		err_mask |= AC_ERR_HSM;
1564cf480626STejun Heo 		action |= ATA_EH_RESET;
1565c6fd2807SJeff Garzik 	}
1566c6fd2807SJeff Garzik 	if (serror & SERR_INTERNAL) {
1567c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1568cf480626STejun Heo 		action |= ATA_EH_RESET;
1569c6fd2807SJeff Garzik 	}
1570f9df58cbSTejun Heo 
1571f9df58cbSTejun Heo 	/* Determine whether a hotplug event has occurred.  Both
1572f9df58cbSTejun Heo 	 * SError.N/X are considered hotplug events for enabled or
1573f9df58cbSTejun Heo 	 * host links.  For disabled PMP links, only N bit is
1574f9df58cbSTejun Heo 	 * considered as X bit is left at 1 for link plugging.
1575f9df58cbSTejun Heo 	 */
1576f9df58cbSTejun Heo 	hotplug_mask = 0;
1577f9df58cbSTejun Heo 
1578f9df58cbSTejun Heo 	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1579f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1580f9df58cbSTejun Heo 	else
1581f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG;
1582f9df58cbSTejun Heo 
1583f9df58cbSTejun Heo 	if (serror & hotplug_mask)
1584c6fd2807SJeff Garzik 		ata_ehi_hotplugged(&ehc->i);
1585c6fd2807SJeff Garzik 
1586c6fd2807SJeff Garzik 	ehc->i.err_mask |= err_mask;
1587c6fd2807SJeff Garzik 	ehc->i.action |= action;
1588c6fd2807SJeff Garzik }
1589c6fd2807SJeff Garzik 
1590c6fd2807SJeff Garzik /**
1591c6fd2807SJeff Garzik  *	ata_eh_analyze_ncq_error - analyze NCQ error
15920260731fSTejun Heo  *	@link: ATA link to analyze NCQ error for
1593c6fd2807SJeff Garzik  *
1594c6fd2807SJeff Garzik  *	Read log page 10h, determine the offending qc and acquire
1595c6fd2807SJeff Garzik  *	error status TF.  For NCQ device errors, all LLDDs have to do
1596c6fd2807SJeff Garzik  *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
1597c6fd2807SJeff Garzik  *	care of the rest.
1598c6fd2807SJeff Garzik  *
1599c6fd2807SJeff Garzik  *	LOCKING:
1600c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1601c6fd2807SJeff Garzik  */
160210acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1603c6fd2807SJeff Garzik {
16040260731fSTejun Heo 	struct ata_port *ap = link->ap;
16050260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
16060260731fSTejun Heo 	struct ata_device *dev = link->device;
1607c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1608c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1609c6fd2807SJeff Garzik 	int tag, rc;
1610c6fd2807SJeff Garzik 
1611c6fd2807SJeff Garzik 	/* if frozen, we can't do much */
1612c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1613c6fd2807SJeff Garzik 		return;
1614c6fd2807SJeff Garzik 
1615c6fd2807SJeff Garzik 	/* is it NCQ device error? */
16160260731fSTejun Heo 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1617c6fd2807SJeff Garzik 		return;
1618c6fd2807SJeff Garzik 
1619c6fd2807SJeff Garzik 	/* has LLDD analyzed already? */
1620c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1621c6fd2807SJeff Garzik 		qc = __ata_qc_from_tag(ap, tag);
1622c6fd2807SJeff Garzik 
1623c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1624c6fd2807SJeff Garzik 			continue;
1625c6fd2807SJeff Garzik 
1626c6fd2807SJeff Garzik 		if (qc->err_mask)
1627c6fd2807SJeff Garzik 			return;
1628c6fd2807SJeff Garzik 	}
1629c6fd2807SJeff Garzik 
1630c6fd2807SJeff Garzik 	/* okay, this error is ours */
1631*a09bf4cdSJeff Garzik 	memset(&tf, 0, sizeof(tf));
1632c6fd2807SJeff Garzik 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1633c6fd2807SJeff Garzik 	if (rc) {
16340260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1635c6fd2807SJeff Garzik 				"(errno=%d)\n", rc);
1636c6fd2807SJeff Garzik 		return;
1637c6fd2807SJeff Garzik 	}
1638c6fd2807SJeff Garzik 
16390260731fSTejun Heo 	if (!(link->sactive & (1 << tag))) {
16400260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "log page 10h reported "
1641c6fd2807SJeff Garzik 				"inactive tag %d\n", tag);
1642c6fd2807SJeff Garzik 		return;
1643c6fd2807SJeff Garzik 	}
1644c6fd2807SJeff Garzik 
1645c6fd2807SJeff Garzik 	/* we've got the perpetrator, condemn it */
1646c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1647c6fd2807SJeff Garzik 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1648a6116c9eSMark Lord 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
16495335b729STejun Heo 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1650c6fd2807SJeff Garzik 	ehc->i.err_mask &= ~AC_ERR_DEV;
1651c6fd2807SJeff Garzik }
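
/*
 * A minimal sketch of the LLDD side of an NCQ device error: the driver
 * only flags AC_ERR_DEV and aborts the port without freezing it, so that
 * ata_eh_analyze_ncq_error() can still issue READ LOG EXT page 10h from
 * EH context.  example_*() is a made-up name for illustration.
 */
#if 0
static void example_ncq_dev_error_intr(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "NCQ device error");
	ehi->err_mask |= AC_ERR_DEV;

	/* abort rather than freeze -- log page 10h needs a live port */
	ata_port_abort(ap);
}
#endif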
1652c6fd2807SJeff Garzik 
1653c6fd2807SJeff Garzik /**
1654c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1655c6fd2807SJeff Garzik  *	@qc: qc to analyze
1656c6fd2807SJeff Garzik  *	@tf: Taskfile registers to analyze
1657c6fd2807SJeff Garzik  *
1658c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1659c6fd2807SJeff Garzik  *	failure.  This function also requests ATAPI sense data if
1660c6fd2807SJeff Garzik  *	avaliable.
1661c6fd2807SJeff Garzik  *
1662c6fd2807SJeff Garzik  *	LOCKING:
1663c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1664c6fd2807SJeff Garzik  *
1665c6fd2807SJeff Garzik  *	RETURNS:
1666c6fd2807SJeff Garzik  *	Determined recovery action
1667c6fd2807SJeff Garzik  */
1668c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1669c6fd2807SJeff Garzik 				      const struct ata_taskfile *tf)
1670c6fd2807SJeff Garzik {
1671c6fd2807SJeff Garzik 	unsigned int tmp, action = 0;
1672c6fd2807SJeff Garzik 	u8 stat = tf->command, err = tf->feature;
1673c6fd2807SJeff Garzik 
1674c6fd2807SJeff Garzik 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1675c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_HSM;
1676cf480626STejun Heo 		return ATA_EH_RESET;
1677c6fd2807SJeff Garzik 	}
1678c6fd2807SJeff Garzik 
1679a51d644aSTejun Heo 	if (stat & (ATA_ERR | ATA_DF))
1680a51d644aSTejun Heo 		qc->err_mask |= AC_ERR_DEV;
1681a51d644aSTejun Heo 	else
1682c6fd2807SJeff Garzik 		return 0;
1683c6fd2807SJeff Garzik 
1684c6fd2807SJeff Garzik 	switch (qc->dev->class) {
1685c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1686c6fd2807SJeff Garzik 		if (err & ATA_ICRC)
1687c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_ATA_BUS;
1688c6fd2807SJeff Garzik 		if (err & ATA_UNC)
1689c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_MEDIA;
1690c6fd2807SJeff Garzik 		if (err & ATA_IDNF)
1691c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_INVALID;
1692c6fd2807SJeff Garzik 		break;
1693c6fd2807SJeff Garzik 
1694c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1695a569a30dSTejun Heo 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
16963eabddb8STejun Heo 			tmp = atapi_eh_request_sense(qc->dev,
16973eabddb8STejun Heo 						qc->scsicmd->sense_buffer,
16983eabddb8STejun Heo 						qc->result_tf.feature >> 4);
1699c6fd2807SJeff Garzik 			if (!tmp) {
1700a569a30dSTejun Heo 				/* ATA_QCFLAG_SENSE_VALID is used to
1701a569a30dSTejun Heo 				 * tell atapi_qc_complete() that sense
1702a569a30dSTejun Heo 				 * data is already valid.
1703c6fd2807SJeff Garzik 				 *
1704c6fd2807SJeff Garzik 				 * TODO: interpret sense data and set
1705c6fd2807SJeff Garzik 				 * appropriate err_mask.
1706c6fd2807SJeff Garzik 				 */
1707c6fd2807SJeff Garzik 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1708c6fd2807SJeff Garzik 			} else
1709c6fd2807SJeff Garzik 				qc->err_mask |= tmp;
1710c6fd2807SJeff Garzik 		}
1711a569a30dSTejun Heo 	}
1712c6fd2807SJeff Garzik 
1713c6fd2807SJeff Garzik 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1714cf480626STejun Heo 		action |= ATA_EH_RESET;
1715c6fd2807SJeff Garzik 
1716c6fd2807SJeff Garzik 	return action;
1717c6fd2807SJeff Garzik }
1718c6fd2807SJeff Garzik 
171976326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
172076326ac1STejun Heo 				   int *xfer_ok)
1721c6fd2807SJeff Garzik {
172276326ac1STejun Heo 	int base = 0;
172376326ac1STejun Heo 
172476326ac1STejun Heo 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
172576326ac1STejun Heo 		*xfer_ok = 1;
172676326ac1STejun Heo 
172776326ac1STejun Heo 	if (!*xfer_ok)
172875f9cafcSTejun Heo 		base = ATA_ECAT_DUBIOUS_NONE;
172976326ac1STejun Heo 
17307d47e8d4STejun Heo 	if (err_mask & AC_ERR_ATA_BUS)
173176326ac1STejun Heo 		return base + ATA_ECAT_ATA_BUS;
1732c6fd2807SJeff Garzik 
17337d47e8d4STejun Heo 	if (err_mask & AC_ERR_TIMEOUT)
173476326ac1STejun Heo 		return base + ATA_ECAT_TOUT_HSM;
17357d47e8d4STejun Heo 
17363884f7b0STejun Heo 	if (eflags & ATA_EFLAG_IS_IO) {
17377d47e8d4STejun Heo 		if (err_mask & AC_ERR_HSM)
173876326ac1STejun Heo 			return base + ATA_ECAT_TOUT_HSM;
17397d47e8d4STejun Heo 		if ((err_mask &
17407d47e8d4STejun Heo 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
174176326ac1STejun Heo 			return base + ATA_ECAT_UNK_DEV;
1742c6fd2807SJeff Garzik 	}
1743c6fd2807SJeff Garzik 
1744c6fd2807SJeff Garzik 	return 0;
1745c6fd2807SJeff Garzik }
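
/*
 * For example, AC_ERR_TIMEOUT always selects the TOUT_HSM category;
 * whether it is counted as ATA_ECAT_TOUT_HSM or ATA_ECAT_DUBIOUS_TOUT_HSM
 * depends on the *xfer_ok latch above, i.e. on whether an error ring
 * entry with a verified data transfer has already been seen while
 * walking the ring.
 */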
1746c6fd2807SJeff Garzik 
17477d47e8d4STejun Heo struct speed_down_verdict_arg {
1748c6fd2807SJeff Garzik 	u64 since;
174976326ac1STejun Heo 	int xfer_ok;
17503884f7b0STejun Heo 	int nr_errors[ATA_ECAT_NR];
1751c6fd2807SJeff Garzik };
1752c6fd2807SJeff Garzik 
17537d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1754c6fd2807SJeff Garzik {
17557d47e8d4STejun Heo 	struct speed_down_verdict_arg *arg = void_arg;
175676326ac1STejun Heo 	int cat;
1757c6fd2807SJeff Garzik 
1758c6fd2807SJeff Garzik 	if (ent->timestamp < arg->since)
1759c6fd2807SJeff Garzik 		return -1;
1760c6fd2807SJeff Garzik 
176176326ac1STejun Heo 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
176276326ac1STejun Heo 				      &arg->xfer_ok);
17637d47e8d4STejun Heo 	arg->nr_errors[cat]++;
176476326ac1STejun Heo 
1765c6fd2807SJeff Garzik 	return 0;
1766c6fd2807SJeff Garzik }
1767c6fd2807SJeff Garzik 
1768c6fd2807SJeff Garzik /**
17697d47e8d4STejun Heo  *	ata_eh_speed_down_verdict - Determine speed down verdict
1770c6fd2807SJeff Garzik  *	@dev: Device of interest
1771c6fd2807SJeff Garzik  *
1772c6fd2807SJeff Garzik  *	This function examines error ring of @dev and determines
17737d47e8d4STejun Heo  *	whether NCQ needs to be turned off, transfer speed should be
17747d47e8d4STejun Heo  *	stepped down, or falling back to PIO is necessary.
1775c6fd2807SJeff Garzik  *
17763884f7b0STejun Heo  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1777c6fd2807SJeff Garzik  *
17783884f7b0STejun Heo  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
17793884f7b0STejun Heo  *			  IO commands
17807d47e8d4STejun Heo  *
17813884f7b0STejun Heo  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1782c6fd2807SJeff Garzik  *
178376326ac1STejun Heo  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
178476326ac1STejun Heo  *			  data transfer hasn't been verified.
178576326ac1STejun Heo  *
17863884f7b0STejun Heo  *	Verdicts are
17877d47e8d4STejun Heo  *
17883884f7b0STejun Heo  *	NCQ_OFF		: Turn off NCQ.
17897d47e8d4STejun Heo  *
17903884f7b0STejun Heo  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
17913884f7b0STejun Heo  *			  to PIO.
17923884f7b0STejun Heo  *
17933884f7b0STejun Heo  *	FALLBACK_TO_PIO	: Fall back to PIO.
17943884f7b0STejun Heo  *
17953884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
179676326ac1STejun Heo  *	taken per error.  An action triggered by non-DUBIOUS errors
179776326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
179876326ac1STejun Heo  *	This is to expedite speed down decisions right after device is
179976326ac1STejun Heo  *	initially configured.
18003884f7b0STejun Heo  *
180176326ac1STejun Heo  *	The following are the speed down rules.  #1 and #2 deal with
180276326ac1STejun Heo  *	DUBIOUS errors.
180376326ac1STejun Heo  *
180476326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
180576326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
180676326ac1STejun Heo  *
180776326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
180876326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
180976326ac1STejun Heo  *
181076326ac1STejun Heo  *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
18113884f7b0STejun Heo  *	   occurred during last 5 mins, FALLBACK_TO_PIO
18123884f7b0STejun Heo  *
181376326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
18143884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
18153884f7b0STejun Heo  *
181676326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
18173884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
18187d47e8d4STejun Heo  *
1819c6fd2807SJeff Garzik  *	LOCKING:
1820c6fd2807SJeff Garzik  *	Inherited from caller.
1821c6fd2807SJeff Garzik  *
1822c6fd2807SJeff Garzik  *	RETURNS:
18237d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
1824c6fd2807SJeff Garzik  */
18257d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1826c6fd2807SJeff Garzik {
18277d47e8d4STejun Heo 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
18287d47e8d4STejun Heo 	u64 j64 = get_jiffies_64();
18297d47e8d4STejun Heo 	struct speed_down_verdict_arg arg;
18307d47e8d4STejun Heo 	unsigned int verdict = 0;
1831c6fd2807SJeff Garzik 
18323884f7b0STejun Heo 	/* scan past 5 mins of error history */
18333884f7b0STejun Heo 	memset(&arg, 0, sizeof(arg));
18343884f7b0STejun Heo 	arg.since = j64 - min(j64, j5mins);
18353884f7b0STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
18363884f7b0STejun Heo 
183776326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
183876326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
183976326ac1STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
184076326ac1STejun Heo 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
184176326ac1STejun Heo 
184276326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
184376326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
184476326ac1STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
184576326ac1STejun Heo 
18463884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18473884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1848663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18493884f7b0STejun Heo 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
18503884f7b0STejun Heo 
18517d47e8d4STejun Heo 	/* scan past 10 mins of error history */
1852c6fd2807SJeff Garzik 	memset(&arg, 0, sizeof(arg));
18537d47e8d4STejun Heo 	arg.since = j64 - min(j64, j10mins);
18547d47e8d4STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1855c6fd2807SJeff Garzik 
18563884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
18573884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
18587d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF;
18593884f7b0STejun Heo 
18603884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18613884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1862663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18637d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1864c6fd2807SJeff Garzik 
18657d47e8d4STejun Heo 	return verdict;
1866c6fd2807SJeff Garzik }
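
/*
 * A worked example against the thresholds coded above: four I/O timeouts
 * within the last ten minutes give nr_errors[ATA_ECAT_TOUT_HSM] == 4 in
 * the second pass, so both the NCQ_OFF check (> 3) and the SPEED_DOWN
 * check (> 3) fire; FALLBACK_TO_PIO needs more than six qualifying
 * errors inside the five minute window, so it is not requested yet.
 */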
1867c6fd2807SJeff Garzik 
1868c6fd2807SJeff Garzik /**
1869c6fd2807SJeff Garzik  *	ata_eh_speed_down - record error and speed down if necessary
1870c6fd2807SJeff Garzik  *	@dev: Failed device
18713884f7b0STejun Heo  *	@eflags: mask of ATA_EFLAG_* flags
1872c6fd2807SJeff Garzik  *	@err_mask: err_mask of the error
1873c6fd2807SJeff Garzik  *
1874c6fd2807SJeff Garzik  *	Record error and examine error history to determine whether
1875c6fd2807SJeff Garzik  *	adjusting transmission speed is necessary.  It also sets
1876c6fd2807SJeff Garzik  *	transmission limits appropriately if such adjustment is
1877c6fd2807SJeff Garzik  *	necessary.
1878c6fd2807SJeff Garzik  *
1879c6fd2807SJeff Garzik  *	LOCKING:
1880c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1881c6fd2807SJeff Garzik  *
1882c6fd2807SJeff Garzik  *	RETURNS:
18837d47e8d4STejun Heo  *	Determined recovery action.
1884c6fd2807SJeff Garzik  */
18853884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
18863884f7b0STejun Heo 				unsigned int eflags, unsigned int err_mask)
1887c6fd2807SJeff Garzik {
1888b1c72916STejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
188976326ac1STejun Heo 	int xfer_ok = 0;
18907d47e8d4STejun Heo 	unsigned int verdict;
18917d47e8d4STejun Heo 	unsigned int action = 0;
18927d47e8d4STejun Heo 
18937d47e8d4STejun Heo 	/* don't bother if Cat-0 error */
189476326ac1STejun Heo 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1895c6fd2807SJeff Garzik 		return 0;
1896c6fd2807SJeff Garzik 
1897c6fd2807SJeff Garzik 	/* record error and determine whether speed down is necessary */
18983884f7b0STejun Heo 	ata_ering_record(&dev->ering, eflags, err_mask);
18997d47e8d4STejun Heo 	verdict = ata_eh_speed_down_verdict(dev);
1900c6fd2807SJeff Garzik 
19017d47e8d4STejun Heo 	/* turn off NCQ? */
19027d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
19037d47e8d4STejun Heo 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
19047d47e8d4STejun Heo 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
19057d47e8d4STejun Heo 		dev->flags |= ATA_DFLAG_NCQ_OFF;
19067d47e8d4STejun Heo 		ata_dev_printk(dev, KERN_WARNING,
19077d47e8d4STejun Heo 			       "NCQ disabled due to excessive errors\n");
19087d47e8d4STejun Heo 		goto done;
19097d47e8d4STejun Heo 	}
1910c6fd2807SJeff Garzik 
19117d47e8d4STejun Heo 	/* speed down? */
19127d47e8d4STejun Heo 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1913c6fd2807SJeff Garzik 		/* speed down SATA link speed if possible */
1914a07d499bSTejun Heo 		if (sata_down_spd_limit(link, 0) == 0) {
1915cf480626STejun Heo 			action |= ATA_EH_RESET;
19167d47e8d4STejun Heo 			goto done;
19177d47e8d4STejun Heo 		}
1918c6fd2807SJeff Garzik 
1919c6fd2807SJeff Garzik 		/* lower transfer mode */
19207d47e8d4STejun Heo 		if (dev->spdn_cnt < 2) {
19217d47e8d4STejun Heo 			static const int dma_dnxfer_sel[] =
19227d47e8d4STejun Heo 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
19237d47e8d4STejun Heo 			static const int pio_dnxfer_sel[] =
19247d47e8d4STejun Heo 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
19257d47e8d4STejun Heo 			int sel;
1926c6fd2807SJeff Garzik 
19277d47e8d4STejun Heo 			if (dev->xfer_shift != ATA_SHIFT_PIO)
19287d47e8d4STejun Heo 				sel = dma_dnxfer_sel[dev->spdn_cnt];
19297d47e8d4STejun Heo 			else
19307d47e8d4STejun Heo 				sel = pio_dnxfer_sel[dev->spdn_cnt];
19317d47e8d4STejun Heo 
19327d47e8d4STejun Heo 			dev->spdn_cnt++;
19337d47e8d4STejun Heo 
19347d47e8d4STejun Heo 			if (ata_down_xfermask_limit(dev, sel) == 0) {
1935cf480626STejun Heo 				action |= ATA_EH_RESET;
19367d47e8d4STejun Heo 				goto done;
19377d47e8d4STejun Heo 			}
19387d47e8d4STejun Heo 		}
19397d47e8d4STejun Heo 	}
19407d47e8d4STejun Heo 
19417d47e8d4STejun Heo 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1942663f99b8STejun Heo 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
19437d47e8d4STejun Heo 	 */
19447d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1945663f99b8STejun Heo 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
19467d47e8d4STejun Heo 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
19477d47e8d4STejun Heo 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
19487d47e8d4STejun Heo 			dev->spdn_cnt = 0;
1949cf480626STejun Heo 			action |= ATA_EH_RESET;
19507d47e8d4STejun Heo 			goto done;
19517d47e8d4STejun Heo 		}
19527d47e8d4STejun Heo 	}
19537d47e8d4STejun Heo 
1954c6fd2807SJeff Garzik 	return 0;
19557d47e8d4STejun Heo  done:
19567d47e8d4STejun Heo 	/* device has been slowed down, blow error history */
195776326ac1STejun Heo 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
19587d47e8d4STejun Heo 		ata_ering_clear(&dev->ering);
19597d47e8d4STejun Heo 	return action;
1960c6fd2807SJeff Garzik }
1961c6fd2807SJeff Garzik 
1962c6fd2807SJeff Garzik /**
19639b1e2658STejun Heo  *	ata_eh_link_autopsy - analyze error and determine recovery action
19649b1e2658STejun Heo  *	@link: host link to perform autopsy on
1965c6fd2807SJeff Garzik  *
19660260731fSTejun Heo  *	Analyze why @link failed and determine which recovery actions
19670260731fSTejun Heo  *	are needed.  This function also sets more detailed AC_ERR_*
19680260731fSTejun Heo  *	values and fills sense data for ATAPI CHECK SENSE.
1969c6fd2807SJeff Garzik  *
1970c6fd2807SJeff Garzik  *	LOCKING:
1971c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1972c6fd2807SJeff Garzik  */
19739b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
1974c6fd2807SJeff Garzik {
19750260731fSTejun Heo 	struct ata_port *ap = link->ap;
1976936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1977dfcc173dSTejun Heo 	struct ata_device *dev;
19783884f7b0STejun Heo 	unsigned int all_err_mask = 0, eflags = 0;
19793884f7b0STejun Heo 	int tag;
1980c6fd2807SJeff Garzik 	u32 serror;
1981c6fd2807SJeff Garzik 	int rc;
1982c6fd2807SJeff Garzik 
1983c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1984c6fd2807SJeff Garzik 
1985c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1986c6fd2807SJeff Garzik 		return;
1987c6fd2807SJeff Garzik 
1988c6fd2807SJeff Garzik 	/* obtain and analyze SError */
1989936fd732STejun Heo 	rc = sata_scr_read(link, SCR_ERROR, &serror);
1990c6fd2807SJeff Garzik 	if (rc == 0) {
1991c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
19920260731fSTejun Heo 		ata_eh_analyze_serror(link);
19934e57c517STejun Heo 	} else if (rc != -EOPNOTSUPP) {
1994cf480626STejun Heo 		/* SError read failed, force reset and probing */
1995b558edddSTejun Heo 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
1996cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
19974e57c517STejun Heo 		ehc->i.err_mask |= AC_ERR_OTHER;
19984e57c517STejun Heo 	}
1999c6fd2807SJeff Garzik 
2000c6fd2807SJeff Garzik 	/* analyze NCQ failure */
20010260731fSTejun Heo 	ata_eh_analyze_ncq_error(link);
2002c6fd2807SJeff Garzik 
2003c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
2004c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2005c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2006c6fd2807SJeff Garzik 
2007c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
2008c6fd2807SJeff Garzik 
2009c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2010c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2011c6fd2807SJeff Garzik 
2012b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2013b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link)
2014c6fd2807SJeff Garzik 			continue;
2015c6fd2807SJeff Garzik 
2016c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
2017c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
2018c6fd2807SJeff Garzik 
2019c6fd2807SJeff Garzik 		/* analyze TF */
2020c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
2023c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
2024c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2025c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
2026c6fd2807SJeff Garzik 
2027c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
2028c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
2029c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
2030c6fd2807SJeff Garzik 
2031c6fd2807SJeff Garzik 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2032f90f0828STejun Heo 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2033c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2034c6fd2807SJeff Garzik 
203503faab78STejun Heo 		/* determine whether the command is worth retrying */
2036534ead70STejun Heo 		if (qc->flags & ATA_QCFLAG_IO ||
2037534ead70STejun Heo 		    (!(qc->err_mask & AC_ERR_INVALID) &&
2038534ead70STejun Heo 		     qc->err_mask != AC_ERR_DEV))
203903faab78STejun Heo 			qc->flags |= ATA_QCFLAG_RETRY;
204003faab78STejun Heo 
2041c6fd2807SJeff Garzik 		/* accumulate error info */
2042c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
2043c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
2044c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
20453884f7b0STejun Heo 			eflags |= ATA_EFLAG_IS_IO;
2046c6fd2807SJeff Garzik 	}
2047c6fd2807SJeff Garzik 
2048c6fd2807SJeff Garzik 	/* enforce default EH actions */
2049c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2050c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2051cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
20523884f7b0STejun Heo 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
20533884f7b0STejun Heo 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2054c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
2055c6fd2807SJeff Garzik 
2056dfcc173dSTejun Heo 	/* If we have offending qcs and the associated failed device,
2057dfcc173dSTejun Heo 	 * perform per-dev EH action only on the offending device.
2058dfcc173dSTejun Heo 	 */
2059c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2060c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
2061c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
2062c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2063c6fd2807SJeff Garzik 	}
2064c6fd2807SJeff Garzik 
20652695e366STejun Heo 	/* propagate timeout to host link */
20662695e366STejun Heo 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
20672695e366STejun Heo 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
20682695e366STejun Heo 
20692695e366STejun Heo 	/* record error and consider speeding down */
2070dfcc173dSTejun Heo 	dev = ehc->i.dev;
20712695e366STejun Heo 	if (!dev && ((ata_link_max_devices(link) == 1 &&
20722695e366STejun Heo 		      ata_dev_enabled(link->device))))
2073dfcc173dSTejun Heo 	    dev = link->device;
2074dfcc173dSTejun Heo 
207576326ac1STejun Heo 	if (dev) {
207676326ac1STejun Heo 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
207776326ac1STejun Heo 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
20783884f7b0STejun Heo 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
207976326ac1STejun Heo 	}
2080dfcc173dSTejun Heo 
2081c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
2082c6fd2807SJeff Garzik }
2083c6fd2807SJeff Garzik 
2084c6fd2807SJeff Garzik /**
20859b1e2658STejun Heo  *	ata_eh_autopsy - analyze error and determine recovery action
20869b1e2658STejun Heo  *	@ap: host port to perform autopsy on
20879b1e2658STejun Heo  *
20889b1e2658STejun Heo  *	Analyze all links of @ap and determine why they failed and
20899b1e2658STejun Heo  *	which recovery actions are needed.
20909b1e2658STejun Heo  *
20919b1e2658STejun Heo  *	LOCKING:
20929b1e2658STejun Heo  *	Kernel thread context (may sleep).
20939b1e2658STejun Heo  */
2094fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
20959b1e2658STejun Heo {
20969b1e2658STejun Heo 	struct ata_link *link;
20979b1e2658STejun Heo 
20981eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE)
20999b1e2658STejun Heo 		ata_eh_link_autopsy(link);
21002695e366STejun Heo 
2101b1c72916STejun Heo 	/* Handle the frigging slave link.  Autopsy is done similarly
2102b1c72916STejun Heo 	 * but actions and flags are transferred over to the master
2103b1c72916STejun Heo 	 * link and handled from there.
2104b1c72916STejun Heo 	 */
2105b1c72916STejun Heo 	if (ap->slave_link) {
2106b1c72916STejun Heo 		struct ata_eh_context *mehc = &ap->link.eh_context;
2107b1c72916STejun Heo 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2108b1c72916STejun Heo 
2109848e4c68STejun Heo 		/* transfer control flags from master to slave */
2110848e4c68STejun Heo 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2111848e4c68STejun Heo 
2112848e4c68STejun Heo 		/* perform autopsy on the slave link */
2113b1c72916STejun Heo 		ata_eh_link_autopsy(ap->slave_link);
2114b1c72916STejun Heo 
2115848e4c68STejun Heo 		/* transfer actions from slave to master and clear slave */
2116b1c72916STejun Heo 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2117b1c72916STejun Heo 		mehc->i.action		|= sehc->i.action;
2118b1c72916STejun Heo 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2119b1c72916STejun Heo 		mehc->i.flags		|= sehc->i.flags;
2120b1c72916STejun Heo 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2121b1c72916STejun Heo 	}
2122b1c72916STejun Heo 
21232695e366STejun Heo 	/* Autopsy of fanout ports can affect host link autopsy.
21242695e366STejun Heo 	 * Perform host link autopsy last.
21252695e366STejun Heo 	 */
2126071f44b1STejun Heo 	if (sata_pmp_attached(ap))
21272695e366STejun Heo 		ata_eh_link_autopsy(&ap->link);
21289b1e2658STejun Heo }
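
/*
 * A minimal sketch of where autopsy sits in the standard flow: a driver's
 * ->error_handler usually just calls ata_do_eh(), which runs
 * ata_eh_autopsy(), ata_eh_report(), the recovery loop and
 * ata_eh_finish() in that order.  example_*() is a made-up name.
 */
#if 0
static void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ap->ops->prereset, ap->ops->softreset,
		  ap->ops->hardreset, ap->ops->postreset);
}
#endif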
21299b1e2658STejun Heo 
21309b1e2658STejun Heo /**
21316521148cSRobert Hancock  *	ata_get_cmd_descript - get description for ATA command
21326521148cSRobert Hancock  *	@command: ATA command code to get description for
21336521148cSRobert Hancock  *
21346521148cSRobert Hancock  *	Return a textual description of the given command, or NULL if the
21356521148cSRobert Hancock  *	command is not known.
21366521148cSRobert Hancock  *
21376521148cSRobert Hancock  *	LOCKING:
21386521148cSRobert Hancock  *	None
21396521148cSRobert Hancock  */
21406521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command)
21416521148cSRobert Hancock {
21426521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
21436521148cSRobert Hancock 	static const struct
21446521148cSRobert Hancock 	{
21456521148cSRobert Hancock 		u8 command;
21466521148cSRobert Hancock 		const char *text;
21476521148cSRobert Hancock 	} cmd_descr[] = {
21486521148cSRobert Hancock 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
21496521148cSRobert Hancock 		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
21506521148cSRobert Hancock 		{ ATA_CMD_STANDBY, 		"STANDBY" },
21516521148cSRobert Hancock 		{ ATA_CMD_IDLE, 		"IDLE" },
21526521148cSRobert Hancock 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
21536521148cSRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
21546521148cSRobert Hancock 		{ ATA_CMD_NOP,			"NOP" },
21556521148cSRobert Hancock 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
21566521148cSRobert Hancock 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
21576521148cSRobert Hancock 		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
21586521148cSRobert Hancock 		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
21596521148cSRobert Hancock 		{ ATA_CMD_SERVICE, 		"SERVICE" },
21606521148cSRobert Hancock 		{ ATA_CMD_READ, 		"READ DMA" },
21616521148cSRobert Hancock 		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
21626521148cSRobert Hancock 		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
21636521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
21646521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
21656521148cSRobert Hancock 		{ ATA_CMD_WRITE, 		"WRITE DMA" },
21666521148cSRobert Hancock 		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
21676521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
21686521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
21696521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
21706521148cSRobert Hancock 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
21716521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
21726521148cSRobert Hancock 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
21736521148cSRobert Hancock 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
21746521148cSRobert Hancock 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
21756521148cSRobert Hancock 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
21766521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
21776521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
21786521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
21796521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
21806521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
21816521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
21826521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
21836521148cSRobert Hancock 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
21846521148cSRobert Hancock 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
21856521148cSRobert Hancock 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
21866521148cSRobert Hancock 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
21876521148cSRobert Hancock 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
21886521148cSRobert Hancock 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
21896521148cSRobert Hancock 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
21906521148cSRobert Hancock 		{ ATA_CMD_SLEEP,		"SLEEP" },
21916521148cSRobert Hancock 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
21926521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
21936521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
21946521148cSRobert Hancock 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
21956521148cSRobert Hancock 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
21966521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
21976521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
21986521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
21996521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
22006521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
22016521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
22026521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
22036521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
22046521148cSRobert Hancock 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
22056521148cSRobert Hancock 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
22066521148cSRobert Hancock 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
22076521148cSRobert Hancock 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
22086521148cSRobert Hancock 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
22096521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
22106521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
22116521148cSRobert Hancock 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
22126521148cSRobert Hancock 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
22136521148cSRobert Hancock 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
22146521148cSRobert Hancock 		{ ATA_CMD_SMART,		"SMART" },
22156521148cSRobert Hancock 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
22166521148cSRobert Hancock 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
22176521148cSRobert Hancock 		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
22186521148cSRobert Hancock 		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
22196521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
22206521148cSRobert Hancock 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
22216521148cSRobert Hancock 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
22226521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
22236521148cSRobert Hancock 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
22246521148cSRobert Hancock 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
22256521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
22266521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
22276521148cSRobert Hancock 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
22286521148cSRobert Hancock 		{ 0,				NULL } /* terminate list */
22296521148cSRobert Hancock 	};
22306521148cSRobert Hancock 
22316521148cSRobert Hancock 	unsigned int i;
22326521148cSRobert Hancock 	for (i = 0; cmd_descr[i].text; i++)
22336521148cSRobert Hancock 		if (cmd_descr[i].command == command)
22346521148cSRobert Hancock 			return cmd_descr[i].text;
22356521148cSRobert Hancock #endif
22366521148cSRobert Hancock 
22376521148cSRobert Hancock 	return NULL;
22386521148cSRobert Hancock }
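
/*
 * A minimal sketch of how the description is used when reporting a
 * failed command; ata_eh_link_report() below does essentially this under
 * CONFIG_ATA_VERBOSE_ERROR.  example_*() is a made-up name.
 */
#if 0
static void example_print_failed_cmd(struct ata_queued_cmd *qc)
{
	const char *descr = ata_get_cmd_descript(qc->tf.command);

	if (descr)
		ata_dev_printk(qc->dev, KERN_ERR,
			       "failed command: %s\n", descr);
}
#endif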
22396521148cSRobert Hancock 
22406521148cSRobert Hancock /**
22419b1e2658STejun Heo  *	ata_eh_link_report - report error handling to user
22420260731fSTejun Heo  *	@link: ATA link EH is going on
2243c6fd2807SJeff Garzik  *
2244c6fd2807SJeff Garzik  *	Report EH to user.
2245c6fd2807SJeff Garzik  *
2246c6fd2807SJeff Garzik  *	LOCKING:
2247c6fd2807SJeff Garzik  *	None.
2248c6fd2807SJeff Garzik  */
22499b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2250c6fd2807SJeff Garzik {
22510260731fSTejun Heo 	struct ata_port *ap = link->ap;
22520260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2253c6fd2807SJeff Garzik 	const char *frozen, *desc;
2254a1e10f7eSTejun Heo 	char tries_buf[6];
2255c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
2256c6fd2807SJeff Garzik 
225794ff3d54STejun Heo 	if (ehc->i.flags & ATA_EHI_QUIET)
225894ff3d54STejun Heo 		return;
225994ff3d54STejun Heo 
2260c6fd2807SJeff Garzik 	desc = NULL;
2261c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
2262c6fd2807SJeff Garzik 		desc = ehc->i.desc;
2263c6fd2807SJeff Garzik 
2264c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2265c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2266c6fd2807SJeff Garzik 
2267b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2268b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link ||
2269e027bd36STejun Heo 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2270e027bd36STejun Heo 		     qc->err_mask == AC_ERR_DEV))
2271c6fd2807SJeff Garzik 			continue;
2272c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2273c6fd2807SJeff Garzik 			continue;
2274c6fd2807SJeff Garzik 
2275c6fd2807SJeff Garzik 		nr_failed++;
2276c6fd2807SJeff Garzik 	}
2277c6fd2807SJeff Garzik 
2278c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
2279c6fd2807SJeff Garzik 		return;
2280c6fd2807SJeff Garzik 
2281c6fd2807SJeff Garzik 	frozen = "";
2282c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
2283c6fd2807SJeff Garzik 		frozen = " frozen";
2284c6fd2807SJeff Garzik 
2285a1e10f7eSTejun Heo 	memset(tries_buf, 0, sizeof(tries_buf));
2286a1e10f7eSTejun Heo 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2287a1e10f7eSTejun Heo 		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2288a1e10f7eSTejun Heo 			 ap->eh_tries);
2289a1e10f7eSTejun Heo 
2290c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2291c6fd2807SJeff Garzik 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2292a1e10f7eSTejun Heo 			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2293a1e10f7eSTejun Heo 			       ehc->i.err_mask, link->sactive, ehc->i.serror,
2294a1e10f7eSTejun Heo 			       ehc->i.action, frozen, tries_buf);
2295c6fd2807SJeff Garzik 		if (desc)
2296b64bbc39STejun Heo 			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2297c6fd2807SJeff Garzik 	} else {
22980260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2299a1e10f7eSTejun Heo 				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2300a1e10f7eSTejun Heo 				ehc->i.err_mask, link->sactive, ehc->i.serror,
2301a1e10f7eSTejun Heo 				ehc->i.action, frozen, tries_buf);
2302c6fd2807SJeff Garzik 		if (desc)
23030260731fSTejun Heo 			ata_link_printk(link, KERN_ERR, "%s\n", desc);
2304c6fd2807SJeff Garzik 	}
2305c6fd2807SJeff Garzik 
23066521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
23071333e194SRobert Hancock 	if (ehc->i.serror)
2308da0e21d3STejun Heo 		ata_link_printk(link, KERN_ERR,
23091333e194SRobert Hancock 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
23101333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
23111333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
23121333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
23131333e194SRobert Hancock 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
23141333e194SRobert Hancock 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
23151333e194SRobert Hancock 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
23161333e194SRobert Hancock 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
23171333e194SRobert Hancock 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
23181333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
23191333e194SRobert Hancock 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
23201333e194SRobert Hancock 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
23211333e194SRobert Hancock 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
23221333e194SRobert Hancock 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
23231333e194SRobert Hancock 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
23241333e194SRobert Hancock 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
23251333e194SRobert Hancock 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
23261333e194SRobert Hancock 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
23276521148cSRobert Hancock #endif
23281333e194SRobert Hancock 
2329c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2330c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
23318a937581STejun Heo 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2332abb6a889STejun Heo 		const u8 *cdb = qc->cdb;
2333abb6a889STejun Heo 		char data_buf[20] = "";
2334abb6a889STejun Heo 		char cdb_buf[70] = "";
2335c6fd2807SJeff Garzik 
23360260731fSTejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2337b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2338c6fd2807SJeff Garzik 			continue;
2339c6fd2807SJeff Garzik 
2340abb6a889STejun Heo 		if (qc->dma_dir != DMA_NONE) {
2341abb6a889STejun Heo 			static const char *dma_str[] = {
2342abb6a889STejun Heo 				[DMA_BIDIRECTIONAL]	= "bidi",
2343abb6a889STejun Heo 				[DMA_TO_DEVICE]		= "out",
2344abb6a889STejun Heo 				[DMA_FROM_DEVICE]	= "in",
2345abb6a889STejun Heo 			};
2346abb6a889STejun Heo 			static const char *prot_str[] = {
2347abb6a889STejun Heo 				[ATA_PROT_PIO]		= "pio",
2348abb6a889STejun Heo 				[ATA_PROT_DMA]		= "dma",
2349abb6a889STejun Heo 				[ATA_PROT_NCQ]		= "ncq",
23500dc36888STejun Heo 				[ATAPI_PROT_PIO]	= "pio",
23510dc36888STejun Heo 				[ATAPI_PROT_DMA]	= "dma",
2352abb6a889STejun Heo 			};
2353abb6a889STejun Heo 
2354abb6a889STejun Heo 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2355abb6a889STejun Heo 				 prot_str[qc->tf.protocol], qc->nbytes,
2356abb6a889STejun Heo 				 dma_str[qc->dma_dir]);
2357abb6a889STejun Heo 		}
2358abb6a889STejun Heo 
23596521148cSRobert Hancock 		if (ata_is_atapi(qc->tf.protocol)) {
23606521148cSRobert Hancock 			if (qc->scsicmd)
23616521148cSRobert Hancock 				scsi_print_command(qc->scsicmd);
23626521148cSRobert Hancock 			else
2363abb6a889STejun Heo 				snprintf(cdb_buf, sizeof(cdb_buf),
2364abb6a889STejun Heo 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2365abb6a889STejun Heo 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2366abb6a889STejun Heo 				 cdb[0], cdb[1], cdb[2], cdb[3],
2367abb6a889STejun Heo 				 cdb[4], cdb[5], cdb[6], cdb[7],
2368abb6a889STejun Heo 				 cdb[8], cdb[9], cdb[10], cdb[11],
2369abb6a889STejun Heo 				 cdb[12], cdb[13], cdb[14], cdb[15]);
23706521148cSRobert Hancock 		} else {
23716521148cSRobert Hancock 			const char *descr = ata_get_cmd_descript(cmd->command);
23726521148cSRobert Hancock 			if (descr)
23736521148cSRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
23746521148cSRobert Hancock 					"failed command: %s\n", descr);
23756521148cSRobert Hancock 		}
2376abb6a889STejun Heo 
23778a937581STejun Heo 		ata_dev_printk(qc->dev, KERN_ERR,
23788a937581STejun Heo 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2379abb6a889STejun Heo 			"tag %d%s\n         %s"
23808a937581STejun Heo 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
23815335b729STejun Heo 			"Emask 0x%x (%s)%s\n",
23828a937581STejun Heo 			cmd->command, cmd->feature, cmd->nsect,
23838a937581STejun Heo 			cmd->lbal, cmd->lbam, cmd->lbah,
23848a937581STejun Heo 			cmd->hob_feature, cmd->hob_nsect,
23858a937581STejun Heo 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2386abb6a889STejun Heo 			cmd->device, qc->tag, data_buf, cdb_buf,
23878a937581STejun Heo 			res->command, res->feature, res->nsect,
23888a937581STejun Heo 			res->lbal, res->lbam, res->lbah,
23898a937581STejun Heo 			res->hob_feature, res->hob_nsect,
23908a937581STejun Heo 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
23915335b729STejun Heo 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
23925335b729STejun Heo 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
23931333e194SRobert Hancock 
23946521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
23951333e194SRobert Hancock 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
23961333e194SRobert Hancock 				    ATA_ERR)) {
23971333e194SRobert Hancock 			if (res->command & ATA_BUSY)
23981333e194SRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
23991333e194SRobert Hancock 				  "status: { Busy }\n");
24001333e194SRobert Hancock 			else
24011333e194SRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
24021333e194SRobert Hancock 				  "status: { %s%s%s%s}\n",
24031333e194SRobert Hancock 				  res->command & ATA_DRDY ? "DRDY " : "",
24041333e194SRobert Hancock 				  res->command & ATA_DF ? "DF " : "",
24051333e194SRobert Hancock 				  res->command & ATA_DRQ ? "DRQ " : "",
24061333e194SRobert Hancock 				  res->command & ATA_ERR ? "ERR " : "");
24071333e194SRobert Hancock 		}
24081333e194SRobert Hancock 
24091333e194SRobert Hancock 		if (cmd->command != ATA_CMD_PACKET &&
24101333e194SRobert Hancock 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
24111333e194SRobert Hancock 				     ATA_ABORTED)))
24121333e194SRobert Hancock 			ata_dev_printk(qc->dev, KERN_ERR,
24131333e194SRobert Hancock 			  "error: { %s%s%s%s}\n",
24141333e194SRobert Hancock 			  res->feature & ATA_ICRC ? "ICRC " : "",
24151333e194SRobert Hancock 			  res->feature & ATA_UNC ? "UNC " : "",
24161333e194SRobert Hancock 			  res->feature & ATA_IDNF ? "IDNF " : "",
24171333e194SRobert Hancock 			  res->feature & ATA_ABORTED ? "ABRT " : "");
24186521148cSRobert Hancock #endif
2419c6fd2807SJeff Garzik 	}
2420c6fd2807SJeff Garzik }
2421c6fd2807SJeff Garzik 
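/*
 * For reference, the format strings above produce dmesg output along the
 * following lines (the values here are made up purely for illustration):
 *
 *	ata1.00: exception Emask 0x0 SAct 0x0 SErr 0x0 action 0x6 frozen
 *	ata1.00: failed command: READ DMA
 *	ata1.00: cmd c8/00:08:00:00:00/00:00:00:00:00/e0 tag 0 dma 4096 in
 *	         res 40/00:00:00:00:00/00:00:00:00:00/00 Emask 0x4 (timeout)
 *	ata1.00: status: { DRDY }
 */
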
24229b1e2658STejun Heo /**
24239b1e2658STejun Heo  *	ata_eh_report - report error handling to user
24249b1e2658STejun Heo  *	@ap: ATA port to report EH about
24259b1e2658STejun Heo  *
24269b1e2658STejun Heo  *	Report EH to user.
24279b1e2658STejun Heo  *
24289b1e2658STejun Heo  *	LOCKING:
24299b1e2658STejun Heo  *	None.
24309b1e2658STejun Heo  */
2431fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
24329b1e2658STejun Heo {
24339b1e2658STejun Heo 	struct ata_link *link;
24349b1e2658STejun Heo 
24351eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
24369b1e2658STejun Heo 		ata_eh_link_report(link);
24379b1e2658STejun Heo }
24389b1e2658STejun Heo 
2439cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2440b1c72916STejun Heo 			unsigned int *classes, unsigned long deadline,
2441b1c72916STejun Heo 			bool clear_classes)
2442c6fd2807SJeff Garzik {
2443f58229f8STejun Heo 	struct ata_device *dev;
2444c6fd2807SJeff Garzik 
2445b1c72916STejun Heo 	if (clear_classes)
24461eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
2447f58229f8STejun Heo 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2448c6fd2807SJeff Garzik 
2449f046519fSTejun Heo 	return reset(link, classes, deadline);
2450c6fd2807SJeff Garzik }
2451c6fd2807SJeff Garzik 
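/*
 * Decide whether a hardreset must be followed by a softreset.  A
 * follow-up SRST is performed when the hardreset method asked for one
 * by returning -EAGAIN, or when the link is the host link of a
 * PMP-capable port; it is never performed on offline links or links
 * flagged ATA_LFLAG_NO_SRST.
 */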
2452ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link,
24535dbfc9cbSTejun Heo 				       int rc, const unsigned int *classes)
2454c6fd2807SJeff Garzik {
245545db2f6cSTejun Heo 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2456ae791c05STejun Heo 		return 0;
24575dbfc9cbSTejun Heo 	if (rc == -EAGAIN)
2458c6fd2807SJeff Garzik 		return 1;
2459071f44b1STejun Heo 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
24603495de73STejun Heo 		return 1;
2461c6fd2807SJeff Garzik 	return 0;
2462c6fd2807SJeff Garzik }
2463c6fd2807SJeff Garzik 
2464fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2465c6fd2807SJeff Garzik 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2466c6fd2807SJeff Garzik 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2467c6fd2807SJeff Garzik {
2468afaa5c37STejun Heo 	struct ata_port *ap = link->ap;
2469b1c72916STejun Heo 	struct ata_link *slave = ap->slave_link;
2470936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2471705d2014SBartlomiej Zolnierkiewicz 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2472c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
2473416dc9edSTejun Heo 	unsigned int lflags = link->flags;
2474c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2475d8af0eb6STejun Heo 	int max_tries = 0, try = 0;
2476b1c72916STejun Heo 	struct ata_link *failed_link;
2477f58229f8STejun Heo 	struct ata_device *dev;
2478416dc9edSTejun Heo 	unsigned long deadline, now;
2479c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
2480afaa5c37STejun Heo 	unsigned long flags;
2481416dc9edSTejun Heo 	u32 sstatus;
2482b1c72916STejun Heo 	int nr_unknown, rc;
2483c6fd2807SJeff Garzik 
2484932648b0STejun Heo 	/*
2485932648b0STejun Heo 	 * Prepare to reset
2486932648b0STejun Heo 	 */
2487d8af0eb6STejun Heo 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2488d8af0eb6STejun Heo 		max_tries++;
248905944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_HRST)
249005944bdfSTejun Heo 		hardreset = NULL;
249105944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_SRST)
249205944bdfSTejun Heo 		softreset = NULL;
2493d8af0eb6STejun Heo 
249519b72321STejun Heo 	/* make sure each reset attempt is at least COOL_DOWN apart */
249519b72321STejun Heo 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
24960a2c0f56STejun Heo 		now = jiffies;
249719b72321STejun Heo 		WARN_ON(time_after(ehc->last_reset, now));
249819b72321STejun Heo 		deadline = ata_deadline(ehc->last_reset,
249919b72321STejun Heo 					ATA_EH_RESET_COOL_DOWN);
25000a2c0f56STejun Heo 		if (time_before(now, deadline))
25010a2c0f56STejun Heo 			schedule_timeout_uninterruptible(deadline - now);
250219b72321STejun Heo 	}
25030a2c0f56STejun Heo 
2504afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2505afaa5c37STejun Heo 	ap->pflags |= ATA_PFLAG_RESETTING;
2506afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2507afaa5c37STejun Heo 
2508cf480626STejun Heo 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2509c6fd2807SJeff Garzik 
25101eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2511cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2512cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2513cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2514cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2515cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2516cdeab114STejun Heo 		 * bus as we may be talking too fast.
2517cdeab114STejun Heo 		 */
2518cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2519cdeab114STejun Heo 
2520cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2521cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2522cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2523cdeab114STejun Heo 		 * configuring devices.
2524cdeab114STejun Heo 		 */
2525cdeab114STejun Heo 		if (ap->ops->set_piomode)
2526cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2527cdeab114STejun Heo 	}
2528cdeab114STejun Heo 
2529cf480626STejun Heo 	/* prefer hardreset */
2530932648b0STejun Heo 	reset = NULL;
2531cf480626STejun Heo 	ehc->i.action &= ~ATA_EH_RESET;
2532cf480626STejun Heo 	if (hardreset) {
2533cf480626STejun Heo 		reset = hardreset;
2534a674050eSTejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
25354f7faa3fSTejun Heo 	} else if (softreset) {
2536cf480626STejun Heo 		reset = softreset;
2537a674050eSTejun Heo 		ehc->i.action |= ATA_EH_SOFTRESET;
2538cf480626STejun Heo 	}
2539c6fd2807SJeff Garzik 
2540c6fd2807SJeff Garzik 	if (prereset) {
2541b1c72916STejun Heo 		unsigned long deadline = ata_deadline(jiffies,
2542b1c72916STejun Heo 						      ATA_EH_PRERESET_TIMEOUT);
2543b1c72916STejun Heo 
2544b1c72916STejun Heo 		if (slave) {
2545b1c72916STejun Heo 			sehc->i.action &= ~ATA_EH_RESET;
2546b1c72916STejun Heo 			sehc->i.action |= ehc->i.action;
2547b1c72916STejun Heo 		}
2548b1c72916STejun Heo 
2549b1c72916STejun Heo 		rc = prereset(link, deadline);
2550b1c72916STejun Heo 
2551b1c72916STejun Heo 		/* If present, do prereset on slave link too.  Reset
2552b1c72916STejun Heo 		 * is skipped iff both master and slave links report
2553b1c72916STejun Heo 		 * -ENOENT or clear ATA_EH_RESET.
2554b1c72916STejun Heo 		 */
2555b1c72916STejun Heo 		if (slave && (rc == 0 || rc == -ENOENT)) {
2556b1c72916STejun Heo 			int tmp;
2557b1c72916STejun Heo 
2558b1c72916STejun Heo 			tmp = prereset(slave, deadline);
2559b1c72916STejun Heo 			if (tmp != -ENOENT)
2560b1c72916STejun Heo 				rc = tmp;
2561b1c72916STejun Heo 
2562b1c72916STejun Heo 			ehc->i.action |= sehc->i.action;
2563b1c72916STejun Heo 		}
2564b1c72916STejun Heo 
2565c6fd2807SJeff Garzik 		if (rc) {
2566c961922bSAlan Cox 			if (rc == -ENOENT) {
2567cc0680a5STejun Heo 				ata_link_printk(link, KERN_DEBUG,
25684aa9ab67STejun Heo 						"port disabled. ignoring.\n");
2569cf480626STejun Heo 				ehc->i.action &= ~ATA_EH_RESET;
25704aa9ab67STejun Heo 
25711eca4365STejun Heo 				ata_for_each_dev(dev, link, ALL)
2572f58229f8STejun Heo 					classes[dev->devno] = ATA_DEV_NONE;
25734aa9ab67STejun Heo 
25744aa9ab67STejun Heo 				rc = 0;
2575c961922bSAlan Cox 			} else
2576cc0680a5STejun Heo 				ata_link_printk(link, KERN_ERR,
2577c6fd2807SJeff Garzik 					"prereset failed (errno=%d)\n", rc);
2578fccb6ea5STejun Heo 			goto out;
2579c6fd2807SJeff Garzik 		}
2580c6fd2807SJeff Garzik 
2581932648b0STejun Heo 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2582d6515e6fSTejun Heo 		 * bang classes, thaw and return.
2583932648b0STejun Heo 		 */
2584932648b0STejun Heo 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
25851eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
2586f58229f8STejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2587d6515e6fSTejun Heo 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2588d6515e6fSTejun Heo 			    ata_is_host_link(link))
2589d6515e6fSTejun Heo 				ata_eh_thaw_port(ap);
2590fccb6ea5STejun Heo 			rc = 0;
2591fccb6ea5STejun Heo 			goto out;
2592c6fd2807SJeff Garzik 		}
2593932648b0STejun Heo 	}
2594c6fd2807SJeff Garzik 
2595c6fd2807SJeff Garzik  retry:
2596932648b0STejun Heo 	/*
2597932648b0STejun Heo 	 * Perform reset
2598932648b0STejun Heo 	 */
2599dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2600dc98c32cSTejun Heo 		ata_eh_freeze_port(ap);
2601dc98c32cSTejun Heo 
2602341c2c95STejun Heo 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
260331daabdaSTejun Heo 
2604932648b0STejun Heo 	if (reset) {
2605c6fd2807SJeff Garzik 		if (verbose)
2606cc0680a5STejun Heo 			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2607c6fd2807SJeff Garzik 					reset == softreset ? "soft" : "hard");
2608c6fd2807SJeff Garzik 
2609c6fd2807SJeff Garzik 		/* mark that this EH session started with reset */
261019b72321STejun Heo 		ehc->last_reset = jiffies;
26110d64a233STejun Heo 		if (reset == hardreset)
26120d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
26130d64a233STejun Heo 		else
26140d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2615c6fd2807SJeff Garzik 
2616b1c72916STejun Heo 		rc = ata_do_reset(link, reset, classes, deadline, true);
2617b1c72916STejun Heo 		if (rc && rc != -EAGAIN) {
2618b1c72916STejun Heo 			failed_link = link;
26195dbfc9cbSTejun Heo 			goto fail;
2620b1c72916STejun Heo 		}
2621c6fd2807SJeff Garzik 
2622b1c72916STejun Heo 		/* hardreset slave link if existent */
2623b1c72916STejun Heo 		if (slave && reset == hardreset) {
2624b1c72916STejun Heo 			int tmp;
2625b1c72916STejun Heo 
2626b1c72916STejun Heo 			if (verbose)
2627b1c72916STejun Heo 				ata_link_printk(slave, KERN_INFO,
2628b1c72916STejun Heo 						"hard resetting link\n");
2629b1c72916STejun Heo 
2630b1c72916STejun Heo 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2631b1c72916STejun Heo 			tmp = ata_do_reset(slave, reset, classes, deadline,
2632b1c72916STejun Heo 					   false);
2633b1c72916STejun Heo 			switch (tmp) {
2634b1c72916STejun Heo 			case -EAGAIN:
2635b1c72916STejun Heo 				rc = -EAGAIN;
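				/* fall through */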
2636b1c72916STejun Heo 			case 0:
2637b1c72916STejun Heo 				break;
2638b1c72916STejun Heo 			default:
2639b1c72916STejun Heo 				failed_link = slave;
2640b1c72916STejun Heo 				rc = tmp;
2641b1c72916STejun Heo 				goto fail;
2642b1c72916STejun Heo 			}
2643b1c72916STejun Heo 		}
2644b1c72916STejun Heo 
2645b1c72916STejun Heo 		/* perform follow-up SRST if necessary */
2646c6fd2807SJeff Garzik 		if (reset == hardreset &&
26475dbfc9cbSTejun Heo 		    ata_eh_followup_srst_needed(link, rc, classes)) {
2648c6fd2807SJeff Garzik 			reset = softreset;
2649c6fd2807SJeff Garzik 
2650c6fd2807SJeff Garzik 			if (!reset) {
2651cc0680a5STejun Heo 				ata_link_printk(link, KERN_ERR,
2652c6fd2807SJeff Garzik 						"follow-up softreset required "
2653c6fd2807SJeff Garzik 						"but no softreset available\n");
2654b1c72916STejun Heo 				failed_link = link;
2655fccb6ea5STejun Heo 				rc = -EINVAL;
265608cf69d0STejun Heo 				goto fail;
2657c6fd2807SJeff Garzik 			}
2658c6fd2807SJeff Garzik 
2659cf480626STejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2660b1c72916STejun Heo 			rc = ata_do_reset(link, reset, classes, deadline, true);
2661fe2c4d01STejun Heo 			if (rc) {
2662fe2c4d01STejun Heo 				failed_link = link;
2663fe2c4d01STejun Heo 				goto fail;
2664fe2c4d01STejun Heo 			}
2665c6fd2807SJeff Garzik 		}
2666932648b0STejun Heo 	} else {
2667932648b0STejun Heo 		if (verbose)
2668932648b0STejun Heo 			ata_link_printk(link, KERN_INFO, "no reset method "
2669932648b0STejun Heo 					"available, skipping reset\n");
2670932648b0STejun Heo 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2671932648b0STejun Heo 			lflags |= ATA_LFLAG_ASSUME_ATA;
2672932648b0STejun Heo 	}
2673008a7896STejun Heo 
2674932648b0STejun Heo 	/*
2675932648b0STejun Heo 	 * Post-reset processing
2676932648b0STejun Heo 	 */
26771eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2678416dc9edSTejun Heo 		/* After the reset, the device state is PIO 0 and the
2679416dc9edSTejun Heo 		 * controller state is undefined.  Reset also wakes up
2680416dc9edSTejun Heo 		 * drives from sleeping mode.
2681c6fd2807SJeff Garzik 		 */
2682f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2683054a5fbaSTejun Heo 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2684c6fd2807SJeff Garzik 
26853b761d3dSTejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
26863b761d3dSTejun Heo 			continue;
26873b761d3dSTejun Heo 
26884ccd3329STejun Heo 		/* apply class override */
2689416dc9edSTejun Heo 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2690ae791c05STejun Heo 			classes[dev->devno] = ATA_DEV_ATA;
2691416dc9edSTejun Heo 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2692816ab897STejun Heo 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2693ae791c05STejun Heo 	}
2694ae791c05STejun Heo 
2695008a7896STejun Heo 	/* record current link speed */
2696936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2697936fd732STejun Heo 		link->sata_spd = (sstatus >> 4) & 0xf;
2698b1c72916STejun Heo 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2699b1c72916STejun Heo 		slave->sata_spd = (sstatus >> 4) & 0xf;
2700008a7896STejun Heo 
2701dc98c32cSTejun Heo 	/* thaw the port */
2702dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2703dc98c32cSTejun Heo 		ata_eh_thaw_port(ap);
2704dc98c32cSTejun Heo 
2705f046519fSTejun Heo 	/* postreset() should clear hardware SError.  Although SError
2706f046519fSTejun Heo 	 * is cleared during link resume, clearing SError here is
2707f046519fSTejun Heo 	 * necessary as some PHYs raise hotplug events after SRST.
2708f046519fSTejun Heo 	 * This introduces race condition where hotplug occurs between
2709f046519fSTejun Heo 	 * This introduces a race condition where a hotplug event occurs
2710f046519fSTejun Heo 	 * between the reset and this point.  The race is handled by
2711f046519fSTejun Heo 	 * cross-checking link onlineness and the classification result later.
2712b1c72916STejun Heo 	if (postreset) {
2713cc0680a5STejun Heo 		postreset(link, classes);
2714b1c72916STejun Heo 		if (slave)
2715b1c72916STejun Heo 			postreset(slave, classes);
2716b1c72916STejun Heo 	}
2717c6fd2807SJeff Garzik 
27181e641060STejun Heo 	/*
27191e641060STejun Heo 	 * Some controllers can't be frozen very well and may set
27201e641060STejun Heo 	 * spurious error conditions during reset.  Clear accumulated
27211e641060STejun Heo 	 * error information.  As reset is the final recovery action,
27221e641060STejun Heo 	 * nothing is lost by doing this.
27231e641060STejun Heo 	 */
2724f046519fSTejun Heo 	spin_lock_irqsave(link->ap->lock, flags);
27251e641060STejun Heo 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2726b1c72916STejun Heo 	if (slave)
27271e641060STejun Heo 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
27281e641060STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2729f046519fSTejun Heo 	spin_unlock_irqrestore(link->ap->lock, flags);
2730f046519fSTejun Heo 
27313b761d3dSTejun Heo 	/*
27323b761d3dSTejun Heo 	 * Make sure onlineness and classification result correspond.
2733f046519fSTejun Heo 	 * Hotplug could have happened during reset and some
2734f046519fSTejun Heo 	 * controllers fail to wait while a drive is spinning up after
2735f046519fSTejun Heo 	 * being hotplugged causing misdetection.  By cross checking
27363b761d3dSTejun Heo 	 * link on/offlineness and classification result, those
27373b761d3dSTejun Heo 	 * conditions can be reliably detected and retried.
2738f046519fSTejun Heo 	 */
2739b1c72916STejun Heo 	nr_unknown = 0;
27401eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
27413b761d3dSTejun Heo 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2742b1c72916STejun Heo 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
27433b761d3dSTejun Heo 				ata_dev_printk(dev, KERN_DEBUG, "link online "
27443b761d3dSTejun Heo 					       "but device misclassified\n");
2745f046519fSTejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2746b1c72916STejun Heo 				nr_unknown++;
2747b1c72916STejun Heo 			}
27483b761d3dSTejun Heo 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
27493b761d3dSTejun Heo 			if (ata_class_enabled(classes[dev->devno]))
27503b761d3dSTejun Heo 				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
27513b761d3dSTejun Heo 					       "clearing class %d to NONE\n",
27523b761d3dSTejun Heo 					       classes[dev->devno]);
27533b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
27543b761d3dSTejun Heo 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
27553b761d3dSTejun Heo 			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
27563b761d3dSTejun Heo 				       "clearing UNKNOWN to NONE\n");
27573b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
27583b761d3dSTejun Heo 		}
2759f046519fSTejun Heo 	}
2760f046519fSTejun Heo 
2761b1c72916STejun Heo 	if (classify && nr_unknown) {
2762f046519fSTejun Heo 		if (try < max_tries) {
2763f046519fSTejun Heo 			ata_link_printk(link, KERN_WARNING, "link online but "
27643b761d3dSTejun Heo 					"%d devices misclassified, retrying\n",
27653b761d3dSTejun Heo 					nr_unknown);
2766b1c72916STejun Heo 			failed_link = link;
2767f046519fSTejun Heo 			rc = -EAGAIN;
2768f046519fSTejun Heo 			goto fail;
2769f046519fSTejun Heo 		}
2770f046519fSTejun Heo 		ata_link_printk(link, KERN_WARNING,
27713b761d3dSTejun Heo 				"link online but %d devices misclassified, "
27723b761d3dSTejun Heo 				"device detection might fail\n", nr_unknown);
2773f046519fSTejun Heo 	}
2774f046519fSTejun Heo 
2775c6fd2807SJeff Garzik 	/* reset successful, schedule revalidation */
2776cf480626STejun Heo 	ata_eh_done(link, NULL, ATA_EH_RESET);
2777b1c72916STejun Heo 	if (slave)
2778b1c72916STejun Heo 		ata_eh_done(slave, NULL, ATA_EH_RESET);
277919b72321STejun Heo 	ehc->last_reset = jiffies;	/* update to completion time */
2780c6fd2807SJeff Garzik 	ehc->i.action |= ATA_EH_REVALIDATE;
2781416dc9edSTejun Heo 
2782416dc9edSTejun Heo 	rc = 0;
2783fccb6ea5STejun Heo  out:
2784fccb6ea5STejun Heo 	/* clear hotplug flag */
2785fccb6ea5STejun Heo 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2786b1c72916STejun Heo 	if (slave)
2787b1c72916STejun Heo 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2788afaa5c37STejun Heo 
2789afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2790afaa5c37STejun Heo 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2791afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2792afaa5c37STejun Heo 
2793c6fd2807SJeff Garzik 	return rc;
2794416dc9edSTejun Heo 
2795416dc9edSTejun Heo  fail:
27965958e302STejun Heo 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
27975958e302STejun Heo 	if (!ata_is_host_link(link) &&
27985958e302STejun Heo 	    sata_scr_read(link, SCR_STATUS, &sstatus))
27995958e302STejun Heo 		rc = -ERESTART;
28005958e302STejun Heo 
2801416dc9edSTejun Heo 	if (rc == -ERESTART || try >= max_tries)
2802416dc9edSTejun Heo 		goto out;
2803416dc9edSTejun Heo 
2804416dc9edSTejun Heo 	now = jiffies;
2805416dc9edSTejun Heo 	if (time_before(now, deadline)) {
2806416dc9edSTejun Heo 		unsigned long delta = deadline - now;
2807416dc9edSTejun Heo 
2808b1c72916STejun Heo 		ata_link_printk(failed_link, KERN_WARNING,
28090a2c0f56STejun Heo 			"reset failed (errno=%d), retrying in %u secs\n",
28100a2c0f56STejun Heo 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2811416dc9edSTejun Heo 
2812416dc9edSTejun Heo 		while (delta)
2813416dc9edSTejun Heo 			delta = schedule_timeout_uninterruptible(delta);
2814416dc9edSTejun Heo 	}
2815416dc9edSTejun Heo 
2816b1c72916STejun Heo 	if (try == max_tries - 1) {
2817a07d499bSTejun Heo 		sata_down_spd_limit(link, 0);
2818b1c72916STejun Heo 		if (slave)
2819a07d499bSTejun Heo 			sata_down_spd_limit(slave, 0);
2820b1c72916STejun Heo 	} else if (rc == -EPIPE)
2821a07d499bSTejun Heo 		sata_down_spd_limit(failed_link, 0);
2822b1c72916STejun Heo 
2823416dc9edSTejun Heo 	if (hardreset)
2824416dc9edSTejun Heo 		reset = hardreset;
2825416dc9edSTejun Heo 	goto retry;
2826c6fd2807SJeff Garzik }
2827c6fd2807SJeff Garzik 
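/*
 * Note: the prereset/softreset/hardreset/postreset methods used above are
 * normally the ones a driver publishes in its ata_port_operations; they
 * reach ata_eh_reset() through the driver's ->error_handler and
 * ata_do_eh().  A minimal sketch (hypothetical driver, for illustration
 * only):
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ap->ops->prereset, ap->ops->softreset,
 *			  sata_std_hardreset, ap->ops->postreset);
 *	}
 */
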
282845fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
282945fabbb7SElias Oltmanns {
283045fabbb7SElias Oltmanns 	struct ata_link *link;
283145fabbb7SElias Oltmanns 	struct ata_device *dev;
283245fabbb7SElias Oltmanns 	unsigned long flags;
283345fabbb7SElias Oltmanns 
283445fabbb7SElias Oltmanns 	/*
283545fabbb7SElias Oltmanns 	 * This function can be thought of as an extended version of
283645fabbb7SElias Oltmanns 	 * ata_eh_about_to_do() specially crafted to accommodate the
283745fabbb7SElias Oltmanns 	 * requirements of ATA_EH_PARK handling. Since the EH thread
283845fabbb7SElias Oltmanns 	 * does not leave the do {} while () loop in ata_eh_recover as
283945fabbb7SElias Oltmanns 	 * long as the timeout for a park request to *one* device on
284045fabbb7SElias Oltmanns 	 * the port has not expired, and since we still want to pick
284145fabbb7SElias Oltmanns 	 * up park requests to other devices on the same port or
284245fabbb7SElias Oltmanns 	 * timeout updates for the same device, we have to pull
284345fabbb7SElias Oltmanns 	 * ATA_EH_PARK actions from eh_info into eh_context.i
284445fabbb7SElias Oltmanns 	 * ourselves at the beginning of each pass over the loop.
284545fabbb7SElias Oltmanns 	 *
284645fabbb7SElias Oltmanns 	 * Additionally, all write accesses to &ap->park_req_pending
284745fabbb7SElias Oltmanns 	 * through INIT_COMPLETION() (see below) or complete_all()
284845fabbb7SElias Oltmanns 	 * (see ata_scsi_park_store()) are protected by the host lock.
284945fabbb7SElias Oltmanns 	 * As a result we have that park_req_pending.done is zero on
285045fabbb7SElias Oltmanns 	 * exit from this function, i.e. when ATA_EH_PARK actions for
285145fabbb7SElias Oltmanns 	 * *all* devices on port ap have been pulled into the
285245fabbb7SElias Oltmanns 	 * respective eh_context structs. If, and only if,
285345fabbb7SElias Oltmanns 	 * park_req_pending.done is non-zero by the time we reach
285445fabbb7SElias Oltmanns 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
285545fabbb7SElias Oltmanns 	 * has been scheduled for at least one of the devices on port
285645fabbb7SElias Oltmanns 	 * ap and we have to cycle over the do {} while () loop in
285745fabbb7SElias Oltmanns 	 * ata_eh_recover() again.
285845fabbb7SElias Oltmanns 	 */
285945fabbb7SElias Oltmanns 
286045fabbb7SElias Oltmanns 	spin_lock_irqsave(ap->lock, flags);
286145fabbb7SElias Oltmanns 	INIT_COMPLETION(ap->park_req_pending);
28621eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
28631eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
286445fabbb7SElias Oltmanns 			struct ata_eh_info *ehi = &link->eh_info;
286545fabbb7SElias Oltmanns 
286645fabbb7SElias Oltmanns 			link->eh_context.i.dev_action[dev->devno] |=
286745fabbb7SElias Oltmanns 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
286845fabbb7SElias Oltmanns 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
286945fabbb7SElias Oltmanns 		}
287045fabbb7SElias Oltmanns 	}
287145fabbb7SElias Oltmanns 	spin_unlock_irqrestore(ap->lock, flags);
287245fabbb7SElias Oltmanns }
287345fabbb7SElias Oltmanns 
287445fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
287545fabbb7SElias Oltmanns {
287645fabbb7SElias Oltmanns 	struct ata_eh_context *ehc = &dev->link->eh_context;
287745fabbb7SElias Oltmanns 	struct ata_taskfile tf;
287845fabbb7SElias Oltmanns 	unsigned int err_mask;
287945fabbb7SElias Oltmanns 
288045fabbb7SElias Oltmanns 	ata_tf_init(dev, &tf);
288145fabbb7SElias Oltmanns 	if (park) {
288245fabbb7SElias Oltmanns 		ehc->unloaded_mask |= 1 << dev->devno;
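		/*
		 * IDLE IMMEDIATE with UNLOAD FEATURE per ATA8-ACS: feature
		 * 0x44 selects the unload subcommand and the LBA field
		 * carries 0x554e4c ("UNL"); on success the device returns
		 * 0xc4 in LBA low, which is checked below.
		 */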
288345fabbb7SElias Oltmanns 		tf.command = ATA_CMD_IDLEIMMEDIATE;
288445fabbb7SElias Oltmanns 		tf.feature = 0x44;
288545fabbb7SElias Oltmanns 		tf.lbal = 0x4c;
288645fabbb7SElias Oltmanns 		tf.lbam = 0x4e;
288745fabbb7SElias Oltmanns 		tf.lbah = 0x55;
288845fabbb7SElias Oltmanns 	} else {
288945fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
289045fabbb7SElias Oltmanns 		tf.command = ATA_CMD_CHK_POWER;
289145fabbb7SElias Oltmanns 	}
289245fabbb7SElias Oltmanns 
289345fabbb7SElias Oltmanns 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
289445fabbb7SElias Oltmanns 	tf.protocol |= ATA_PROT_NODATA;
289545fabbb7SElias Oltmanns 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
289645fabbb7SElias Oltmanns 	if (park && (err_mask || tf.lbal != 0xc4)) {
289745fabbb7SElias Oltmanns 		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
289845fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
289945fabbb7SElias Oltmanns 	}
290045fabbb7SElias Oltmanns }
290145fabbb7SElias Oltmanns 
29020260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
2903c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
2904c6fd2807SJeff Garzik {
29050260731fSTejun Heo 	struct ata_port *ap = link->ap;
29060260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2907c6fd2807SJeff Garzik 	struct ata_device *dev;
29088c3c52a8STejun Heo 	unsigned int new_mask = 0;
2909c6fd2807SJeff Garzik 	unsigned long flags;
2910f58229f8STejun Heo 	int rc = 0;
2911c6fd2807SJeff Garzik 
2912c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
2913c6fd2807SJeff Garzik 
29148c3c52a8STejun Heo 	/* For PATA drive side cable detection to work, IDENTIFY must
29158c3c52a8STejun Heo 	 * be done backwards such that PDIAG- is released by the slave
29168c3c52a8STejun Heo 	 * device before the master device is identified.
29178c3c52a8STejun Heo 	 */
29181eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL_REVERSE) {
2919f58229f8STejun Heo 		unsigned int action = ata_eh_dev_action(dev);
2920f58229f8STejun Heo 		unsigned int readid_flags = 0;
2921c6fd2807SJeff Garzik 
2922bff04647STejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET)
2923bff04647STejun Heo 			readid_flags |= ATA_READID_POSTRESET;
2924bff04647STejun Heo 
29259666f400STejun Heo 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2926633273a3STejun Heo 			WARN_ON(dev->class == ATA_DEV_PMP);
2927633273a3STejun Heo 
2928b1c72916STejun Heo 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2929c6fd2807SJeff Garzik 				rc = -EIO;
29308c3c52a8STejun Heo 				goto err;
2931c6fd2807SJeff Garzik 			}
2932c6fd2807SJeff Garzik 
29330260731fSTejun Heo 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2934422c9daaSTejun Heo 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2935422c9daaSTejun Heo 						readid_flags);
2936c6fd2807SJeff Garzik 			if (rc)
29378c3c52a8STejun Heo 				goto err;
2938c6fd2807SJeff Garzik 
29390260731fSTejun Heo 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2940c6fd2807SJeff Garzik 
2941baa1e78aSTejun Heo 			/* Configuration may have changed, reconfigure
2942baa1e78aSTejun Heo 			 * transfer mode.
2943baa1e78aSTejun Heo 			 */
2944baa1e78aSTejun Heo 			ehc->i.flags |= ATA_EHI_SETMODE;
2945baa1e78aSTejun Heo 
2946c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
2947c6fd2807SJeff Garzik 			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
2948c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
2949c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
2950c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
2951842faa6cSTejun Heo 			/* Temporarily set dev->class, it will be
2952842faa6cSTejun Heo 			 * permanently set once all configurations are
2953842faa6cSTejun Heo 			 * complete.  This is necessary because new
2954842faa6cSTejun Heo 			 * device configuration is done in two
2955842faa6cSTejun Heo 			 * separate loops.
2956842faa6cSTejun Heo 			 */
2957c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
2958c6fd2807SJeff Garzik 
2959633273a3STejun Heo 			if (dev->class == ATA_DEV_PMP)
2960633273a3STejun Heo 				rc = sata_pmp_attach(dev);
2961633273a3STejun Heo 			else
2962633273a3STejun Heo 				rc = ata_dev_read_id(dev, &dev->class,
2963633273a3STejun Heo 						     readid_flags, dev->id);
2964842faa6cSTejun Heo 
2965842faa6cSTejun Heo 			/* read_id might have changed class, store and reset */
2966842faa6cSTejun Heo 			ehc->classes[dev->devno] = dev->class;
2967842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
2968842faa6cSTejun Heo 
29698c3c52a8STejun Heo 			switch (rc) {
29708c3c52a8STejun Heo 			case 0:
297199cf610aSTejun Heo 				/* clear error info accumulated during probe */
297299cf610aSTejun Heo 				ata_ering_clear(&dev->ering);
2973f58229f8STejun Heo 				new_mask |= 1 << dev->devno;
29748c3c52a8STejun Heo 				break;
29758c3c52a8STejun Heo 			case -ENOENT:
297655a8e2c8STejun Heo 				/* IDENTIFY was issued to non-existent
297755a8e2c8STejun Heo 				 * device.  No need to reset.  Just
2978842faa6cSTejun Heo 				 * thaw and ignore the device.
297955a8e2c8STejun Heo 				 */
298055a8e2c8STejun Heo 				ata_eh_thaw_port(ap);
2981c6fd2807SJeff Garzik 				break;
29828c3c52a8STejun Heo 			default:
29838c3c52a8STejun Heo 				goto err;
29848c3c52a8STejun Heo 			}
29858c3c52a8STejun Heo 		}
2986c6fd2807SJeff Garzik 	}
2987c6fd2807SJeff Garzik 
2988c1c4e8d5STejun Heo 	/* PDIAG- should have been released, ask cable type if post-reset */
298933267325STejun Heo 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
299033267325STejun Heo 		if (ap->ops->cable_detect)
2991c1c4e8d5STejun Heo 			ap->cbl = ap->ops->cable_detect(ap);
299233267325STejun Heo 		ata_force_cbl(ap);
299333267325STejun Heo 	}
2994c1c4e8d5STejun Heo 
29958c3c52a8STejun Heo 	/* Configure new devices forward such that user doesn't see
29968c3c52a8STejun Heo 	 * device detection messages backwards.
29978c3c52a8STejun Heo 	 */
29981eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
29994f7c2874STejun Heo 		if (!(new_mask & (1 << dev->devno)))
30008c3c52a8STejun Heo 			continue;
30018c3c52a8STejun Heo 
3002842faa6cSTejun Heo 		dev->class = ehc->classes[dev->devno];
3003842faa6cSTejun Heo 
30044f7c2874STejun Heo 		if (dev->class == ATA_DEV_PMP)
30054f7c2874STejun Heo 			continue;
30064f7c2874STejun Heo 
30078c3c52a8STejun Heo 		ehc->i.flags |= ATA_EHI_PRINTINFO;
30088c3c52a8STejun Heo 		rc = ata_dev_configure(dev);
30098c3c52a8STejun Heo 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3010842faa6cSTejun Heo 		if (rc) {
3011842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
30128c3c52a8STejun Heo 			goto err;
3013842faa6cSTejun Heo 		}
30148c3c52a8STejun Heo 
3015c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
3016c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3017c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3018baa1e78aSTejun Heo 
301955a8e2c8STejun Heo 		/* new device discovered, configure xfermode */
3020baa1e78aSTejun Heo 		ehc->i.flags |= ATA_EHI_SETMODE;
3021c6fd2807SJeff Garzik 	}
3022c6fd2807SJeff Garzik 
30238c3c52a8STejun Heo 	return 0;
30248c3c52a8STejun Heo 
30258c3c52a8STejun Heo  err:
3026c6fd2807SJeff Garzik 	*r_failed_dev = dev;
30278c3c52a8STejun Heo 	DPRINTK("EXIT rc=%d\n", rc);
3028c6fd2807SJeff Garzik 	return rc;
3029c6fd2807SJeff Garzik }
3030c6fd2807SJeff Garzik 
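/*
 * Ordering example: on a two-device PATA link the loops above run
 * IDENTIFY against device 1 before device 0 (ALL_REVERSE, so PDIAG- is
 * released by the slave first) and then configure device 0 before
 * device 1, so detection messages appear in the usual order.
 */
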
30316f1d1e3aSTejun Heo /**
30326f1d1e3aSTejun Heo  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
30336f1d1e3aSTejun Heo  *	@link: link on which timings will be programmed
303498a1708dSMartin Olsson  *	@r_failed_dev: out parameter for failed device
30356f1d1e3aSTejun Heo  *
30366f1d1e3aSTejun Heo  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
30376f1d1e3aSTejun Heo  *	ata_set_mode() fails, pointer to the failing device is
30386f1d1e3aSTejun Heo  *	returned in @r_failed_dev.
30396f1d1e3aSTejun Heo  *
30406f1d1e3aSTejun Heo  *	LOCKING:
30416f1d1e3aSTejun Heo  *	PCI/etc. bus probe sem.
30426f1d1e3aSTejun Heo  *
30436f1d1e3aSTejun Heo  *	RETURNS:
30446f1d1e3aSTejun Heo  *	0 on success, negative errno otherwise
30456f1d1e3aSTejun Heo  */
30466f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
30476f1d1e3aSTejun Heo {
30486f1d1e3aSTejun Heo 	struct ata_port *ap = link->ap;
304900115e0fSTejun Heo 	struct ata_device *dev;
305000115e0fSTejun Heo 	int rc;
30516f1d1e3aSTejun Heo 
305276326ac1STejun Heo 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
30531eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
305476326ac1STejun Heo 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
305576326ac1STejun Heo 			struct ata_ering_entry *ent;
305676326ac1STejun Heo 
305776326ac1STejun Heo 			ent = ata_ering_top(&dev->ering);
305876326ac1STejun Heo 			if (ent)
305976326ac1STejun Heo 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
306076326ac1STejun Heo 		}
306176326ac1STejun Heo 	}
306276326ac1STejun Heo 
30636f1d1e3aSTejun Heo 	/* has private set_mode? */
30646f1d1e3aSTejun Heo 	if (ap->ops->set_mode)
306500115e0fSTejun Heo 		rc = ap->ops->set_mode(link, r_failed_dev);
306600115e0fSTejun Heo 	else
306700115e0fSTejun Heo 		rc = ata_do_set_mode(link, r_failed_dev);
306800115e0fSTejun Heo 
306900115e0fSTejun Heo 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
30701eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
307100115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
307200115e0fSTejun Heo 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
307300115e0fSTejun Heo 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
307400115e0fSTejun Heo 
307500115e0fSTejun Heo 		if (dev->xfer_mode != saved_xfer_mode ||
307600115e0fSTejun Heo 		    ata_ncq_enabled(dev) != saved_ncq)
307700115e0fSTejun Heo 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
307800115e0fSTejun Heo 	}
307900115e0fSTejun Heo 
308000115e0fSTejun Heo 	return rc;
30816f1d1e3aSTejun Heo }
30826f1d1e3aSTejun Heo 
308311fc33daSTejun Heo /**
308411fc33daSTejun Heo  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
308511fc33daSTejun Heo  *	@dev: ATAPI device to clear UA for
308611fc33daSTejun Heo  *
308711fc33daSTejun Heo  *	Resets and other operations can make an ATAPI device raise
308811fc33daSTejun Heo  *	UNIT ATTENTION which causes the next operation to fail.  This
308911fc33daSTejun Heo  *	function clears UA.
309011fc33daSTejun Heo  *
309111fc33daSTejun Heo  *	LOCKING:
309211fc33daSTejun Heo  *	EH context (may sleep).
309311fc33daSTejun Heo  *
309411fc33daSTejun Heo  *	RETURNS:
309511fc33daSTejun Heo  *	0 on success, -errno on failure.
309611fc33daSTejun Heo  */
309711fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
309811fc33daSTejun Heo {
309911fc33daSTejun Heo 	int i;
310011fc33daSTejun Heo 
310111fc33daSTejun Heo 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3102b5357081STejun Heo 		u8 *sense_buffer = dev->link->ap->sector_buf;
310311fc33daSTejun Heo 		u8 sense_key = 0;
310411fc33daSTejun Heo 		unsigned int err_mask;
310511fc33daSTejun Heo 
310611fc33daSTejun Heo 		err_mask = atapi_eh_tur(dev, &sense_key);
310711fc33daSTejun Heo 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
310811fc33daSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
310911fc33daSTejun Heo 				"failed (err_mask=0x%x)\n", err_mask);
311011fc33daSTejun Heo 			return -EIO;
311111fc33daSTejun Heo 		}
311211fc33daSTejun Heo 
311311fc33daSTejun Heo 		if (!err_mask || sense_key != UNIT_ATTENTION)
311411fc33daSTejun Heo 			return 0;
311511fc33daSTejun Heo 
311611fc33daSTejun Heo 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
311711fc33daSTejun Heo 		if (err_mask) {
311811fc33daSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
311911fc33daSTejun Heo 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
312011fc33daSTejun Heo 			return -EIO;
312111fc33daSTejun Heo 		}
312211fc33daSTejun Heo 	}
312311fc33daSTejun Heo 
312411fc33daSTejun Heo 	ata_dev_printk(dev, KERN_WARNING,
312511fc33daSTejun Heo 		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
312611fc33daSTejun Heo 
312711fc33daSTejun Heo 	return 0;
312811fc33daSTejun Heo }
312911fc33daSTejun Heo 
31306013efd8STejun Heo /**
31316013efd8STejun Heo  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
31326013efd8STejun Heo  *	@dev: ATA device which may need FLUSH retry
31336013efd8STejun Heo  *
31346013efd8STejun Heo  *	If @dev failed FLUSH, it needs to be reported to the upper layer
31356013efd8STejun Heo  *	immediately as it means that @dev failed to remap and has already
31366013efd8STejun Heo  *	lost at least a sector, and further FLUSH retries won't make
31376013efd8STejun Heo  *	any difference to the lost sector.  However, if FLUSH failed
31386013efd8STejun Heo  *	for other reasons, for example a transmission error, FLUSH needs
31396013efd8STejun Heo  *	to be retried.
31406013efd8STejun Heo  *
31416013efd8STejun Heo  *	This function determines whether FLUSH failure retry is
31426013efd8STejun Heo  *	necessary and performs it if so.
31436013efd8STejun Heo  *
31446013efd8STejun Heo  *	RETURNS:
31456013efd8STejun Heo  *	0 if EH can continue, -errno if EH needs to be repeated.
31466013efd8STejun Heo  */
31476013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev)
31486013efd8STejun Heo {
31496013efd8STejun Heo 	struct ata_link *link = dev->link;
31506013efd8STejun Heo 	struct ata_port *ap = link->ap;
31516013efd8STejun Heo 	struct ata_queued_cmd *qc;
31526013efd8STejun Heo 	struct ata_taskfile tf;
31536013efd8STejun Heo 	unsigned int err_mask;
31546013efd8STejun Heo 	int rc = 0;
31556013efd8STejun Heo 
31566013efd8STejun Heo 	/* did flush fail for this device? */
31576013efd8STejun Heo 	if (!ata_tag_valid(link->active_tag))
31586013efd8STejun Heo 		return 0;
31596013efd8STejun Heo 
31606013efd8STejun Heo 	qc = __ata_qc_from_tag(ap, link->active_tag);
31616013efd8STejun Heo 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
31626013efd8STejun Heo 			       qc->tf.command != ATA_CMD_FLUSH))
31636013efd8STejun Heo 		return 0;
31646013efd8STejun Heo 
31656013efd8STejun Heo 	/* if the device failed it, it should be reported to upper layers */
31666013efd8STejun Heo 	if (qc->err_mask & AC_ERR_DEV)
31676013efd8STejun Heo 		return 0;
31686013efd8STejun Heo 
31696013efd8STejun Heo 	/* flush failed for some other reason, give it another shot */
31706013efd8STejun Heo 	ata_tf_init(dev, &tf);
31716013efd8STejun Heo 
31726013efd8STejun Heo 	tf.command = qc->tf.command;
31736013efd8STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE;
31746013efd8STejun Heo 	tf.protocol = ATA_PROT_NODATA;
31756013efd8STejun Heo 
31766013efd8STejun Heo 	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
31776013efd8STejun Heo 		       tf.command, qc->err_mask);
31786013efd8STejun Heo 
31796013efd8STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
31806013efd8STejun Heo 	if (!err_mask) {
31816013efd8STejun Heo 		/*
31826013efd8STejun Heo 		 * FLUSH is complete but there's no way to
31836013efd8STejun Heo 		 * successfully complete a failed command from EH.
31846013efd8STejun Heo 		 * Making sure retry is allowed at least once and
31856013efd8STejun Heo 		 * retrying it should do the trick - whatever was in
31866013efd8STejun Heo 		 * the cache is already on the platter and this won't
31876013efd8STejun Heo 		 * cause infinite loop.
31886013efd8STejun Heo 		 */
31896013efd8STejun Heo 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
31906013efd8STejun Heo 	} else {
31916013efd8STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
31926013efd8STejun Heo 			       err_mask);
31936013efd8STejun Heo 		rc = -EIO;
31946013efd8STejun Heo 
31956013efd8STejun Heo 		/* if device failed it, report it to upper layers */
31966013efd8STejun Heo 		if (err_mask & AC_ERR_DEV) {
31976013efd8STejun Heo 			qc->err_mask |= AC_ERR_DEV;
31986013efd8STejun Heo 			qc->result_tf = tf;
31996013efd8STejun Heo 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
32006013efd8STejun Heo 				rc = 0;
32016013efd8STejun Heo 		}
32026013efd8STejun Heo 	}
32036013efd8STejun Heo 	return rc;
32046013efd8STejun Heo }
32056013efd8STejun Heo 
32060260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link)
3207c6fd2807SJeff Garzik {
3208f58229f8STejun Heo 	struct ata_device *dev;
3209f58229f8STejun Heo 	int cnt = 0;
3210c6fd2807SJeff Garzik 
32111eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED)
3212c6fd2807SJeff Garzik 		cnt++;
3213c6fd2807SJeff Garzik 	return cnt;
3214c6fd2807SJeff Garzik }
3215c6fd2807SJeff Garzik 
32160260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
3217c6fd2807SJeff Garzik {
3218f58229f8STejun Heo 	struct ata_device *dev;
3219f58229f8STejun Heo 	int cnt = 0;
3220c6fd2807SJeff Garzik 
32211eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL)
3222f58229f8STejun Heo 		if (dev->class == ATA_DEV_UNKNOWN)
3223c6fd2807SJeff Garzik 			cnt++;
3224c6fd2807SJeff Garzik 	return cnt;
3225c6fd2807SJeff Garzik }
3226c6fd2807SJeff Garzik 
32270260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
3228c6fd2807SJeff Garzik {
3229672b2d65STejun Heo 	struct ata_port *ap = link->ap;
32300260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3231f58229f8STejun Heo 	struct ata_device *dev;
3232c6fd2807SJeff Garzik 
3233f9df58cbSTejun Heo 	/* skip disabled links */
3234f9df58cbSTejun Heo 	if (link->flags & ATA_LFLAG_DISABLED)
3235f9df58cbSTejun Heo 		return 1;
3236f9df58cbSTejun Heo 
3237672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
3238672b2d65STejun Heo 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3239672b2d65STejun Heo 		return 0;
3240672b2d65STejun Heo 
3241672b2d65STejun Heo 	/* reset at least once if reset is requested */
3242672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3243672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3244c6fd2807SJeff Garzik 		return 0;
3245c6fd2807SJeff Garzik 
3246c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
32471eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3248c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3249c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3250c6fd2807SJeff Garzik 			return 0;
3251c6fd2807SJeff Garzik 	}
3252c6fd2807SJeff Garzik 
3253c6fd2807SJeff Garzik 	return 1;
3254c6fd2807SJeff Garzik }
3255c6fd2807SJeff Garzik 
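/*
 * ata_ering_map() callback: entries are visited most-recent first and
 * iteration stops on a non-zero return, so this counts only the probe
 * trials recorded within the last ATA_EH_PROBE_TRIAL_INTERVAL.
 */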
3256c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3257c2c7a89cSTejun Heo {
3258c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3259c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3260c2c7a89cSTejun Heo 	int *trials = void_arg;
3261c2c7a89cSTejun Heo 
3262c2c7a89cSTejun Heo 	if (ent->timestamp < now - min(now, interval))
3263c2c7a89cSTejun Heo 		return -1;
3264c2c7a89cSTejun Heo 
3265c2c7a89cSTejun Heo 	(*trials)++;
3266c2c7a89cSTejun Heo 	return 0;
3267c2c7a89cSTejun Heo }
3268c2c7a89cSTejun Heo 
326902c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
327002c05a27STejun Heo {
327102c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3272c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3273c2c7a89cSTejun Heo 	int trials = 0;
327402c05a27STejun Heo 
327502c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
327602c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
327702c05a27STejun Heo 		return 0;
327802c05a27STejun Heo 
327902c05a27STejun Heo 	ata_eh_detach_dev(dev);
328002c05a27STejun Heo 	ata_dev_init(dev);
328102c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3282cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
328300115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
328400115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
328502c05a27STejun Heo 
3286c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering.  The specific
3287c2c7a89cSTejun Heo 	 * error mask used is irrelevant.  Because a successful device
3288c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3289c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3290c2c7a89cSTejun Heo 	 *
3291c2c7a89cSTejun Heo 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3292c2c7a89cSTejun Heo 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3293c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
3294c2c7a89cSTejun Heo 	 *
3295c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3296c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3297c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3298c2c7a89cSTejun Heo 	 */
3299c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3300c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3301c2c7a89cSTejun Heo 
3302c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3303c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3304c2c7a89cSTejun Heo 
330502c05a27STejun Heo 	return 1;
330602c05a27STejun Heo }
330702c05a27STejun Heo 
33089b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3309fee7ca72STejun Heo {
33109af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3311fee7ca72STejun Heo 
3312cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3313cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3314cf9a590aSTejun Heo 	 */
3315cf9a590aSTejun Heo 	if (err != -EAGAIN)
3316fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3317fee7ca72STejun Heo 
3318fee7ca72STejun Heo 	switch (err) {
3319fee7ca72STejun Heo 	case -ENODEV:
3320fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3321fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
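		/* fall through */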
3322fee7ca72STejun Heo 	case -EINVAL:
3323fee7ca72STejun Heo 		/* give it just one more chance */
3324fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
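		/* fall through */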
3325fee7ca72STejun Heo 	case -EIO:
3326d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3327fee7ca72STejun Heo 			/* This is the last chance, better to slow
3328fee7ca72STejun Heo 			 * down than lose it.
3329fee7ca72STejun Heo 			 */
3330a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3331d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3332fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3333fee7ca72STejun Heo 		}
3334fee7ca72STejun Heo 	}
3335fee7ca72STejun Heo 
3336fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3337fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3338fee7ca72STejun Heo 		ata_dev_disable(dev);
3339fee7ca72STejun Heo 
3340fee7ca72STejun Heo 		/* detach if offline */
3341b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3342fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3343fee7ca72STejun Heo 
334402c05a27STejun Heo 		/* schedule probe if necessary */
334587fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3346fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
334787fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
334887fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
334987fbc5a0STejun Heo 		}
33509b1e2658STejun Heo 
33519b1e2658STejun Heo 		return 1;
3352fee7ca72STejun Heo 	} else {
3353cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
33549b1e2658STejun Heo 		return 0;
3355fee7ca72STejun Heo 	}
3356fee7ca72STejun Heo }
3357fee7ca72STejun Heo 
3358c6fd2807SJeff Garzik /**
3359c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
3360c6fd2807SJeff Garzik  *	@ap: host port to recover
3361c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3362c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3363c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3364c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
33659b1e2658STejun Heo  *	@r_failed_link: out parameter for failed link
3366c6fd2807SJeff Garzik  *
3367c6fd2807SJeff Garzik  *	This is the alpha and omega, yin and yang, heart and soul of
3368c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
33699b1e2658STejun Heo  *	recover each link and hotplug requests are recorded in the
33709b1e2658STejun Heo  *	link's eh_context.  This function executes all the operations
33719b1e2658STejun Heo  *	with appropriate retries and fallbacks to resurrect failed
3372c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
3373c6fd2807SJeff Garzik  *
3374c6fd2807SJeff Garzik  *	LOCKING:
3375c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3376c6fd2807SJeff Garzik  *
3377c6fd2807SJeff Garzik  *	RETURNS:
3378c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3379c6fd2807SJeff Garzik  */
3380fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3381c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
33829b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
33839b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3384c6fd2807SJeff Garzik {
33859b1e2658STejun Heo 	struct ata_link *link;
3386c6fd2807SJeff Garzik 	struct ata_device *dev;
33870a2c0f56STejun Heo 	int nr_failed_devs;
3388dc98c32cSTejun Heo 	int rc;
338945fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3390c6fd2807SJeff Garzik 
3391c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3392c6fd2807SJeff Garzik 
3393c6fd2807SJeff Garzik 	/* prep for recovery */
33941eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
33959b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
33969b1e2658STejun Heo 
3397f9df58cbSTejun Heo 		/* re-enable link? */
3398f9df58cbSTejun Heo 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3399f9df58cbSTejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3400f9df58cbSTejun Heo 			spin_lock_irqsave(ap->lock, flags);
3401f9df58cbSTejun Heo 			link->flags &= ~ATA_LFLAG_DISABLED;
3402f9df58cbSTejun Heo 			spin_unlock_irqrestore(ap->lock, flags);
3403f9df58cbSTejun Heo 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3404f9df58cbSTejun Heo 		}
3405f9df58cbSTejun Heo 
34061eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
3407fd995f70STejun Heo 			if (link->flags & ATA_LFLAG_NO_RETRY)
3408fd995f70STejun Heo 				ehc->tries[dev->devno] = 1;
3409fd995f70STejun Heo 			else
3410c6fd2807SJeff Garzik 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3411c6fd2807SJeff Garzik 
341279a55b72STejun Heo 			/* collect port action mask recorded in dev actions */
34139b1e2658STejun Heo 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
34149b1e2658STejun Heo 					 ~ATA_EH_PERDEV_MASK;
3415f58229f8STejun Heo 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
341679a55b72STejun Heo 
3417c6fd2807SJeff Garzik 			/* process hotplug request */
3418c6fd2807SJeff Garzik 			if (dev->flags & ATA_DFLAG_DETACH)
3419c6fd2807SJeff Garzik 				ata_eh_detach_dev(dev);
3420c6fd2807SJeff Garzik 
342102c05a27STejun Heo 			/* schedule probe if necessary */
342202c05a27STejun Heo 			if (!ata_dev_enabled(dev))
342302c05a27STejun Heo 				ata_eh_schedule_probe(dev);
3424c6fd2807SJeff Garzik 		}
34259b1e2658STejun Heo 	}
3426c6fd2807SJeff Garzik 
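	/*
	 * Everything from the retry label onwards is re-run whenever a
	 * device fails recovery: skip or perform resets per link, service
	 * head-park requests, then revalidate, set transfer modes and
	 * finish per-link work, funnelling failures through the dev_fail
	 * label further down.
	 */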
3427c6fd2807SJeff Garzik  retry:
3428c6fd2807SJeff Garzik 	rc = 0;
34299b1e2658STejun Heo 	nr_failed_devs = 0;
3430c6fd2807SJeff Garzik 
3431c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
3432c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3433c6fd2807SJeff Garzik 		goto out;
3434c6fd2807SJeff Garzik 
34359b1e2658STejun Heo 	/* prep for EH */
34361eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
34379b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
34389b1e2658STejun Heo 
3439c6fd2807SJeff Garzik 		/* skip EH if possible. */
34400260731fSTejun Heo 		if (ata_eh_skip_recovery(link))
3441c6fd2807SJeff Garzik 			ehc->i.action = 0;
3442c6fd2807SJeff Garzik 
34431eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
3444f58229f8STejun Heo 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
34459b1e2658STejun Heo 	}
3446c6fd2807SJeff Garzik 
3447c6fd2807SJeff Garzik 	/* reset */
34481eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
34499b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
34509b1e2658STejun Heo 
3451cf480626STejun Heo 		if (!(ehc->i.action & ATA_EH_RESET))
34529b1e2658STejun Heo 			continue;
34539b1e2658STejun Heo 
34549b1e2658STejun Heo 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3455dc98c32cSTejun Heo 				  prereset, softreset, hardreset, postreset);
3456c6fd2807SJeff Garzik 		if (rc) {
34570260731fSTejun Heo 			ata_link_printk(link, KERN_ERR,
3458c6fd2807SJeff Garzik 					"reset failed, giving up\n");
3459c6fd2807SJeff Garzik 			goto out;
3460c6fd2807SJeff Garzik 		}
34619b1e2658STejun Heo 	}
3462c6fd2807SJeff Garzik 
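	/*
	 * Head-unload (park) handling: issue an unload to every ATA device
	 * with a pending ATA_EH_PARK request, then wait on
	 * ap->park_req_pending until the latest unpark deadline has passed.
	 * The loop after the do/while reloads the heads of every device
	 * recorded in unloaded_mask.
	 */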
346345fabbb7SElias Oltmanns 	do {
346445fabbb7SElias Oltmanns 		unsigned long now;
346545fabbb7SElias Oltmanns 
346645fabbb7SElias Oltmanns 		/*
346745fabbb7SElias Oltmanns 		 * clears ATA_EH_PARK in eh_info and resets
346845fabbb7SElias Oltmanns 		 * ap->park_req_pending
346945fabbb7SElias Oltmanns 		 */
347045fabbb7SElias Oltmanns 		ata_eh_pull_park_action(ap);
347145fabbb7SElias Oltmanns 
347245fabbb7SElias Oltmanns 		deadline = jiffies;
34731eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
34741eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
347545fabbb7SElias Oltmanns 				struct ata_eh_context *ehc = &link->eh_context;
347645fabbb7SElias Oltmanns 				unsigned long tmp;
347745fabbb7SElias Oltmanns 
347845fabbb7SElias Oltmanns 				if (dev->class != ATA_DEV_ATA)
347945fabbb7SElias Oltmanns 					continue;
348045fabbb7SElias Oltmanns 				if (!(ehc->i.dev_action[dev->devno] &
348145fabbb7SElias Oltmanns 				      ATA_EH_PARK))
348245fabbb7SElias Oltmanns 					continue;
348345fabbb7SElias Oltmanns 				tmp = dev->unpark_deadline;
348445fabbb7SElias Oltmanns 				if (time_before(deadline, tmp))
348545fabbb7SElias Oltmanns 					deadline = tmp;
348645fabbb7SElias Oltmanns 				else if (time_before_eq(tmp, jiffies))
348745fabbb7SElias Oltmanns 					continue;
348845fabbb7SElias Oltmanns 				if (ehc->unloaded_mask & (1 << dev->devno))
348945fabbb7SElias Oltmanns 					continue;
349045fabbb7SElias Oltmanns 
349145fabbb7SElias Oltmanns 				ata_eh_park_issue_cmd(dev, 1);
349245fabbb7SElias Oltmanns 			}
349345fabbb7SElias Oltmanns 		}
349445fabbb7SElias Oltmanns 
349545fabbb7SElias Oltmanns 		now = jiffies;
349645fabbb7SElias Oltmanns 		if (time_before_eq(deadline, now))
349745fabbb7SElias Oltmanns 			break;
349845fabbb7SElias Oltmanns 
349945fabbb7SElias Oltmanns 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
350045fabbb7SElias Oltmanns 						       deadline - now);
350145fabbb7SElias Oltmanns 	} while (deadline);
35021eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
35031eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
350445fabbb7SElias Oltmanns 			if (!(link->eh_context.unloaded_mask &
350545fabbb7SElias Oltmanns 			      (1 << dev->devno)))
350645fabbb7SElias Oltmanns 				continue;
350745fabbb7SElias Oltmanns 
350845fabbb7SElias Oltmanns 			ata_eh_park_issue_cmd(dev, 0);
350945fabbb7SElias Oltmanns 			ata_eh_done(link, dev, ATA_EH_PARK);
351045fabbb7SElias Oltmanns 		}
351145fabbb7SElias Oltmanns 	}
351245fabbb7SElias Oltmanns 
35139b1e2658STejun Heo 	/* the rest */
35141eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
35159b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
35169b1e2658STejun Heo 
3517c6fd2807SJeff Garzik 		/* revalidate existing devices and attach new ones */
35180260731fSTejun Heo 		rc = ata_eh_revalidate_and_attach(link, &dev);
3519c6fd2807SJeff Garzik 		if (rc)
3520c6fd2807SJeff Garzik 			goto dev_fail;
3521c6fd2807SJeff Garzik 
3522633273a3STejun Heo 		/* if a PMP got attached, return; PMP EH will take care of it */
3523633273a3STejun Heo 		if (link->device->class == ATA_DEV_PMP) {
3524633273a3STejun Heo 			ehc->i.action = 0;
3525633273a3STejun Heo 			return 0;
3526633273a3STejun Heo 		}
3527633273a3STejun Heo 
3528baa1e78aSTejun Heo 		/* configure transfer mode if necessary */
3529baa1e78aSTejun Heo 		if (ehc->i.flags & ATA_EHI_SETMODE) {
35300260731fSTejun Heo 			rc = ata_set_mode(link, &dev);
35314ae72a1eSTejun Heo 			if (rc)
3532c6fd2807SJeff Garzik 				goto dev_fail;
3533baa1e78aSTejun Heo 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3534c6fd2807SJeff Garzik 		}
3535c6fd2807SJeff Garzik 
353611fc33daSTejun Heo 		/* If reset has been issued, clear UA to avoid
353711fc33daSTejun Heo 		 * disrupting the current users of the device.
353811fc33daSTejun Heo 		 */
353911fc33daSTejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
35401eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
354111fc33daSTejun Heo 				if (dev->class != ATA_DEV_ATAPI)
354211fc33daSTejun Heo 					continue;
354311fc33daSTejun Heo 				rc = atapi_eh_clear_ua(dev);
354411fc33daSTejun Heo 				if (rc)
354511fc33daSTejun Heo 					goto dev_fail;
354611fc33daSTejun Heo 			}
354711fc33daSTejun Heo 		}
354811fc33daSTejun Heo 
35496013efd8STejun Heo 		/* retry flush if necessary */
35506013efd8STejun Heo 		ata_for_each_dev(dev, link, ALL) {
35516013efd8STejun Heo 			if (dev->class != ATA_DEV_ATA)
35526013efd8STejun Heo 				continue;
35536013efd8STejun Heo 			rc = ata_eh_maybe_retry_flush(dev);
35546013efd8STejun Heo 			if (rc)
35556013efd8STejun Heo 				goto dev_fail;
35566013efd8STejun Heo 		}
35576013efd8STejun Heo 
355811fc33daSTejun Heo 		/* configure link power saving */
35593ec25ebdSTejun Heo 		if (ehc->i.action & ATA_EH_LPM)
35601eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
3561ca77329fSKristen Carlson Accardi 				ata_dev_enable_pm(dev, ap->pm_policy);
3562ca77329fSKristen Carlson Accardi 
35639b1e2658STejun Heo 		/* this link is okay now */
35649b1e2658STejun Heo 		ehc->i.flags = 0;
35659b1e2658STejun Heo 		continue;
3566c6fd2807SJeff Garzik 
3567c6fd2807SJeff Garzik dev_fail:
35689b1e2658STejun Heo 		nr_failed_devs++;
35690a2c0f56STejun Heo 		ata_eh_handle_dev_fail(dev, rc);
3570c6fd2807SJeff Garzik 
3571b06ce3e5STejun Heo 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3572b06ce3e5STejun Heo 			/* PMP reset requires working host port.
3573b06ce3e5STejun Heo 			 * Can't retry if it's frozen.
3574b06ce3e5STejun Heo 			 */
3575071f44b1STejun Heo 			if (sata_pmp_attached(ap))
3576b06ce3e5STejun Heo 				goto out;
35779b1e2658STejun Heo 			break;
35789b1e2658STejun Heo 		}
3579b06ce3e5STejun Heo 	}
35809b1e2658STejun Heo 
35810a2c0f56STejun Heo 	if (nr_failed_devs)
3582c6fd2807SJeff Garzik 		goto retry;
3583c6fd2807SJeff Garzik 
3584c6fd2807SJeff Garzik  out:
35859b1e2658STejun Heo 	if (rc && r_failed_link)
35869b1e2658STejun Heo 		*r_failed_link = link;
3587c6fd2807SJeff Garzik 
3588c6fd2807SJeff Garzik 	DPRINTK("EXIT, rc=%d\n", rc);
3589c6fd2807SJeff Garzik 	return rc;
3590c6fd2807SJeff Garzik }
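/*
 * Usage sketch, for orientation only (the call sites live outside this
 * function): ata_do_eh() below invokes ata_eh_recover() with a NULL
 * @r_failed_link, while the port multiplier EH in libata-pmp.c passes a
 * real pointer so it can deal with whichever downstream link failed,
 * roughly:
 *
 *	struct ata_link *link;
 *	int rc = ata_eh_recover(ap, prereset, softreset, hardreset,
 *				postreset, &link);
 *	if (rc)
 *		handle_failed_link(link);	(hypothetical helper)
 */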
3591c6fd2807SJeff Garzik 
3592c6fd2807SJeff Garzik /**
3593c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
3594c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
3595c6fd2807SJeff Garzik  *
3596c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
3597c6fd2807SJeff Garzik  *	failed qcs.
3598c6fd2807SJeff Garzik  *
3599c6fd2807SJeff Garzik  *	LOCKING:
3600c6fd2807SJeff Garzik  *	None.
3601c6fd2807SJeff Garzik  */
3602fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3603c6fd2807SJeff Garzik {
3604c6fd2807SJeff Garzik 	int tag;
3605c6fd2807SJeff Garzik 
3606c6fd2807SJeff Garzik 	/* retry or finish qcs */
3607c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3608c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3609c6fd2807SJeff Garzik 
3610c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3611c6fd2807SJeff Garzik 			continue;
3612c6fd2807SJeff Garzik 
3613c6fd2807SJeff Garzik 		if (qc->err_mask) {
3614c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
3615c6fd2807SJeff Garzik 			 * generate sense data in this function,
3616c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
3617c6fd2807SJeff Garzik 			 */
361803faab78STejun Heo 			if (qc->flags & ATA_QCFLAG_RETRY)
3619c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
362003faab78STejun Heo 			else
362103faab78STejun Heo 				ata_eh_qc_complete(qc);
3622c6fd2807SJeff Garzik 		} else {
3623c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3624c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
3625c6fd2807SJeff Garzik 			} else {
3626c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
3627c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3628c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
3629c6fd2807SJeff Garzik 			}
3630c6fd2807SJeff Garzik 		}
3631c6fd2807SJeff Garzik 	}
3632da917d69STejun Heo 
3633da917d69STejun Heo 	/* make sure nr_active_links is zero after EH */
3634da917d69STejun Heo 	WARN_ON(ap->nr_active_links);
3635da917d69STejun Heo 	ap->nr_active_links = 0;
3636c6fd2807SJeff Garzik }
3637c6fd2807SJeff Garzik 
3638c6fd2807SJeff Garzik /**
3639c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
3640c6fd2807SJeff Garzik  *	@ap: host port to handle error for
3642c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3643c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3644c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3645c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
3646c6fd2807SJeff Garzik  *
3647c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
3648c6fd2807SJeff Garzik  *
3649c6fd2807SJeff Garzik  *	LOCKING:
3650c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3651c6fd2807SJeff Garzik  */
3652c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3653c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3654c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
3655c6fd2807SJeff Garzik {
36569b1e2658STejun Heo 	struct ata_device *dev;
36579b1e2658STejun Heo 	int rc;
36589b1e2658STejun Heo 
36599b1e2658STejun Heo 	ata_eh_autopsy(ap);
36609b1e2658STejun Heo 	ata_eh_report(ap);
36619b1e2658STejun Heo 
36629b1e2658STejun Heo 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
36639b1e2658STejun Heo 			    NULL);
36649b1e2658STejun Heo 	if (rc) {
36651eca4365STejun Heo 		ata_for_each_dev(dev, &ap->link, ALL)
36669b1e2658STejun Heo 			ata_dev_disable(dev);
36679b1e2658STejun Heo 	}
36689b1e2658STejun Heo 
3669c6fd2807SJeff Garzik 	ata_eh_finish(ap);
3670c6fd2807SJeff Garzik }
3671c6fd2807SJeff Garzik 
3672a1efdabaSTejun Heo /**
3673a1efdabaSTejun Heo  *	ata_std_error_handler - standard error handler
3674a1efdabaSTejun Heo  *	@ap: host port to handle error for
3675a1efdabaSTejun Heo  *
3676a1efdabaSTejun Heo  *	Standard error handler
3677a1efdabaSTejun Heo  *
3678a1efdabaSTejun Heo  *	LOCKING:
3679a1efdabaSTejun Heo  *	Kernel thread context (may sleep).
3680a1efdabaSTejun Heo  */
3681a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
3682a1efdabaSTejun Heo {
3683a1efdabaSTejun Heo 	struct ata_port_operations *ops = ap->ops;
3684a1efdabaSTejun Heo 	ata_reset_fn_t hardreset = ops->hardreset;
3685a1efdabaSTejun Heo 
368657c9efdfSTejun Heo 	/* ignore built-in hardreset if SCR access is not available */
368757c9efdfSTejun Heo 	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
3688a1efdabaSTejun Heo 		hardreset = NULL;
3689a1efdabaSTejun Heo 
3690a1efdabaSTejun Heo 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3691a1efdabaSTejun Heo }
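/*
 * Usage sketch (an assumption about code outside this file, not a new
 * definition here): drivers normally pick up ata_std_error_handler() by
 * inheritance rather than by calling it directly, e.g.
 *
 *	static struct ata_port_operations my_driver_ops = {
 *		.inherits	= &ata_base_port_ops,
 *	};
 *
 * where the base port operations already point .error_handler at
 * ata_std_error_handler; a driver overrides the method only when it
 * needs a custom EH entry point.
 */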
3692a1efdabaSTejun Heo 
36936ffa01d8STejun Heo #ifdef CONFIG_PM
3694c6fd2807SJeff Garzik /**
3695c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
3696c6fd2807SJeff Garzik  *	@ap: port to suspend
3697c6fd2807SJeff Garzik  *
3698c6fd2807SJeff Garzik  *	Suspend @ap.
3699c6fd2807SJeff Garzik  *
3700c6fd2807SJeff Garzik  *	LOCKING:
3701c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3702c6fd2807SJeff Garzik  */
3703c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
3704c6fd2807SJeff Garzik {
3705c6fd2807SJeff Garzik 	unsigned long flags;
3706c6fd2807SJeff Garzik 	int rc = 0;
3707c6fd2807SJeff Garzik 
3708c6fd2807SJeff Garzik 	/* are we suspending? */
3709c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3710c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3711c6fd2807SJeff Garzik 	    ap->pm_mesg.event == PM_EVENT_ON) {
3712c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3713c6fd2807SJeff Garzik 		return;
3714c6fd2807SJeff Garzik 	}
3715c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3716c6fd2807SJeff Garzik 
3717c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3718c6fd2807SJeff Garzik 
371964578a3dSTejun Heo 	/* tell ACPI we're suspending */
372064578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
372164578a3dSTejun Heo 	if (rc)
372264578a3dSTejun Heo 		goto out;
372364578a3dSTejun Heo 
3724c6fd2807SJeff Garzik 	/* suspend */
3725c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
3726c6fd2807SJeff Garzik 
3727c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
3728c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3729c6fd2807SJeff Garzik 
3730bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_SUSPEND);
373164578a3dSTejun Heo  out:
3732c6fd2807SJeff Garzik 	/* report result */
3733c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3734c6fd2807SJeff Garzik 
3735c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3736c6fd2807SJeff Garzik 	if (rc == 0)
3737c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
373864578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
3739c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
3740c6fd2807SJeff Garzik 
3741c6fd2807SJeff Garzik 	if (ap->pm_result) {
3742c6fd2807SJeff Garzik 		*ap->pm_result = rc;
3743c6fd2807SJeff Garzik 		ap->pm_result = NULL;
3744c6fd2807SJeff Garzik 	}
3745c6fd2807SJeff Garzik 
3746c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3747c6fd2807SJeff Garzik 
3748c6fd2807SJeff Garzik 	return;
3749c6fd2807SJeff Garzik }
3750c6fd2807SJeff Garzik 
3751c6fd2807SJeff Garzik /**
3752c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
3753c6fd2807SJeff Garzik  *	@ap: port to resume
3754c6fd2807SJeff Garzik  *
3755c6fd2807SJeff Garzik  *	Resume @ap.
3756c6fd2807SJeff Garzik  *
3757c6fd2807SJeff Garzik  *	LOCKING:
3758c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3759c6fd2807SJeff Garzik  */
3760c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
3761c6fd2807SJeff Garzik {
37626f9c1ea2STejun Heo 	struct ata_link *link;
37636f9c1ea2STejun Heo 	struct ata_device *dev;
3764c6fd2807SJeff Garzik 	unsigned long flags;
37659666f400STejun Heo 	int rc = 0;
3766c6fd2807SJeff Garzik 
3767c6fd2807SJeff Garzik 	/* are we resuming? */
3768c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3769c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3770c6fd2807SJeff Garzik 	    ap->pm_mesg.event != PM_EVENT_ON) {
3771c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3772c6fd2807SJeff Garzik 		return;
3773c6fd2807SJeff Garzik 	}
3774c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3775c6fd2807SJeff Garzik 
37769666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3777c6fd2807SJeff Garzik 
37786f9c1ea2STejun Heo 	/*
37796f9c1ea2STejun Heo 	 * Error timestamps are in jiffies, which doesn't advance while
37806f9c1ea2STejun Heo 	 * suspended, and PHY events during resume aren't too uncommon.
37816f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
37826f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
37836f9c1ea2STejun Heo 	 * Clear error history.
37846f9c1ea2STejun Heo 	 */
37856f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
37866f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
37876f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
37886f9c1ea2STejun Heo 
3789bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_ON);
3790bd3adca5SShaohua Li 
3791c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
3792c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
3793c6fd2807SJeff Garzik 
37946746544cSTejun Heo 	/* tell ACPI that we're resuming */
37956746544cSTejun Heo 	ata_acpi_on_resume(ap);
37966746544cSTejun Heo 
37979666f400STejun Heo 	/* report result */
3798c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3799c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3800c6fd2807SJeff Garzik 	if (ap->pm_result) {
3801c6fd2807SJeff Garzik 		*ap->pm_result = rc;
3802c6fd2807SJeff Garzik 		ap->pm_result = NULL;
3803c6fd2807SJeff Garzik 	}
3804c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3805c6fd2807SJeff Garzik }
38066ffa01d8STejun Heo #endif /* CONFIG_PM */
3807