xref: /openbmc/linux/drivers/ata/libata-eh.c (revision c34aeebc06e8bdde93e8c8f40d9903b1aaab63c6)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9c6fd2807SJeff Garzik  *
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or
12c6fd2807SJeff Garzik  *  modify it under the terms of the GNU General Public License as
13c6fd2807SJeff Garzik  *  published by the Free Software Foundation; either version 2, or
14c6fd2807SJeff Garzik  *  (at your option) any later version.
15c6fd2807SJeff Garzik  *
16c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
17c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19c6fd2807SJeff Garzik  *  General Public License for more details.
20c6fd2807SJeff Garzik  *
21c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
22c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
23c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24c6fd2807SJeff Garzik  *  USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36242f9dcbSJens Axboe #include <linux/blkdev.h>
372855568bSJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <scsi/scsi.h>
39c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
40c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
41c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
42c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
436521148cSRobert Hancock #include <scsi/scsi_dbg.h>
44c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
45c6fd2807SJeff Garzik 
46c6fd2807SJeff Garzik #include <linux/libata.h>
47c6fd2807SJeff Garzik 
48c6fd2807SJeff Garzik #include "libata.h"
49c6fd2807SJeff Garzik 
507d47e8d4STejun Heo enum {
513884f7b0STejun Heo 	/* speed down verdicts */
527d47e8d4STejun Heo 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
537d47e8d4STejun Heo 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
547d47e8d4STejun Heo 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
5576326ac1STejun Heo 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
563884f7b0STejun Heo 
573884f7b0STejun Heo 	/* error flags */
583884f7b0STejun Heo 	ATA_EFLAG_IS_IO			= (1 << 0),
5976326ac1STejun Heo 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
60d9027470SGwendal Grignou 	ATA_EFLAG_OLD_ER                = (1 << 31),
613884f7b0STejun Heo 
623884f7b0STejun Heo 	/* error categories */
633884f7b0STejun Heo 	ATA_ECAT_NONE			= 0,
643884f7b0STejun Heo 	ATA_ECAT_ATA_BUS		= 1,
653884f7b0STejun Heo 	ATA_ECAT_TOUT_HSM		= 2,
663884f7b0STejun Heo 	ATA_ECAT_UNK_DEV		= 3,
6775f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_NONE		= 4,
6875f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
6975f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
7075f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
7175f9cafcSTejun Heo 	ATA_ECAT_NR			= 8,
727d47e8d4STejun Heo 
7387fbc5a0STejun Heo 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
7487fbc5a0STejun Heo 
750a2c0f56STejun Heo 	/* always put at least this amount of time between resets */
760a2c0f56STejun Heo 	ATA_EH_RESET_COOL_DOWN		=  5000,
770a2c0f56STejun Heo 
78341c2c95STejun Heo 	/* Waiting in ->prereset can never be reliable.  It's
79341c2c95STejun Heo 	 * sometimes nice to wait there but it can't be depended upon;
80341c2c95STejun Heo 	 * otherwise, we wouldn't be resetting.  Just give it enough
81341c2c95STejun Heo 	 * time for most drives to spin up.
8231daabdaSTejun Heo 	 */
83341c2c95STejun Heo 	ATA_EH_PRERESET_TIMEOUT		= 10000,
84341c2c95STejun Heo 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
8511fc33daSTejun Heo 
8611fc33daSTejun Heo 	ATA_EH_UA_TRIES			= 5,
87c2c7a89cSTejun Heo 
88c2c7a89cSTejun Heo 	/* probe speed down parameters, see ata_eh_schedule_probe() */
89c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
90c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIALS		= 2,
9131daabdaSTejun Heo };
9231daabdaSTejun Heo 
9331daabdaSTejun Heo /* The following table determines how we sequence resets.  Each entry
9431daabdaSTejun Heo  * represents timeout for that try.  The first try can be soft or
9531daabdaSTejun Heo  * hardreset.  All others are hardreset if available.  In most cases
9631daabdaSTejun Heo  * the first reset w/ 10sec timeout should succeed.  Following entries
9731daabdaSTejun Heo  * are mostly for error handling, hotplug and retarded devices.
9831daabdaSTejun Heo  */
9931daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = {
100341c2c95STejun Heo 	10000,	/* most drives spin up by 10sec */
101341c2c95STejun Heo 	10000,	/* > 99% working drives spin up before 20sec */
102341c2c95STejun Heo 	35000,	/* give > 30 secs of idleness for retarded devices */
103341c2c95STejun Heo 	 5000,	/* and sweet one last chance */
104d8af0eb6STejun Heo 	ULONG_MAX, /* > 1 min has elapsed, give up */
10531daabdaSTejun Heo };
10631daabdaSTejun Heo 
10787fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = {
10887fbc5a0STejun Heo 	 5000,	/* covers > 99% of successes and not too boring on failures */
10987fbc5a0STejun Heo 	10000,  /* combined time till here is enough even for media access */
11087fbc5a0STejun Heo 	30000,	/* for true idiots */
11187fbc5a0STejun Heo 	ULONG_MAX,
11287fbc5a0STejun Heo };
11387fbc5a0STejun Heo 
1146013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = {
1156013efd8STejun Heo 	15000,	/* be generous with flush */
1166013efd8STejun Heo 	15000,  /* ditto */
1176013efd8STejun Heo 	30000,	/* and even more generous */
1186013efd8STejun Heo 	ULONG_MAX,
1196013efd8STejun Heo };
1206013efd8STejun Heo 
12187fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = {
12287fbc5a0STejun Heo 	 5000,	/* same rationale as identify timeout */
12387fbc5a0STejun Heo 	10000,	/* ditto */
12487fbc5a0STejun Heo 	/* but no merciful 30sec for other commands, it just isn't worth it */
12587fbc5a0STejun Heo 	ULONG_MAX,
12687fbc5a0STejun Heo };
12787fbc5a0STejun Heo 
12887fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent {
12987fbc5a0STejun Heo 	const u8		*commands;
13087fbc5a0STejun Heo 	const unsigned long	*timeouts;
13187fbc5a0STejun Heo };
13287fbc5a0STejun Heo 
13387fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal
13487fbc5a0STejun Heo  * commands.  Each table entry is a command class and matches the
13587fbc5a0STejun Heo  * commands the entry applies to and the timeout table to use.
13687fbc5a0STejun Heo  *
13787fbc5a0STejun Heo  * On the retry after a command timed out, the next timeout value from
13887fbc5a0STejun Heo  * the table is used.  If the table doesn't contain further entries,
13987fbc5a0STejun Heo  * the last value is used.
14087fbc5a0STejun Heo  *
14187fbc5a0STejun Heo  * ehc->cmd_timeout_idx keeps track of which timeout to use per
14287fbc5a0STejun Heo  * command class, so if SET_FEATURES times out on the first try, the
14387fbc5a0STejun Heo  * next try will use the second timeout value only for that class.
14487fbc5a0STejun Heo  */
14587fbc5a0STejun Heo #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
14687fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent
14787fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
14887fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
14987fbc5a0STejun Heo 	  .timeouts = ata_eh_identify_timeouts, },
15087fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
15187fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15287fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
15387fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15487fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
15587fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15687fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
15787fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
1586013efd8STejun Heo 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
1596013efd8STejun Heo 	  .timeouts = ata_eh_flush_timeouts },
16087fbc5a0STejun Heo };
16187fbc5a0STejun Heo #undef CMDS
16287fbc5a0STejun Heo 
163c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
1646ffa01d8STejun Heo #ifdef CONFIG_PM
165c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
166c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
1676ffa01d8STejun Heo #else /* CONFIG_PM */
1686ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap)
1696ffa01d8STejun Heo { }
1706ffa01d8STejun Heo 
1716ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap)
1726ffa01d8STejun Heo { }
1736ffa01d8STejun Heo #endif /* CONFIG_PM */
174c6fd2807SJeff Garzik 
175b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
176b64bbc39STejun Heo 				 va_list args)
177b64bbc39STejun Heo {
178b64bbc39STejun Heo 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
179b64bbc39STejun Heo 				     ATA_EH_DESC_LEN - ehi->desc_len,
180b64bbc39STejun Heo 				     fmt, args);
181b64bbc39STejun Heo }
182b64bbc39STejun Heo 
183b64bbc39STejun Heo /**
184b64bbc39STejun Heo  *	__ata_ehi_push_desc - push error description without adding separator
185b64bbc39STejun Heo  *	@ehi: target EHI
186b64bbc39STejun Heo  *	@fmt: printf format string
187b64bbc39STejun Heo  *
188b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
189b64bbc39STejun Heo  *
190b64bbc39STejun Heo  *	LOCKING:
191b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
192b64bbc39STejun Heo  */
193b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
194b64bbc39STejun Heo {
195b64bbc39STejun Heo 	va_list args;
196b64bbc39STejun Heo 
197b64bbc39STejun Heo 	va_start(args, fmt);
198b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
199b64bbc39STejun Heo 	va_end(args);
200b64bbc39STejun Heo }
201b64bbc39STejun Heo 
202b64bbc39STejun Heo /**
203b64bbc39STejun Heo  *	ata_ehi_push_desc - push error description with separator
204b64bbc39STejun Heo  *	@ehi: target EHI
205b64bbc39STejun Heo  *	@fmt: printf format string
206b64bbc39STejun Heo  *
207b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
208b64bbc39STejun Heo  *	If @ehi->desc is not empty, ", " is added in-between.
209b64bbc39STejun Heo  *
210b64bbc39STejun Heo  *	LOCKING:
211b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
212b64bbc39STejun Heo  */
213b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
214b64bbc39STejun Heo {
215b64bbc39STejun Heo 	va_list args;
216b64bbc39STejun Heo 
217b64bbc39STejun Heo 	if (ehi->desc_len)
218b64bbc39STejun Heo 		__ata_ehi_push_desc(ehi, ", ");
219b64bbc39STejun Heo 
220b64bbc39STejun Heo 	va_start(args, fmt);
221b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
222b64bbc39STejun Heo 	va_end(args);
223b64bbc39STejun Heo }
224b64bbc39STejun Heo 
225b64bbc39STejun Heo /**
226b64bbc39STejun Heo  *	ata_ehi_clear_desc - clean error description
227b64bbc39STejun Heo  *	@ehi: target EHI
228b64bbc39STejun Heo  *
229b64bbc39STejun Heo  *	Clear @ehi->desc.
230b64bbc39STejun Heo  *
231b64bbc39STejun Heo  *	LOCKING:
232b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
233b64bbc39STejun Heo  */
234b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi)
235b64bbc39STejun Heo {
236b64bbc39STejun Heo 	ehi->desc[0] = '\0';
237b64bbc39STejun Heo 	ehi->desc_len = 0;
238b64bbc39STejun Heo }
239b64bbc39STejun Heo 
240cbcdd875STejun Heo /**
241cbcdd875STejun Heo  *	ata_port_desc - append port description
242cbcdd875STejun Heo  *	@ap: target ATA port
243cbcdd875STejun Heo  *	@fmt: printf format string
244cbcdd875STejun Heo  *
245cbcdd875STejun Heo  *	Format string according to @fmt and append it to port
246cbcdd875STejun Heo  *	description.  If port description is not empty, " " is added
247cbcdd875STejun Heo  *	in-between.  This function is to be used while initializing
248cbcdd875STejun Heo  *	ata_host.  The description is printed on host registration.
249cbcdd875STejun Heo  *
250cbcdd875STejun Heo  *	LOCKING:
251cbcdd875STejun Heo  *	None.
252cbcdd875STejun Heo  */
253cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
254cbcdd875STejun Heo {
255cbcdd875STejun Heo 	va_list args;
256cbcdd875STejun Heo 
257cbcdd875STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
258cbcdd875STejun Heo 
259cbcdd875STejun Heo 	if (ap->link.eh_info.desc_len)
260cbcdd875STejun Heo 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
261cbcdd875STejun Heo 
262cbcdd875STejun Heo 	va_start(args, fmt);
263cbcdd875STejun Heo 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
264cbcdd875STejun Heo 	va_end(args);
265cbcdd875STejun Heo }
266cbcdd875STejun Heo 
267cbcdd875STejun Heo #ifdef CONFIG_PCI
268cbcdd875STejun Heo 
269cbcdd875STejun Heo /**
270cbcdd875STejun Heo  *	ata_port_pbar_desc - append PCI BAR description
271cbcdd875STejun Heo  *	@ap: target ATA port
272cbcdd875STejun Heo  *	@bar: target PCI BAR
273cbcdd875STejun Heo  *	@offset: offset into PCI BAR
274cbcdd875STejun Heo  *	@name: name of the area
275cbcdd875STejun Heo  *
276cbcdd875STejun Heo  *	If @offset is negative, this function formats a string which
277cbcdd875STejun Heo  *	contains the name, address, size and type of the BAR and
278cbcdd875STejun Heo  *	appends it to the port description.  If @offset is zero or
279cbcdd875STejun Heo  *	positive, only name and offsetted address is appended.
280cbcdd875STejun Heo  *
281cbcdd875STejun Heo  *	LOCKING:
282cbcdd875STejun Heo  *	None.
283cbcdd875STejun Heo  */
284cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
285cbcdd875STejun Heo 			const char *name)
286cbcdd875STejun Heo {
287cbcdd875STejun Heo 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
288cbcdd875STejun Heo 	char *type = "";
289cbcdd875STejun Heo 	unsigned long long start, len;
290cbcdd875STejun Heo 
291cbcdd875STejun Heo 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
292cbcdd875STejun Heo 		type = "m";
293cbcdd875STejun Heo 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
294cbcdd875STejun Heo 		type = "i";
295cbcdd875STejun Heo 
296cbcdd875STejun Heo 	start = (unsigned long long)pci_resource_start(pdev, bar);
297cbcdd875STejun Heo 	len = (unsigned long long)pci_resource_len(pdev, bar);
298cbcdd875STejun Heo 
299cbcdd875STejun Heo 	if (offset < 0)
300cbcdd875STejun Heo 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
301cbcdd875STejun Heo 	else
302e6a73ab1SAndrew Morton 		ata_port_desc(ap, "%s 0x%llx", name,
303e6a73ab1SAndrew Morton 				start + (unsigned long long)offset);
304cbcdd875STejun Heo }
305cbcdd875STejun Heo 
306cbcdd875STejun Heo #endif /* CONFIG_PCI */
307cbcdd875STejun Heo 
30887fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd)
30987fbc5a0STejun Heo {
31087fbc5a0STejun Heo 	int i;
31187fbc5a0STejun Heo 
31287fbc5a0STejun Heo 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
31387fbc5a0STejun Heo 		const u8 *cur;
31487fbc5a0STejun Heo 
31587fbc5a0STejun Heo 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
31687fbc5a0STejun Heo 			if (*cur == cmd)
31787fbc5a0STejun Heo 				return i;
31887fbc5a0STejun Heo 	}
31987fbc5a0STejun Heo 
32087fbc5a0STejun Heo 	return -1;
32187fbc5a0STejun Heo }
32287fbc5a0STejun Heo 
32387fbc5a0STejun Heo /**
32487fbc5a0STejun Heo  *	ata_internal_cmd_timeout - determine timeout for an internal command
32587fbc5a0STejun Heo  *	@dev: target device
32687fbc5a0STejun Heo  *	@cmd: internal command to be issued
32787fbc5a0STejun Heo  *
32887fbc5a0STejun Heo  *	Determine timeout for internal command @cmd for @dev.
32987fbc5a0STejun Heo  *
33087fbc5a0STejun Heo  *	LOCKING:
33187fbc5a0STejun Heo  *	EH context.
33287fbc5a0STejun Heo  *
33387fbc5a0STejun Heo  *	RETURNS:
33487fbc5a0STejun Heo  *	Determined timeout.
33587fbc5a0STejun Heo  */
33687fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
33787fbc5a0STejun Heo {
33887fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
33987fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
34087fbc5a0STejun Heo 	int idx;
34187fbc5a0STejun Heo 
34287fbc5a0STejun Heo 	if (ent < 0)
34387fbc5a0STejun Heo 		return ATA_EH_CMD_DFL_TIMEOUT;
34487fbc5a0STejun Heo 
34587fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
34687fbc5a0STejun Heo 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
34787fbc5a0STejun Heo }
34887fbc5a0STejun Heo 
34987fbc5a0STejun Heo /**
35087fbc5a0STejun Heo  *	ata_internal_cmd_timed_out - notification for internal command timeout
35187fbc5a0STejun Heo  *	@dev: target device
35287fbc5a0STejun Heo  *	@cmd: internal command which timed out
35387fbc5a0STejun Heo  *
35487fbc5a0STejun Heo  *	Notify EH that internal command @cmd for @dev timed out.  This
35587fbc5a0STejun Heo  *	function should be called only for commands whose timeouts are
35687fbc5a0STejun Heo  *	determined using ata_internal_cmd_timeout().
35787fbc5a0STejun Heo  *
35887fbc5a0STejun Heo  *	LOCKING:
35987fbc5a0STejun Heo  *	EH context.
36087fbc5a0STejun Heo  */
36187fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
36287fbc5a0STejun Heo {
36387fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
36487fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
36587fbc5a0STejun Heo 	int idx;
36687fbc5a0STejun Heo 
36787fbc5a0STejun Heo 	if (ent < 0)
36887fbc5a0STejun Heo 		return;
36987fbc5a0STejun Heo 
37087fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
37187fbc5a0STejun Heo 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
37287fbc5a0STejun Heo 		ehc->cmd_timeout_idx[dev->devno][ent]++;
37387fbc5a0STejun Heo }
37487fbc5a0STejun Heo 
3753884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
376c6fd2807SJeff Garzik 			     unsigned int err_mask)
377c6fd2807SJeff Garzik {
378c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
379c6fd2807SJeff Garzik 
380c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
381c6fd2807SJeff Garzik 
382c6fd2807SJeff Garzik 	ering->cursor++;
383c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
384c6fd2807SJeff Garzik 
385c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
3863884f7b0STejun Heo 	ent->eflags = eflags;
387c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
388c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
389c6fd2807SJeff Garzik }
390c6fd2807SJeff Garzik 
39176326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
39276326ac1STejun Heo {
39376326ac1STejun Heo 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
39476326ac1STejun Heo 
39576326ac1STejun Heo 	if (ent->err_mask)
39676326ac1STejun Heo 		return ent;
39776326ac1STejun Heo 	return NULL;
39876326ac1STejun Heo }
39976326ac1STejun Heo 
400d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering,
401c6fd2807SJeff Garzik 		  int (*map_fn)(struct ata_ering_entry *, void *),
402c6fd2807SJeff Garzik 		  void *arg)
403c6fd2807SJeff Garzik {
404c6fd2807SJeff Garzik 	int idx, rc = 0;
405c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
406c6fd2807SJeff Garzik 
407c6fd2807SJeff Garzik 	idx = ering->cursor;
408c6fd2807SJeff Garzik 	do {
409c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
410c6fd2807SJeff Garzik 		if (!ent->err_mask)
411c6fd2807SJeff Garzik 			break;
412c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
413c6fd2807SJeff Garzik 		if (rc)
414c6fd2807SJeff Garzik 			break;
415c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
416c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
417c6fd2807SJeff Garzik 
418c6fd2807SJeff Garzik 	return rc;
419c6fd2807SJeff Garzik }
420c6fd2807SJeff Garzik 
421d9027470SGwendal Grignou int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
422d9027470SGwendal Grignou {
423d9027470SGwendal Grignou 	ent->eflags |= ATA_EFLAG_OLD_ER;
424d9027470SGwendal Grignou 	return 0;
425d9027470SGwendal Grignou }
426d9027470SGwendal Grignou 
427d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering)
428d9027470SGwendal Grignou {
429d9027470SGwendal Grignou 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
430d9027470SGwendal Grignou }
431d9027470SGwendal Grignou 
432c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
433c6fd2807SJeff Garzik {
4349af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
435c6fd2807SJeff Garzik 
436c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
437c6fd2807SJeff Garzik }
438c6fd2807SJeff Garzik 
439f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
440c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
441c6fd2807SJeff Garzik {
442f58229f8STejun Heo 	struct ata_device *tdev;
443c6fd2807SJeff Garzik 
444c6fd2807SJeff Garzik 	if (!dev) {
445c6fd2807SJeff Garzik 		ehi->action &= ~action;
4461eca4365STejun Heo 		ata_for_each_dev(tdev, link, ALL)
447f58229f8STejun Heo 			ehi->dev_action[tdev->devno] &= ~action;
448c6fd2807SJeff Garzik 	} else {
449c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
450c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
451c6fd2807SJeff Garzik 
452c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
453c6fd2807SJeff Garzik 		if (ehi->action & action) {
4541eca4365STejun Heo 			ata_for_each_dev(tdev, link, ALL)
455f58229f8STejun Heo 				ehi->dev_action[tdev->devno] |=
456f58229f8STejun Heo 					ehi->action & action;
457c6fd2807SJeff Garzik 			ehi->action &= ~action;
458c6fd2807SJeff Garzik 		}
459c6fd2807SJeff Garzik 
460c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
461c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
462c6fd2807SJeff Garzik 	}
463c6fd2807SJeff Garzik }
464c6fd2807SJeff Garzik 
465c6fd2807SJeff Garzik /**
466c0c362b6STejun Heo  *	ata_eh_acquire - acquire EH ownership
467c0c362b6STejun Heo  *	@ap: ATA port to acquire EH ownership for
468c0c362b6STejun Heo  *
469c0c362b6STejun Heo  *	Acquire EH ownership for @ap.  This is the basic exclusion
470c0c362b6STejun Heo  *	mechanism for ports sharing a host.  Only one port hanging off
471c0c362b6STejun Heo  *	the same host can claim the ownership of EH.
472c0c362b6STejun Heo  *
473c0c362b6STejun Heo  *	LOCKING:
474c0c362b6STejun Heo  *	EH context.
475c0c362b6STejun Heo  */
476c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap)
477c0c362b6STejun Heo {
478c0c362b6STejun Heo 	mutex_lock(&ap->host->eh_mutex);
479c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner);
480c0c362b6STejun Heo 	ap->host->eh_owner = current;
481c0c362b6STejun Heo }
482c0c362b6STejun Heo 
483c0c362b6STejun Heo /**
484c0c362b6STejun Heo  *	ata_eh_release - release EH ownership
485c0c362b6STejun Heo  *	@ap: ATA port to release EH ownership for
486c0c362b6STejun Heo  *
487c0c362b6STejun Heo  *	Release EH ownership for @ap if the caller.  The caller must
488c0c362b6STejun Heo  *	have acquired EH ownership using ata_eh_acquire() previously.
489c0c362b6STejun Heo  *
490c0c362b6STejun Heo  *	LOCKING:
491c0c362b6STejun Heo  *	EH context.
492c0c362b6STejun Heo  */
493c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap)
494c0c362b6STejun Heo {
495c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner != current);
496c0c362b6STejun Heo 	ap->host->eh_owner = NULL;
497c0c362b6STejun Heo 	mutex_unlock(&ap->host->eh_mutex);
498c0c362b6STejun Heo }
499c0c362b6STejun Heo 
500c0c362b6STejun Heo /**
501c6fd2807SJeff Garzik  *	ata_scsi_timed_out - SCSI layer time out callback
502c6fd2807SJeff Garzik  *	@cmd: timed out SCSI command
503c6fd2807SJeff Garzik  *
504c6fd2807SJeff Garzik  *	Handles SCSI layer timeout.  We race with normal completion of
505c6fd2807SJeff Garzik  *	the qc for @cmd.  If the qc is already gone, we lose and let
506c6fd2807SJeff Garzik  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
507c6fd2807SJeff Garzik  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
508c6fd2807SJeff Garzik  *	from finishing it by setting EH_SCHEDULED and return
509c6fd2807SJeff Garzik  *	EH_NOT_HANDLED.
510c6fd2807SJeff Garzik  *
511c6fd2807SJeff Garzik  *	TODO: kill this function once old EH is gone.
512c6fd2807SJeff Garzik  *
513c6fd2807SJeff Garzik  *	LOCKING:
514c6fd2807SJeff Garzik  *	Called from timer context
515c6fd2807SJeff Garzik  *
516c6fd2807SJeff Garzik  *	RETURNS:
517c6fd2807SJeff Garzik  *	EH_HANDLED or EH_NOT_HANDLED
518c6fd2807SJeff Garzik  */
519242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
520c6fd2807SJeff Garzik {
521c6fd2807SJeff Garzik 	struct Scsi_Host *host = cmd->device->host;
522c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
523c6fd2807SJeff Garzik 	unsigned long flags;
524c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
525242f9dcbSJens Axboe 	enum blk_eh_timer_return ret;
526c6fd2807SJeff Garzik 
527c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
528c6fd2807SJeff Garzik 
529c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
530242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
531c6fd2807SJeff Garzik 		goto out;
532c6fd2807SJeff Garzik 	}
533c6fd2807SJeff Garzik 
534242f9dcbSJens Axboe 	ret = BLK_EH_HANDLED;
535c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
5369af5c9c9STejun Heo 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
537c6fd2807SJeff Garzik 	if (qc) {
538c6fd2807SJeff Garzik 		WARN_ON(qc->scsicmd != cmd);
539c6fd2807SJeff Garzik 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
540c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_TIMEOUT;
541242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
542c6fd2807SJeff Garzik 	}
543c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
544c6fd2807SJeff Garzik 
545c6fd2807SJeff Garzik  out:
546c6fd2807SJeff Garzik 	DPRINTK("EXIT, ret=%d\n", ret);
547c6fd2807SJeff Garzik 	return ret;
548c6fd2807SJeff Garzik }
549c6fd2807SJeff Garzik 
550ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap)
551ece180d1STejun Heo {
552ece180d1STejun Heo 	struct ata_link *link;
553ece180d1STejun Heo 	struct ata_device *dev;
554ece180d1STejun Heo 	unsigned long flags;
555ece180d1STejun Heo 
556ece180d1STejun Heo 	/* Restore SControl IPM and SPD for the next driver and
557ece180d1STejun Heo 	 * disable attached devices.
558ece180d1STejun Heo 	 */
559ece180d1STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
560ece180d1STejun Heo 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
561ece180d1STejun Heo 		ata_for_each_dev(dev, link, ALL)
562ece180d1STejun Heo 			ata_dev_disable(dev);
563ece180d1STejun Heo 	}
564ece180d1STejun Heo 
565ece180d1STejun Heo 	/* freeze and set UNLOADED */
566ece180d1STejun Heo 	spin_lock_irqsave(ap->lock, flags);
567ece180d1STejun Heo 
568ece180d1STejun Heo 	ata_port_freeze(ap);			/* won't be thawed */
569ece180d1STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
570ece180d1STejun Heo 	ap->pflags |= ATA_PFLAG_UNLOADED;
571ece180d1STejun Heo 
572ece180d1STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
573ece180d1STejun Heo }
574ece180d1STejun Heo 
575c6fd2807SJeff Garzik /**
576c6fd2807SJeff Garzik  *	ata_scsi_error - SCSI layer error handler callback
577c6fd2807SJeff Garzik  *	@host: SCSI host on which error occurred
578c6fd2807SJeff Garzik  *
579c6fd2807SJeff Garzik  *	Handles SCSI-layer-thrown error events.
580c6fd2807SJeff Garzik  *
581c6fd2807SJeff Garzik  *	LOCKING:
582c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
583c6fd2807SJeff Garzik  *
584c6fd2807SJeff Garzik  *	RETURNS:
585c6fd2807SJeff Garzik  *	Zero.
586c6fd2807SJeff Garzik  */
587c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
588c6fd2807SJeff Garzik {
589c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
590a1e10f7eSTejun Heo 	int i;
591c6fd2807SJeff Garzik 	unsigned long flags;
592*c34aeebcSJames Bottomley 	LIST_HEAD(eh_work_q);
593c6fd2807SJeff Garzik 
594c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
595c6fd2807SJeff Garzik 
596*c34aeebcSJames Bottomley 	spin_lock_irqsave(host->host_lock, flags);
597*c34aeebcSJames Bottomley 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
598*c34aeebcSJames Bottomley 	spin_unlock_irqrestore(host->host_lock, flags);
599*c34aeebcSJames Bottomley 
600c429137aSTejun Heo 	/* make sure sff pio task is not running */
601c429137aSTejun Heo 	ata_sff_flush_pio_task(ap);
602c6fd2807SJeff Garzik 
603cca3974eSJeff Garzik 	/* synchronize with host lock and sort out timeouts */
604c6fd2807SJeff Garzik 
605c6fd2807SJeff Garzik 	/* For new EH, all qcs are finished in one of three ways -
606c6fd2807SJeff Garzik 	 * normal completion, error completion, and SCSI timeout.
607c96f1732SAlan Cox 	 * Both completions can race against SCSI timeout.  When normal
608c6fd2807SJeff Garzik 	 * completion wins, the qc never reaches EH.  When error
609c6fd2807SJeff Garzik 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
610c6fd2807SJeff Garzik 	 *
611c6fd2807SJeff Garzik 	 * When SCSI timeout wins, things are a bit more complex.
612c6fd2807SJeff Garzik 	 * Normal or error completion can occur after the timeout but
613c6fd2807SJeff Garzik 	 * before this point.  In such cases, both types of
614c6fd2807SJeff Garzik 	 * completions are honored.  A scmd is determined to have
615c6fd2807SJeff Garzik 	 * timed out iff its associated qc is active and not failed.
616c6fd2807SJeff Garzik 	 */
617c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
618c6fd2807SJeff Garzik 		struct scsi_cmnd *scmd, *tmp;
619c6fd2807SJeff Garzik 		int nr_timedout = 0;
620c6fd2807SJeff Garzik 
621c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
622c6fd2807SJeff Garzik 
623c96f1732SAlan Cox 		/* This must occur under the ap->lock as we don't want
624c96f1732SAlan Cox 		   a polled recovery to race the real interrupt handler
625c96f1732SAlan Cox 
626c96f1732SAlan Cox 		   The lost_interrupt handler checks for any completed but
627c96f1732SAlan Cox 		   non-notified command and completes much like an IRQ handler.
628c96f1732SAlan Cox 
629c96f1732SAlan Cox 		   We then fall into the error recovery code which will treat
630c96f1732SAlan Cox 		   this as if normal completion won the race */
631c96f1732SAlan Cox 
632c96f1732SAlan Cox 		if (ap->ops->lost_interrupt)
633c96f1732SAlan Cox 			ap->ops->lost_interrupt(ap);
634c96f1732SAlan Cox 
635*c34aeebcSJames Bottomley 		list_for_each_entry_safe(scmd, tmp, &eh_work_q, eh_entry) {
636c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
637c6fd2807SJeff Garzik 
638c6fd2807SJeff Garzik 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
639c6fd2807SJeff Garzik 				qc = __ata_qc_from_tag(ap, i);
640c6fd2807SJeff Garzik 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
641c6fd2807SJeff Garzik 				    qc->scsicmd == scmd)
642c6fd2807SJeff Garzik 					break;
643c6fd2807SJeff Garzik 			}
644c6fd2807SJeff Garzik 
645c6fd2807SJeff Garzik 			if (i < ATA_MAX_QUEUE) {
646c6fd2807SJeff Garzik 				/* the scmd has an associated qc */
647c6fd2807SJeff Garzik 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
648c6fd2807SJeff Garzik 					/* which hasn't failed yet, timeout */
649c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_TIMEOUT;
650c6fd2807SJeff Garzik 					qc->flags |= ATA_QCFLAG_FAILED;
651c6fd2807SJeff Garzik 					nr_timedout++;
652c6fd2807SJeff Garzik 				}
653c6fd2807SJeff Garzik 			} else {
654c6fd2807SJeff Garzik 				/* Normal completion occurred after
655c6fd2807SJeff Garzik 				 * SCSI timeout but before this point.
656c6fd2807SJeff Garzik 				 * Successfully complete it.
657c6fd2807SJeff Garzik 				 */
658c6fd2807SJeff Garzik 				scmd->retries = scmd->allowed;
659c6fd2807SJeff Garzik 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
660c6fd2807SJeff Garzik 			}
661c6fd2807SJeff Garzik 		}
662c6fd2807SJeff Garzik 
663c6fd2807SJeff Garzik 		/* If we have timed out qcs.  They belong to EH from
664c6fd2807SJeff Garzik 		 * this point but the state of the controller is
665c6fd2807SJeff Garzik 		 * unknown.  Freeze the port to make sure the IRQ
666c6fd2807SJeff Garzik 		 * handler doesn't diddle with those qcs.  This must
667c6fd2807SJeff Garzik 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
668c6fd2807SJeff Garzik 		 */
669c6fd2807SJeff Garzik 		if (nr_timedout)
670c6fd2807SJeff Garzik 			__ata_port_freeze(ap);
671c6fd2807SJeff Garzik 
672c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
673a1e10f7eSTejun Heo 
674a1e10f7eSTejun Heo 		/* initialize eh_tries */
675a1e10f7eSTejun Heo 		ap->eh_tries = ATA_EH_MAX_TRIES;
676c6fd2807SJeff Garzik 	} else
677c6fd2807SJeff Garzik 		spin_unlock_wait(ap->lock);
678c6fd2807SJeff Garzik 
679c96f1732SAlan Cox 	/* If we timed raced normal completion and there is nothing to
680c96f1732SAlan Cox 	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
681c96f1732SAlan Cox 
682c6fd2807SJeff Garzik 	/* invoke error handler */
683c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
684cf1b86c8STejun Heo 		struct ata_link *link;
685cf1b86c8STejun Heo 
686c0c362b6STejun Heo 		/* acquire EH ownership */
687c0c362b6STejun Heo 		ata_eh_acquire(ap);
688c0c362b6STejun Heo  repeat:
6895ddf24c5STejun Heo 		/* kill fast drain timer */
6905ddf24c5STejun Heo 		del_timer_sync(&ap->fastdrain_timer);
6915ddf24c5STejun Heo 
692c6fd2807SJeff Garzik 		/* process port resume request */
693c6fd2807SJeff Garzik 		ata_eh_handle_port_resume(ap);
694c6fd2807SJeff Garzik 
695c6fd2807SJeff Garzik 		/* fetch & clear EH info */
696c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
697c6fd2807SJeff Garzik 
6981eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST) {
69900115e0fSTejun Heo 			struct ata_eh_context *ehc = &link->eh_context;
70000115e0fSTejun Heo 			struct ata_device *dev;
70100115e0fSTejun Heo 
702cf1b86c8STejun Heo 			memset(&link->eh_context, 0, sizeof(link->eh_context));
703cf1b86c8STejun Heo 			link->eh_context.i = link->eh_info;
704cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
70500115e0fSTejun Heo 
7061eca4365STejun Heo 			ata_for_each_dev(dev, link, ENABLED) {
70700115e0fSTejun Heo 				int devno = dev->devno;
70800115e0fSTejun Heo 
70900115e0fSTejun Heo 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
71000115e0fSTejun Heo 				if (ata_ncq_enabled(dev))
71100115e0fSTejun Heo 					ehc->saved_ncq_enabled |= 1 << devno;
71200115e0fSTejun Heo 			}
713cf1b86c8STejun Heo 		}
714c6fd2807SJeff Garzik 
715c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
716c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
717da917d69STejun Heo 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
718c6fd2807SJeff Garzik 
719c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
720c6fd2807SJeff Garzik 
721c6fd2807SJeff Garzik 		/* invoke EH, skip if unloading or suspended */
722c6fd2807SJeff Garzik 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
723c6fd2807SJeff Garzik 			ap->ops->error_handler(ap);
724ece180d1STejun Heo 		else {
725ece180d1STejun Heo 			/* if unloading, commence suicide */
726ece180d1STejun Heo 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
727ece180d1STejun Heo 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
728ece180d1STejun Heo 				ata_eh_unload(ap);
729c6fd2807SJeff Garzik 			ata_eh_finish(ap);
730ece180d1STejun Heo 		}
731c6fd2807SJeff Garzik 
732c6fd2807SJeff Garzik 		/* process port suspend request */
733c6fd2807SJeff Garzik 		ata_eh_handle_port_suspend(ap);
734c6fd2807SJeff Garzik 
735c6fd2807SJeff Garzik 		/* Exception might have happend after ->error_handler
736c6fd2807SJeff Garzik 		 * recovered the port but before this point.  Repeat
737c6fd2807SJeff Garzik 		 * EH in such case.
738c6fd2807SJeff Garzik 		 */
739c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
740c6fd2807SJeff Garzik 
741c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
742a1e10f7eSTejun Heo 			if (--ap->eh_tries) {
743c6fd2807SJeff Garzik 				spin_unlock_irqrestore(ap->lock, flags);
744c6fd2807SJeff Garzik 				goto repeat;
745c6fd2807SJeff Garzik 			}
746c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
747a1e10f7eSTejun Heo 					"tries, giving up\n", ATA_EH_MAX_TRIES);
748914616a3STejun Heo 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
749c6fd2807SJeff Garzik 		}
750c6fd2807SJeff Garzik 
751c6fd2807SJeff Garzik 		/* this run is complete, make sure EH info is clear */
7521eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST)
753cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
754c6fd2807SJeff Garzik 
755c6fd2807SJeff Garzik 		/* Clear host_eh_scheduled while holding ap->lock such
756c6fd2807SJeff Garzik 		 * that if exception occurs after this point but
757c6fd2807SJeff Garzik 		 * before EH completion, SCSI midlayer will
758c6fd2807SJeff Garzik 		 * re-initiate EH.
759c6fd2807SJeff Garzik 		 */
760c6fd2807SJeff Garzik 		host->host_eh_scheduled = 0;
761c6fd2807SJeff Garzik 
762c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
763c0c362b6STejun Heo 		ata_eh_release(ap);
764c6fd2807SJeff Garzik 	} else {
7659af5c9c9STejun Heo 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
766c6fd2807SJeff Garzik 		ap->ops->eng_timeout(ap);
767c6fd2807SJeff Garzik 	}
768c6fd2807SJeff Garzik 
769c6fd2807SJeff Garzik 	/* finish or retry handled scmd's and clean up */
770*c34aeebcSJames Bottomley 	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
771c6fd2807SJeff Garzik 
772c6fd2807SJeff Garzik 	scsi_eh_flush_done_q(&ap->eh_done_q);
773c6fd2807SJeff Garzik 
774c6fd2807SJeff Garzik 	/* clean up */
775c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
776c6fd2807SJeff Garzik 
777c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_LOADING)
778c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_LOADING;
779c6fd2807SJeff Garzik 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
780ad72cf98STejun Heo 		schedule_delayed_work(&ap->hotplug_task, 0);
781c6fd2807SJeff Garzik 
782c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_RECOVERED)
783c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
784c6fd2807SJeff Garzik 
785c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
786c6fd2807SJeff Garzik 
787c6fd2807SJeff Garzik 	/* tell wait_eh that we're done */
788c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
789c6fd2807SJeff Garzik 	wake_up_all(&ap->eh_wait_q);
790c6fd2807SJeff Garzik 
791c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
792c6fd2807SJeff Garzik 
793c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
794c6fd2807SJeff Garzik }
795c6fd2807SJeff Garzik 
796c6fd2807SJeff Garzik /**
797c6fd2807SJeff Garzik  *	ata_port_wait_eh - Wait for the currently pending EH to complete
798c6fd2807SJeff Garzik  *	@ap: Port to wait EH for
799c6fd2807SJeff Garzik  *
800c6fd2807SJeff Garzik  *	Wait until the currently pending EH is complete.
801c6fd2807SJeff Garzik  *
802c6fd2807SJeff Garzik  *	LOCKING:
803c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
804c6fd2807SJeff Garzik  */
805c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap)
806c6fd2807SJeff Garzik {
807c6fd2807SJeff Garzik 	unsigned long flags;
808c6fd2807SJeff Garzik 	DEFINE_WAIT(wait);
809c6fd2807SJeff Garzik 
810c6fd2807SJeff Garzik  retry:
811c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
812c6fd2807SJeff Garzik 
813c6fd2807SJeff Garzik 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
814c6fd2807SJeff Garzik 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
815c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
816c6fd2807SJeff Garzik 		schedule();
817c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
818c6fd2807SJeff Garzik 	}
819c6fd2807SJeff Garzik 	finish_wait(&ap->eh_wait_q, &wait);
820c6fd2807SJeff Garzik 
821c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
822c6fd2807SJeff Garzik 
823c6fd2807SJeff Garzik 	/* make sure SCSI EH is complete */
824cca3974eSJeff Garzik 	if (scsi_host_in_recovery(ap->scsi_host)) {
82597750cebSTejun Heo 		ata_msleep(ap, 10);
826c6fd2807SJeff Garzik 		goto retry;
827c6fd2807SJeff Garzik 	}
828c6fd2807SJeff Garzik }
829c6fd2807SJeff Garzik 
8305ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap)
8315ddf24c5STejun Heo {
8325ddf24c5STejun Heo 	unsigned int tag;
8335ddf24c5STejun Heo 	int nr = 0;
8345ddf24c5STejun Heo 
8355ddf24c5STejun Heo 	/* count only non-internal commands */
8365ddf24c5STejun Heo 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
8375ddf24c5STejun Heo 		if (ata_qc_from_tag(ap, tag))
8385ddf24c5STejun Heo 			nr++;
8395ddf24c5STejun Heo 
8405ddf24c5STejun Heo 	return nr;
8415ddf24c5STejun Heo }
8425ddf24c5STejun Heo 
8435ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg)
8445ddf24c5STejun Heo {
8455ddf24c5STejun Heo 	struct ata_port *ap = (void *)arg;
8465ddf24c5STejun Heo 	unsigned long flags;
8475ddf24c5STejun Heo 	int cnt;
8485ddf24c5STejun Heo 
8495ddf24c5STejun Heo 	spin_lock_irqsave(ap->lock, flags);
8505ddf24c5STejun Heo 
8515ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8525ddf24c5STejun Heo 
8535ddf24c5STejun Heo 	/* are we done? */
8545ddf24c5STejun Heo 	if (!cnt)
8555ddf24c5STejun Heo 		goto out_unlock;
8565ddf24c5STejun Heo 
8575ddf24c5STejun Heo 	if (cnt == ap->fastdrain_cnt) {
8585ddf24c5STejun Heo 		unsigned int tag;
8595ddf24c5STejun Heo 
8605ddf24c5STejun Heo 		/* No progress during the last interval, tag all
8615ddf24c5STejun Heo 		 * in-flight qcs as timed out and freeze the port.
8625ddf24c5STejun Heo 		 */
8635ddf24c5STejun Heo 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
8645ddf24c5STejun Heo 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
8655ddf24c5STejun Heo 			if (qc)
8665ddf24c5STejun Heo 				qc->err_mask |= AC_ERR_TIMEOUT;
8675ddf24c5STejun Heo 		}
8685ddf24c5STejun Heo 
8695ddf24c5STejun Heo 		ata_port_freeze(ap);
8705ddf24c5STejun Heo 	} else {
8715ddf24c5STejun Heo 		/* some qcs have finished, give it another chance */
8725ddf24c5STejun Heo 		ap->fastdrain_cnt = cnt;
8735ddf24c5STejun Heo 		ap->fastdrain_timer.expires =
874341c2c95STejun Heo 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
8755ddf24c5STejun Heo 		add_timer(&ap->fastdrain_timer);
8765ddf24c5STejun Heo 	}
8775ddf24c5STejun Heo 
8785ddf24c5STejun Heo  out_unlock:
8795ddf24c5STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
8805ddf24c5STejun Heo }
8815ddf24c5STejun Heo 
8825ddf24c5STejun Heo /**
8835ddf24c5STejun Heo  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
8845ddf24c5STejun Heo  *	@ap: target ATA port
8855ddf24c5STejun Heo  *	@fastdrain: activate fast drain
8865ddf24c5STejun Heo  *
8875ddf24c5STejun Heo  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
8885ddf24c5STejun Heo  *	is non-zero and EH wasn't pending before.  Fast drain ensures
8895ddf24c5STejun Heo  *	that EH kicks in in timely manner.
8905ddf24c5STejun Heo  *
8915ddf24c5STejun Heo  *	LOCKING:
8925ddf24c5STejun Heo  *	spin_lock_irqsave(host lock)
8935ddf24c5STejun Heo  */
8945ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
8955ddf24c5STejun Heo {
8965ddf24c5STejun Heo 	int cnt;
8975ddf24c5STejun Heo 
8985ddf24c5STejun Heo 	/* already scheduled? */
8995ddf24c5STejun Heo 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
9005ddf24c5STejun Heo 		return;
9015ddf24c5STejun Heo 
9025ddf24c5STejun Heo 	ap->pflags |= ATA_PFLAG_EH_PENDING;
9035ddf24c5STejun Heo 
9045ddf24c5STejun Heo 	if (!fastdrain)
9055ddf24c5STejun Heo 		return;
9065ddf24c5STejun Heo 
9075ddf24c5STejun Heo 	/* do we have in-flight qcs? */
9085ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
9095ddf24c5STejun Heo 	if (!cnt)
9105ddf24c5STejun Heo 		return;
9115ddf24c5STejun Heo 
9125ddf24c5STejun Heo 	/* activate fast drain */
9135ddf24c5STejun Heo 	ap->fastdrain_cnt = cnt;
914341c2c95STejun Heo 	ap->fastdrain_timer.expires =
915341c2c95STejun Heo 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
9165ddf24c5STejun Heo 	add_timer(&ap->fastdrain_timer);
9175ddf24c5STejun Heo }
9185ddf24c5STejun Heo 
919c6fd2807SJeff Garzik /**
920c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
921c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
922c6fd2807SJeff Garzik  *
923c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
924c6fd2807SJeff Garzik  *	other commands are drained.
925c6fd2807SJeff Garzik  *
926c6fd2807SJeff Garzik  *	LOCKING:
927cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
928c6fd2807SJeff Garzik  */
929c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
930c6fd2807SJeff Garzik {
931c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
932fa41efdaSTejun Heo 	struct request_queue *q = qc->scsicmd->device->request_queue;
933fa41efdaSTejun Heo 	unsigned long flags;
934c6fd2807SJeff Garzik 
935c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
936c6fd2807SJeff Garzik 
937c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_FAILED;
9385ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
939c6fd2807SJeff Garzik 
940c6fd2807SJeff Garzik 	/* The following will fail if timeout has already expired.
941c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
942c6fd2807SJeff Garzik 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
943c6fd2807SJeff Garzik 	 * this function completes.
944c6fd2807SJeff Garzik 	 */
945fa41efdaSTejun Heo 	spin_lock_irqsave(q->queue_lock, flags);
946242f9dcbSJens Axboe 	blk_abort_request(qc->scsicmd->request);
947fa41efdaSTejun Heo 	spin_unlock_irqrestore(q->queue_lock, flags);
948c6fd2807SJeff Garzik }
949c6fd2807SJeff Garzik 
950c6fd2807SJeff Garzik /**
951c6fd2807SJeff Garzik  *	ata_port_schedule_eh - schedule error handling without a qc
952c6fd2807SJeff Garzik  *	@ap: ATA port to schedule EH for
953c6fd2807SJeff Garzik  *
954c6fd2807SJeff Garzik  *	Schedule error handling for @ap.  EH will kick in as soon as
955c6fd2807SJeff Garzik  *	all commands are drained.
956c6fd2807SJeff Garzik  *
957c6fd2807SJeff Garzik  *	LOCKING:
958cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
959c6fd2807SJeff Garzik  */
960c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap)
961c6fd2807SJeff Garzik {
962c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
963c6fd2807SJeff Garzik 
964f4d6d004STejun Heo 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
965f4d6d004STejun Heo 		return;
966f4d6d004STejun Heo 
9675ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
968cca3974eSJeff Garzik 	scsi_schedule_eh(ap->scsi_host);
969c6fd2807SJeff Garzik 
970c6fd2807SJeff Garzik 	DPRINTK("port EH scheduled\n");
971c6fd2807SJeff Garzik }
972c6fd2807SJeff Garzik 
973dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
974c6fd2807SJeff Garzik {
975c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
976c6fd2807SJeff Garzik 
977c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
978c6fd2807SJeff Garzik 
9795ddf24c5STejun Heo 	/* we're gonna abort all commands, no need for fast drain */
9805ddf24c5STejun Heo 	ata_eh_set_pending(ap, 0);
9815ddf24c5STejun Heo 
982c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
983c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
984c6fd2807SJeff Garzik 
985dbd82616STejun Heo 		if (qc && (!link || qc->dev->link == link)) {
986c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
987c6fd2807SJeff Garzik 			ata_qc_complete(qc);
988c6fd2807SJeff Garzik 			nr_aborted++;
989c6fd2807SJeff Garzik 		}
990c6fd2807SJeff Garzik 	}
991c6fd2807SJeff Garzik 
992c6fd2807SJeff Garzik 	if (!nr_aborted)
993c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
994c6fd2807SJeff Garzik 
995c6fd2807SJeff Garzik 	return nr_aborted;
996c6fd2807SJeff Garzik }
997c6fd2807SJeff Garzik 
998c6fd2807SJeff Garzik /**
999dbd82616STejun Heo  *	ata_link_abort - abort all qc's on the link
1000dbd82616STejun Heo  *	@link: ATA link to abort qc's for
1001dbd82616STejun Heo  *
1002dbd82616STejun Heo  *	Abort all active qc's active on @link and schedule EH.
1003dbd82616STejun Heo  *
1004dbd82616STejun Heo  *	LOCKING:
1005dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
1006dbd82616STejun Heo  *
1007dbd82616STejun Heo  *	RETURNS:
1008dbd82616STejun Heo  *	Number of aborted qc's.
1009dbd82616STejun Heo  */
1010dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
1011dbd82616STejun Heo {
1012dbd82616STejun Heo 	return ata_do_link_abort(link->ap, link);
1013dbd82616STejun Heo }
1014dbd82616STejun Heo 
1015dbd82616STejun Heo /**
1016dbd82616STejun Heo  *	ata_port_abort - abort all qc's on the port
1017dbd82616STejun Heo  *	@ap: ATA port to abort qc's for
1018dbd82616STejun Heo  *
1019dbd82616STejun Heo  *	Abort all active qc's of @ap and schedule EH.
1020dbd82616STejun Heo  *
1021dbd82616STejun Heo  *	LOCKING:
1022dbd82616STejun Heo  *	spin_lock_irqsave(host_set lock)
1023dbd82616STejun Heo  *
1024dbd82616STejun Heo  *	RETURNS:
1025dbd82616STejun Heo  *	Number of aborted qc's.
1026dbd82616STejun Heo  */
1027dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
1028dbd82616STejun Heo {
1029dbd82616STejun Heo 	return ata_do_link_abort(ap, NULL);
1030dbd82616STejun Heo }
1031dbd82616STejun Heo 
1032dbd82616STejun Heo /**
1033c6fd2807SJeff Garzik  *	__ata_port_freeze - freeze port
1034c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1035c6fd2807SJeff Garzik  *
1036c6fd2807SJeff Garzik  *	This function is called when HSM violation or some other
1037c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  Frozen port
1038c6fd2807SJeff Garzik  *	is not allowed to perform any operation until the port is
1039c6fd2807SJeff Garzik  *	thawed, which usually follows a successful reset.
1040c6fd2807SJeff Garzik  *
1041c6fd2807SJeff Garzik  *	ap->ops->freeze() callback can be used for freezing the port
1042c6fd2807SJeff Garzik  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1043c6fd2807SJeff Garzik  *	port cannot be frozen hardware-wise, the interrupt handler
1044c6fd2807SJeff Garzik  *	must ack and clear interrupts unconditionally while the port
1045c6fd2807SJeff Garzik  *	is frozen.
1046c6fd2807SJeff Garzik  *
1047c6fd2807SJeff Garzik  *	LOCKING:
1048cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1049c6fd2807SJeff Garzik  */
1050c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
1051c6fd2807SJeff Garzik {
1052c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1053c6fd2807SJeff Garzik 
1054c6fd2807SJeff Garzik 	if (ap->ops->freeze)
1055c6fd2807SJeff Garzik 		ap->ops->freeze(ap);
1056c6fd2807SJeff Garzik 
1057c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FROZEN;
1058c6fd2807SJeff Garzik 
105944877b4eSTejun Heo 	DPRINTK("ata%u port frozen\n", ap->print_id);
1060c6fd2807SJeff Garzik }
1061c6fd2807SJeff Garzik 
1062c6fd2807SJeff Garzik /**
1063c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
1064c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1065c6fd2807SJeff Garzik  *
106654c38444SJeff Garzik  *	Abort and freeze @ap.  The freeze operation must be called
106754c38444SJeff Garzik  *	first, because some hardware requires special operations
106854c38444SJeff Garzik  *	before the taskfile registers are accessible.
1069c6fd2807SJeff Garzik  *
1070c6fd2807SJeff Garzik  *	LOCKING:
1071cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1072c6fd2807SJeff Garzik  *
1073c6fd2807SJeff Garzik  *	RETURNS:
1074c6fd2807SJeff Garzik  *	Number of aborted commands.
1075c6fd2807SJeff Garzik  */
1076c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1077c6fd2807SJeff Garzik {
1078c6fd2807SJeff Garzik 	int nr_aborted;
1079c6fd2807SJeff Garzik 
1080c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1081c6fd2807SJeff Garzik 
1082c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
108354c38444SJeff Garzik 	nr_aborted = ata_port_abort(ap);
1084c6fd2807SJeff Garzik 
1085c6fd2807SJeff Garzik 	return nr_aborted;
1086c6fd2807SJeff Garzik }
1087c6fd2807SJeff Garzik 
1088c6fd2807SJeff Garzik /**
10897d77b247STejun Heo  *	sata_async_notification - SATA async notification handler
10907d77b247STejun Heo  *	@ap: ATA port where async notification is received
10917d77b247STejun Heo  *
10927d77b247STejun Heo  *	Handler to be called when async notification via SDB FIS is
10937d77b247STejun Heo  *	received.  This function schedules EH if necessary.
10947d77b247STejun Heo  *
10957d77b247STejun Heo  *	LOCKING:
10967d77b247STejun Heo  *	spin_lock_irqsave(host lock)
10977d77b247STejun Heo  *
10987d77b247STejun Heo  *	RETURNS:
10997d77b247STejun Heo  *	1 if EH is scheduled, 0 otherwise.
11007d77b247STejun Heo  */
11017d77b247STejun Heo int sata_async_notification(struct ata_port *ap)
11027d77b247STejun Heo {
11037d77b247STejun Heo 	u32 sntf;
11047d77b247STejun Heo 	int rc;
11057d77b247STejun Heo 
11067d77b247STejun Heo 	if (!(ap->flags & ATA_FLAG_AN))
11077d77b247STejun Heo 		return 0;
11087d77b247STejun Heo 
11097d77b247STejun Heo 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
11107d77b247STejun Heo 	if (rc == 0)
11117d77b247STejun Heo 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
11127d77b247STejun Heo 
1113071f44b1STejun Heo 	if (!sata_pmp_attached(ap) || rc) {
11147d77b247STejun Heo 		/* PMP is not attached or SNTF is not available */
1115071f44b1STejun Heo 		if (!sata_pmp_attached(ap)) {
11167d77b247STejun Heo 			/* PMP is not attached.  Check whether ATAPI
11177d77b247STejun Heo 			 * AN is configured.  If so, notify media
11187d77b247STejun Heo 			 * change.
11197d77b247STejun Heo 			 */
11207d77b247STejun Heo 			struct ata_device *dev = ap->link.device;
11217d77b247STejun Heo 
11227d77b247STejun Heo 			if ((dev->class == ATA_DEV_ATAPI) &&
11237d77b247STejun Heo 			    (dev->flags & ATA_DFLAG_AN))
11247d77b247STejun Heo 				ata_scsi_media_change_notify(dev);
11257d77b247STejun Heo 			return 0;
11267d77b247STejun Heo 		} else {
11277d77b247STejun Heo 			/* PMP is attached but SNTF is not available.
11287d77b247STejun Heo 			 * ATAPI async media change notification is
11297d77b247STejun Heo 			 * not used.  The PMP must be reporting PHY
11307d77b247STejun Heo 			 * status change, schedule EH.
11317d77b247STejun Heo 			 */
11327d77b247STejun Heo 			ata_port_schedule_eh(ap);
11337d77b247STejun Heo 			return 1;
11347d77b247STejun Heo 		}
11357d77b247STejun Heo 	} else {
11367d77b247STejun Heo 		/* PMP is attached and SNTF is available */
11377d77b247STejun Heo 		struct ata_link *link;
11387d77b247STejun Heo 
11397d77b247STejun Heo 		/* check and notify ATAPI AN */
11401eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
11417d77b247STejun Heo 			if (!(sntf & (1 << link->pmp)))
11427d77b247STejun Heo 				continue;
11437d77b247STejun Heo 
11447d77b247STejun Heo 			if ((link->device->class == ATA_DEV_ATAPI) &&
11457d77b247STejun Heo 			    (link->device->flags & ATA_DFLAG_AN))
11467d77b247STejun Heo 				ata_scsi_media_change_notify(link->device);
11477d77b247STejun Heo 		}
11487d77b247STejun Heo 
11497d77b247STejun Heo 		/* If PMP is reporting that PHY status of some
11507d77b247STejun Heo 		 * downstream ports has changed, schedule EH.
11517d77b247STejun Heo 		 */
11527d77b247STejun Heo 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
11537d77b247STejun Heo 			ata_port_schedule_eh(ap);
11547d77b247STejun Heo 			return 1;
11557d77b247STejun Heo 		}
11567d77b247STejun Heo 
11577d77b247STejun Heo 		return 0;
11587d77b247STejun Heo 	}
11597d77b247STejun Heo }
11607d77b247STejun Heo 
11617d77b247STejun Heo /**
1162c6fd2807SJeff Garzik  *	ata_eh_freeze_port - EH helper to freeze port
1163c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1164c6fd2807SJeff Garzik  *
1165c6fd2807SJeff Garzik  *	Freeze @ap.
1166c6fd2807SJeff Garzik  *
1167c6fd2807SJeff Garzik  *	LOCKING:
1168c6fd2807SJeff Garzik  *	None.
1169c6fd2807SJeff Garzik  */
1170c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1171c6fd2807SJeff Garzik {
1172c6fd2807SJeff Garzik 	unsigned long flags;
1173c6fd2807SJeff Garzik 
1174c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1175c6fd2807SJeff Garzik 		return;
1176c6fd2807SJeff Garzik 
1177c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1178c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1179c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1180c6fd2807SJeff Garzik }
1181c6fd2807SJeff Garzik 
1182c6fd2807SJeff Garzik /**
1183c6fd2807SJeff Garzik  *	ata_port_thaw_port - EH helper to thaw port
1184c6fd2807SJeff Garzik  *	@ap: ATA port to thaw
1185c6fd2807SJeff Garzik  *
1186c6fd2807SJeff Garzik  *	Thaw frozen port @ap.
1187c6fd2807SJeff Garzik  *
1188c6fd2807SJeff Garzik  *	LOCKING:
1189c6fd2807SJeff Garzik  *	None.
1190c6fd2807SJeff Garzik  */
1191c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1192c6fd2807SJeff Garzik {
1193c6fd2807SJeff Garzik 	unsigned long flags;
1194c6fd2807SJeff Garzik 
1195c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1196c6fd2807SJeff Garzik 		return;
1197c6fd2807SJeff Garzik 
1198c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1199c6fd2807SJeff Garzik 
1200c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1201c6fd2807SJeff Garzik 
1202c6fd2807SJeff Garzik 	if (ap->ops->thaw)
1203c6fd2807SJeff Garzik 		ap->ops->thaw(ap);
1204c6fd2807SJeff Garzik 
1205c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1206c6fd2807SJeff Garzik 
120744877b4eSTejun Heo 	DPRINTK("ata%u port thawed\n", ap->print_id);
1208c6fd2807SJeff Garzik }
1209c6fd2807SJeff Garzik 
1210c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1211c6fd2807SJeff Garzik {
1212c6fd2807SJeff Garzik 	/* nada */
1213c6fd2807SJeff Garzik }
1214c6fd2807SJeff Garzik 
1215c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1216c6fd2807SJeff Garzik {
1217c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1218c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1219c6fd2807SJeff Garzik 	unsigned long flags;
1220c6fd2807SJeff Garzik 
1221c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1222c6fd2807SJeff Garzik 	qc->scsidone = ata_eh_scsidone;
1223c6fd2807SJeff Garzik 	__ata_qc_complete(qc);
1224c6fd2807SJeff Garzik 	WARN_ON(ata_tag_valid(qc->tag));
1225c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1226c6fd2807SJeff Garzik 
1227c6fd2807SJeff Garzik 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1228c6fd2807SJeff Garzik }
1229c6fd2807SJeff Garzik 
1230c6fd2807SJeff Garzik /**
1231c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
1232c6fd2807SJeff Garzik  *	@qc: Command to complete
1233c6fd2807SJeff Garzik  *
1234c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
1235c6fd2807SJeff Garzik  *	completed.  To be used from EH.
1236c6fd2807SJeff Garzik  */
1237c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1238c6fd2807SJeff Garzik {
1239c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1240c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
1241c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1242c6fd2807SJeff Garzik }
1243c6fd2807SJeff Garzik 
1244c6fd2807SJeff Garzik /**
1245c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1246c6fd2807SJeff Garzik  *	@qc: Command to retry
1247c6fd2807SJeff Garzik  *
1248c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
1249c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
1250c6fd2807SJeff Garzik  *
1251c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
1252c6fd2807SJeff Garzik  *	scmd->retries is decremented for commands which get retried
1253c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
1254c6fd2807SJeff Garzik  */
1255c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1256c6fd2807SJeff Garzik {
1257c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1258c6fd2807SJeff Garzik 	if (!qc->err_mask && scmd->retries)
1259c6fd2807SJeff Garzik 		scmd->retries--;
1260c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1261c6fd2807SJeff Garzik }
1262c6fd2807SJeff Garzik 
1263c6fd2807SJeff Garzik /**
1264678afac6STejun Heo  *	ata_dev_disable - disable ATA device
1265678afac6STejun Heo  *	@dev: ATA device to disable
1266678afac6STejun Heo  *
1267678afac6STejun Heo  *	Disable @dev.
1268678afac6STejun Heo  *
1269678afac6STejun Heo  *	Locking:
1270678afac6STejun Heo  *	EH context.
1271678afac6STejun Heo  */
1272678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1273678afac6STejun Heo {
1274678afac6STejun Heo 	if (!ata_dev_enabled(dev))
1275678afac6STejun Heo 		return;
1276678afac6STejun Heo 
1277678afac6STejun Heo 	if (ata_msg_drv(dev->link->ap))
1278678afac6STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
1279678afac6STejun Heo 	ata_acpi_on_disable(dev);
1280678afac6STejun Heo 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1281678afac6STejun Heo 	dev->class++;
128299cf610aSTejun Heo 
128399cf610aSTejun Heo 	/* From now till the next successful probe, ering is used to
128499cf610aSTejun Heo 	 * track probe failures.  Clear accumulated device error info.
128599cf610aSTejun Heo 	 */
128699cf610aSTejun Heo 	ata_ering_clear(&dev->ering);
1287678afac6STejun Heo }
1288678afac6STejun Heo 
1289678afac6STejun Heo /**
1290c6fd2807SJeff Garzik  *	ata_eh_detach_dev - detach ATA device
1291c6fd2807SJeff Garzik  *	@dev: ATA device to detach
1292c6fd2807SJeff Garzik  *
1293c6fd2807SJeff Garzik  *	Detach @dev.
1294c6fd2807SJeff Garzik  *
1295c6fd2807SJeff Garzik  *	LOCKING:
1296c6fd2807SJeff Garzik  *	None.
1297c6fd2807SJeff Garzik  */
1298fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1299c6fd2807SJeff Garzik {
1300f58229f8STejun Heo 	struct ata_link *link = dev->link;
1301f58229f8STejun Heo 	struct ata_port *ap = link->ap;
130290484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1303c6fd2807SJeff Garzik 	unsigned long flags;
1304c6fd2807SJeff Garzik 
1305c6fd2807SJeff Garzik 	ata_dev_disable(dev);
1306c6fd2807SJeff Garzik 
1307c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1308c6fd2807SJeff Garzik 
1309c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1310c6fd2807SJeff Garzik 
1311c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1312c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1313c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1314c6fd2807SJeff Garzik 	}
1315c6fd2807SJeff Garzik 
131690484ebfSTejun Heo 	/* clear per-dev EH info */
1317f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1318f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
131990484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
132090484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1321c6fd2807SJeff Garzik 
1322c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1323c6fd2807SJeff Garzik }
1324c6fd2807SJeff Garzik 
1325c6fd2807SJeff Garzik /**
1326c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1327955e57dfSTejun Heo  *	@link: target ATA link
1328c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1329c6fd2807SJeff Garzik  *	@action: action about to be performed
1330c6fd2807SJeff Garzik  *
1331c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1332955e57dfSTejun Heo  *	in @link->eh_info such that eh actions are not unnecessarily
1333955e57dfSTejun Heo  *	repeated.
1334c6fd2807SJeff Garzik  *
1335c6fd2807SJeff Garzik  *	LOCKING:
1336c6fd2807SJeff Garzik  *	None.
1337c6fd2807SJeff Garzik  */
1338fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1339c6fd2807SJeff Garzik 			unsigned int action)
1340c6fd2807SJeff Garzik {
1341955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1342955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1343955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1344c6fd2807SJeff Garzik 	unsigned long flags;
1345c6fd2807SJeff Garzik 
1346c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1347c6fd2807SJeff Garzik 
1348955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1349c6fd2807SJeff Garzik 
1350a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED.  Ignore actions on
1351a568d1d2STejun Heo 	 * slave links as master will do them again.
1352a568d1d2STejun Heo 	 */
1353a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1354c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1355c6fd2807SJeff Garzik 
1356c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1357c6fd2807SJeff Garzik }
1358c6fd2807SJeff Garzik 
1359c6fd2807SJeff Garzik /**
1360c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
1361c6fd2807SJeff Garzik *	@ap: target ATA port
1362c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1363c6fd2807SJeff Garzik  *	@action: action just completed
1364c6fd2807SJeff Garzik  *
1365c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1366955e57dfSTejun Heo  *	in @link->eh_context.
1367c6fd2807SJeff Garzik  *
1368c6fd2807SJeff Garzik  *	LOCKING:
1369c6fd2807SJeff Garzik  *	None.
1370c6fd2807SJeff Garzik  */
1371fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1372c6fd2807SJeff Garzik 		 unsigned int action)
1373c6fd2807SJeff Garzik {
1374955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
13759af5c9c9STejun Heo 
1376955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, &ehc->i, action);
1377c6fd2807SJeff Garzik }
1378c6fd2807SJeff Garzik 
1379c6fd2807SJeff Garzik /**
1380c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
1381c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
1382c6fd2807SJeff Garzik  *
1383c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
1384c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
1385c6fd2807SJeff Garzik  *	error is reported.
1386c6fd2807SJeff Garzik  *
1387c6fd2807SJeff Garzik  *	LOCKING:
1388c6fd2807SJeff Garzik  *	None.
1389c6fd2807SJeff Garzik  *
1390c6fd2807SJeff Garzik  *	RETURNS:
1391c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
1392c6fd2807SJeff Garzik  */
1393c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1394c6fd2807SJeff Garzik {
1395c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
1396c6fd2807SJeff Garzik 		return "host bus error";
1397c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
1398c6fd2807SJeff Garzik 		return "ATA bus error";
1399c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
1400c6fd2807SJeff Garzik 		return "timeout";
1401c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
1402c6fd2807SJeff Garzik 		return "HSM violation";
1403c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
1404c6fd2807SJeff Garzik 		return "internal error";
1405c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
1406c6fd2807SJeff Garzik 		return "media error";
1407c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
1408c6fd2807SJeff Garzik 		return "invalid argument";
1409c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
1410c6fd2807SJeff Garzik 		return "device error";
1411c6fd2807SJeff Garzik 	return "unknown error";
1412c6fd2807SJeff Garzik }
1413c6fd2807SJeff Garzik 
1414c6fd2807SJeff Garzik /**
1415c6fd2807SJeff Garzik  *	ata_read_log_page - read a specific log page
1416c6fd2807SJeff Garzik  *	@dev: target device
1417c6fd2807SJeff Garzik  *	@page: page to read
1418c6fd2807SJeff Garzik  *	@buf: buffer to store read page
1419c6fd2807SJeff Garzik  *	@sectors: number of sectors to read
1420c6fd2807SJeff Garzik  *
1421c6fd2807SJeff Garzik  *	Read log page using READ_LOG_EXT command.
1422c6fd2807SJeff Garzik  *
1423c6fd2807SJeff Garzik  *	LOCKING:
1424c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1425c6fd2807SJeff Garzik  *
1426c6fd2807SJeff Garzik  *	RETURNS:
1427c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
1428c6fd2807SJeff Garzik  */
1429c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev,
1430c6fd2807SJeff Garzik 				      u8 page, void *buf, unsigned int sectors)
1431c6fd2807SJeff Garzik {
1432c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1433c6fd2807SJeff Garzik 	unsigned int err_mask;
1434c6fd2807SJeff Garzik 
1435c6fd2807SJeff Garzik 	DPRINTK("read log page - page %d\n", page);
1436c6fd2807SJeff Garzik 
1437c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1438c6fd2807SJeff Garzik 	tf.command = ATA_CMD_READ_LOG_EXT;
1439c6fd2807SJeff Garzik 	tf.lbal = page;
1440c6fd2807SJeff Garzik 	tf.nsect = sectors;
1441c6fd2807SJeff Garzik 	tf.hob_nsect = sectors >> 8;
1442c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1443c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
1444c6fd2807SJeff Garzik 
1445c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
14462b789108STejun Heo 				     buf, sectors * ATA_SECT_SIZE, 0);
1447c6fd2807SJeff Garzik 
1448c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1449c6fd2807SJeff Garzik 	return err_mask;
1450c6fd2807SJeff Garzik }
1451c6fd2807SJeff Garzik 
1452c6fd2807SJeff Garzik /**
1453c6fd2807SJeff Garzik  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1454c6fd2807SJeff Garzik  *	@dev: Device to read log page 10h from
1455c6fd2807SJeff Garzik  *	@tag: Resulting tag of the failed command
1456c6fd2807SJeff Garzik  *	@tf: Resulting taskfile registers of the failed command
1457c6fd2807SJeff Garzik  *
1458c6fd2807SJeff Garzik  *	Read log page 10h to obtain NCQ error details and clear error
1459c6fd2807SJeff Garzik  *	condition.
1460c6fd2807SJeff Garzik  *
1461c6fd2807SJeff Garzik  *	LOCKING:
1462c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1463c6fd2807SJeff Garzik  *
1464c6fd2807SJeff Garzik  *	RETURNS:
1465c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1466c6fd2807SJeff Garzik  */
1467c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev,
1468c6fd2807SJeff Garzik 			       int *tag, struct ata_taskfile *tf)
1469c6fd2807SJeff Garzik {
14709af5c9c9STejun Heo 	u8 *buf = dev->link->ap->sector_buf;
1471c6fd2807SJeff Garzik 	unsigned int err_mask;
1472c6fd2807SJeff Garzik 	u8 csum;
1473c6fd2807SJeff Garzik 	int i;
1474c6fd2807SJeff Garzik 
1475c6fd2807SJeff Garzik 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
1476c6fd2807SJeff Garzik 	if (err_mask)
1477c6fd2807SJeff Garzik 		return -EIO;
1478c6fd2807SJeff Garzik 
1479c6fd2807SJeff Garzik 	csum = 0;
1480c6fd2807SJeff Garzik 	for (i = 0; i < ATA_SECT_SIZE; i++)
1481c6fd2807SJeff Garzik 		csum += buf[i];
1482c6fd2807SJeff Garzik 	if (csum)
1483c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
1484c6fd2807SJeff Garzik 			       "invalid checksum 0x%x on log page 10h\n", csum);
1485c6fd2807SJeff Garzik 
1486c6fd2807SJeff Garzik 	if (buf[0] & 0x80)
1487c6fd2807SJeff Garzik 		return -ENOENT;
1488c6fd2807SJeff Garzik 
1489c6fd2807SJeff Garzik 	*tag = buf[0] & 0x1f;
1490c6fd2807SJeff Garzik 
1491c6fd2807SJeff Garzik 	tf->command = buf[2];
1492c6fd2807SJeff Garzik 	tf->feature = buf[3];
1493c6fd2807SJeff Garzik 	tf->lbal = buf[4];
1494c6fd2807SJeff Garzik 	tf->lbam = buf[5];
1495c6fd2807SJeff Garzik 	tf->lbah = buf[6];
1496c6fd2807SJeff Garzik 	tf->device = buf[7];
1497c6fd2807SJeff Garzik 	tf->hob_lbal = buf[8];
1498c6fd2807SJeff Garzik 	tf->hob_lbam = buf[9];
1499c6fd2807SJeff Garzik 	tf->hob_lbah = buf[10];
1500c6fd2807SJeff Garzik 	tf->nsect = buf[12];
1501c6fd2807SJeff Garzik 	tf->hob_nsect = buf[13];
1502c6fd2807SJeff Garzik 
1503c6fd2807SJeff Garzik 	return 0;
1504c6fd2807SJeff Garzik }
1505c6fd2807SJeff Garzik 
1506c6fd2807SJeff Garzik /**
150711fc33daSTejun Heo  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
150811fc33daSTejun Heo  *	@dev: target ATAPI device
150911fc33daSTejun Heo  *	@r_sense_key: out parameter for sense_key
151011fc33daSTejun Heo  *
151111fc33daSTejun Heo  *	Perform ATAPI TEST_UNIT_READY.
151211fc33daSTejun Heo  *
151311fc33daSTejun Heo  *	LOCKING:
151411fc33daSTejun Heo  *	EH context (may sleep).
151511fc33daSTejun Heo  *
151611fc33daSTejun Heo  *	RETURNS:
151711fc33daSTejun Heo  *	0 on success, AC_ERR_* mask on failure.
151811fc33daSTejun Heo  */
151911fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
152011fc33daSTejun Heo {
152111fc33daSTejun Heo 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
152211fc33daSTejun Heo 	struct ata_taskfile tf;
152311fc33daSTejun Heo 	unsigned int err_mask;
152411fc33daSTejun Heo 
152511fc33daSTejun Heo 	ata_tf_init(dev, &tf);
152611fc33daSTejun Heo 
152711fc33daSTejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
152811fc33daSTejun Heo 	tf.command = ATA_CMD_PACKET;
152911fc33daSTejun Heo 	tf.protocol = ATAPI_PROT_NODATA;
153011fc33daSTejun Heo 
153111fc33daSTejun Heo 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
153211fc33daSTejun Heo 	if (err_mask == AC_ERR_DEV)
153311fc33daSTejun Heo 		*r_sense_key = tf.feature >> 4;
153411fc33daSTejun Heo 	return err_mask;
153511fc33daSTejun Heo }
153611fc33daSTejun Heo 
153711fc33daSTejun Heo /**
1538c6fd2807SJeff Garzik  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1539c6fd2807SJeff Garzik  *	@dev: device to perform REQUEST_SENSE to
1540c6fd2807SJeff Garzik  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
15413eabddb8STejun Heo  *	@dfl_sense_key: default sense key to use
1542c6fd2807SJeff Garzik  *
1543c6fd2807SJeff Garzik  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1544c6fd2807SJeff Garzik  *	SENSE.  This function is EH helper.
1545c6fd2807SJeff Garzik  *
1546c6fd2807SJeff Garzik  *	LOCKING:
1547c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1548c6fd2807SJeff Garzik  *
1549c6fd2807SJeff Garzik  *	RETURNS:
1550c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask on failure
1551c6fd2807SJeff Garzik  */
15523eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev,
15533eabddb8STejun Heo 					   u8 *sense_buf, u8 dfl_sense_key)
1554c6fd2807SJeff Garzik {
15553eabddb8STejun Heo 	u8 cdb[ATAPI_CDB_LEN] =
15563eabddb8STejun Heo 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
15579af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1558c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1559c6fd2807SJeff Garzik 
1560c6fd2807SJeff Garzik 	DPRINTK("ATAPI request sense\n");
1561c6fd2807SJeff Garzik 
1562c6fd2807SJeff Garzik 	/* FIXME: is this needed? */
1563c6fd2807SJeff Garzik 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1564c6fd2807SJeff Garzik 
156556287768SAlbert Lee 	/* initialize sense_buf with the error register,
156656287768SAlbert Lee 	 * for the case where they are -not- overwritten
156756287768SAlbert Lee 	 */
1568c6fd2807SJeff Garzik 	sense_buf[0] = 0x70;
15693eabddb8STejun Heo 	sense_buf[2] = dfl_sense_key;
157056287768SAlbert Lee 
157156287768SAlbert Lee 	/* some devices time out if garbage left in tf */
157256287768SAlbert Lee 	ata_tf_init(dev, &tf);
1573c6fd2807SJeff Garzik 
1574c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1575c6fd2807SJeff Garzik 	tf.command = ATA_CMD_PACKET;
1576c6fd2807SJeff Garzik 
1577c6fd2807SJeff Garzik 	/* is it pointless to prefer PIO for "safety reasons"? */
1578c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_DMA) {
15790dc36888STejun Heo 		tf.protocol = ATAPI_PROT_DMA;
1580c6fd2807SJeff Garzik 		tf.feature |= ATAPI_PKT_DMA;
1581c6fd2807SJeff Garzik 	} else {
15820dc36888STejun Heo 		tf.protocol = ATAPI_PROT_PIO;
1583f2dfc1a1STejun Heo 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1584f2dfc1a1STejun Heo 		tf.lbah = 0;
1585c6fd2807SJeff Garzik 	}
1586c6fd2807SJeff Garzik 
1587c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
15882b789108STejun Heo 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1589c6fd2807SJeff Garzik }
1590c6fd2807SJeff Garzik 
1591c6fd2807SJeff Garzik /**
1592c6fd2807SJeff Garzik  *	ata_eh_analyze_serror - analyze SError for a failed port
15930260731fSTejun Heo  *	@link: ATA link to analyze SError for
1594c6fd2807SJeff Garzik  *
1595c6fd2807SJeff Garzik  *	Analyze SError if available and further determine cause of
1596c6fd2807SJeff Garzik  *	failure.
1597c6fd2807SJeff Garzik  *
1598c6fd2807SJeff Garzik  *	LOCKING:
1599c6fd2807SJeff Garzik  *	None.
1600c6fd2807SJeff Garzik  */
16010260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1602c6fd2807SJeff Garzik {
16030260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1604c6fd2807SJeff Garzik 	u32 serror = ehc->i.serror;
1605c6fd2807SJeff Garzik 	unsigned int err_mask = 0, action = 0;
1606f9df58cbSTejun Heo 	u32 hotplug_mask;
1607c6fd2807SJeff Garzik 
1608e0614db2STejun Heo 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1609c6fd2807SJeff Garzik 		err_mask |= AC_ERR_ATA_BUS;
1610cf480626STejun Heo 		action |= ATA_EH_RESET;
1611c6fd2807SJeff Garzik 	}
1612c6fd2807SJeff Garzik 	if (serror & SERR_PROTOCOL) {
1613c6fd2807SJeff Garzik 		err_mask |= AC_ERR_HSM;
1614cf480626STejun Heo 		action |= ATA_EH_RESET;
1615c6fd2807SJeff Garzik 	}
1616c6fd2807SJeff Garzik 	if (serror & SERR_INTERNAL) {
1617c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1618cf480626STejun Heo 		action |= ATA_EH_RESET;
1619c6fd2807SJeff Garzik 	}
1620f9df58cbSTejun Heo 
1621f9df58cbSTejun Heo 	/* Determine whether a hotplug event has occurred.  Both
1622f9df58cbSTejun Heo 	 * SError.N/X are considered hotplug events for enabled or
1623f9df58cbSTejun Heo 	 * host links.  For disabled PMP links, only N bit is
1624f9df58cbSTejun Heo 	 * considered as X bit is left at 1 for link plugging.
1625f9df58cbSTejun Heo 	 */
1626eb0e85e3STejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
16276b7ae954STejun Heo 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
16286b7ae954STejun Heo 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1629f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1630f9df58cbSTejun Heo 	else
1631f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG;
1632f9df58cbSTejun Heo 
1633f9df58cbSTejun Heo 	if (serror & hotplug_mask)
1634c6fd2807SJeff Garzik 		ata_ehi_hotplugged(&ehc->i);
1635c6fd2807SJeff Garzik 
1636c6fd2807SJeff Garzik 	ehc->i.err_mask |= err_mask;
1637c6fd2807SJeff Garzik 	ehc->i.action |= action;
1638c6fd2807SJeff Garzik }
1639c6fd2807SJeff Garzik 
1640c6fd2807SJeff Garzik /**
1641c6fd2807SJeff Garzik  *	ata_eh_analyze_ncq_error - analyze NCQ error
16420260731fSTejun Heo  *	@link: ATA link to analyze NCQ error for
1643c6fd2807SJeff Garzik  *
1644c6fd2807SJeff Garzik  *	Read log page 10h, determine the offending qc and acquire
1645c6fd2807SJeff Garzik  *	error status TF.  For NCQ device errors, all LLDDs have to do
1646c6fd2807SJeff Garzik  *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
1647c6fd2807SJeff Garzik  *	care of the rest.
1648c6fd2807SJeff Garzik  *
1649c6fd2807SJeff Garzik  *	LOCKING:
1650c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1651c6fd2807SJeff Garzik  */
165210acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1653c6fd2807SJeff Garzik {
16540260731fSTejun Heo 	struct ata_port *ap = link->ap;
16550260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
16560260731fSTejun Heo 	struct ata_device *dev = link->device;
1657c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1658c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1659c6fd2807SJeff Garzik 	int tag, rc;
1660c6fd2807SJeff Garzik 
1661c6fd2807SJeff Garzik 	/* if frozen, we can't do much */
1662c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1663c6fd2807SJeff Garzik 		return;
1664c6fd2807SJeff Garzik 
1665c6fd2807SJeff Garzik 	/* is it NCQ device error? */
16660260731fSTejun Heo 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1667c6fd2807SJeff Garzik 		return;
1668c6fd2807SJeff Garzik 
1669c6fd2807SJeff Garzik 	/* has LLDD analyzed already? */
1670c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1671c6fd2807SJeff Garzik 		qc = __ata_qc_from_tag(ap, tag);
1672c6fd2807SJeff Garzik 
1673c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1674c6fd2807SJeff Garzik 			continue;
1675c6fd2807SJeff Garzik 
1676c6fd2807SJeff Garzik 		if (qc->err_mask)
1677c6fd2807SJeff Garzik 			return;
1678c6fd2807SJeff Garzik 	}
1679c6fd2807SJeff Garzik 
1680c6fd2807SJeff Garzik 	/* okay, this error is ours */
1681a09bf4cdSJeff Garzik 	memset(&tf, 0, sizeof(tf));
1682c6fd2807SJeff Garzik 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1683c6fd2807SJeff Garzik 	if (rc) {
16840260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1685c6fd2807SJeff Garzik 				"(errno=%d)\n", rc);
1686c6fd2807SJeff Garzik 		return;
1687c6fd2807SJeff Garzik 	}
1688c6fd2807SJeff Garzik 
16890260731fSTejun Heo 	if (!(link->sactive & (1 << tag))) {
16900260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "log page 10h reported "
1691c6fd2807SJeff Garzik 				"inactive tag %d\n", tag);
1692c6fd2807SJeff Garzik 		return;
1693c6fd2807SJeff Garzik 	}
1694c6fd2807SJeff Garzik 
1695c6fd2807SJeff Garzik 	/* we've got the perpetrator, condemn it */
1696c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1697c6fd2807SJeff Garzik 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1698a6116c9eSMark Lord 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
16995335b729STejun Heo 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1700c6fd2807SJeff Garzik 	ehc->i.err_mask &= ~AC_ERR_DEV;
1701c6fd2807SJeff Garzik }
1702c6fd2807SJeff Garzik 
1703c6fd2807SJeff Garzik /**
1704c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1705c6fd2807SJeff Garzik  *	@qc: qc to analyze
1706c6fd2807SJeff Garzik  *	@tf: Taskfile registers to analyze
1707c6fd2807SJeff Garzik  *
1708c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1709c6fd2807SJeff Garzik  *	failure.  This function also requests ATAPI sense data if
1710c6fd2807SJeff Garzik  *	avaliable.
1711c6fd2807SJeff Garzik  *
1712c6fd2807SJeff Garzik  *	LOCKING:
1713c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1714c6fd2807SJeff Garzik  *
1715c6fd2807SJeff Garzik  *	RETURNS:
1716c6fd2807SJeff Garzik  *	Determined recovery action
1717c6fd2807SJeff Garzik  */
1718c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1719c6fd2807SJeff Garzik 				      const struct ata_taskfile *tf)
1720c6fd2807SJeff Garzik {
1721c6fd2807SJeff Garzik 	unsigned int tmp, action = 0;
1722c6fd2807SJeff Garzik 	u8 stat = tf->command, err = tf->feature;
1723c6fd2807SJeff Garzik 
1724c6fd2807SJeff Garzik 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1725c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_HSM;
1726cf480626STejun Heo 		return ATA_EH_RESET;
1727c6fd2807SJeff Garzik 	}
1728c6fd2807SJeff Garzik 
1729a51d644aSTejun Heo 	if (stat & (ATA_ERR | ATA_DF))
1730a51d644aSTejun Heo 		qc->err_mask |= AC_ERR_DEV;
1731a51d644aSTejun Heo 	else
1732c6fd2807SJeff Garzik 		return 0;
1733c6fd2807SJeff Garzik 
1734c6fd2807SJeff Garzik 	switch (qc->dev->class) {
1735c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1736c6fd2807SJeff Garzik 		if (err & ATA_ICRC)
1737c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_ATA_BUS;
1738c6fd2807SJeff Garzik 		if (err & ATA_UNC)
1739c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_MEDIA;
1740c6fd2807SJeff Garzik 		if (err & ATA_IDNF)
1741c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_INVALID;
1742c6fd2807SJeff Garzik 		break;
1743c6fd2807SJeff Garzik 
1744c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1745a569a30dSTejun Heo 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
17463eabddb8STejun Heo 			tmp = atapi_eh_request_sense(qc->dev,
17473eabddb8STejun Heo 						qc->scsicmd->sense_buffer,
17483eabddb8STejun Heo 						qc->result_tf.feature >> 4);
1749c6fd2807SJeff Garzik 			if (!tmp) {
1750a569a30dSTejun Heo 				/* ATA_QCFLAG_SENSE_VALID is used to
1751a569a30dSTejun Heo 				 * tell atapi_qc_complete() that sense
1752a569a30dSTejun Heo 				 * data is already valid.
1753c6fd2807SJeff Garzik 				 *
1754c6fd2807SJeff Garzik 				 * TODO: interpret sense data and set
1755c6fd2807SJeff Garzik 				 * appropriate err_mask.
1756c6fd2807SJeff Garzik 				 */
1757c6fd2807SJeff Garzik 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1758c6fd2807SJeff Garzik 			} else
1759c6fd2807SJeff Garzik 				qc->err_mask |= tmp;
1760c6fd2807SJeff Garzik 		}
1761a569a30dSTejun Heo 	}
1762c6fd2807SJeff Garzik 
1763c6fd2807SJeff Garzik 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1764cf480626STejun Heo 		action |= ATA_EH_RESET;
1765c6fd2807SJeff Garzik 
1766c6fd2807SJeff Garzik 	return action;
1767c6fd2807SJeff Garzik }
1768c6fd2807SJeff Garzik 
176976326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
177076326ac1STejun Heo 				   int *xfer_ok)
1771c6fd2807SJeff Garzik {
177276326ac1STejun Heo 	int base = 0;
177376326ac1STejun Heo 
177476326ac1STejun Heo 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
177576326ac1STejun Heo 		*xfer_ok = 1;
177676326ac1STejun Heo 
177776326ac1STejun Heo 	if (!*xfer_ok)
177875f9cafcSTejun Heo 		base = ATA_ECAT_DUBIOUS_NONE;
177976326ac1STejun Heo 
17807d47e8d4STejun Heo 	if (err_mask & AC_ERR_ATA_BUS)
178176326ac1STejun Heo 		return base + ATA_ECAT_ATA_BUS;
1782c6fd2807SJeff Garzik 
17837d47e8d4STejun Heo 	if (err_mask & AC_ERR_TIMEOUT)
178476326ac1STejun Heo 		return base + ATA_ECAT_TOUT_HSM;
17857d47e8d4STejun Heo 
17863884f7b0STejun Heo 	if (eflags & ATA_EFLAG_IS_IO) {
17877d47e8d4STejun Heo 		if (err_mask & AC_ERR_HSM)
178876326ac1STejun Heo 			return base + ATA_ECAT_TOUT_HSM;
17897d47e8d4STejun Heo 		if ((err_mask &
17907d47e8d4STejun Heo 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
179176326ac1STejun Heo 			return base + ATA_ECAT_UNK_DEV;
1792c6fd2807SJeff Garzik 	}
1793c6fd2807SJeff Garzik 
1794c6fd2807SJeff Garzik 	return 0;
1795c6fd2807SJeff Garzik }
1796c6fd2807SJeff Garzik 
17977d47e8d4STejun Heo struct speed_down_verdict_arg {
1798c6fd2807SJeff Garzik 	u64 since;
179976326ac1STejun Heo 	int xfer_ok;
18003884f7b0STejun Heo 	int nr_errors[ATA_ECAT_NR];
1801c6fd2807SJeff Garzik };
1802c6fd2807SJeff Garzik 
18037d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1804c6fd2807SJeff Garzik {
18057d47e8d4STejun Heo 	struct speed_down_verdict_arg *arg = void_arg;
180676326ac1STejun Heo 	int cat;
1807c6fd2807SJeff Garzik 
1808d9027470SGwendal Grignou 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1809c6fd2807SJeff Garzik 		return -1;
1810c6fd2807SJeff Garzik 
181176326ac1STejun Heo 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
181276326ac1STejun Heo 				      &arg->xfer_ok);
18137d47e8d4STejun Heo 	arg->nr_errors[cat]++;
181476326ac1STejun Heo 
1815c6fd2807SJeff Garzik 	return 0;
1816c6fd2807SJeff Garzik }
1817c6fd2807SJeff Garzik 
1818c6fd2807SJeff Garzik /**
18197d47e8d4STejun Heo  *	ata_eh_speed_down_verdict - Determine speed down verdict
1820c6fd2807SJeff Garzik  *	@dev: Device of interest
1821c6fd2807SJeff Garzik  *
1822c6fd2807SJeff Garzik  *	This function examines error ring of @dev and determines
18237d47e8d4STejun Heo  *	whether NCQ needs to be turned off, transfer speed should be
18247d47e8d4STejun Heo  *	stepped down, or falling back to PIO is necessary.
1825c6fd2807SJeff Garzik  *
18263884f7b0STejun Heo  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1827c6fd2807SJeff Garzik  *
18283884f7b0STejun Heo  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
18293884f7b0STejun Heo  *			  IO commands
18307d47e8d4STejun Heo  *
18313884f7b0STejun Heo  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1832c6fd2807SJeff Garzik  *
183376326ac1STejun Heo  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
183476326ac1STejun Heo  *			  data transfer hasn't been verified.
183576326ac1STejun Heo  *
18363884f7b0STejun Heo  *	Verdicts are
18377d47e8d4STejun Heo  *
18383884f7b0STejun Heo  *	NCQ_OFF		: Turn off NCQ.
18397d47e8d4STejun Heo  *
18403884f7b0STejun Heo  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
18413884f7b0STejun Heo  *			  to PIO.
18423884f7b0STejun Heo  *
18433884f7b0STejun Heo  *	FALLBACK_TO_PIO	: Fall back to PIO.
18443884f7b0STejun Heo  *
18453884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
184676326ac1STejun Heo  *	taken per error.  An action triggered by non-DUBIOUS errors
184776326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
184876326ac1STejun Heo  *	This is to expedite speed down decisions right after device is
184976326ac1STejun Heo  *	initially configured.
18503884f7b0STejun Heo  *
185176326ac1STejun Heo  *	The followings are speed down rules.  #1 and #2 deal with
185276326ac1STejun Heo  *	DUBIOUS errors.
185376326ac1STejun Heo  *
185476326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
185576326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
185676326ac1STejun Heo  *
185776326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
185876326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
185976326ac1STejun Heo  *
186076326ac1STejun Heo  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
18613884f7b0STejun Heo  *	   ocurred during last 5 mins, FALLBACK_TO_PIO
18623884f7b0STejun Heo  *
186376326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
18643884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
18653884f7b0STejun Heo  *
186676326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
18673884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
18687d47e8d4STejun Heo  *
1869c6fd2807SJeff Garzik  *	LOCKING:
1870c6fd2807SJeff Garzik  *	Inherited from caller.
1871c6fd2807SJeff Garzik  *
1872c6fd2807SJeff Garzik  *	RETURNS:
18737d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
1874c6fd2807SJeff Garzik  */
18757d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1876c6fd2807SJeff Garzik {
18777d47e8d4STejun Heo 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
18787d47e8d4STejun Heo 	u64 j64 = get_jiffies_64();
18797d47e8d4STejun Heo 	struct speed_down_verdict_arg arg;
18807d47e8d4STejun Heo 	unsigned int verdict = 0;
1881c6fd2807SJeff Garzik 
18823884f7b0STejun Heo 	/* scan past 5 mins of error history */
18833884f7b0STejun Heo 	memset(&arg, 0, sizeof(arg));
18843884f7b0STejun Heo 	arg.since = j64 - min(j64, j5mins);
18853884f7b0STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
18863884f7b0STejun Heo 
188776326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
188876326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
188976326ac1STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
189076326ac1STejun Heo 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
189176326ac1STejun Heo 
189276326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
189376326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
189476326ac1STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
189576326ac1STejun Heo 
18963884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18973884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1898663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18993884f7b0STejun Heo 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
19003884f7b0STejun Heo 
19017d47e8d4STejun Heo 	/* scan past 10 mins of error history */
1902c6fd2807SJeff Garzik 	memset(&arg, 0, sizeof(arg));
19037d47e8d4STejun Heo 	arg.since = j64 - min(j64, j10mins);
19047d47e8d4STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1905c6fd2807SJeff Garzik 
19063884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
19073884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
19087d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF;
19093884f7b0STejun Heo 
19103884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
19113884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1912663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
19137d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1914c6fd2807SJeff Garzik 
19157d47e8d4STejun Heo 	return verdict;
1916c6fd2807SJeff Garzik }
1917c6fd2807SJeff Garzik 
1918c6fd2807SJeff Garzik /**
1919c6fd2807SJeff Garzik  *	ata_eh_speed_down - record error and speed down if necessary
1920c6fd2807SJeff Garzik  *	@dev: Failed device
19213884f7b0STejun Heo  *	@eflags: mask of ATA_EFLAG_* flags
1922c6fd2807SJeff Garzik  *	@err_mask: err_mask of the error
1923c6fd2807SJeff Garzik  *
1924c6fd2807SJeff Garzik  *	Record error and examine error history to determine whether
1925c6fd2807SJeff Garzik  *	adjusting transmission speed is necessary.  It also sets
1926c6fd2807SJeff Garzik  *	transmission limits appropriately if such adjustment is
1927c6fd2807SJeff Garzik  *	necessary.
1928c6fd2807SJeff Garzik  *
1929c6fd2807SJeff Garzik  *	LOCKING:
1930c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1931c6fd2807SJeff Garzik  *
1932c6fd2807SJeff Garzik  *	RETURNS:
19337d47e8d4STejun Heo  *	Determined recovery action.
1934c6fd2807SJeff Garzik  */
19353884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
19363884f7b0STejun Heo 				unsigned int eflags, unsigned int err_mask)
1937c6fd2807SJeff Garzik {
1938b1c72916STejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
193976326ac1STejun Heo 	int xfer_ok = 0;
19407d47e8d4STejun Heo 	unsigned int verdict;
19417d47e8d4STejun Heo 	unsigned int action = 0;
19427d47e8d4STejun Heo 
19437d47e8d4STejun Heo 	/* don't bother if Cat-0 error */
194476326ac1STejun Heo 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1945c6fd2807SJeff Garzik 		return 0;
1946c6fd2807SJeff Garzik 
1947c6fd2807SJeff Garzik 	/* record error and determine whether speed down is necessary */
19483884f7b0STejun Heo 	ata_ering_record(&dev->ering, eflags, err_mask);
19497d47e8d4STejun Heo 	verdict = ata_eh_speed_down_verdict(dev);
1950c6fd2807SJeff Garzik 
19517d47e8d4STejun Heo 	/* turn off NCQ? */
19527d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
19537d47e8d4STejun Heo 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
19547d47e8d4STejun Heo 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
19557d47e8d4STejun Heo 		dev->flags |= ATA_DFLAG_NCQ_OFF;
19567d47e8d4STejun Heo 		ata_dev_printk(dev, KERN_WARNING,
19577d47e8d4STejun Heo 			       "NCQ disabled due to excessive errors\n");
19587d47e8d4STejun Heo 		goto done;
19597d47e8d4STejun Heo 	}
1960c6fd2807SJeff Garzik 
19617d47e8d4STejun Heo 	/* speed down? */
19627d47e8d4STejun Heo 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1963c6fd2807SJeff Garzik 		/* speed down SATA link speed if possible */
1964a07d499bSTejun Heo 		if (sata_down_spd_limit(link, 0) == 0) {
1965cf480626STejun Heo 			action |= ATA_EH_RESET;
19667d47e8d4STejun Heo 			goto done;
19677d47e8d4STejun Heo 		}
1968c6fd2807SJeff Garzik 
1969c6fd2807SJeff Garzik 		/* lower transfer mode */
19707d47e8d4STejun Heo 		if (dev->spdn_cnt < 2) {
19717d47e8d4STejun Heo 			static const int dma_dnxfer_sel[] =
19727d47e8d4STejun Heo 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
19737d47e8d4STejun Heo 			static const int pio_dnxfer_sel[] =
19747d47e8d4STejun Heo 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
19757d47e8d4STejun Heo 			int sel;
1976c6fd2807SJeff Garzik 
19777d47e8d4STejun Heo 			if (dev->xfer_shift != ATA_SHIFT_PIO)
19787d47e8d4STejun Heo 				sel = dma_dnxfer_sel[dev->spdn_cnt];
19797d47e8d4STejun Heo 			else
19807d47e8d4STejun Heo 				sel = pio_dnxfer_sel[dev->spdn_cnt];
19817d47e8d4STejun Heo 
19827d47e8d4STejun Heo 			dev->spdn_cnt++;
19837d47e8d4STejun Heo 
19847d47e8d4STejun Heo 			if (ata_down_xfermask_limit(dev, sel) == 0) {
1985cf480626STejun Heo 				action |= ATA_EH_RESET;
19867d47e8d4STejun Heo 				goto done;
19877d47e8d4STejun Heo 			}
19887d47e8d4STejun Heo 		}
19897d47e8d4STejun Heo 	}
19907d47e8d4STejun Heo 
19917d47e8d4STejun Heo 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1992663f99b8STejun Heo 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
19937d47e8d4STejun Heo 	 */
19947d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1995663f99b8STejun Heo 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
19967d47e8d4STejun Heo 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
19977d47e8d4STejun Heo 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
19987d47e8d4STejun Heo 			dev->spdn_cnt = 0;
1999cf480626STejun Heo 			action |= ATA_EH_RESET;
20007d47e8d4STejun Heo 			goto done;
20017d47e8d4STejun Heo 		}
20027d47e8d4STejun Heo 	}
20037d47e8d4STejun Heo 
2004c6fd2807SJeff Garzik 	return 0;
20057d47e8d4STejun Heo  done:
20067d47e8d4STejun Heo 	/* device has been slowed down, blow error history */
200776326ac1STejun Heo 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
20087d47e8d4STejun Heo 		ata_ering_clear(&dev->ering);
20097d47e8d4STejun Heo 	return action;
2010c6fd2807SJeff Garzik }
2011c6fd2807SJeff Garzik 
2012c6fd2807SJeff Garzik /**
20139b1e2658STejun Heo  *	ata_eh_link_autopsy - analyze error and determine recovery action
20149b1e2658STejun Heo  *	@link: host link to perform autopsy on
2015c6fd2807SJeff Garzik  *
20160260731fSTejun Heo  *	Analyze why @link failed and determine which recovery actions
20170260731fSTejun Heo  *	are needed.  This function also sets more detailed AC_ERR_*
20180260731fSTejun Heo  *	values and fills sense data for ATAPI CHECK SENSE.
2019c6fd2807SJeff Garzik  *
2020c6fd2807SJeff Garzik  *	LOCKING:
2021c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2022c6fd2807SJeff Garzik  */
20239b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
2024c6fd2807SJeff Garzik {
20250260731fSTejun Heo 	struct ata_port *ap = link->ap;
2026936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2027dfcc173dSTejun Heo 	struct ata_device *dev;
20283884f7b0STejun Heo 	unsigned int all_err_mask = 0, eflags = 0;
20293884f7b0STejun Heo 	int tag;
2030c6fd2807SJeff Garzik 	u32 serror;
2031c6fd2807SJeff Garzik 	int rc;
2032c6fd2807SJeff Garzik 
2033c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
2034c6fd2807SJeff Garzik 
2035c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2036c6fd2807SJeff Garzik 		return;
2037c6fd2807SJeff Garzik 
2038c6fd2807SJeff Garzik 	/* obtain and analyze SError */
2039936fd732STejun Heo 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2040c6fd2807SJeff Garzik 	if (rc == 0) {
2041c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
20420260731fSTejun Heo 		ata_eh_analyze_serror(link);
20434e57c517STejun Heo 	} else if (rc != -EOPNOTSUPP) {
2044cf480626STejun Heo 		/* SError read failed, force reset and probing */
2045b558edddSTejun Heo 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2046cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
20474e57c517STejun Heo 		ehc->i.err_mask |= AC_ERR_OTHER;
20484e57c517STejun Heo 	}
2049c6fd2807SJeff Garzik 
2050c6fd2807SJeff Garzik 	/* analyze NCQ failure */
20510260731fSTejun Heo 	ata_eh_analyze_ncq_error(link);
2052c6fd2807SJeff Garzik 
2053c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
2054c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2055c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2056c6fd2807SJeff Garzik 
2057c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
2058c6fd2807SJeff Garzik 
2059c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2060c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2061c6fd2807SJeff Garzik 
2062b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2063b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link)
2064c6fd2807SJeff Garzik 			continue;
2065c6fd2807SJeff Garzik 
2066c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
2067c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
2068c6fd2807SJeff Garzik 
2069c6fd2807SJeff Garzik 		/* analyze TF */
2070c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2071c6fd2807SJeff Garzik 
2072c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
2073c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
2074c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2075c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
2076c6fd2807SJeff Garzik 
2077c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
2078c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
2079c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
2080c6fd2807SJeff Garzik 
2081c6fd2807SJeff Garzik 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2082f90f0828STejun Heo 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2083c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2084c6fd2807SJeff Garzik 
208503faab78STejun Heo 		/* determine whether the command is worth retrying */
2086534ead70STejun Heo 		if (qc->flags & ATA_QCFLAG_IO ||
2087534ead70STejun Heo 		    (!(qc->err_mask & AC_ERR_INVALID) &&
2088534ead70STejun Heo 		     qc->err_mask != AC_ERR_DEV))
208903faab78STejun Heo 			qc->flags |= ATA_QCFLAG_RETRY;
209003faab78STejun Heo 
2091c6fd2807SJeff Garzik 		/* accumulate error info */
2092c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
2093c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
2094c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
20953884f7b0STejun Heo 			eflags |= ATA_EFLAG_IS_IO;
2096c6fd2807SJeff Garzik 	}
2097c6fd2807SJeff Garzik 
2098c6fd2807SJeff Garzik 	/* enforce default EH actions */
2099c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2100c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2101cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21023884f7b0STejun Heo 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
21033884f7b0STejun Heo 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2104c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
2105c6fd2807SJeff Garzik 
2106dfcc173dSTejun Heo 	/* If we have offending qcs and the associated failed device,
2107dfcc173dSTejun Heo 	 * perform per-dev EH action only on the offending device.
2108dfcc173dSTejun Heo 	 */
2109c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2110c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
2111c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
2112c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2113c6fd2807SJeff Garzik 	}
2114c6fd2807SJeff Garzik 
21152695e366STejun Heo 	/* propagate timeout to host link */
21162695e366STejun Heo 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
21172695e366STejun Heo 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
21182695e366STejun Heo 
21192695e366STejun Heo 	/* record error and consider speeding down */
2120dfcc173dSTejun Heo 	dev = ehc->i.dev;
21212695e366STejun Heo 	if (!dev && ((ata_link_max_devices(link) == 1 &&
21222695e366STejun Heo 		      ata_dev_enabled(link->device))))
2123dfcc173dSTejun Heo 	    dev = link->device;
2124dfcc173dSTejun Heo 
212576326ac1STejun Heo 	if (dev) {
212676326ac1STejun Heo 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
212776326ac1STejun Heo 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
21283884f7b0STejun Heo 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
212976326ac1STejun Heo 	}
2130dfcc173dSTejun Heo 
2131c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
2132c6fd2807SJeff Garzik }
2133c6fd2807SJeff Garzik 
2134c6fd2807SJeff Garzik /**
21359b1e2658STejun Heo  *	ata_eh_autopsy - analyze error and determine recovery action
21369b1e2658STejun Heo  *	@ap: host port to perform autopsy on
21379b1e2658STejun Heo  *
21389b1e2658STejun Heo  *	Analyze all links of @ap and determine why they failed and
21399b1e2658STejun Heo  *	which recovery actions are needed.
21409b1e2658STejun Heo  *
21419b1e2658STejun Heo  *	LOCKING:
21429b1e2658STejun Heo  *	Kernel thread context (may sleep).
21439b1e2658STejun Heo  */
2144fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
21459b1e2658STejun Heo {
21469b1e2658STejun Heo 	struct ata_link *link;
21479b1e2658STejun Heo 
21481eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE)
21499b1e2658STejun Heo 		ata_eh_link_autopsy(link);
21502695e366STejun Heo 
2151b1c72916STejun Heo 	/* Handle the frigging slave link.  Autopsy is done similarly
2152b1c72916STejun Heo 	 * but actions and flags are transferred over to the master
2153b1c72916STejun Heo 	 * link and handled from there.
2154b1c72916STejun Heo 	 */
2155b1c72916STejun Heo 	if (ap->slave_link) {
2156b1c72916STejun Heo 		struct ata_eh_context *mehc = &ap->link.eh_context;
2157b1c72916STejun Heo 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2158b1c72916STejun Heo 
2159848e4c68STejun Heo 		/* transfer control flags from master to slave */
2160848e4c68STejun Heo 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2161848e4c68STejun Heo 
2162848e4c68STejun Heo 		/* perform autopsy on the slave link */
2163b1c72916STejun Heo 		ata_eh_link_autopsy(ap->slave_link);
2164b1c72916STejun Heo 
2165848e4c68STejun Heo 		/* transfer actions from slave to master and clear slave */
2166b1c72916STejun Heo 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2167b1c72916STejun Heo 		mehc->i.action		|= sehc->i.action;
2168b1c72916STejun Heo 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2169b1c72916STejun Heo 		mehc->i.flags		|= sehc->i.flags;
2170b1c72916STejun Heo 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2171b1c72916STejun Heo 	}
2172b1c72916STejun Heo 
21732695e366STejun Heo 	/* Autopsy of fanout ports can affect host link autopsy.
21742695e366STejun Heo 	 * Perform host link autopsy last.
21752695e366STejun Heo 	 */
2176071f44b1STejun Heo 	if (sata_pmp_attached(ap))
21772695e366STejun Heo 		ata_eh_link_autopsy(&ap->link);
21789b1e2658STejun Heo }
21799b1e2658STejun Heo 
21809b1e2658STejun Heo /**
21816521148cSRobert Hancock  *	ata_get_cmd_descript - get description for ATA command
21826521148cSRobert Hancock  *	@command: ATA command code to get description for
21836521148cSRobert Hancock  *
21846521148cSRobert Hancock  *	Return a textual description of the given command, or NULL if the
21856521148cSRobert Hancock  *	command is not known.
21866521148cSRobert Hancock  *
21876521148cSRobert Hancock  *	LOCKING:
21886521148cSRobert Hancock  *	None
21896521148cSRobert Hancock  */
21906521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command)
21916521148cSRobert Hancock {
21926521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
21936521148cSRobert Hancock 	static const struct
21946521148cSRobert Hancock 	{
21956521148cSRobert Hancock 		u8 command;
21966521148cSRobert Hancock 		const char *text;
21976521148cSRobert Hancock 	} cmd_descr[] = {
21986521148cSRobert Hancock 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
21996521148cSRobert Hancock 		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
22006521148cSRobert Hancock 		{ ATA_CMD_STANDBY, 		"STANDBY" },
22016521148cSRobert Hancock 		{ ATA_CMD_IDLE, 		"IDLE" },
22026521148cSRobert Hancock 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
22036521148cSRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
22046521148cSRobert Hancock 		{ ATA_CMD_NOP,			"NOP" },
22056521148cSRobert Hancock 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
22066521148cSRobert Hancock 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
22076521148cSRobert Hancock 		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
22086521148cSRobert Hancock 		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
22096521148cSRobert Hancock 		{ ATA_CMD_SERVICE, 		"SERVICE" },
22106521148cSRobert Hancock 		{ ATA_CMD_READ, 		"READ DMA" },
22116521148cSRobert Hancock 		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
22126521148cSRobert Hancock 		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
22136521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
22146521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
22156521148cSRobert Hancock 		{ ATA_CMD_WRITE, 		"WRITE DMA" },
22166521148cSRobert Hancock 		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
22176521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
22186521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
22196521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
22206521148cSRobert Hancock 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
22216521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
22226521148cSRobert Hancock 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
22236521148cSRobert Hancock 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
22246521148cSRobert Hancock 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
22256521148cSRobert Hancock 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
22266521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
22276521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
22286521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
22296521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
22306521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
22316521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
22326521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
22336521148cSRobert Hancock 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
22346521148cSRobert Hancock 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
22356521148cSRobert Hancock 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
22366521148cSRobert Hancock 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
22376521148cSRobert Hancock 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
22386521148cSRobert Hancock 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
22396521148cSRobert Hancock 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
22406521148cSRobert Hancock 		{ ATA_CMD_SLEEP,		"SLEEP" },
22416521148cSRobert Hancock 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
22426521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
22436521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
22446521148cSRobert Hancock 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
22456521148cSRobert Hancock 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
22466521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
22476521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
22486521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
22496521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
22506521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
22516521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
22526521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
22536521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
22546521148cSRobert Hancock 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
22556521148cSRobert Hancock 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
22566521148cSRobert Hancock 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
22576521148cSRobert Hancock 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
22586521148cSRobert Hancock 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
22596521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
22606521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
22616521148cSRobert Hancock 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
22626521148cSRobert Hancock 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
22636521148cSRobert Hancock 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
22646521148cSRobert Hancock 		{ ATA_CMD_SMART,		"SMART" },
22656521148cSRobert Hancock 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
22666521148cSRobert Hancock 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2267acad7627SFUJITA Tomonori 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
22686521148cSRobert Hancock 		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
22696521148cSRobert Hancock 		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
22706521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
22716521148cSRobert Hancock 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
22726521148cSRobert Hancock 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
22736521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
22746521148cSRobert Hancock 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
22756521148cSRobert Hancock 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
22766521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
22776521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
22786521148cSRobert Hancock 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
22796521148cSRobert Hancock 		{ 0,				NULL } /* terminate list */
22806521148cSRobert Hancock 	};
22816521148cSRobert Hancock 
22826521148cSRobert Hancock 	unsigned int i;
22836521148cSRobert Hancock 	for (i = 0; cmd_descr[i].text; i++)
22846521148cSRobert Hancock 		if (cmd_descr[i].command == command)
22856521148cSRobert Hancock 			return cmd_descr[i].text;
22866521148cSRobert Hancock #endif
22876521148cSRobert Hancock 
22886521148cSRobert Hancock 	return NULL;
22896521148cSRobert Hancock }
22906521148cSRobert Hancock 
22916521148cSRobert Hancock /**
22929b1e2658STejun Heo  *	ata_eh_link_report - report error handling to user
22930260731fSTejun Heo  *	@link: ATA link EH is going on
2294c6fd2807SJeff Garzik  *
2295c6fd2807SJeff Garzik  *	Report EH to user.
2296c6fd2807SJeff Garzik  *
2297c6fd2807SJeff Garzik  *	LOCKING:
2298c6fd2807SJeff Garzik  *	None.
2299c6fd2807SJeff Garzik  */
23009b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2301c6fd2807SJeff Garzik {
23020260731fSTejun Heo 	struct ata_port *ap = link->ap;
23030260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2304c6fd2807SJeff Garzik 	const char *frozen, *desc;
2305a1e10f7eSTejun Heo 	char tries_buf[6];
2306c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
2307c6fd2807SJeff Garzik 
230894ff3d54STejun Heo 	if (ehc->i.flags & ATA_EHI_QUIET)
230994ff3d54STejun Heo 		return;
231094ff3d54STejun Heo 
2311c6fd2807SJeff Garzik 	desc = NULL;
2312c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
2313c6fd2807SJeff Garzik 		desc = ehc->i.desc;
2314c6fd2807SJeff Garzik 
2315c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2316c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2317c6fd2807SJeff Garzik 
2318b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2319b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link ||
2320e027bd36STejun Heo 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2321e027bd36STejun Heo 		     qc->err_mask == AC_ERR_DEV))
2322c6fd2807SJeff Garzik 			continue;
2323c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2324c6fd2807SJeff Garzik 			continue;
2325c6fd2807SJeff Garzik 
2326c6fd2807SJeff Garzik 		nr_failed++;
2327c6fd2807SJeff Garzik 	}
2328c6fd2807SJeff Garzik 
2329c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
2330c6fd2807SJeff Garzik 		return;
2331c6fd2807SJeff Garzik 
2332c6fd2807SJeff Garzik 	frozen = "";
2333c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
2334c6fd2807SJeff Garzik 		frozen = " frozen";
2335c6fd2807SJeff Garzik 
2336a1e10f7eSTejun Heo 	memset(tries_buf, 0, sizeof(tries_buf));
2337a1e10f7eSTejun Heo 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2338a1e10f7eSTejun Heo 		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2339a1e10f7eSTejun Heo 			 ap->eh_tries);
2340a1e10f7eSTejun Heo 
2341c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2342c6fd2807SJeff Garzik 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2343a1e10f7eSTejun Heo 			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2344a1e10f7eSTejun Heo 			       ehc->i.err_mask, link->sactive, ehc->i.serror,
2345a1e10f7eSTejun Heo 			       ehc->i.action, frozen, tries_buf);
2346c6fd2807SJeff Garzik 		if (desc)
2347b64bbc39STejun Heo 			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2348c6fd2807SJeff Garzik 	} else {
23490260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2350a1e10f7eSTejun Heo 				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2351a1e10f7eSTejun Heo 				ehc->i.err_mask, link->sactive, ehc->i.serror,
2352a1e10f7eSTejun Heo 				ehc->i.action, frozen, tries_buf);
2353c6fd2807SJeff Garzik 		if (desc)
23540260731fSTejun Heo 			ata_link_printk(link, KERN_ERR, "%s\n", desc);
2355c6fd2807SJeff Garzik 	}
2356c6fd2807SJeff Garzik 
23576521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
23581333e194SRobert Hancock 	if (ehc->i.serror)
2359da0e21d3STejun Heo 		ata_link_printk(link, KERN_ERR,
23601333e194SRobert Hancock 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
23611333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
23621333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
23631333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
23641333e194SRobert Hancock 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
23651333e194SRobert Hancock 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
23661333e194SRobert Hancock 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
23671333e194SRobert Hancock 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
23681333e194SRobert Hancock 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
23691333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
23701333e194SRobert Hancock 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
23711333e194SRobert Hancock 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
23721333e194SRobert Hancock 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
23731333e194SRobert Hancock 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
23741333e194SRobert Hancock 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
23751333e194SRobert Hancock 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
23761333e194SRobert Hancock 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
23771333e194SRobert Hancock 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
23786521148cSRobert Hancock #endif
23791333e194SRobert Hancock 
2380c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2381c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
23828a937581STejun Heo 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2383abb6a889STejun Heo 		const u8 *cdb = qc->cdb;
2384abb6a889STejun Heo 		char data_buf[20] = "";
2385abb6a889STejun Heo 		char cdb_buf[70] = "";
2386c6fd2807SJeff Garzik 
23870260731fSTejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2388b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2389c6fd2807SJeff Garzik 			continue;
2390c6fd2807SJeff Garzik 
2391abb6a889STejun Heo 		if (qc->dma_dir != DMA_NONE) {
2392abb6a889STejun Heo 			static const char *dma_str[] = {
2393abb6a889STejun Heo 				[DMA_BIDIRECTIONAL]	= "bidi",
2394abb6a889STejun Heo 				[DMA_TO_DEVICE]		= "out",
2395abb6a889STejun Heo 				[DMA_FROM_DEVICE]	= "in",
2396abb6a889STejun Heo 			};
2397abb6a889STejun Heo 			static const char *prot_str[] = {
2398abb6a889STejun Heo 				[ATA_PROT_PIO]		= "pio",
2399abb6a889STejun Heo 				[ATA_PROT_DMA]		= "dma",
2400abb6a889STejun Heo 				[ATA_PROT_NCQ]		= "ncq",
24010dc36888STejun Heo 				[ATAPI_PROT_PIO]	= "pio",
24020dc36888STejun Heo 				[ATAPI_PROT_DMA]	= "dma",
2403abb6a889STejun Heo 			};
2404abb6a889STejun Heo 
2405abb6a889STejun Heo 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2406abb6a889STejun Heo 				 prot_str[qc->tf.protocol], qc->nbytes,
2407abb6a889STejun Heo 				 dma_str[qc->dma_dir]);
2408abb6a889STejun Heo 		}
2409abb6a889STejun Heo 
24106521148cSRobert Hancock 		if (ata_is_atapi(qc->tf.protocol)) {
24116521148cSRobert Hancock 			if (qc->scsicmd)
24126521148cSRobert Hancock 				scsi_print_command(qc->scsicmd);
24136521148cSRobert Hancock 			else
2414abb6a889STejun Heo 				snprintf(cdb_buf, sizeof(cdb_buf),
2415abb6a889STejun Heo 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2416abb6a889STejun Heo 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2417abb6a889STejun Heo 				 cdb[0], cdb[1], cdb[2], cdb[3],
2418abb6a889STejun Heo 				 cdb[4], cdb[5], cdb[6], cdb[7],
2419abb6a889STejun Heo 				 cdb[8], cdb[9], cdb[10], cdb[11],
2420abb6a889STejun Heo 				 cdb[12], cdb[13], cdb[14], cdb[15]);
24216521148cSRobert Hancock 		} else {
24226521148cSRobert Hancock 			const char *descr = ata_get_cmd_descript(cmd->command);
24236521148cSRobert Hancock 			if (descr)
24246521148cSRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
24256521148cSRobert Hancock 					"failed command: %s\n", descr);
24266521148cSRobert Hancock 		}
2427abb6a889STejun Heo 
24288a937581STejun Heo 		ata_dev_printk(qc->dev, KERN_ERR,
24298a937581STejun Heo 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2430abb6a889STejun Heo 			"tag %d%s\n         %s"
24318a937581STejun Heo 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
24325335b729STejun Heo 			"Emask 0x%x (%s)%s\n",
24338a937581STejun Heo 			cmd->command, cmd->feature, cmd->nsect,
24348a937581STejun Heo 			cmd->lbal, cmd->lbam, cmd->lbah,
24358a937581STejun Heo 			cmd->hob_feature, cmd->hob_nsect,
24368a937581STejun Heo 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2437abb6a889STejun Heo 			cmd->device, qc->tag, data_buf, cdb_buf,
24388a937581STejun Heo 			res->command, res->feature, res->nsect,
24398a937581STejun Heo 			res->lbal, res->lbam, res->lbah,
24408a937581STejun Heo 			res->hob_feature, res->hob_nsect,
24418a937581STejun Heo 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
24425335b729STejun Heo 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
24435335b729STejun Heo 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
24441333e194SRobert Hancock 
24456521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
24461333e194SRobert Hancock 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
24471333e194SRobert Hancock 				    ATA_ERR)) {
24481333e194SRobert Hancock 			if (res->command & ATA_BUSY)
24491333e194SRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
24501333e194SRobert Hancock 				  "status: { Busy }\n");
24511333e194SRobert Hancock 			else
24521333e194SRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
24531333e194SRobert Hancock 				  "status: { %s%s%s%s}\n",
24541333e194SRobert Hancock 				  res->command & ATA_DRDY ? "DRDY " : "",
24551333e194SRobert Hancock 				  res->command & ATA_DF ? "DF " : "",
24561333e194SRobert Hancock 				  res->command & ATA_DRQ ? "DRQ " : "",
24571333e194SRobert Hancock 				  res->command & ATA_ERR ? "ERR " : "");
24581333e194SRobert Hancock 		}
24591333e194SRobert Hancock 
24601333e194SRobert Hancock 		if (cmd->command != ATA_CMD_PACKET &&
24611333e194SRobert Hancock 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
24621333e194SRobert Hancock 				     ATA_ABORTED)))
24631333e194SRobert Hancock 			ata_dev_printk(qc->dev, KERN_ERR,
24641333e194SRobert Hancock 			  "error: { %s%s%s%s}\n",
24651333e194SRobert Hancock 			  res->feature & ATA_ICRC ? "ICRC " : "",
24661333e194SRobert Hancock 			  res->feature & ATA_UNC ? "UNC " : "",
24671333e194SRobert Hancock 			  res->feature & ATA_IDNF ? "IDNF " : "",
24681333e194SRobert Hancock 			  res->feature & ATA_ABORTED ? "ABRT " : "");
24696521148cSRobert Hancock #endif
2470c6fd2807SJeff Garzik 	}
2471c6fd2807SJeff Garzik }
2472c6fd2807SJeff Garzik 
24739b1e2658STejun Heo /**
24749b1e2658STejun Heo  *	ata_eh_report - report error handling to user
24759b1e2658STejun Heo  *	@ap: ATA port to report EH about
24769b1e2658STejun Heo  *
24779b1e2658STejun Heo  *	Report EH to user.
24789b1e2658STejun Heo  *
24799b1e2658STejun Heo  *	LOCKING:
24809b1e2658STejun Heo  *	None.
24819b1e2658STejun Heo  */
2482fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
24839b1e2658STejun Heo {
24849b1e2658STejun Heo 	struct ata_link *link;
24859b1e2658STejun Heo 
24861eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
24879b1e2658STejun Heo 		ata_eh_link_report(link);
24889b1e2658STejun Heo }
24899b1e2658STejun Heo 
2490cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2491b1c72916STejun Heo 			unsigned int *classes, unsigned long deadline,
2492b1c72916STejun Heo 			bool clear_classes)
2493c6fd2807SJeff Garzik {
2494f58229f8STejun Heo 	struct ata_device *dev;
2495c6fd2807SJeff Garzik 
2496b1c72916STejun Heo 	if (clear_classes)
24971eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
2498f58229f8STejun Heo 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2499c6fd2807SJeff Garzik 
2500f046519fSTejun Heo 	return reset(link, classes, deadline);
2501c6fd2807SJeff Garzik }
2502c6fd2807SJeff Garzik 
2503ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link,
25045dbfc9cbSTejun Heo 				       int rc, const unsigned int *classes)
2505c6fd2807SJeff Garzik {
250645db2f6cSTejun Heo 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2507ae791c05STejun Heo 		return 0;
25085dbfc9cbSTejun Heo 	if (rc == -EAGAIN)
2509c6fd2807SJeff Garzik 		return 1;
2510071f44b1STejun Heo 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
25113495de73STejun Heo 		return 1;
2512c6fd2807SJeff Garzik 	return 0;
2513c6fd2807SJeff Garzik }
2514c6fd2807SJeff Garzik 
2515fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2516c6fd2807SJeff Garzik 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2517c6fd2807SJeff Garzik 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2518c6fd2807SJeff Garzik {
2519afaa5c37STejun Heo 	struct ata_port *ap = link->ap;
2520b1c72916STejun Heo 	struct ata_link *slave = ap->slave_link;
2521936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2522705d2014SBartlomiej Zolnierkiewicz 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2523c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
2524416dc9edSTejun Heo 	unsigned int lflags = link->flags;
2525c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2526d8af0eb6STejun Heo 	int max_tries = 0, try = 0;
2527b1c72916STejun Heo 	struct ata_link *failed_link;
2528f58229f8STejun Heo 	struct ata_device *dev;
2529416dc9edSTejun Heo 	unsigned long deadline, now;
2530c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
2531afaa5c37STejun Heo 	unsigned long flags;
2532416dc9edSTejun Heo 	u32 sstatus;
2533b1c72916STejun Heo 	int nr_unknown, rc;
2534c6fd2807SJeff Garzik 
2535932648b0STejun Heo 	/*
2536932648b0STejun Heo 	 * Prepare to reset
2537932648b0STejun Heo 	 */
2538d8af0eb6STejun Heo 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2539d8af0eb6STejun Heo 		max_tries++;
254005944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_HRST)
254105944bdfSTejun Heo 		hardreset = NULL;
254205944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_SRST)
254305944bdfSTejun Heo 		softreset = NULL;
2544d8af0eb6STejun Heo 
254519b72321STejun Heo 	/* make sure each reset attemp is at least COOL_DOWN apart */
254619b72321STejun Heo 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
25470a2c0f56STejun Heo 		now = jiffies;
254819b72321STejun Heo 		WARN_ON(time_after(ehc->last_reset, now));
254919b72321STejun Heo 		deadline = ata_deadline(ehc->last_reset,
255019b72321STejun Heo 					ATA_EH_RESET_COOL_DOWN);
25510a2c0f56STejun Heo 		if (time_before(now, deadline))
25520a2c0f56STejun Heo 			schedule_timeout_uninterruptible(deadline - now);
255319b72321STejun Heo 	}
25540a2c0f56STejun Heo 
2555afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2556afaa5c37STejun Heo 	ap->pflags |= ATA_PFLAG_RESETTING;
2557afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2558afaa5c37STejun Heo 
2559cf480626STejun Heo 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2560c6fd2807SJeff Garzik 
25611eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2562cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2563cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2564cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2565cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2566cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2567cdeab114STejun Heo 		 * bus as we may be talking too fast.
2568cdeab114STejun Heo 		 */
2569cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2570cdeab114STejun Heo 
2571cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2572cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2573cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2574cdeab114STejun Heo 		 * configuring devices.
2575cdeab114STejun Heo 		 */
2576cdeab114STejun Heo 		if (ap->ops->set_piomode)
2577cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2578cdeab114STejun Heo 	}
2579cdeab114STejun Heo 
2580cf480626STejun Heo 	/* prefer hardreset */
2581932648b0STejun Heo 	reset = NULL;
2582cf480626STejun Heo 	ehc->i.action &= ~ATA_EH_RESET;
2583cf480626STejun Heo 	if (hardreset) {
2584cf480626STejun Heo 		reset = hardreset;
2585a674050eSTejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
25864f7faa3fSTejun Heo 	} else if (softreset) {
2587cf480626STejun Heo 		reset = softreset;
2588a674050eSTejun Heo 		ehc->i.action |= ATA_EH_SOFTRESET;
2589cf480626STejun Heo 	}
2590c6fd2807SJeff Garzik 
2591c6fd2807SJeff Garzik 	if (prereset) {
2592b1c72916STejun Heo 		unsigned long deadline = ata_deadline(jiffies,
2593b1c72916STejun Heo 						      ATA_EH_PRERESET_TIMEOUT);
2594b1c72916STejun Heo 
2595b1c72916STejun Heo 		if (slave) {
2596b1c72916STejun Heo 			sehc->i.action &= ~ATA_EH_RESET;
2597b1c72916STejun Heo 			sehc->i.action |= ehc->i.action;
2598b1c72916STejun Heo 		}
2599b1c72916STejun Heo 
2600b1c72916STejun Heo 		rc = prereset(link, deadline);
2601b1c72916STejun Heo 
2602b1c72916STejun Heo 		/* If present, do prereset on slave link too.  Reset
2603b1c72916STejun Heo 		 * is skipped iff both master and slave links report
2604b1c72916STejun Heo 		 * -ENOENT or clear ATA_EH_RESET.
2605b1c72916STejun Heo 		 */
2606b1c72916STejun Heo 		if (slave && (rc == 0 || rc == -ENOENT)) {
2607b1c72916STejun Heo 			int tmp;
2608b1c72916STejun Heo 
2609b1c72916STejun Heo 			tmp = prereset(slave, deadline);
2610b1c72916STejun Heo 			if (tmp != -ENOENT)
2611b1c72916STejun Heo 				rc = tmp;
2612b1c72916STejun Heo 
2613b1c72916STejun Heo 			ehc->i.action |= sehc->i.action;
2614b1c72916STejun Heo 		}
2615b1c72916STejun Heo 
2616c6fd2807SJeff Garzik 		if (rc) {
2617c961922bSAlan Cox 			if (rc == -ENOENT) {
2618cc0680a5STejun Heo 				ata_link_printk(link, KERN_DEBUG,
26194aa9ab67STejun Heo 						"port disabled. ignoring.\n");
2620cf480626STejun Heo 				ehc->i.action &= ~ATA_EH_RESET;
26214aa9ab67STejun Heo 
26221eca4365STejun Heo 				ata_for_each_dev(dev, link, ALL)
2623f58229f8STejun Heo 					classes[dev->devno] = ATA_DEV_NONE;
26244aa9ab67STejun Heo 
26254aa9ab67STejun Heo 				rc = 0;
2626c961922bSAlan Cox 			} else
2627cc0680a5STejun Heo 				ata_link_printk(link, KERN_ERR,
2628c6fd2807SJeff Garzik 					"prereset failed (errno=%d)\n", rc);
2629fccb6ea5STejun Heo 			goto out;
2630c6fd2807SJeff Garzik 		}
2631c6fd2807SJeff Garzik 
2632932648b0STejun Heo 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2633d6515e6fSTejun Heo 		 * bang classes, thaw and return.
2634932648b0STejun Heo 		 */
2635932648b0STejun Heo 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
26361eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
2637f58229f8STejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2638d6515e6fSTejun Heo 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2639d6515e6fSTejun Heo 			    ata_is_host_link(link))
2640d6515e6fSTejun Heo 				ata_eh_thaw_port(ap);
2641fccb6ea5STejun Heo 			rc = 0;
2642fccb6ea5STejun Heo 			goto out;
2643c6fd2807SJeff Garzik 		}
2644932648b0STejun Heo 	}
2645c6fd2807SJeff Garzik 
2646c6fd2807SJeff Garzik  retry:
2647932648b0STejun Heo 	/*
2648932648b0STejun Heo 	 * Perform reset
2649932648b0STejun Heo 	 */
2650dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2651dc98c32cSTejun Heo 		ata_eh_freeze_port(ap);
2652dc98c32cSTejun Heo 
2653341c2c95STejun Heo 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
265431daabdaSTejun Heo 
2655932648b0STejun Heo 	if (reset) {
2656c6fd2807SJeff Garzik 		if (verbose)
2657cc0680a5STejun Heo 			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2658c6fd2807SJeff Garzik 					reset == softreset ? "soft" : "hard");
2659c6fd2807SJeff Garzik 
2660c6fd2807SJeff Garzik 		/* mark that this EH session started with reset */
266119b72321STejun Heo 		ehc->last_reset = jiffies;
26620d64a233STejun Heo 		if (reset == hardreset)
26630d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
26640d64a233STejun Heo 		else
26650d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2666c6fd2807SJeff Garzik 
2667b1c72916STejun Heo 		rc = ata_do_reset(link, reset, classes, deadline, true);
2668b1c72916STejun Heo 		if (rc && rc != -EAGAIN) {
2669b1c72916STejun Heo 			failed_link = link;
26705dbfc9cbSTejun Heo 			goto fail;
2671b1c72916STejun Heo 		}
2672c6fd2807SJeff Garzik 
2673b1c72916STejun Heo 		/* hardreset slave link if existent */
2674b1c72916STejun Heo 		if (slave && reset == hardreset) {
2675b1c72916STejun Heo 			int tmp;
2676b1c72916STejun Heo 
2677b1c72916STejun Heo 			if (verbose)
2678b1c72916STejun Heo 				ata_link_printk(slave, KERN_INFO,
2679b1c72916STejun Heo 						"hard resetting link\n");
2680b1c72916STejun Heo 
2681b1c72916STejun Heo 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2682b1c72916STejun Heo 			tmp = ata_do_reset(slave, reset, classes, deadline,
2683b1c72916STejun Heo 					   false);
2684b1c72916STejun Heo 			switch (tmp) {
2685b1c72916STejun Heo 			case -EAGAIN:
2686b1c72916STejun Heo 				rc = -EAGAIN;
2687b1c72916STejun Heo 			case 0:
2688b1c72916STejun Heo 				break;
2689b1c72916STejun Heo 			default:
2690b1c72916STejun Heo 				failed_link = slave;
2691b1c72916STejun Heo 				rc = tmp;
2692b1c72916STejun Heo 				goto fail;
2693b1c72916STejun Heo 			}
2694b1c72916STejun Heo 		}
2695b1c72916STejun Heo 
2696b1c72916STejun Heo 		/* perform follow-up SRST if necessary */
2697c6fd2807SJeff Garzik 		if (reset == hardreset &&
26985dbfc9cbSTejun Heo 		    ata_eh_followup_srst_needed(link, rc, classes)) {
2699c6fd2807SJeff Garzik 			reset = softreset;
2700c6fd2807SJeff Garzik 
2701c6fd2807SJeff Garzik 			if (!reset) {
2702cc0680a5STejun Heo 				ata_link_printk(link, KERN_ERR,
2703c6fd2807SJeff Garzik 						"follow-up softreset required "
2704c6fd2807SJeff Garzik 						"but no softreset avaliable\n");
2705b1c72916STejun Heo 				failed_link = link;
2706fccb6ea5STejun Heo 				rc = -EINVAL;
270708cf69d0STejun Heo 				goto fail;
2708c6fd2807SJeff Garzik 			}
2709c6fd2807SJeff Garzik 
2710cf480626STejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2711b1c72916STejun Heo 			rc = ata_do_reset(link, reset, classes, deadline, true);
2712fe2c4d01STejun Heo 			if (rc) {
2713fe2c4d01STejun Heo 				failed_link = link;
2714fe2c4d01STejun Heo 				goto fail;
2715fe2c4d01STejun Heo 			}
2716c6fd2807SJeff Garzik 		}
2717932648b0STejun Heo 	} else {
2718932648b0STejun Heo 		if (verbose)
2719932648b0STejun Heo 			ata_link_printk(link, KERN_INFO, "no reset method "
2720932648b0STejun Heo 					"available, skipping reset\n");
2721932648b0STejun Heo 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2722932648b0STejun Heo 			lflags |= ATA_LFLAG_ASSUME_ATA;
2723932648b0STejun Heo 	}
2724008a7896STejun Heo 
2725932648b0STejun Heo 	/*
2726932648b0STejun Heo 	 * Post-reset processing
2727932648b0STejun Heo 	 */
27281eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2729416dc9edSTejun Heo 		/* After the reset, the device state is PIO 0 and the
2730416dc9edSTejun Heo 		 * controller state is undefined.  Reset also wakes up
2731416dc9edSTejun Heo 		 * drives from sleeping mode.
2732c6fd2807SJeff Garzik 		 */
2733f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2734054a5fbaSTejun Heo 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2735c6fd2807SJeff Garzik 
27363b761d3dSTejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
27373b761d3dSTejun Heo 			continue;
27383b761d3dSTejun Heo 
27394ccd3329STejun Heo 		/* apply class override */
2740416dc9edSTejun Heo 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2741ae791c05STejun Heo 			classes[dev->devno] = ATA_DEV_ATA;
2742416dc9edSTejun Heo 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2743816ab897STejun Heo 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2744ae791c05STejun Heo 	}
2745ae791c05STejun Heo 
2746008a7896STejun Heo 	/* record current link speed */
2747936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2748936fd732STejun Heo 		link->sata_spd = (sstatus >> 4) & 0xf;
2749b1c72916STejun Heo 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2750b1c72916STejun Heo 		slave->sata_spd = (sstatus >> 4) & 0xf;
2751008a7896STejun Heo 
2752dc98c32cSTejun Heo 	/* thaw the port */
2753dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2754dc98c32cSTejun Heo 		ata_eh_thaw_port(ap);
2755dc98c32cSTejun Heo 
2756f046519fSTejun Heo 	/* postreset() should clear hardware SError.  Although SError
2757f046519fSTejun Heo 	 * is cleared during link resume, clearing SError here is
2758f046519fSTejun Heo 	 * necessary as some PHYs raise hotplug events after SRST.
2759f046519fSTejun Heo 	 * This introduces race condition where hotplug occurs between
2760f046519fSTejun Heo 	 * reset and here.  This race is mediated by cross checking
2761f046519fSTejun Heo 	 * link onlineness and classification result later.
2762f046519fSTejun Heo 	 */
2763b1c72916STejun Heo 	if (postreset) {
2764cc0680a5STejun Heo 		postreset(link, classes);
2765b1c72916STejun Heo 		if (slave)
2766b1c72916STejun Heo 			postreset(slave, classes);
2767b1c72916STejun Heo 	}
2768c6fd2807SJeff Garzik 
27691e641060STejun Heo 	/*
27701e641060STejun Heo 	 * Some controllers can't be frozen very well and may set
27711e641060STejun Heo 	 * spuruious error conditions during reset.  Clear accumulated
27721e641060STejun Heo 	 * error information.  As reset is the final recovery action,
27731e641060STejun Heo 	 * nothing is lost by doing this.
27741e641060STejun Heo 	 */
2775f046519fSTejun Heo 	spin_lock_irqsave(link->ap->lock, flags);
27761e641060STejun Heo 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2777b1c72916STejun Heo 	if (slave)
27781e641060STejun Heo 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
27791e641060STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2780f046519fSTejun Heo 	spin_unlock_irqrestore(link->ap->lock, flags);
2781f046519fSTejun Heo 
27823b761d3dSTejun Heo 	/*
27833b761d3dSTejun Heo 	 * Make sure onlineness and classification result correspond.
2784f046519fSTejun Heo 	 * Hotplug could have happened during reset and some
2785f046519fSTejun Heo 	 * controllers fail to wait while a drive is spinning up after
2786f046519fSTejun Heo 	 * being hotplugged causing misdetection.  By cross checking
27873b761d3dSTejun Heo 	 * link on/offlineness and classification result, those
27883b761d3dSTejun Heo 	 * conditions can be reliably detected and retried.
2789f046519fSTejun Heo 	 */
2790b1c72916STejun Heo 	nr_unknown = 0;
27911eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
27923b761d3dSTejun Heo 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2793b1c72916STejun Heo 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
27943b761d3dSTejun Heo 				ata_dev_printk(dev, KERN_DEBUG, "link online "
27953b761d3dSTejun Heo 					       "but device misclassifed\n");
2796f046519fSTejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2797b1c72916STejun Heo 				nr_unknown++;
2798b1c72916STejun Heo 			}
27993b761d3dSTejun Heo 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
28003b761d3dSTejun Heo 			if (ata_class_enabled(classes[dev->devno]))
28013b761d3dSTejun Heo 				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
28023b761d3dSTejun Heo 					       "clearing class %d to NONE\n",
28033b761d3dSTejun Heo 					       classes[dev->devno]);
28043b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
28053b761d3dSTejun Heo 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
28063b761d3dSTejun Heo 			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
28073b761d3dSTejun Heo 				       "clearing UNKNOWN to NONE\n");
28083b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
28093b761d3dSTejun Heo 		}
2810f046519fSTejun Heo 	}
2811f046519fSTejun Heo 
2812b1c72916STejun Heo 	if (classify && nr_unknown) {
2813f046519fSTejun Heo 		if (try < max_tries) {
2814f046519fSTejun Heo 			ata_link_printk(link, KERN_WARNING, "link online but "
28153b761d3dSTejun Heo 					"%d devices misclassified, retrying\n",
28163b761d3dSTejun Heo 					nr_unknown);
2817b1c72916STejun Heo 			failed_link = link;
2818f046519fSTejun Heo 			rc = -EAGAIN;
2819f046519fSTejun Heo 			goto fail;
2820f046519fSTejun Heo 		}
2821f046519fSTejun Heo 		ata_link_printk(link, KERN_WARNING,
28223b761d3dSTejun Heo 				"link online but %d devices misclassified, "
28233b761d3dSTejun Heo 				"device detection might fail\n", nr_unknown);
2824f046519fSTejun Heo 	}
2825f046519fSTejun Heo 
2826c6fd2807SJeff Garzik 	/* reset successful, schedule revalidation */
2827cf480626STejun Heo 	ata_eh_done(link, NULL, ATA_EH_RESET);
2828b1c72916STejun Heo 	if (slave)
2829b1c72916STejun Heo 		ata_eh_done(slave, NULL, ATA_EH_RESET);
283019b72321STejun Heo 	ehc->last_reset = jiffies;		/* update to completion time */
2831c6fd2807SJeff Garzik 	ehc->i.action |= ATA_EH_REVALIDATE;
28326b7ae954STejun Heo 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
2833416dc9edSTejun Heo 
2834416dc9edSTejun Heo 	rc = 0;
2835fccb6ea5STejun Heo  out:
2836fccb6ea5STejun Heo 	/* clear hotplug flag */
2837fccb6ea5STejun Heo 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2838b1c72916STejun Heo 	if (slave)
2839b1c72916STejun Heo 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2840afaa5c37STejun Heo 
2841afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2842afaa5c37STejun Heo 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2843afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2844afaa5c37STejun Heo 
2845c6fd2807SJeff Garzik 	return rc;
2846416dc9edSTejun Heo 
2847416dc9edSTejun Heo  fail:
28485958e302STejun Heo 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
28495958e302STejun Heo 	if (!ata_is_host_link(link) &&
28505958e302STejun Heo 	    sata_scr_read(link, SCR_STATUS, &sstatus))
28515958e302STejun Heo 		rc = -ERESTART;
28525958e302STejun Heo 
2853416dc9edSTejun Heo 	if (rc == -ERESTART || try >= max_tries)
2854416dc9edSTejun Heo 		goto out;
2855416dc9edSTejun Heo 
2856416dc9edSTejun Heo 	now = jiffies;
2857416dc9edSTejun Heo 	if (time_before(now, deadline)) {
2858416dc9edSTejun Heo 		unsigned long delta = deadline - now;
2859416dc9edSTejun Heo 
2860b1c72916STejun Heo 		ata_link_printk(failed_link, KERN_WARNING,
28610a2c0f56STejun Heo 			"reset failed (errno=%d), retrying in %u secs\n",
28620a2c0f56STejun Heo 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2863416dc9edSTejun Heo 
2864c0c362b6STejun Heo 		ata_eh_release(ap);
2865416dc9edSTejun Heo 		while (delta)
2866416dc9edSTejun Heo 			delta = schedule_timeout_uninterruptible(delta);
2867c0c362b6STejun Heo 		ata_eh_acquire(ap);
2868416dc9edSTejun Heo 	}
2869416dc9edSTejun Heo 
2870b1c72916STejun Heo 	if (try == max_tries - 1) {
2871a07d499bSTejun Heo 		sata_down_spd_limit(link, 0);
2872b1c72916STejun Heo 		if (slave)
2873a07d499bSTejun Heo 			sata_down_spd_limit(slave, 0);
2874b1c72916STejun Heo 	} else if (rc == -EPIPE)
2875a07d499bSTejun Heo 		sata_down_spd_limit(failed_link, 0);
2876b1c72916STejun Heo 
2877416dc9edSTejun Heo 	if (hardreset)
2878416dc9edSTejun Heo 		reset = hardreset;
2879416dc9edSTejun Heo 	goto retry;
2880c6fd2807SJeff Garzik }
2881c6fd2807SJeff Garzik 
288245fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
288345fabbb7SElias Oltmanns {
288445fabbb7SElias Oltmanns 	struct ata_link *link;
288545fabbb7SElias Oltmanns 	struct ata_device *dev;
288645fabbb7SElias Oltmanns 	unsigned long flags;
288745fabbb7SElias Oltmanns 
288845fabbb7SElias Oltmanns 	/*
288945fabbb7SElias Oltmanns 	 * This function can be thought of as an extended version of
289045fabbb7SElias Oltmanns 	 * ata_eh_about_to_do() specially crafted to accommodate the
289145fabbb7SElias Oltmanns 	 * requirements of ATA_EH_PARK handling. Since the EH thread
289245fabbb7SElias Oltmanns 	 * does not leave the do {} while () loop in ata_eh_recover as
289345fabbb7SElias Oltmanns 	 * long as the timeout for a park request to *one* device on
289445fabbb7SElias Oltmanns 	 * the port has not expired, and since we still want to pick
289545fabbb7SElias Oltmanns 	 * up park requests to other devices on the same port or
289645fabbb7SElias Oltmanns 	 * timeout updates for the same device, we have to pull
289745fabbb7SElias Oltmanns 	 * ATA_EH_PARK actions from eh_info into eh_context.i
289845fabbb7SElias Oltmanns 	 * ourselves at the beginning of each pass over the loop.
289945fabbb7SElias Oltmanns 	 *
290045fabbb7SElias Oltmanns 	 * Additionally, all write accesses to &ap->park_req_pending
290145fabbb7SElias Oltmanns 	 * through INIT_COMPLETION() (see below) or complete_all()
290245fabbb7SElias Oltmanns 	 * (see ata_scsi_park_store()) are protected by the host lock.
290345fabbb7SElias Oltmanns 	 * As a result we have that park_req_pending.done is zero on
290445fabbb7SElias Oltmanns 	 * exit from this function, i.e. when ATA_EH_PARK actions for
290545fabbb7SElias Oltmanns 	 * *all* devices on port ap have been pulled into the
290645fabbb7SElias Oltmanns 	 * respective eh_context structs. If, and only if,
290745fabbb7SElias Oltmanns 	 * park_req_pending.done is non-zero by the time we reach
290845fabbb7SElias Oltmanns 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
290945fabbb7SElias Oltmanns 	 * has been scheduled for at least one of the devices on port
291045fabbb7SElias Oltmanns 	 * ap and we have to cycle over the do {} while () loop in
291145fabbb7SElias Oltmanns 	 * ata_eh_recover() again.
291245fabbb7SElias Oltmanns 	 */
291345fabbb7SElias Oltmanns 
291445fabbb7SElias Oltmanns 	spin_lock_irqsave(ap->lock, flags);
291545fabbb7SElias Oltmanns 	INIT_COMPLETION(ap->park_req_pending);
29161eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
29171eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
291845fabbb7SElias Oltmanns 			struct ata_eh_info *ehi = &link->eh_info;
291945fabbb7SElias Oltmanns 
292045fabbb7SElias Oltmanns 			link->eh_context.i.dev_action[dev->devno] |=
292145fabbb7SElias Oltmanns 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
292245fabbb7SElias Oltmanns 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
292345fabbb7SElias Oltmanns 		}
292445fabbb7SElias Oltmanns 	}
292545fabbb7SElias Oltmanns 	spin_unlock_irqrestore(ap->lock, flags);
292645fabbb7SElias Oltmanns }
292745fabbb7SElias Oltmanns 
292845fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
292945fabbb7SElias Oltmanns {
293045fabbb7SElias Oltmanns 	struct ata_eh_context *ehc = &dev->link->eh_context;
293145fabbb7SElias Oltmanns 	struct ata_taskfile tf;
293245fabbb7SElias Oltmanns 	unsigned int err_mask;
293345fabbb7SElias Oltmanns 
293445fabbb7SElias Oltmanns 	ata_tf_init(dev, &tf);
293545fabbb7SElias Oltmanns 	if (park) {
293645fabbb7SElias Oltmanns 		ehc->unloaded_mask |= 1 << dev->devno;
293745fabbb7SElias Oltmanns 		tf.command = ATA_CMD_IDLEIMMEDIATE;
293845fabbb7SElias Oltmanns 		tf.feature = 0x44;
293945fabbb7SElias Oltmanns 		tf.lbal = 0x4c;
294045fabbb7SElias Oltmanns 		tf.lbam = 0x4e;
294145fabbb7SElias Oltmanns 		tf.lbah = 0x55;
294245fabbb7SElias Oltmanns 	} else {
294345fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
294445fabbb7SElias Oltmanns 		tf.command = ATA_CMD_CHK_POWER;
294545fabbb7SElias Oltmanns 	}
294645fabbb7SElias Oltmanns 
294745fabbb7SElias Oltmanns 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
294845fabbb7SElias Oltmanns 	tf.protocol |= ATA_PROT_NODATA;
294945fabbb7SElias Oltmanns 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
295045fabbb7SElias Oltmanns 	if (park && (err_mask || tf.lbal != 0xc4)) {
295145fabbb7SElias Oltmanns 		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
295245fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
295345fabbb7SElias Oltmanns 	}
295445fabbb7SElias Oltmanns }
295545fabbb7SElias Oltmanns 
29560260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
2957c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
2958c6fd2807SJeff Garzik {
29590260731fSTejun Heo 	struct ata_port *ap = link->ap;
29600260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2961c6fd2807SJeff Garzik 	struct ata_device *dev;
29628c3c52a8STejun Heo 	unsigned int new_mask = 0;
2963c6fd2807SJeff Garzik 	unsigned long flags;
2964f58229f8STejun Heo 	int rc = 0;
2965c6fd2807SJeff Garzik 
2966c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
2967c6fd2807SJeff Garzik 
29688c3c52a8STejun Heo 	/* For PATA drive side cable detection to work, IDENTIFY must
29698c3c52a8STejun Heo 	 * be done backwards such that PDIAG- is released by the slave
29708c3c52a8STejun Heo 	 * device before the master device is identified.
29718c3c52a8STejun Heo 	 */
29721eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL_REVERSE) {
2973f58229f8STejun Heo 		unsigned int action = ata_eh_dev_action(dev);
2974f58229f8STejun Heo 		unsigned int readid_flags = 0;
2975c6fd2807SJeff Garzik 
2976bff04647STejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET)
2977bff04647STejun Heo 			readid_flags |= ATA_READID_POSTRESET;
2978bff04647STejun Heo 
29799666f400STejun Heo 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2980633273a3STejun Heo 			WARN_ON(dev->class == ATA_DEV_PMP);
2981633273a3STejun Heo 
2982b1c72916STejun Heo 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2983c6fd2807SJeff Garzik 				rc = -EIO;
29848c3c52a8STejun Heo 				goto err;
2985c6fd2807SJeff Garzik 			}
2986c6fd2807SJeff Garzik 
29870260731fSTejun Heo 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2988422c9daaSTejun Heo 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2989422c9daaSTejun Heo 						readid_flags);
2990c6fd2807SJeff Garzik 			if (rc)
29918c3c52a8STejun Heo 				goto err;
2992c6fd2807SJeff Garzik 
29930260731fSTejun Heo 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2994c6fd2807SJeff Garzik 
2995baa1e78aSTejun Heo 			/* Configuration may have changed, reconfigure
2996baa1e78aSTejun Heo 			 * transfer mode.
2997baa1e78aSTejun Heo 			 */
2998baa1e78aSTejun Heo 			ehc->i.flags |= ATA_EHI_SETMODE;
2999baa1e78aSTejun Heo 
3000c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
3001ad72cf98STejun Heo 			schedule_work(&(ap->scsi_rescan_task));
3002c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3003c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
3004c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
3005842faa6cSTejun Heo 			/* Temporarily set dev->class, it will be
3006842faa6cSTejun Heo 			 * permanently set once all configurations are
3007842faa6cSTejun Heo 			 * complete.  This is necessary because new
3008842faa6cSTejun Heo 			 * device configuration is done in two
3009842faa6cSTejun Heo 			 * separate loops.
3010842faa6cSTejun Heo 			 */
3011c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
3012c6fd2807SJeff Garzik 
3013633273a3STejun Heo 			if (dev->class == ATA_DEV_PMP)
3014633273a3STejun Heo 				rc = sata_pmp_attach(dev);
3015633273a3STejun Heo 			else
3016633273a3STejun Heo 				rc = ata_dev_read_id(dev, &dev->class,
3017633273a3STejun Heo 						     readid_flags, dev->id);
3018842faa6cSTejun Heo 
3019842faa6cSTejun Heo 			/* read_id might have changed class, store and reset */
3020842faa6cSTejun Heo 			ehc->classes[dev->devno] = dev->class;
3021842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
3022842faa6cSTejun Heo 
30238c3c52a8STejun Heo 			switch (rc) {
30248c3c52a8STejun Heo 			case 0:
302599cf610aSTejun Heo 				/* clear error info accumulated during probe */
302699cf610aSTejun Heo 				ata_ering_clear(&dev->ering);
3027f58229f8STejun Heo 				new_mask |= 1 << dev->devno;
30288c3c52a8STejun Heo 				break;
30298c3c52a8STejun Heo 			case -ENOENT:
303055a8e2c8STejun Heo 				/* IDENTIFY was issued to non-existent
303155a8e2c8STejun Heo 				 * device.  No need to reset.  Just
3032842faa6cSTejun Heo 				 * thaw and ignore the device.
303355a8e2c8STejun Heo 				 */
303455a8e2c8STejun Heo 				ata_eh_thaw_port(ap);
3035c6fd2807SJeff Garzik 				break;
30368c3c52a8STejun Heo 			default:
30378c3c52a8STejun Heo 				goto err;
30388c3c52a8STejun Heo 			}
30398c3c52a8STejun Heo 		}
3040c6fd2807SJeff Garzik 	}
3041c6fd2807SJeff Garzik 
3042c1c4e8d5STejun Heo 	/* PDIAG- should have been released, ask cable type if post-reset */
304333267325STejun Heo 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
304433267325STejun Heo 		if (ap->ops->cable_detect)
3045c1c4e8d5STejun Heo 			ap->cbl = ap->ops->cable_detect(ap);
304633267325STejun Heo 		ata_force_cbl(ap);
304733267325STejun Heo 	}
3048c1c4e8d5STejun Heo 
30498c3c52a8STejun Heo 	/* Configure new devices forward such that user doesn't see
30508c3c52a8STejun Heo 	 * device detection messages backwards.
30518c3c52a8STejun Heo 	 */
30521eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
30534f7c2874STejun Heo 		if (!(new_mask & (1 << dev->devno)))
30548c3c52a8STejun Heo 			continue;
30558c3c52a8STejun Heo 
3056842faa6cSTejun Heo 		dev->class = ehc->classes[dev->devno];
3057842faa6cSTejun Heo 
30584f7c2874STejun Heo 		if (dev->class == ATA_DEV_PMP)
30594f7c2874STejun Heo 			continue;
30604f7c2874STejun Heo 
30618c3c52a8STejun Heo 		ehc->i.flags |= ATA_EHI_PRINTINFO;
30628c3c52a8STejun Heo 		rc = ata_dev_configure(dev);
30638c3c52a8STejun Heo 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3064842faa6cSTejun Heo 		if (rc) {
3065842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
30668c3c52a8STejun Heo 			goto err;
3067842faa6cSTejun Heo 		}
30688c3c52a8STejun Heo 
3069c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
3070c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3071c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3072baa1e78aSTejun Heo 
307355a8e2c8STejun Heo 		/* new device discovered, configure xfermode */
3074baa1e78aSTejun Heo 		ehc->i.flags |= ATA_EHI_SETMODE;
3075c6fd2807SJeff Garzik 	}
3076c6fd2807SJeff Garzik 
30778c3c52a8STejun Heo 	return 0;
30788c3c52a8STejun Heo 
30798c3c52a8STejun Heo  err:
3080c6fd2807SJeff Garzik 	*r_failed_dev = dev;
30818c3c52a8STejun Heo 	DPRINTK("EXIT rc=%d\n", rc);
3082c6fd2807SJeff Garzik 	return rc;
3083c6fd2807SJeff Garzik }
3084c6fd2807SJeff Garzik 
30856f1d1e3aSTejun Heo /**
30866f1d1e3aSTejun Heo  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
30876f1d1e3aSTejun Heo  *	@link: link on which timings will be programmed
308898a1708dSMartin Olsson  *	@r_failed_dev: out parameter for failed device
30896f1d1e3aSTejun Heo  *
30906f1d1e3aSTejun Heo  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
30916f1d1e3aSTejun Heo  *	ata_set_mode() fails, pointer to the failing device is
30926f1d1e3aSTejun Heo  *	returned in @r_failed_dev.
30936f1d1e3aSTejun Heo  *
30946f1d1e3aSTejun Heo  *	LOCKING:
30956f1d1e3aSTejun Heo  *	PCI/etc. bus probe sem.
30966f1d1e3aSTejun Heo  *
30976f1d1e3aSTejun Heo  *	RETURNS:
30986f1d1e3aSTejun Heo  *	0 on success, negative errno otherwise
30996f1d1e3aSTejun Heo  */
31006f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
31016f1d1e3aSTejun Heo {
31026f1d1e3aSTejun Heo 	struct ata_port *ap = link->ap;
310300115e0fSTejun Heo 	struct ata_device *dev;
310400115e0fSTejun Heo 	int rc;
31056f1d1e3aSTejun Heo 
310676326ac1STejun Heo 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
31071eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
310876326ac1STejun Heo 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
310976326ac1STejun Heo 			struct ata_ering_entry *ent;
311076326ac1STejun Heo 
311176326ac1STejun Heo 			ent = ata_ering_top(&dev->ering);
311276326ac1STejun Heo 			if (ent)
311376326ac1STejun Heo 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
311476326ac1STejun Heo 		}
311576326ac1STejun Heo 	}
311676326ac1STejun Heo 
31176f1d1e3aSTejun Heo 	/* has private set_mode? */
31186f1d1e3aSTejun Heo 	if (ap->ops->set_mode)
311900115e0fSTejun Heo 		rc = ap->ops->set_mode(link, r_failed_dev);
312000115e0fSTejun Heo 	else
312100115e0fSTejun Heo 		rc = ata_do_set_mode(link, r_failed_dev);
312200115e0fSTejun Heo 
312300115e0fSTejun Heo 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
31241eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
312500115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
312600115e0fSTejun Heo 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
312700115e0fSTejun Heo 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
312800115e0fSTejun Heo 
312900115e0fSTejun Heo 		if (dev->xfer_mode != saved_xfer_mode ||
313000115e0fSTejun Heo 		    ata_ncq_enabled(dev) != saved_ncq)
313100115e0fSTejun Heo 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
313200115e0fSTejun Heo 	}
313300115e0fSTejun Heo 
313400115e0fSTejun Heo 	return rc;
31356f1d1e3aSTejun Heo }
31366f1d1e3aSTejun Heo 
313711fc33daSTejun Heo /**
313811fc33daSTejun Heo  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
313911fc33daSTejun Heo  *	@dev: ATAPI device to clear UA for
314011fc33daSTejun Heo  *
314111fc33daSTejun Heo  *	Resets and other operations can make an ATAPI device raise
314211fc33daSTejun Heo  *	UNIT ATTENTION which causes the next operation to fail.  This
314311fc33daSTejun Heo  *	function clears UA.
314411fc33daSTejun Heo  *
314511fc33daSTejun Heo  *	LOCKING:
314611fc33daSTejun Heo  *	EH context (may sleep).
314711fc33daSTejun Heo  *
314811fc33daSTejun Heo  *	RETURNS:
314911fc33daSTejun Heo  *	0 on success, -errno on failure.
315011fc33daSTejun Heo  */
315111fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
315211fc33daSTejun Heo {
315311fc33daSTejun Heo 	int i;
315411fc33daSTejun Heo 
315511fc33daSTejun Heo 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3156b5357081STejun Heo 		u8 *sense_buffer = dev->link->ap->sector_buf;
315711fc33daSTejun Heo 		u8 sense_key = 0;
315811fc33daSTejun Heo 		unsigned int err_mask;
315911fc33daSTejun Heo 
316011fc33daSTejun Heo 		err_mask = atapi_eh_tur(dev, &sense_key);
316111fc33daSTejun Heo 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
316211fc33daSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
316311fc33daSTejun Heo 				"failed (err_mask=0x%x)\n", err_mask);
316411fc33daSTejun Heo 			return -EIO;
316511fc33daSTejun Heo 		}
316611fc33daSTejun Heo 
316711fc33daSTejun Heo 		if (!err_mask || sense_key != UNIT_ATTENTION)
316811fc33daSTejun Heo 			return 0;
316911fc33daSTejun Heo 
317011fc33daSTejun Heo 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
317111fc33daSTejun Heo 		if (err_mask) {
317211fc33daSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
317311fc33daSTejun Heo 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
317411fc33daSTejun Heo 			return -EIO;
317511fc33daSTejun Heo 		}
317611fc33daSTejun Heo 	}
317711fc33daSTejun Heo 
317811fc33daSTejun Heo 	ata_dev_printk(dev, KERN_WARNING,
317911fc33daSTejun Heo 		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
318011fc33daSTejun Heo 
318111fc33daSTejun Heo 	return 0;
318211fc33daSTejun Heo }
318311fc33daSTejun Heo 
31846013efd8STejun Heo /**
31856013efd8STejun Heo  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
31866013efd8STejun Heo  *	@dev: ATA device which may need FLUSH retry
31876013efd8STejun Heo  *
31886013efd8STejun Heo  *	If @dev failed FLUSH, it needs to be reported upper layer
31896013efd8STejun Heo  *	immediately as it means that @dev failed to remap and already
31906013efd8STejun Heo  *	lost at least a sector and further FLUSH retrials won't make
31916013efd8STejun Heo  *	any difference to the lost sector.  However, if FLUSH failed
31926013efd8STejun Heo  *	for other reasons, for example transmission error, FLUSH needs
31936013efd8STejun Heo  *	to be retried.
31946013efd8STejun Heo  *
31956013efd8STejun Heo  *	This function determines whether FLUSH failure retry is
31966013efd8STejun Heo  *	necessary and performs it if so.
31976013efd8STejun Heo  *
31986013efd8STejun Heo  *	RETURNS:
31996013efd8STejun Heo  *	0 if EH can continue, -errno if EH needs to be repeated.
32006013efd8STejun Heo  */
32016013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev)
32026013efd8STejun Heo {
32036013efd8STejun Heo 	struct ata_link *link = dev->link;
32046013efd8STejun Heo 	struct ata_port *ap = link->ap;
32056013efd8STejun Heo 	struct ata_queued_cmd *qc;
32066013efd8STejun Heo 	struct ata_taskfile tf;
32076013efd8STejun Heo 	unsigned int err_mask;
32086013efd8STejun Heo 	int rc = 0;
32096013efd8STejun Heo 
32106013efd8STejun Heo 	/* did flush fail for this device? */
32116013efd8STejun Heo 	if (!ata_tag_valid(link->active_tag))
32126013efd8STejun Heo 		return 0;
32136013efd8STejun Heo 
32146013efd8STejun Heo 	qc = __ata_qc_from_tag(ap, link->active_tag);
32156013efd8STejun Heo 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
32166013efd8STejun Heo 			       qc->tf.command != ATA_CMD_FLUSH))
32176013efd8STejun Heo 		return 0;
32186013efd8STejun Heo 
32196013efd8STejun Heo 	/* if the device failed it, it should be reported to upper layers */
32206013efd8STejun Heo 	if (qc->err_mask & AC_ERR_DEV)
32216013efd8STejun Heo 		return 0;
32226013efd8STejun Heo 
32236013efd8STejun Heo 	/* flush failed for some other reason, give it another shot */
32246013efd8STejun Heo 	ata_tf_init(dev, &tf);
32256013efd8STejun Heo 
32266013efd8STejun Heo 	tf.command = qc->tf.command;
32276013efd8STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE;
32286013efd8STejun Heo 	tf.protocol = ATA_PROT_NODATA;
32296013efd8STejun Heo 
32306013efd8STejun Heo 	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
32316013efd8STejun Heo 		       tf.command, qc->err_mask);
32326013efd8STejun Heo 
32336013efd8STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
32346013efd8STejun Heo 	if (!err_mask) {
32356013efd8STejun Heo 		/*
32366013efd8STejun Heo 		 * FLUSH is complete but there's no way to
32376013efd8STejun Heo 		 * successfully complete a failed command from EH.
32386013efd8STejun Heo 		 * Making sure retry is allowed at least once and
32396013efd8STejun Heo 		 * retrying it should do the trick - whatever was in
32406013efd8STejun Heo 		 * the cache is already on the platter and this won't
32416013efd8STejun Heo 		 * cause infinite loop.
32426013efd8STejun Heo 		 */
32436013efd8STejun Heo 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
32446013efd8STejun Heo 	} else {
32456013efd8STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
32466013efd8STejun Heo 			       err_mask);
32476013efd8STejun Heo 		rc = -EIO;
32486013efd8STejun Heo 
32496013efd8STejun Heo 		/* if device failed it, report it to upper layers */
32506013efd8STejun Heo 		if (err_mask & AC_ERR_DEV) {
32516013efd8STejun Heo 			qc->err_mask |= AC_ERR_DEV;
32526013efd8STejun Heo 			qc->result_tf = tf;
32536013efd8STejun Heo 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
32546013efd8STejun Heo 				rc = 0;
32556013efd8STejun Heo 		}
32566013efd8STejun Heo 	}
32576013efd8STejun Heo 	return rc;
32586013efd8STejun Heo }
32596013efd8STejun Heo 
32606b7ae954STejun Heo /**
32616b7ae954STejun Heo  *	ata_eh_set_lpm - configure SATA interface power management
32626b7ae954STejun Heo  *	@link: link to configure power management
32636b7ae954STejun Heo  *	@policy: the link power management policy
32646b7ae954STejun Heo  *	@r_failed_dev: out parameter for failed device
32656b7ae954STejun Heo  *
32666b7ae954STejun Heo  *	Enable SATA Interface power management.  This will enable
32676b7ae954STejun Heo  *	Device Interface Power Management (DIPM) for min_power
32686b7ae954STejun Heo  * 	policy, and then call driver specific callbacks for
32696b7ae954STejun Heo  *	enabling Host Initiated Power management.
32706b7ae954STejun Heo  *
32716b7ae954STejun Heo  *	LOCKING:
32726b7ae954STejun Heo  *	EH context.
32736b7ae954STejun Heo  *
32746b7ae954STejun Heo  *	RETURNS:
32756b7ae954STejun Heo  *	0 on success, -errno on failure.
32766b7ae954STejun Heo  */
32776b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
32786b7ae954STejun Heo 			  struct ata_device **r_failed_dev)
32796b7ae954STejun Heo {
32806c8ea89cSTejun Heo 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
32816b7ae954STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
32826b7ae954STejun Heo 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3283e5005b15STejun Heo 	enum ata_lpm_policy old_policy = link->lpm_policy;
32846b7ae954STejun Heo 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
32856b7ae954STejun Heo 	unsigned int err_mask;
32866b7ae954STejun Heo 	int rc;
32876b7ae954STejun Heo 
32886b7ae954STejun Heo 	/* if the link or host doesn't do LPM, noop */
32896b7ae954STejun Heo 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
32906b7ae954STejun Heo 		return 0;
32916b7ae954STejun Heo 
32926b7ae954STejun Heo 	/*
32936b7ae954STejun Heo 	 * DIPM is enabled only for MIN_POWER as some devices
32946b7ae954STejun Heo 	 * misbehave when the host NACKs transition to SLUMBER.  Order
32956b7ae954STejun Heo 	 * device and link configurations such that the host always
32966b7ae954STejun Heo 	 * allows DIPM requests.
32976b7ae954STejun Heo 	 */
32986b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
32996b7ae954STejun Heo 		bool hipm = ata_id_has_hipm(dev->id);
33006b7ae954STejun Heo 		bool dipm = ata_id_has_dipm(dev->id);
33016b7ae954STejun Heo 
33026b7ae954STejun Heo 		/* find the first enabled and LPM enabled devices */
33036b7ae954STejun Heo 		if (!link_dev)
33046b7ae954STejun Heo 			link_dev = dev;
33056b7ae954STejun Heo 
33066b7ae954STejun Heo 		if (!lpm_dev && (hipm || dipm))
33076b7ae954STejun Heo 			lpm_dev = dev;
33086b7ae954STejun Heo 
33096b7ae954STejun Heo 		hints &= ~ATA_LPM_EMPTY;
33106b7ae954STejun Heo 		if (!hipm)
33116b7ae954STejun Heo 			hints &= ~ATA_LPM_HIPM;
33126b7ae954STejun Heo 
33136b7ae954STejun Heo 		/* disable DIPM before changing link config */
33146b7ae954STejun Heo 		if (policy != ATA_LPM_MIN_POWER && dipm) {
33156b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
33166b7ae954STejun Heo 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
33176b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
33186b7ae954STejun Heo 				ata_dev_printk(dev, KERN_WARNING,
33196b7ae954STejun Heo 					"failed to disable DIPM, Emask 0x%x\n",
33206b7ae954STejun Heo 					err_mask);
33216b7ae954STejun Heo 				rc = -EIO;
33226b7ae954STejun Heo 				goto fail;
33236b7ae954STejun Heo 			}
33246b7ae954STejun Heo 		}
33256b7ae954STejun Heo 	}
33266b7ae954STejun Heo 
33276c8ea89cSTejun Heo 	if (ap) {
33286b7ae954STejun Heo 		rc = ap->ops->set_lpm(link, policy, hints);
33296b7ae954STejun Heo 		if (!rc && ap->slave_link)
33306b7ae954STejun Heo 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
33316c8ea89cSTejun Heo 	} else
33326c8ea89cSTejun Heo 		rc = sata_pmp_set_lpm(link, policy, hints);
33336b7ae954STejun Heo 
33346b7ae954STejun Heo 	/*
33356b7ae954STejun Heo 	 * Attribute link config failure to the first (LPM) enabled
33366b7ae954STejun Heo 	 * device on the link.
33376b7ae954STejun Heo 	 */
33386b7ae954STejun Heo 	if (rc) {
33396b7ae954STejun Heo 		if (rc == -EOPNOTSUPP) {
33406b7ae954STejun Heo 			link->flags |= ATA_LFLAG_NO_LPM;
33416b7ae954STejun Heo 			return 0;
33426b7ae954STejun Heo 		}
33436b7ae954STejun Heo 		dev = lpm_dev ? lpm_dev : link_dev;
33446b7ae954STejun Heo 		goto fail;
33456b7ae954STejun Heo 	}
33466b7ae954STejun Heo 
3347e5005b15STejun Heo 	/*
3348e5005b15STejun Heo 	 * Low level driver acked the transition.  Issue DIPM command
3349e5005b15STejun Heo 	 * with the new policy set.
3350e5005b15STejun Heo 	 */
3351e5005b15STejun Heo 	link->lpm_policy = policy;
3352e5005b15STejun Heo 	if (ap && ap->slave_link)
3353e5005b15STejun Heo 		ap->slave_link->lpm_policy = policy;
3354e5005b15STejun Heo 
33556b7ae954STejun Heo 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
33566b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
33576b7ae954STejun Heo 		if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
33586b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
33596b7ae954STejun Heo 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
33606b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
33616b7ae954STejun Heo 				ata_dev_printk(dev, KERN_WARNING,
33626b7ae954STejun Heo 					"failed to enable DIPM, Emask 0x%x\n",
33636b7ae954STejun Heo 					err_mask);
33646b7ae954STejun Heo 				rc = -EIO;
33656b7ae954STejun Heo 				goto fail;
33666b7ae954STejun Heo 			}
33676b7ae954STejun Heo 		}
33686b7ae954STejun Heo 	}
33696b7ae954STejun Heo 
33706b7ae954STejun Heo 	return 0;
33716b7ae954STejun Heo 
33726b7ae954STejun Heo fail:
3373e5005b15STejun Heo 	/* restore the old policy */
3374e5005b15STejun Heo 	link->lpm_policy = old_policy;
3375e5005b15STejun Heo 	if (ap && ap->slave_link)
3376e5005b15STejun Heo 		ap->slave_link->lpm_policy = old_policy;
3377e5005b15STejun Heo 
33786b7ae954STejun Heo 	/* if no device or only one more chance is left, disable LPM */
33796b7ae954STejun Heo 	if (!dev || ehc->tries[dev->devno] <= 2) {
33806b7ae954STejun Heo 		ata_link_printk(link, KERN_WARNING,
33816b7ae954STejun Heo 				"disabling LPM on the link\n");
33826b7ae954STejun Heo 		link->flags |= ATA_LFLAG_NO_LPM;
33836b7ae954STejun Heo 	}
33846b7ae954STejun Heo 	if (r_failed_dev)
33856b7ae954STejun Heo 		*r_failed_dev = dev;
33866b7ae954STejun Heo 	return rc;
33876b7ae954STejun Heo }
33886b7ae954STejun Heo 
33890260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link)
3390c6fd2807SJeff Garzik {
3391f58229f8STejun Heo 	struct ata_device *dev;
3392f58229f8STejun Heo 	int cnt = 0;
3393c6fd2807SJeff Garzik 
33941eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED)
3395c6fd2807SJeff Garzik 		cnt++;
3396c6fd2807SJeff Garzik 	return cnt;
3397c6fd2807SJeff Garzik }
3398c6fd2807SJeff Garzik 
33990260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
3400c6fd2807SJeff Garzik {
3401f58229f8STejun Heo 	struct ata_device *dev;
3402f58229f8STejun Heo 	int cnt = 0;
3403c6fd2807SJeff Garzik 
34041eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL)
3405f58229f8STejun Heo 		if (dev->class == ATA_DEV_UNKNOWN)
3406c6fd2807SJeff Garzik 			cnt++;
3407c6fd2807SJeff Garzik 	return cnt;
3408c6fd2807SJeff Garzik }
3409c6fd2807SJeff Garzik 
34100260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
3411c6fd2807SJeff Garzik {
3412672b2d65STejun Heo 	struct ata_port *ap = link->ap;
34130260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3414f58229f8STejun Heo 	struct ata_device *dev;
3415c6fd2807SJeff Garzik 
3416f9df58cbSTejun Heo 	/* skip disabled links */
3417f9df58cbSTejun Heo 	if (link->flags & ATA_LFLAG_DISABLED)
3418f9df58cbSTejun Heo 		return 1;
3419f9df58cbSTejun Heo 
3420e2f3d75fSTejun Heo 	/* skip if explicitly requested */
3421e2f3d75fSTejun Heo 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3422e2f3d75fSTejun Heo 		return 1;
3423e2f3d75fSTejun Heo 
3424672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
3425672b2d65STejun Heo 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3426672b2d65STejun Heo 		return 0;
3427672b2d65STejun Heo 
3428672b2d65STejun Heo 	/* reset at least once if reset is requested */
3429672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3430672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3431c6fd2807SJeff Garzik 		return 0;
3432c6fd2807SJeff Garzik 
3433c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
34341eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3435c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3436c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3437c6fd2807SJeff Garzik 			return 0;
3438c6fd2807SJeff Garzik 	}
3439c6fd2807SJeff Garzik 
3440c6fd2807SJeff Garzik 	return 1;
3441c6fd2807SJeff Garzik }
3442c6fd2807SJeff Garzik 
3443c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3444c2c7a89cSTejun Heo {
3445c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3446c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3447c2c7a89cSTejun Heo 	int *trials = void_arg;
3448c2c7a89cSTejun Heo 
3449c2c7a89cSTejun Heo 	if (ent->timestamp < now - min(now, interval))
3450c2c7a89cSTejun Heo 		return -1;
3451c2c7a89cSTejun Heo 
3452c2c7a89cSTejun Heo 	(*trials)++;
3453c2c7a89cSTejun Heo 	return 0;
3454c2c7a89cSTejun Heo }
3455c2c7a89cSTejun Heo 
345602c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
345702c05a27STejun Heo {
345802c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3459c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3460c2c7a89cSTejun Heo 	int trials = 0;
346102c05a27STejun Heo 
346202c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
346302c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
346402c05a27STejun Heo 		return 0;
346502c05a27STejun Heo 
346602c05a27STejun Heo 	ata_eh_detach_dev(dev);
346702c05a27STejun Heo 	ata_dev_init(dev);
346802c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3469cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
347000115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
347100115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
347202c05a27STejun Heo 
34736b7ae954STejun Heo 	/* the link maybe in a deep sleep, wake it up */
34746c8ea89cSTejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
34756c8ea89cSTejun Heo 		if (ata_is_host_link(link))
34766c8ea89cSTejun Heo 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
34776c8ea89cSTejun Heo 					       ATA_LPM_EMPTY);
34786c8ea89cSTejun Heo 		else
34796c8ea89cSTejun Heo 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
34806c8ea89cSTejun Heo 					 ATA_LPM_EMPTY);
34816c8ea89cSTejun Heo 	}
34826b7ae954STejun Heo 
3483c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering.  The specific
3484c2c7a89cSTejun Heo 	 * error mask used is irrelevant.  Because a successful device
3485c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3486c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3487c2c7a89cSTejun Heo 	 *
3488c2c7a89cSTejun Heo 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3489c2c7a89cSTejun Heo 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3490c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
3491c2c7a89cSTejun Heo 	 *
3492c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3493c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3494c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3495c2c7a89cSTejun Heo 	 */
3496c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3497c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3498c2c7a89cSTejun Heo 
3499c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3500c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3501c2c7a89cSTejun Heo 
350202c05a27STejun Heo 	return 1;
350302c05a27STejun Heo }
350402c05a27STejun Heo 
35059b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3506fee7ca72STejun Heo {
35079af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3508fee7ca72STejun Heo 
3509cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3510cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3511cf9a590aSTejun Heo 	 */
3512cf9a590aSTejun Heo 	if (err != -EAGAIN)
3513fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3514fee7ca72STejun Heo 
3515fee7ca72STejun Heo 	switch (err) {
3516fee7ca72STejun Heo 	case -ENODEV:
3517fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3518fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
3519fee7ca72STejun Heo 	case -EINVAL:
3520fee7ca72STejun Heo 		/* give it just one more chance */
3521fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3522fee7ca72STejun Heo 	case -EIO:
3523d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3524fee7ca72STejun Heo 			/* This is the last chance, better to slow
3525fee7ca72STejun Heo 			 * down than lose it.
3526fee7ca72STejun Heo 			 */
3527a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3528d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3529fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3530fee7ca72STejun Heo 		}
3531fee7ca72STejun Heo 	}
3532fee7ca72STejun Heo 
3533fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3534fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3535fee7ca72STejun Heo 		ata_dev_disable(dev);
3536fee7ca72STejun Heo 
3537fee7ca72STejun Heo 		/* detach if offline */
3538b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3539fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3540fee7ca72STejun Heo 
354102c05a27STejun Heo 		/* schedule probe if necessary */
354287fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3543fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
354487fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
354587fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
354687fbc5a0STejun Heo 		}
35479b1e2658STejun Heo 
35489b1e2658STejun Heo 		return 1;
3549fee7ca72STejun Heo 	} else {
3550cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
35519b1e2658STejun Heo 		return 0;
3552fee7ca72STejun Heo 	}
3553fee7ca72STejun Heo }
3554fee7ca72STejun Heo 
3555c6fd2807SJeff Garzik /**
3556c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
3557c6fd2807SJeff Garzik  *	@ap: host port to recover
3558c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3559c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3560c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3561c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
35629b1e2658STejun Heo  *	@r_failed_link: out parameter for failed link
3563c6fd2807SJeff Garzik  *
3564c6fd2807SJeff Garzik  *	This is the alpha and omega, eum and yang, heart and soul of
3565c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
35669b1e2658STejun Heo  *	recover each link and hotplug requests are recorded in the
35679b1e2658STejun Heo  *	link's eh_context.  This function executes all the operations
35689b1e2658STejun Heo  *	with appropriate retrials and fallbacks to resurrect failed
3569c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
3570c6fd2807SJeff Garzik  *
3571c6fd2807SJeff Garzik  *	LOCKING:
3572c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3573c6fd2807SJeff Garzik  *
3574c6fd2807SJeff Garzik  *	RETURNS:
3575c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3576c6fd2807SJeff Garzik  */
3577fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3578c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
35799b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
35809b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3581c6fd2807SJeff Garzik {
35829b1e2658STejun Heo 	struct ata_link *link;
3583c6fd2807SJeff Garzik 	struct ata_device *dev;
35846b7ae954STejun Heo 	int rc, nr_fails;
358545fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3586c6fd2807SJeff Garzik 
3587c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3588c6fd2807SJeff Garzik 
3589c6fd2807SJeff Garzik 	/* prep for recovery */
35901eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
35919b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
35929b1e2658STejun Heo 
3593f9df58cbSTejun Heo 		/* re-enable link? */
3594f9df58cbSTejun Heo 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3595f9df58cbSTejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3596f9df58cbSTejun Heo 			spin_lock_irqsave(ap->lock, flags);
3597f9df58cbSTejun Heo 			link->flags &= ~ATA_LFLAG_DISABLED;
3598f9df58cbSTejun Heo 			spin_unlock_irqrestore(ap->lock, flags);
3599f9df58cbSTejun Heo 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3600f9df58cbSTejun Heo 		}
3601f9df58cbSTejun Heo 
36021eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
3603fd995f70STejun Heo 			if (link->flags & ATA_LFLAG_NO_RETRY)
3604fd995f70STejun Heo 				ehc->tries[dev->devno] = 1;
3605fd995f70STejun Heo 			else
3606c6fd2807SJeff Garzik 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3607c6fd2807SJeff Garzik 
360879a55b72STejun Heo 			/* collect port action mask recorded in dev actions */
36099b1e2658STejun Heo 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
36109b1e2658STejun Heo 					 ~ATA_EH_PERDEV_MASK;
3611f58229f8STejun Heo 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
361279a55b72STejun Heo 
3613c6fd2807SJeff Garzik 			/* process hotplug request */
3614c6fd2807SJeff Garzik 			if (dev->flags & ATA_DFLAG_DETACH)
3615c6fd2807SJeff Garzik 				ata_eh_detach_dev(dev);
3616c6fd2807SJeff Garzik 
361702c05a27STejun Heo 			/* schedule probe if necessary */
361802c05a27STejun Heo 			if (!ata_dev_enabled(dev))
361902c05a27STejun Heo 				ata_eh_schedule_probe(dev);
3620c6fd2807SJeff Garzik 		}
36219b1e2658STejun Heo 	}
3622c6fd2807SJeff Garzik 
3623c6fd2807SJeff Garzik  retry:
3624c6fd2807SJeff Garzik 	rc = 0;
3625c6fd2807SJeff Garzik 
3626c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
3627c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3628c6fd2807SJeff Garzik 		goto out;
3629c6fd2807SJeff Garzik 
36309b1e2658STejun Heo 	/* prep for EH */
36311eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
36329b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
36339b1e2658STejun Heo 
3634c6fd2807SJeff Garzik 		/* skip EH if possible. */
36350260731fSTejun Heo 		if (ata_eh_skip_recovery(link))
3636c6fd2807SJeff Garzik 			ehc->i.action = 0;
3637c6fd2807SJeff Garzik 
36381eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
3639f58229f8STejun Heo 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
36409b1e2658STejun Heo 	}
3641c6fd2807SJeff Garzik 
3642c6fd2807SJeff Garzik 	/* reset */
36431eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
36449b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
36459b1e2658STejun Heo 
3646cf480626STejun Heo 		if (!(ehc->i.action & ATA_EH_RESET))
36479b1e2658STejun Heo 			continue;
36489b1e2658STejun Heo 
36499b1e2658STejun Heo 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3650dc98c32cSTejun Heo 				  prereset, softreset, hardreset, postreset);
3651c6fd2807SJeff Garzik 		if (rc) {
36520260731fSTejun Heo 			ata_link_printk(link, KERN_ERR,
3653c6fd2807SJeff Garzik 					"reset failed, giving up\n");
3654c6fd2807SJeff Garzik 			goto out;
3655c6fd2807SJeff Garzik 		}
36569b1e2658STejun Heo 	}
3657c6fd2807SJeff Garzik 
365845fabbb7SElias Oltmanns 	do {
365945fabbb7SElias Oltmanns 		unsigned long now;
366045fabbb7SElias Oltmanns 
366145fabbb7SElias Oltmanns 		/*
366245fabbb7SElias Oltmanns 		 * clears ATA_EH_PARK in eh_info and resets
366345fabbb7SElias Oltmanns 		 * ap->park_req_pending
366445fabbb7SElias Oltmanns 		 */
366545fabbb7SElias Oltmanns 		ata_eh_pull_park_action(ap);
366645fabbb7SElias Oltmanns 
366745fabbb7SElias Oltmanns 		deadline = jiffies;
36681eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
36691eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
367045fabbb7SElias Oltmanns 				struct ata_eh_context *ehc = &link->eh_context;
367145fabbb7SElias Oltmanns 				unsigned long tmp;
367245fabbb7SElias Oltmanns 
367345fabbb7SElias Oltmanns 				if (dev->class != ATA_DEV_ATA)
367445fabbb7SElias Oltmanns 					continue;
367545fabbb7SElias Oltmanns 				if (!(ehc->i.dev_action[dev->devno] &
367645fabbb7SElias Oltmanns 				      ATA_EH_PARK))
367745fabbb7SElias Oltmanns 					continue;
367845fabbb7SElias Oltmanns 				tmp = dev->unpark_deadline;
367945fabbb7SElias Oltmanns 				if (time_before(deadline, tmp))
368045fabbb7SElias Oltmanns 					deadline = tmp;
368145fabbb7SElias Oltmanns 				else if (time_before_eq(tmp, jiffies))
368245fabbb7SElias Oltmanns 					continue;
368345fabbb7SElias Oltmanns 				if (ehc->unloaded_mask & (1 << dev->devno))
368445fabbb7SElias Oltmanns 					continue;
368545fabbb7SElias Oltmanns 
368645fabbb7SElias Oltmanns 				ata_eh_park_issue_cmd(dev, 1);
368745fabbb7SElias Oltmanns 			}
368845fabbb7SElias Oltmanns 		}
368945fabbb7SElias Oltmanns 
369045fabbb7SElias Oltmanns 		now = jiffies;
369145fabbb7SElias Oltmanns 		if (time_before_eq(deadline, now))
369245fabbb7SElias Oltmanns 			break;
369345fabbb7SElias Oltmanns 
3694c0c362b6STejun Heo 		ata_eh_release(ap);
369545fabbb7SElias Oltmanns 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
369645fabbb7SElias Oltmanns 						       deadline - now);
3697c0c362b6STejun Heo 		ata_eh_acquire(ap);
369845fabbb7SElias Oltmanns 	} while (deadline);
36991eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37001eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
370145fabbb7SElias Oltmanns 			if (!(link->eh_context.unloaded_mask &
370245fabbb7SElias Oltmanns 			      (1 << dev->devno)))
370345fabbb7SElias Oltmanns 				continue;
370445fabbb7SElias Oltmanns 
370545fabbb7SElias Oltmanns 			ata_eh_park_issue_cmd(dev, 0);
370645fabbb7SElias Oltmanns 			ata_eh_done(link, dev, ATA_EH_PARK);
370745fabbb7SElias Oltmanns 		}
370845fabbb7SElias Oltmanns 	}
370945fabbb7SElias Oltmanns 
37109b1e2658STejun Heo 	/* the rest */
37116b7ae954STejun Heo 	nr_fails = 0;
37126b7ae954STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
37139b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37149b1e2658STejun Heo 
37156b7ae954STejun Heo 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
37166b7ae954STejun Heo 			goto config_lpm;
37176b7ae954STejun Heo 
3718c6fd2807SJeff Garzik 		/* revalidate existing devices and attach new ones */
37190260731fSTejun Heo 		rc = ata_eh_revalidate_and_attach(link, &dev);
3720c6fd2807SJeff Garzik 		if (rc)
37216b7ae954STejun Heo 			goto rest_fail;
3722c6fd2807SJeff Garzik 
3723633273a3STejun Heo 		/* if PMP got attached, return, pmp EH will take care of it */
3724633273a3STejun Heo 		if (link->device->class == ATA_DEV_PMP) {
3725633273a3STejun Heo 			ehc->i.action = 0;
3726633273a3STejun Heo 			return 0;
3727633273a3STejun Heo 		}
3728633273a3STejun Heo 
3729baa1e78aSTejun Heo 		/* configure transfer mode if necessary */
3730baa1e78aSTejun Heo 		if (ehc->i.flags & ATA_EHI_SETMODE) {
37310260731fSTejun Heo 			rc = ata_set_mode(link, &dev);
37324ae72a1eSTejun Heo 			if (rc)
37336b7ae954STejun Heo 				goto rest_fail;
3734baa1e78aSTejun Heo 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3735c6fd2807SJeff Garzik 		}
3736c6fd2807SJeff Garzik 
373711fc33daSTejun Heo 		/* If reset has been issued, clear UA to avoid
373811fc33daSTejun Heo 		 * disrupting the current users of the device.
373911fc33daSTejun Heo 		 */
374011fc33daSTejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
37411eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
374211fc33daSTejun Heo 				if (dev->class != ATA_DEV_ATAPI)
374311fc33daSTejun Heo 					continue;
374411fc33daSTejun Heo 				rc = atapi_eh_clear_ua(dev);
374511fc33daSTejun Heo 				if (rc)
37466b7ae954STejun Heo 					goto rest_fail;
374711fc33daSTejun Heo 			}
374811fc33daSTejun Heo 		}
374911fc33daSTejun Heo 
37506013efd8STejun Heo 		/* retry flush if necessary */
37516013efd8STejun Heo 		ata_for_each_dev(dev, link, ALL) {
37526013efd8STejun Heo 			if (dev->class != ATA_DEV_ATA)
37536013efd8STejun Heo 				continue;
37546013efd8STejun Heo 			rc = ata_eh_maybe_retry_flush(dev);
37556013efd8STejun Heo 			if (rc)
37566b7ae954STejun Heo 				goto rest_fail;
37576013efd8STejun Heo 		}
37586013efd8STejun Heo 
37596b7ae954STejun Heo 	config_lpm:
376011fc33daSTejun Heo 		/* configure link power saving */
37616b7ae954STejun Heo 		if (link->lpm_policy != ap->target_lpm_policy) {
37626b7ae954STejun Heo 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
37636b7ae954STejun Heo 			if (rc)
37646b7ae954STejun Heo 				goto rest_fail;
37656b7ae954STejun Heo 		}
3766ca77329fSKristen Carlson Accardi 
37679b1e2658STejun Heo 		/* this link is okay now */
37689b1e2658STejun Heo 		ehc->i.flags = 0;
37699b1e2658STejun Heo 		continue;
3770c6fd2807SJeff Garzik 
37716b7ae954STejun Heo 	rest_fail:
37726b7ae954STejun Heo 		nr_fails++;
37736b7ae954STejun Heo 		if (dev)
37740a2c0f56STejun Heo 			ata_eh_handle_dev_fail(dev, rc);
3775c6fd2807SJeff Garzik 
3776b06ce3e5STejun Heo 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3777b06ce3e5STejun Heo 			/* PMP reset requires working host port.
3778b06ce3e5STejun Heo 			 * Can't retry if it's frozen.
3779b06ce3e5STejun Heo 			 */
3780071f44b1STejun Heo 			if (sata_pmp_attached(ap))
3781b06ce3e5STejun Heo 				goto out;
37829b1e2658STejun Heo 			break;
37839b1e2658STejun Heo 		}
3784b06ce3e5STejun Heo 	}
37859b1e2658STejun Heo 
37866b7ae954STejun Heo 	if (nr_fails)
3787c6fd2807SJeff Garzik 		goto retry;
3788c6fd2807SJeff Garzik 
3789c6fd2807SJeff Garzik  out:
37909b1e2658STejun Heo 	if (rc && r_failed_link)
37919b1e2658STejun Heo 		*r_failed_link = link;
3792c6fd2807SJeff Garzik 
3793c6fd2807SJeff Garzik 	DPRINTK("EXIT, rc=%d\n", rc);
3794c6fd2807SJeff Garzik 	return rc;
3795c6fd2807SJeff Garzik }
3796c6fd2807SJeff Garzik 
3797c6fd2807SJeff Garzik /**
3798c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
3799c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
3800c6fd2807SJeff Garzik  *
3801c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
3802c6fd2807SJeff Garzik  *	failed qcs.
3803c6fd2807SJeff Garzik  *
3804c6fd2807SJeff Garzik  *	LOCKING:
3805c6fd2807SJeff Garzik  *	None.
3806c6fd2807SJeff Garzik  */
3807fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3808c6fd2807SJeff Garzik {
3809c6fd2807SJeff Garzik 	int tag;
3810c6fd2807SJeff Garzik 
3811c6fd2807SJeff Garzik 	/* retry or finish qcs */
3812c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3813c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3814c6fd2807SJeff Garzik 
3815c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3816c6fd2807SJeff Garzik 			continue;
3817c6fd2807SJeff Garzik 
3818c6fd2807SJeff Garzik 		if (qc->err_mask) {
3819c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
3820c6fd2807SJeff Garzik 			 * generate sense data in this function,
3821c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
3822c6fd2807SJeff Garzik 			 */
382303faab78STejun Heo 			if (qc->flags & ATA_QCFLAG_RETRY)
3824c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
382503faab78STejun Heo 			else
382603faab78STejun Heo 				ata_eh_qc_complete(qc);
3827c6fd2807SJeff Garzik 		} else {
3828c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3829c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
3830c6fd2807SJeff Garzik 			} else {
3831c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
3832c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3833c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
3834c6fd2807SJeff Garzik 			}
3835c6fd2807SJeff Garzik 		}
3836c6fd2807SJeff Garzik 	}
3837da917d69STejun Heo 
3838da917d69STejun Heo 	/* make sure nr_active_links is zero after EH */
3839da917d69STejun Heo 	WARN_ON(ap->nr_active_links);
3840da917d69STejun Heo 	ap->nr_active_links = 0;
3841c6fd2807SJeff Garzik }
3842c6fd2807SJeff Garzik 
3843c6fd2807SJeff Garzik /**
3844c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
3845c6fd2807SJeff Garzik  *	@ap: host port to handle error for
3846a1efdabaSTejun Heo  *
3847c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3848c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3849c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3850c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
3851c6fd2807SJeff Garzik  *
3852c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
3853c6fd2807SJeff Garzik  *
3854c6fd2807SJeff Garzik  *	LOCKING:
3855c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3856c6fd2807SJeff Garzik  */
3857c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3858c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3859c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
3860c6fd2807SJeff Garzik {
38619b1e2658STejun Heo 	struct ata_device *dev;
38629b1e2658STejun Heo 	int rc;
38639b1e2658STejun Heo 
38649b1e2658STejun Heo 	ata_eh_autopsy(ap);
38659b1e2658STejun Heo 	ata_eh_report(ap);
38669b1e2658STejun Heo 
38679b1e2658STejun Heo 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
38689b1e2658STejun Heo 			    NULL);
38699b1e2658STejun Heo 	if (rc) {
38701eca4365STejun Heo 		ata_for_each_dev(dev, &ap->link, ALL)
38719b1e2658STejun Heo 			ata_dev_disable(dev);
38729b1e2658STejun Heo 	}
38739b1e2658STejun Heo 
3874c6fd2807SJeff Garzik 	ata_eh_finish(ap);
3875c6fd2807SJeff Garzik }
3876c6fd2807SJeff Garzik 
3877a1efdabaSTejun Heo /**
3878a1efdabaSTejun Heo  *	ata_std_error_handler - standard error handler
3879a1efdabaSTejun Heo  *	@ap: host port to handle error for
3880a1efdabaSTejun Heo  *
3881a1efdabaSTejun Heo  *	Standard error handler
3882a1efdabaSTejun Heo  *
3883a1efdabaSTejun Heo  *	LOCKING:
3884a1efdabaSTejun Heo  *	Kernel thread context (may sleep).
3885a1efdabaSTejun Heo  */
3886a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
3887a1efdabaSTejun Heo {
3888a1efdabaSTejun Heo 	struct ata_port_operations *ops = ap->ops;
3889a1efdabaSTejun Heo 	ata_reset_fn_t hardreset = ops->hardreset;
3890a1efdabaSTejun Heo 
389157c9efdfSTejun Heo 	/* ignore built-in hardreset if SCR access is not available */
3892fe06e5f9STejun Heo 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3893a1efdabaSTejun Heo 		hardreset = NULL;
3894a1efdabaSTejun Heo 
3895a1efdabaSTejun Heo 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3896a1efdabaSTejun Heo }
3897a1efdabaSTejun Heo 
38986ffa01d8STejun Heo #ifdef CONFIG_PM
3899c6fd2807SJeff Garzik /**
3900c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
3901c6fd2807SJeff Garzik  *	@ap: port to suspend
3902c6fd2807SJeff Garzik  *
3903c6fd2807SJeff Garzik  *	Suspend @ap.
3904c6fd2807SJeff Garzik  *
3905c6fd2807SJeff Garzik  *	LOCKING:
3906c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3907c6fd2807SJeff Garzik  */
3908c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
3909c6fd2807SJeff Garzik {
3910c6fd2807SJeff Garzik 	unsigned long flags;
3911c6fd2807SJeff Garzik 	int rc = 0;
3912c6fd2807SJeff Garzik 
3913c6fd2807SJeff Garzik 	/* are we suspending? */
3914c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3915c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3916c6fd2807SJeff Garzik 	    ap->pm_mesg.event == PM_EVENT_ON) {
3917c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3918c6fd2807SJeff Garzik 		return;
3919c6fd2807SJeff Garzik 	}
3920c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3921c6fd2807SJeff Garzik 
3922c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3923c6fd2807SJeff Garzik 
392464578a3dSTejun Heo 	/* tell ACPI we're suspending */
392564578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
392664578a3dSTejun Heo 	if (rc)
392764578a3dSTejun Heo 		goto out;
392864578a3dSTejun Heo 
3929c6fd2807SJeff Garzik 	/* suspend */
3930c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
3931c6fd2807SJeff Garzik 
3932c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
3933c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3934c6fd2807SJeff Garzik 
3935bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_SUSPEND);
393664578a3dSTejun Heo  out:
3937c6fd2807SJeff Garzik 	/* report result */
3938c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3939c6fd2807SJeff Garzik 
3940c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3941c6fd2807SJeff Garzik 	if (rc == 0)
3942c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
394364578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
3944c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
3945c6fd2807SJeff Garzik 
3946c6fd2807SJeff Garzik 	if (ap->pm_result) {
3947c6fd2807SJeff Garzik 		*ap->pm_result = rc;
3948c6fd2807SJeff Garzik 		ap->pm_result = NULL;
3949c6fd2807SJeff Garzik 	}
3950c6fd2807SJeff Garzik 
3951c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3952c6fd2807SJeff Garzik 
3953c6fd2807SJeff Garzik 	return;
3954c6fd2807SJeff Garzik }
3955c6fd2807SJeff Garzik 
3956c6fd2807SJeff Garzik /**
3957c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
3958c6fd2807SJeff Garzik  *	@ap: port to resume
3959c6fd2807SJeff Garzik  *
3960c6fd2807SJeff Garzik  *	Resume @ap.
3961c6fd2807SJeff Garzik  *
3962c6fd2807SJeff Garzik  *	LOCKING:
3963c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3964c6fd2807SJeff Garzik  */
3965c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
3966c6fd2807SJeff Garzik {
39676f9c1ea2STejun Heo 	struct ata_link *link;
39686f9c1ea2STejun Heo 	struct ata_device *dev;
3969c6fd2807SJeff Garzik 	unsigned long flags;
39709666f400STejun Heo 	int rc = 0;
3971c6fd2807SJeff Garzik 
3972c6fd2807SJeff Garzik 	/* are we resuming? */
3973c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3974c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3975c6fd2807SJeff Garzik 	    ap->pm_mesg.event != PM_EVENT_ON) {
3976c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3977c6fd2807SJeff Garzik 		return;
3978c6fd2807SJeff Garzik 	}
3979c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3980c6fd2807SJeff Garzik 
39819666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3982c6fd2807SJeff Garzik 
39836f9c1ea2STejun Heo 	/*
39846f9c1ea2STejun Heo 	 * Error timestamps are in jiffies which doesn't run while
39856f9c1ea2STejun Heo 	 * suspended and PHY events during resume isn't too uncommon.
39866f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
39876f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
39886f9c1ea2STejun Heo 	 * Clear error history.
39896f9c1ea2STejun Heo 	 */
39906f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
39916f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
39926f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
39936f9c1ea2STejun Heo 
3994bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_ON);
3995bd3adca5SShaohua Li 
3996c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
3997c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
3998c6fd2807SJeff Garzik 
39996746544cSTejun Heo 	/* tell ACPI that we're resuming */
40006746544cSTejun Heo 	ata_acpi_on_resume(ap);
40016746544cSTejun Heo 
40029666f400STejun Heo 	/* report result */
4003c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4004c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4005c6fd2807SJeff Garzik 	if (ap->pm_result) {
4006c6fd2807SJeff Garzik 		*ap->pm_result = rc;
4007c6fd2807SJeff Garzik 		ap->pm_result = NULL;
4008c6fd2807SJeff Garzik 	}
4009c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4010c6fd2807SJeff Garzik }
40116ffa01d8STejun Heo #endif /* CONFIG_PM */
4012