xref: /openbmc/linux/drivers/ata/libata-eh.c (revision 36aae28e3df4127e296f2680d65cb6310ce61021)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
3c6fd2807SJeff Garzik  *
48c3d3d4bSTejun Heo  *  Maintained by:  Tejun Heo <tj@kernel.org>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9c6fd2807SJeff Garzik  *
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or
12c6fd2807SJeff Garzik  *  modify it under the terms of the GNU General Public License as
13c6fd2807SJeff Garzik  *  published by the Free Software Foundation; either version 2, or
14c6fd2807SJeff Garzik  *  (at your option) any later version.
15c6fd2807SJeff Garzik  *
16c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
17c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19c6fd2807SJeff Garzik  *  General Public License for more details.
20c6fd2807SJeff Garzik  *
21c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
22c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
23c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24c6fd2807SJeff Garzik  *  USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36242f9dcbSJens Axboe #include <linux/blkdev.h>
3738789fdaSPaul Gortmaker #include <linux/export.h>
382855568bSJeff Garzik #include <linux/pci.h>
39c6fd2807SJeff Garzik #include <scsi/scsi.h>
40c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
41c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
42c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
43c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
446521148cSRobert Hancock #include <scsi/scsi_dbg.h>
45c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
46c6fd2807SJeff Garzik 
47c6fd2807SJeff Garzik #include <linux/libata.h>
48c6fd2807SJeff Garzik 
49c6fd2807SJeff Garzik #include "libata.h"
50c6fd2807SJeff Garzik 
517d47e8d4STejun Heo enum {
523884f7b0STejun Heo 	/* speed down verdicts */
537d47e8d4STejun Heo 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
547d47e8d4STejun Heo 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
557d47e8d4STejun Heo 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
5676326ac1STejun Heo 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
573884f7b0STejun Heo 
583884f7b0STejun Heo 	/* error flags */
593884f7b0STejun Heo 	ATA_EFLAG_IS_IO			= (1 << 0),
6076326ac1STejun Heo 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
61d9027470SGwendal Grignou 	ATA_EFLAG_OLD_ER                = (1 << 31),
623884f7b0STejun Heo 
633884f7b0STejun Heo 	/* error categories */
643884f7b0STejun Heo 	ATA_ECAT_NONE			= 0,
653884f7b0STejun Heo 	ATA_ECAT_ATA_BUS		= 1,
663884f7b0STejun Heo 	ATA_ECAT_TOUT_HSM		= 2,
673884f7b0STejun Heo 	ATA_ECAT_UNK_DEV		= 3,
6875f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_NONE		= 4,
6975f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
7075f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
7175f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
7275f9cafcSTejun Heo 	ATA_ECAT_NR			= 8,
737d47e8d4STejun Heo 
7487fbc5a0STejun Heo 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
7587fbc5a0STejun Heo 
760a2c0f56STejun Heo 	/* always put at least this amount of time between resets */
770a2c0f56STejun Heo 	ATA_EH_RESET_COOL_DOWN		=  5000,
780a2c0f56STejun Heo 
79341c2c95STejun Heo 	/* Waiting in ->prereset can never be reliable.  It's
80341c2c95STejun Heo 	 * sometimes nice to wait there but it can't be depended upon;
81341c2c95STejun Heo 	 * otherwise, we wouldn't be resetting.  Just give it enough
82341c2c95STejun Heo 	 * time for most drives to spin up.
8331daabdaSTejun Heo 	 */
84341c2c95STejun Heo 	ATA_EH_PRERESET_TIMEOUT		= 10000,
85341c2c95STejun Heo 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
8611fc33daSTejun Heo 
8711fc33daSTejun Heo 	ATA_EH_UA_TRIES			= 5,
88c2c7a89cSTejun Heo 
89c2c7a89cSTejun Heo 	/* probe speed down parameters, see ata_eh_schedule_probe() */
90c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
91c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIALS		= 2,
9231daabdaSTejun Heo };
9331daabdaSTejun Heo 
9431daabdaSTejun Heo /* The following table determines how we sequence resets.  Each entry
9531daabdaSTejun Heo  * represents the timeout for that try.  The first try can be soft or
9631daabdaSTejun Heo  * hardreset.  All others are hardreset if available.  In most cases
9731daabdaSTejun Heo  * the first reset w/ 10sec timeout should succeed.  Following entries
9835bf8821SDan Williams  * are mostly for error handling, hotplug and those outlier devices that
9935bf8821SDan Williams  * take an exceptionally long time to recover from reset.
10031daabdaSTejun Heo  */
10131daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = {
102341c2c95STejun Heo 	10000,	/* most drives spin up by 10sec */
103341c2c95STejun Heo 	10000,	/* > 99% working drives spin up before 20sec */
10435bf8821SDan Williams 	35000,	/* give > 30 secs of idleness for outlier devices */
105341c2c95STejun Heo 	 5000,	/* and sweet one last chance */
106d8af0eb6STejun Heo 	ULONG_MAX, /* > 1 min has elapsed, give up */
10731daabdaSTejun Heo };
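
/*
 * Illustrative sketch only (not part of the driver): one way the table
 * above can be consumed, per the comment -- each reset attempt uses the
 * next entry and the ULONG_MAX sentinel means "give up".  The real
 * consumer is the reset logic run later by EH; the helper name below is
 * local to this sketch.
 */
static inline int ata_eh_sketch_next_reset_try(int try, unsigned long *timeout)
{
	if (ata_eh_reset_timeouts[try] == ULONG_MAX)
		return -1;			/* > 1 min has elapsed, stop retrying */

	*timeout = ata_eh_reset_timeouts[try];	/* timeout for this attempt */
	return try + 1;				/* index to use on the next attempt */
}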
10831daabdaSTejun Heo 
10987fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = {
11087fbc5a0STejun Heo 	 5000,	/* covers > 99% of successes and not too boring on failures */
11187fbc5a0STejun Heo 	10000,  /* combined time till here is enough even for media access */
11287fbc5a0STejun Heo 	30000,	/* for true idiots */
11387fbc5a0STejun Heo 	ULONG_MAX,
11487fbc5a0STejun Heo };
11587fbc5a0STejun Heo 
1166013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = {
1176013efd8STejun Heo 	15000,	/* be generous with flush */
1186013efd8STejun Heo 	15000,  /* ditto */
1196013efd8STejun Heo 	30000,	/* and even more generous */
1206013efd8STejun Heo 	ULONG_MAX,
1216013efd8STejun Heo };
1226013efd8STejun Heo 
12387fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = {
12487fbc5a0STejun Heo 	 5000,	/* same rationale as identify timeout */
12587fbc5a0STejun Heo 	10000,	/* ditto */
12687fbc5a0STejun Heo 	/* but no merciful 30sec for other commands, it just isn't worth it */
12787fbc5a0STejun Heo 	ULONG_MAX,
12887fbc5a0STejun Heo };
12987fbc5a0STejun Heo 
13087fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent {
13187fbc5a0STejun Heo 	const u8		*commands;
13287fbc5a0STejun Heo 	const unsigned long	*timeouts;
13387fbc5a0STejun Heo };
13487fbc5a0STejun Heo 
13587fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal
13687fbc5a0STejun Heo  * commands.  Each table entry is a command class and matches the
13787fbc5a0STejun Heo  * commands the entry applies to and the timeout table to use.
13887fbc5a0STejun Heo  *
13987fbc5a0STejun Heo  * On the retry after a command timed out, the next timeout value from
14087fbc5a0STejun Heo  * the table is used.  If the table doesn't contain further entries,
14187fbc5a0STejun Heo  * the last value is used.
14287fbc5a0STejun Heo  *
14387fbc5a0STejun Heo  * ehc->cmd_timeout_idx keeps track of which timeout to use per
14487fbc5a0STejun Heo  * command class, so if SET_FEATURES times out on the first try, the
14587fbc5a0STejun Heo  * next try will use the second timeout value only for that class.
14687fbc5a0STejun Heo  */
14787fbc5a0STejun Heo #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
14887fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent
14987fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
15087fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
15187fbc5a0STejun Heo 	  .timeouts = ata_eh_identify_timeouts, },
15287fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
15387fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15487fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
15587fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15687fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
15787fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15887fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
15987fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
1606013efd8STejun Heo 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
1616013efd8STejun Heo 	  .timeouts = ata_eh_flush_timeouts },
16287fbc5a0STejun Heo };
16387fbc5a0STejun Heo #undef CMDS
16487fbc5a0STejun Heo 
165c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
1666ffa01d8STejun Heo #ifdef CONFIG_PM
167c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
168c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
1696ffa01d8STejun Heo #else /* CONFIG_PM */
1706ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap)
1716ffa01d8STejun Heo { }
1726ffa01d8STejun Heo 
1736ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap)
1746ffa01d8STejun Heo { }
1756ffa01d8STejun Heo #endif /* CONFIG_PM */
176c6fd2807SJeff Garzik 
177b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
178b64bbc39STejun Heo 				 va_list args)
179b64bbc39STejun Heo {
180b64bbc39STejun Heo 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
181b64bbc39STejun Heo 				     ATA_EH_DESC_LEN - ehi->desc_len,
182b64bbc39STejun Heo 				     fmt, args);
183b64bbc39STejun Heo }
184b64bbc39STejun Heo 
185b64bbc39STejun Heo /**
186b64bbc39STejun Heo  *	__ata_ehi_push_desc - push error description without adding separator
187b64bbc39STejun Heo  *	@ehi: target EHI
188b64bbc39STejun Heo  *	@fmt: printf format string
189b64bbc39STejun Heo  *
190b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
191b64bbc39STejun Heo  *
192b64bbc39STejun Heo  *	LOCKING:
193b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
194b64bbc39STejun Heo  */
195b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
196b64bbc39STejun Heo {
197b64bbc39STejun Heo 	va_list args;
198b64bbc39STejun Heo 
199b64bbc39STejun Heo 	va_start(args, fmt);
200b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
201b64bbc39STejun Heo 	va_end(args);
202b64bbc39STejun Heo }
203b64bbc39STejun Heo 
204b64bbc39STejun Heo /**
205b64bbc39STejun Heo  *	ata_ehi_push_desc - push error description with separator
206b64bbc39STejun Heo  *	@ehi: target EHI
207b64bbc39STejun Heo  *	@fmt: printf format string
208b64bbc39STejun Heo  *
209b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
210b64bbc39STejun Heo  *	If @ehi->desc is not empty, ", " is added in-between.
211b64bbc39STejun Heo  *
212b64bbc39STejun Heo  *	LOCKING:
213b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
214b64bbc39STejun Heo  */
215b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
216b64bbc39STejun Heo {
217b64bbc39STejun Heo 	va_list args;
218b64bbc39STejun Heo 
219b64bbc39STejun Heo 	if (ehi->desc_len)
220b64bbc39STejun Heo 		__ata_ehi_push_desc(ehi, ", ");
221b64bbc39STejun Heo 
222b64bbc39STejun Heo 	va_start(args, fmt);
223b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
224b64bbc39STejun Heo 	va_end(args);
225b64bbc39STejun Heo }
226b64bbc39STejun Heo 
227b64bbc39STejun Heo /**
228b64bbc39STejun Heo  *	ata_ehi_clear_desc - clean error description
229b64bbc39STejun Heo  *	@ehi: target EHI
230b64bbc39STejun Heo  *
231b64bbc39STejun Heo  *	Clear @ehi->desc.
232b64bbc39STejun Heo  *
233b64bbc39STejun Heo  *	LOCKING:
234b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
235b64bbc39STejun Heo  */
236b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi)
237b64bbc39STejun Heo {
238b64bbc39STejun Heo 	ehi->desc[0] = '\0';
239b64bbc39STejun Heo 	ehi->desc_len = 0;
240b64bbc39STejun Heo }
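
/*
 * Illustrative sketch only: typical use of the EHI description helpers
 * documented above.  An error path holding the host lock starts from a
 * clean description and appends fragments; ata_ehi_push_desc() inserts
 * the ", " separator between them automatically.  The register values
 * here are made-up example data.
 */
static inline void ata_eh_sketch_fill_desc(struct ata_eh_info *ehi,
					   u32 irq_stat, u32 serror)
{
	ata_ehi_clear_desc(ehi);				/* "" */
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);	/* no separator yet */
	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);	/* ", " added before this */
}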
241b64bbc39STejun Heo 
242cbcdd875STejun Heo /**
243cbcdd875STejun Heo  *	ata_port_desc - append port description
244cbcdd875STejun Heo  *	@ap: target ATA port
245cbcdd875STejun Heo  *	@fmt: printf format string
246cbcdd875STejun Heo  *
247cbcdd875STejun Heo  *	Format string according to @fmt and append it to port
248cbcdd875STejun Heo  *	description.  If port description is not empty, " " is added
249cbcdd875STejun Heo  *	in-between.  This function is to be used while initializing
250cbcdd875STejun Heo  *	ata_host.  The description is printed on host registration.
251cbcdd875STejun Heo  *
252cbcdd875STejun Heo  *	LOCKING:
253cbcdd875STejun Heo  *	None.
254cbcdd875STejun Heo  */
255cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
256cbcdd875STejun Heo {
257cbcdd875STejun Heo 	va_list args;
258cbcdd875STejun Heo 
259cbcdd875STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
260cbcdd875STejun Heo 
261cbcdd875STejun Heo 	if (ap->link.eh_info.desc_len)
262cbcdd875STejun Heo 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
263cbcdd875STejun Heo 
264cbcdd875STejun Heo 	va_start(args, fmt);
265cbcdd875STejun Heo 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
266cbcdd875STejun Heo 	va_end(args);
267cbcdd875STejun Heo }
268cbcdd875STejun Heo 
269cbcdd875STejun Heo #ifdef CONFIG_PCI
270cbcdd875STejun Heo 
271cbcdd875STejun Heo /**
272cbcdd875STejun Heo  *	ata_port_pbar_desc - append PCI BAR description
273cbcdd875STejun Heo  *	@ap: target ATA port
274cbcdd875STejun Heo  *	@bar: target PCI BAR
275cbcdd875STejun Heo  *	@offset: offset into PCI BAR
276cbcdd875STejun Heo  *	@name: name of the area
277cbcdd875STejun Heo  *
278cbcdd875STejun Heo  *	If @offset is negative, this function formats a string which
279cbcdd875STejun Heo  *	contains the name, address, size and type of the BAR and
280cbcdd875STejun Heo  *	appends it to the port description.  If @offset is zero or
281cbcdd875STejun Heo  *	positive, only the name and the offset address are appended.
282cbcdd875STejun Heo  *
283cbcdd875STejun Heo  *	LOCKING:
284cbcdd875STejun Heo  *	None.
285cbcdd875STejun Heo  */
286cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
287cbcdd875STejun Heo 			const char *name)
288cbcdd875STejun Heo {
289cbcdd875STejun Heo 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
290cbcdd875STejun Heo 	char *type = "";
291cbcdd875STejun Heo 	unsigned long long start, len;
292cbcdd875STejun Heo 
293cbcdd875STejun Heo 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
294cbcdd875STejun Heo 		type = "m";
295cbcdd875STejun Heo 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
296cbcdd875STejun Heo 		type = "i";
297cbcdd875STejun Heo 
298cbcdd875STejun Heo 	start = (unsigned long long)pci_resource_start(pdev, bar);
299cbcdd875STejun Heo 	len = (unsigned long long)pci_resource_len(pdev, bar);
300cbcdd875STejun Heo 
301cbcdd875STejun Heo 	if (offset < 0)
302cbcdd875STejun Heo 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
303cbcdd875STejun Heo 	else
304e6a73ab1SAndrew Morton 		ata_port_desc(ap, "%s 0x%llx", name,
305e6a73ab1SAndrew Morton 				start + (unsigned long long)offset);
306cbcdd875STejun Heo }
307cbcdd875STejun Heo 
308cbcdd875STejun Heo #endif /* CONFIG_PCI */
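
#ifdef CONFIG_PCI
/*
 * Illustrative sketch only: how a PCI low-level driver might describe a
 * port while initializing its ata_host, using the helpers above.  The
 * BAR number, offset and IRQ are made-up example values.
 */
static inline void ata_eh_sketch_describe_port(struct ata_port *ap, int irq)
{
	/* negative offset: name, type, size and address of the whole BAR */
	ata_port_pbar_desc(ap, 5, -1, "abar");
	/* zero/positive offset: only the name and the offset address */
	ata_port_pbar_desc(ap, 5, 0x100, "port");
	/* plain fragments are separated by a single space */
	ata_port_desc(ap, "irq %d", irq);
}
#endif /* CONFIG_PCI */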
309cbcdd875STejun Heo 
31087fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd)
31187fbc5a0STejun Heo {
31287fbc5a0STejun Heo 	int i;
31387fbc5a0STejun Heo 
31487fbc5a0STejun Heo 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
31587fbc5a0STejun Heo 		const u8 *cur;
31687fbc5a0STejun Heo 
31787fbc5a0STejun Heo 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
31887fbc5a0STejun Heo 			if (*cur == cmd)
31987fbc5a0STejun Heo 				return i;
32087fbc5a0STejun Heo 	}
32187fbc5a0STejun Heo 
32287fbc5a0STejun Heo 	return -1;
32387fbc5a0STejun Heo }
32487fbc5a0STejun Heo 
32587fbc5a0STejun Heo /**
32687fbc5a0STejun Heo  *	ata_internal_cmd_timeout - determine timeout for an internal command
32787fbc5a0STejun Heo  *	@dev: target device
32887fbc5a0STejun Heo  *	@cmd: internal command to be issued
32987fbc5a0STejun Heo  *
33087fbc5a0STejun Heo  *	Determine timeout for internal command @cmd for @dev.
33187fbc5a0STejun Heo  *
33287fbc5a0STejun Heo  *	LOCKING:
33387fbc5a0STejun Heo  *	EH context.
33487fbc5a0STejun Heo  *
33587fbc5a0STejun Heo  *	RETURNS:
33687fbc5a0STejun Heo  *	Determined timeout.
33787fbc5a0STejun Heo  */
33887fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
33987fbc5a0STejun Heo {
34087fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
34187fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
34287fbc5a0STejun Heo 	int idx;
34387fbc5a0STejun Heo 
34487fbc5a0STejun Heo 	if (ent < 0)
34587fbc5a0STejun Heo 		return ATA_EH_CMD_DFL_TIMEOUT;
34687fbc5a0STejun Heo 
34787fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
34887fbc5a0STejun Heo 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
34987fbc5a0STejun Heo }
35087fbc5a0STejun Heo 
35187fbc5a0STejun Heo /**
35287fbc5a0STejun Heo  *	ata_internal_cmd_timed_out - notification for internal command timeout
35387fbc5a0STejun Heo  *	@dev: target device
35487fbc5a0STejun Heo  *	@cmd: internal command which timed out
35587fbc5a0STejun Heo  *
35687fbc5a0STejun Heo  *	Notify EH that internal command @cmd for @dev timed out.  This
35787fbc5a0STejun Heo  *	function should be called only for commands whose timeouts are
35887fbc5a0STejun Heo  *	determined using ata_internal_cmd_timeout().
35987fbc5a0STejun Heo  *
36087fbc5a0STejun Heo  *	LOCKING:
36187fbc5a0STejun Heo  *	EH context.
36287fbc5a0STejun Heo  */
36387fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
36487fbc5a0STejun Heo {
36587fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
36687fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
36787fbc5a0STejun Heo 	int idx;
36887fbc5a0STejun Heo 
36987fbc5a0STejun Heo 	if (ent < 0)
37087fbc5a0STejun Heo 		return;
37187fbc5a0STejun Heo 
37287fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
37387fbc5a0STejun Heo 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
37487fbc5a0STejun Heo 		ehc->cmd_timeout_idx[dev->devno][ent]++;
37587fbc5a0STejun Heo }
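
/*
 * Illustrative sketch only: the caller-side protocol described above.
 * Whoever issues an EH internal command fetches the timeout first and,
 * if the previous attempt timed out, reports that back so the next try
 * for this command class escalates to the next table entry.  The helper
 * name and the "previous_try_timed_out" parameter are inventions of
 * this sketch, not libata API.
 */
static inline unsigned long ata_eh_sketch_pick_timeout(struct ata_device *dev, u8 cmd,
						       bool previous_try_timed_out)
{
	/* a timed-out attempt bumps this command class to the next entry */
	if (previous_try_timed_out)
		ata_internal_cmd_timed_out(dev, cmd);

	/* commands not in the table fall back to ATA_EH_CMD_DFL_TIMEOUT */
	return ata_internal_cmd_timeout(dev, cmd);
}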
37687fbc5a0STejun Heo 
3773884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
378c6fd2807SJeff Garzik 			     unsigned int err_mask)
379c6fd2807SJeff Garzik {
380c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
381c6fd2807SJeff Garzik 
382c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
383c6fd2807SJeff Garzik 
384c6fd2807SJeff Garzik 	ering->cursor++;
385c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
386c6fd2807SJeff Garzik 
387c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
3883884f7b0STejun Heo 	ent->eflags = eflags;
389c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
390c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
391c6fd2807SJeff Garzik }
392c6fd2807SJeff Garzik 
39376326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
39476326ac1STejun Heo {
39576326ac1STejun Heo 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
39676326ac1STejun Heo 
39776326ac1STejun Heo 	if (ent->err_mask)
39876326ac1STejun Heo 		return ent;
39976326ac1STejun Heo 	return NULL;
40076326ac1STejun Heo }
40176326ac1STejun Heo 
402d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering,
403c6fd2807SJeff Garzik 		  int (*map_fn)(struct ata_ering_entry *, void *),
404c6fd2807SJeff Garzik 		  void *arg)
405c6fd2807SJeff Garzik {
406c6fd2807SJeff Garzik 	int idx, rc = 0;
407c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
408c6fd2807SJeff Garzik 
409c6fd2807SJeff Garzik 	idx = ering->cursor;
410c6fd2807SJeff Garzik 	do {
411c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
412c6fd2807SJeff Garzik 		if (!ent->err_mask)
413c6fd2807SJeff Garzik 			break;
414c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
415c6fd2807SJeff Garzik 		if (rc)
416c6fd2807SJeff Garzik 			break;
417c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
418c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
419c6fd2807SJeff Garzik 
420c6fd2807SJeff Garzik 	return rc;
421c6fd2807SJeff Garzik }
422c6fd2807SJeff Garzik 
42360428407SH Hartley Sweeten static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
424d9027470SGwendal Grignou {
425d9027470SGwendal Grignou 	ent->eflags |= ATA_EFLAG_OLD_ER;
426d9027470SGwendal Grignou 	return 0;
427d9027470SGwendal Grignou }
428d9027470SGwendal Grignou 
429d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering)
430d9027470SGwendal Grignou {
431d9027470SGwendal Grignou 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
432d9027470SGwendal Grignou }
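
/*
 * Illustrative sketch only: ata_ering_map() walks entries from the most
 * recent one backwards until it hits an empty slot or the callback
 * returns non-zero.  Modelled on ata_ering_clear_cb() above, a callback
 * could count recent I/O errors while skipping entries already marked
 * ATA_EFLAG_OLD_ER, e.g. via
 * ata_ering_map(&dev->ering, ata_ering_sketch_count_cb, &count).
 */
static inline int ata_ering_sketch_count_cb(struct ata_ering_entry *ent, void *void_arg)
{
	int *count = void_arg;

	if (!(ent->eflags & ATA_EFLAG_OLD_ER) && (ent->eflags & ATA_EFLAG_IS_IO))
		(*count)++;

	return 0;	/* keep walking the whole ring */
}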
433d9027470SGwendal Grignou 
434c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
435c6fd2807SJeff Garzik {
4369af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
437c6fd2807SJeff Garzik 
438c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
439c6fd2807SJeff Garzik }
440c6fd2807SJeff Garzik 
441f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
442c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
443c6fd2807SJeff Garzik {
444f58229f8STejun Heo 	struct ata_device *tdev;
445c6fd2807SJeff Garzik 
446c6fd2807SJeff Garzik 	if (!dev) {
447c6fd2807SJeff Garzik 		ehi->action &= ~action;
4481eca4365STejun Heo 		ata_for_each_dev(tdev, link, ALL)
449f58229f8STejun Heo 			ehi->dev_action[tdev->devno] &= ~action;
450c6fd2807SJeff Garzik 	} else {
451c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
452c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
453c6fd2807SJeff Garzik 
454c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
455c6fd2807SJeff Garzik 		if (ehi->action & action) {
4561eca4365STejun Heo 			ata_for_each_dev(tdev, link, ALL)
457f58229f8STejun Heo 				ehi->dev_action[tdev->devno] |=
458f58229f8STejun Heo 					ehi->action & action;
459c6fd2807SJeff Garzik 			ehi->action &= ~action;
460c6fd2807SJeff Garzik 		}
461c6fd2807SJeff Garzik 
462c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
463c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
464c6fd2807SJeff Garzik 	}
465c6fd2807SJeff Garzik }
466c6fd2807SJeff Garzik 
467c6fd2807SJeff Garzik /**
468c0c362b6STejun Heo  *	ata_eh_acquire - acquire EH ownership
469c0c362b6STejun Heo  *	@ap: ATA port to acquire EH ownership for
470c0c362b6STejun Heo  *
471c0c362b6STejun Heo  *	Acquire EH ownership for @ap.  This is the basic exclusion
472c0c362b6STejun Heo  *	mechanism for ports sharing a host.  Only one port hanging off
473c0c362b6STejun Heo  *	the same host can claim the ownership of EH.
474c0c362b6STejun Heo  *
475c0c362b6STejun Heo  *	LOCKING:
476c0c362b6STejun Heo  *	EH context.
477c0c362b6STejun Heo  */
478c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap)
479c0c362b6STejun Heo {
480c0c362b6STejun Heo 	mutex_lock(&ap->host->eh_mutex);
481c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner);
482c0c362b6STejun Heo 	ap->host->eh_owner = current;
483c0c362b6STejun Heo }
484c0c362b6STejun Heo 
485c0c362b6STejun Heo /**
486c0c362b6STejun Heo  *	ata_eh_release - release EH ownership
487c0c362b6STejun Heo  *	@ap: ATA port to release EH ownership for
488c0c362b6STejun Heo  *
489c0c362b6STejun Heo  *	Release EH ownership for @ap.  The caller must
490c0c362b6STejun Heo  *	have acquired EH ownership using ata_eh_acquire() previously.
491c0c362b6STejun Heo  *
492c0c362b6STejun Heo  *	LOCKING:
493c0c362b6STejun Heo  *	EH context.
494c0c362b6STejun Heo  */
495c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap)
496c0c362b6STejun Heo {
497c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner != current);
498c0c362b6STejun Heo 	ap->host->eh_owner = NULL;
499c0c362b6STejun Heo 	mutex_unlock(&ap->host->eh_mutex);
500c0c362b6STejun Heo }
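
/*
 * Illustrative sketch only: EH ownership is meant to bracket a port's
 * whole EH pass, as ata_scsi_port_error_handler() below does.  Long
 * waits inside that pass may temporarily drop ownership so that sibling
 * ports on the same host are not starved.
 */
static inline void ata_eh_sketch_exclusive_pass(struct ata_port *ap)
{
	ata_eh_acquire(ap);	/* only one port per host may run EH at a time */
	/*
	 * ... reset / revalidate / recover the port; a long sleep in here
	 * may ata_eh_release(ap) before sleeping and ata_eh_acquire(ap)
	 * again afterwards ...
	 */
	ata_eh_release(ap);
}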
501c0c362b6STejun Heo 
502c0c362b6STejun Heo /**
503c6fd2807SJeff Garzik  *	ata_scsi_timed_out - SCSI layer time out callback
504c6fd2807SJeff Garzik  *	@cmd: timed out SCSI command
505c6fd2807SJeff Garzik  *
506c6fd2807SJeff Garzik  *	Handles SCSI layer timeout.  We race with normal completion of
507c6fd2807SJeff Garzik  *	the qc for @cmd.  If the qc is already gone, we lose and let
508c6fd2807SJeff Garzik  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
509c6fd2807SJeff Garzik  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
510c6fd2807SJeff Garzik  *	from finishing it by setting EH_SCHEDULED and return
511c6fd2807SJeff Garzik  *	EH_NOT_HANDLED.
512c6fd2807SJeff Garzik  *
513c6fd2807SJeff Garzik  *	TODO: kill this function once old EH is gone.
514c6fd2807SJeff Garzik  *
515c6fd2807SJeff Garzik  *	LOCKING:
516c6fd2807SJeff Garzik  *	Called from timer context
517c6fd2807SJeff Garzik  *
518c6fd2807SJeff Garzik  *	RETURNS:
519c6fd2807SJeff Garzik  *	EH_HANDLED or EH_NOT_HANDLED
520c6fd2807SJeff Garzik  */
521242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
522c6fd2807SJeff Garzik {
523c6fd2807SJeff Garzik 	struct Scsi_Host *host = cmd->device->host;
524c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
525c6fd2807SJeff Garzik 	unsigned long flags;
526c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
527242f9dcbSJens Axboe 	enum blk_eh_timer_return ret;
528c6fd2807SJeff Garzik 
529c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
530c6fd2807SJeff Garzik 
531c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
532242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
533c6fd2807SJeff Garzik 		goto out;
534c6fd2807SJeff Garzik 	}
535c6fd2807SJeff Garzik 
536242f9dcbSJens Axboe 	ret = BLK_EH_HANDLED;
537c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
5389af5c9c9STejun Heo 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
539c6fd2807SJeff Garzik 	if (qc) {
540c6fd2807SJeff Garzik 		WARN_ON(qc->scsicmd != cmd);
541c6fd2807SJeff Garzik 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
542c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_TIMEOUT;
543242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
544c6fd2807SJeff Garzik 	}
545c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
546c6fd2807SJeff Garzik 
547c6fd2807SJeff Garzik  out:
548c6fd2807SJeff Garzik 	DPRINTK("EXIT, ret=%d\n", ret);
549c6fd2807SJeff Garzik 	return ret;
550c6fd2807SJeff Garzik }
551c6fd2807SJeff Garzik 
552ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap)
553ece180d1STejun Heo {
554ece180d1STejun Heo 	struct ata_link *link;
555ece180d1STejun Heo 	struct ata_device *dev;
556ece180d1STejun Heo 	unsigned long flags;
557ece180d1STejun Heo 
558ece180d1STejun Heo 	/* Restore SControl IPM and SPD for the next driver and
559ece180d1STejun Heo 	 * disable attached devices.
560ece180d1STejun Heo 	 */
561ece180d1STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
562ece180d1STejun Heo 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
563ece180d1STejun Heo 		ata_for_each_dev(dev, link, ALL)
564ece180d1STejun Heo 			ata_dev_disable(dev);
565ece180d1STejun Heo 	}
566ece180d1STejun Heo 
567ece180d1STejun Heo 	/* freeze and set UNLOADED */
568ece180d1STejun Heo 	spin_lock_irqsave(ap->lock, flags);
569ece180d1STejun Heo 
570ece180d1STejun Heo 	ata_port_freeze(ap);			/* won't be thawed */
571ece180d1STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
572ece180d1STejun Heo 	ap->pflags |= ATA_PFLAG_UNLOADED;
573ece180d1STejun Heo 
574ece180d1STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
575ece180d1STejun Heo }
576ece180d1STejun Heo 
577c6fd2807SJeff Garzik /**
578c6fd2807SJeff Garzik  *	ata_scsi_error - SCSI layer error handler callback
579c6fd2807SJeff Garzik  *	@host: SCSI host on which error occurred
580c6fd2807SJeff Garzik  *
581c6fd2807SJeff Garzik  *	Handles SCSI-layer-thrown error events.
582c6fd2807SJeff Garzik  *
583c6fd2807SJeff Garzik  *	LOCKING:
584c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
585c6fd2807SJeff Garzik  *
586c6fd2807SJeff Garzik  *	RETURNS:
587c6fd2807SJeff Garzik  *	Zero.
588c6fd2807SJeff Garzik  */
589c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
590c6fd2807SJeff Garzik {
591c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
592c6fd2807SJeff Garzik 	unsigned long flags;
593c34aeebcSJames Bottomley 	LIST_HEAD(eh_work_q);
594c6fd2807SJeff Garzik 
595c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
596c6fd2807SJeff Garzik 
597c34aeebcSJames Bottomley 	spin_lock_irqsave(host->host_lock, flags);
598c34aeebcSJames Bottomley 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
599c34aeebcSJames Bottomley 	spin_unlock_irqrestore(host->host_lock, flags);
600c34aeebcSJames Bottomley 
6010e0b494cSJames Bottomley 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
6020e0b494cSJames Bottomley 
6030e0b494cSJames Bottomley 	/* If we raced normal completion and there is nothing to recover
6040e0b494cSJames Bottomley 	   (nr_timedout == 0), why exactly are we doing error recovery? */
6050e0b494cSJames Bottomley 	ata_scsi_port_error_handler(host, ap);
6060e0b494cSJames Bottomley 
6070e0b494cSJames Bottomley 	/* finish or retry handled scmd's and clean up */
6080e0b494cSJames Bottomley 	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
6090e0b494cSJames Bottomley 
6100e0b494cSJames Bottomley 	DPRINTK("EXIT\n");
6110e0b494cSJames Bottomley }
6120e0b494cSJames Bottomley 
6130e0b494cSJames Bottomley /**
6140e0b494cSJames Bottomley  * ata_scsi_cmd_error_handler - error callback for a list of commands
6150e0b494cSJames Bottomley  * @host:	scsi host containing the port
6160e0b494cSJames Bottomley  * @ap:		ATA port within the host
6170e0b494cSJames Bottomley  * @eh_work_q:	list of commands to process
6180e0b494cSJames Bottomley  *
6190e0b494cSJames Bottomley  * Process the given list of commands and return those finished to
6200e0b494cSJames Bottomley  * ap->eh_done_q.  This function is the first part of the libata error
6210e0b494cSJames Bottomley  * handler, which processes the given list of failed commands.
6220e0b494cSJames Bottomley  */
6230e0b494cSJames Bottomley void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
6240e0b494cSJames Bottomley 				struct list_head *eh_work_q)
6250e0b494cSJames Bottomley {
6260e0b494cSJames Bottomley 	int i;
6270e0b494cSJames Bottomley 	unsigned long flags;
6280e0b494cSJames Bottomley 
629c429137aSTejun Heo 	/* make sure sff pio task is not running */
630c429137aSTejun Heo 	ata_sff_flush_pio_task(ap);
631c6fd2807SJeff Garzik 
632cca3974eSJeff Garzik 	/* synchronize with host lock and sort out timeouts */
633c6fd2807SJeff Garzik 
634c6fd2807SJeff Garzik 	/* For new EH, all qcs are finished in one of three ways -
635c6fd2807SJeff Garzik 	 * normal completion, error completion, and SCSI timeout.
636c96f1732SAlan Cox 	 * Both completions can race against SCSI timeout.  When normal
637c6fd2807SJeff Garzik 	 * completion wins, the qc never reaches EH.  When error
638c6fd2807SJeff Garzik 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
639c6fd2807SJeff Garzik 	 *
640c6fd2807SJeff Garzik 	 * When SCSI timeout wins, things are a bit more complex.
641c6fd2807SJeff Garzik 	 * Normal or error completion can occur after the timeout but
642c6fd2807SJeff Garzik 	 * before this point.  In such cases, both types of
643c6fd2807SJeff Garzik 	 * completions are honored.  A scmd is determined to have
644c6fd2807SJeff Garzik 	 * timed out iff its associated qc is active and not failed.
645c6fd2807SJeff Garzik 	 */
646c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
647c6fd2807SJeff Garzik 		struct scsi_cmnd *scmd, *tmp;
648c6fd2807SJeff Garzik 		int nr_timedout = 0;
649c6fd2807SJeff Garzik 
650c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
651c6fd2807SJeff Garzik 
652c96f1732SAlan Cox 		/* This must occur under the ap->lock as we don't want
653c96f1732SAlan Cox 		   a polled recovery to race the real interrupt handler.
654c96f1732SAlan Cox 
655c96f1732SAlan Cox 		   The lost_interrupt handler checks for any completed but
656c96f1732SAlan Cox 		   non-notified command and completes much like an IRQ handler.
657c96f1732SAlan Cox 
658c96f1732SAlan Cox 		   We then fall into the error recovery code which will treat
659c96f1732SAlan Cox 		   this as if normal completion won the race. */
660c96f1732SAlan Cox 
661c96f1732SAlan Cox 		if (ap->ops->lost_interrupt)
662c96f1732SAlan Cox 			ap->ops->lost_interrupt(ap);
663c96f1732SAlan Cox 
6640e0b494cSJames Bottomley 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
665c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
666c6fd2807SJeff Garzik 
667c6fd2807SJeff Garzik 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
668c6fd2807SJeff Garzik 				qc = __ata_qc_from_tag(ap, i);
669c6fd2807SJeff Garzik 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
670c6fd2807SJeff Garzik 				    qc->scsicmd == scmd)
671c6fd2807SJeff Garzik 					break;
672c6fd2807SJeff Garzik 			}
673c6fd2807SJeff Garzik 
674c6fd2807SJeff Garzik 			if (i < ATA_MAX_QUEUE) {
675c6fd2807SJeff Garzik 				/* the scmd has an associated qc */
676c6fd2807SJeff Garzik 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
677c6fd2807SJeff Garzik 					/* which hasn't failed yet, timeout */
678c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_TIMEOUT;
679c6fd2807SJeff Garzik 					qc->flags |= ATA_QCFLAG_FAILED;
680c6fd2807SJeff Garzik 					nr_timedout++;
681c6fd2807SJeff Garzik 				}
682c6fd2807SJeff Garzik 			} else {
683c6fd2807SJeff Garzik 				/* Normal completion occurred after
684c6fd2807SJeff Garzik 				 * SCSI timeout but before this point.
685c6fd2807SJeff Garzik 				 * Successfully complete it.
686c6fd2807SJeff Garzik 				 */
687c6fd2807SJeff Garzik 				scmd->retries = scmd->allowed;
688c6fd2807SJeff Garzik 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
689c6fd2807SJeff Garzik 			}
690c6fd2807SJeff Garzik 		}
691c6fd2807SJeff Garzik 
692c6fd2807SJeff Garzik 		/* If we have timed out qcs, they belong to EH from
693c6fd2807SJeff Garzik 		 * this point but the state of the controller is
694c6fd2807SJeff Garzik 		 * unknown.  Freeze the port to make sure the IRQ
695c6fd2807SJeff Garzik 		 * handler doesn't diddle with those qcs.  This must
696c6fd2807SJeff Garzik 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
697c6fd2807SJeff Garzik 		 */
698c6fd2807SJeff Garzik 		if (nr_timedout)
699c6fd2807SJeff Garzik 			__ata_port_freeze(ap);
700c6fd2807SJeff Garzik 
701c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
702a1e10f7eSTejun Heo 
703a1e10f7eSTejun Heo 		/* initialize eh_tries */
704a1e10f7eSTejun Heo 		ap->eh_tries = ATA_EH_MAX_TRIES;
705c6fd2807SJeff Garzik 	} else
706c6fd2807SJeff Garzik 		spin_unlock_wait(ap->lock);
707c6fd2807SJeff Garzik 
7080e0b494cSJames Bottomley }
7090e0b494cSJames Bottomley EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
7100e0b494cSJames Bottomley 
7110e0b494cSJames Bottomley /**
7120e0b494cSJames Bottomley  * ata_scsi_port_error_handler - recover the port after the commands
7130e0b494cSJames Bottomley  * @host:	SCSI host containing the port
7140e0b494cSJames Bottomley  * @ap:		the ATA port
7150e0b494cSJames Bottomley  *
7160e0b494cSJames Bottomley  * Handle the recovery of the port @ap after all the commands
7170e0b494cSJames Bottomley  * have been recovered.
7180e0b494cSJames Bottomley  */
7190e0b494cSJames Bottomley void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
7200e0b494cSJames Bottomley {
7210e0b494cSJames Bottomley 	unsigned long flags;
722c96f1732SAlan Cox 
723c6fd2807SJeff Garzik 	/* invoke error handler */
724c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
725cf1b86c8STejun Heo 		struct ata_link *link;
726cf1b86c8STejun Heo 
727c0c362b6STejun Heo 		/* acquire EH ownership */
728c0c362b6STejun Heo 		ata_eh_acquire(ap);
729c0c362b6STejun Heo  repeat:
7305ddf24c5STejun Heo 		/* kill fast drain timer */
7315ddf24c5STejun Heo 		del_timer_sync(&ap->fastdrain_timer);
7325ddf24c5STejun Heo 
733c6fd2807SJeff Garzik 		/* process port resume request */
734c6fd2807SJeff Garzik 		ata_eh_handle_port_resume(ap);
735c6fd2807SJeff Garzik 
736c6fd2807SJeff Garzik 		/* fetch & clear EH info */
737c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
738c6fd2807SJeff Garzik 
7391eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST) {
74000115e0fSTejun Heo 			struct ata_eh_context *ehc = &link->eh_context;
74100115e0fSTejun Heo 			struct ata_device *dev;
74200115e0fSTejun Heo 
743cf1b86c8STejun Heo 			memset(&link->eh_context, 0, sizeof(link->eh_context));
744cf1b86c8STejun Heo 			link->eh_context.i = link->eh_info;
745cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
74600115e0fSTejun Heo 
7471eca4365STejun Heo 			ata_for_each_dev(dev, link, ENABLED) {
74800115e0fSTejun Heo 				int devno = dev->devno;
74900115e0fSTejun Heo 
75000115e0fSTejun Heo 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
75100115e0fSTejun Heo 				if (ata_ncq_enabled(dev))
75200115e0fSTejun Heo 					ehc->saved_ncq_enabled |= 1 << devno;
75300115e0fSTejun Heo 			}
754cf1b86c8STejun Heo 		}
755c6fd2807SJeff Garzik 
756c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
757c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
758da917d69STejun Heo 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
759c6fd2807SJeff Garzik 
760c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
761c6fd2807SJeff Garzik 
762c6fd2807SJeff Garzik 		/* invoke EH, skip if unloading or suspended */
763c6fd2807SJeff Garzik 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
764c6fd2807SJeff Garzik 			ap->ops->error_handler(ap);
765ece180d1STejun Heo 		else {
766ece180d1STejun Heo 			/* if unloading, commence suicide */
767ece180d1STejun Heo 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
768ece180d1STejun Heo 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
769ece180d1STejun Heo 				ata_eh_unload(ap);
770c6fd2807SJeff Garzik 			ata_eh_finish(ap);
771ece180d1STejun Heo 		}
772c6fd2807SJeff Garzik 
773c6fd2807SJeff Garzik 		/* process port suspend request */
774c6fd2807SJeff Garzik 		ata_eh_handle_port_suspend(ap);
775c6fd2807SJeff Garzik 
77625985edcSLucas De Marchi 		/* An exception might have happened after ->error_handler
777c6fd2807SJeff Garzik 		 * recovered the port but before this point.  Repeat
778c6fd2807SJeff Garzik 		 * EH in such case.
779c6fd2807SJeff Garzik 		 */
780c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
781c6fd2807SJeff Garzik 
782c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
783a1e10f7eSTejun Heo 			if (--ap->eh_tries) {
784c6fd2807SJeff Garzik 				spin_unlock_irqrestore(ap->lock, flags);
785c6fd2807SJeff Garzik 				goto repeat;
786c6fd2807SJeff Garzik 			}
787a9a79dfeSJoe Perches 			ata_port_err(ap,
788a9a79dfeSJoe Perches 				     "EH pending after %d tries, giving up\n",
789a9a79dfeSJoe Perches 				     ATA_EH_MAX_TRIES);
790914616a3STejun Heo 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
791c6fd2807SJeff Garzik 		}
792c6fd2807SJeff Garzik 
793c6fd2807SJeff Garzik 		/* this run is complete, make sure EH info is clear */
7941eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST)
795cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
796c6fd2807SJeff Garzik 
797e4a9c373SDan Williams 		/* end eh (clear host_eh_scheduled) while holding
798e4a9c373SDan Williams 		 * ap->lock such that if an exception occurs after this
799e4a9c373SDan Williams 		 * point but before EH completion, SCSI midlayer will
800c6fd2807SJeff Garzik 		 * re-initiate EH.
801c6fd2807SJeff Garzik 		 */
802e4a9c373SDan Williams 		ap->ops->end_eh(ap);
803c6fd2807SJeff Garzik 
804c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
805c0c362b6STejun Heo 		ata_eh_release(ap);
806c6fd2807SJeff Garzik 	} else {
8079af5c9c9STejun Heo 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
808c6fd2807SJeff Garzik 		ap->ops->eng_timeout(ap);
809c6fd2807SJeff Garzik 	}
810c6fd2807SJeff Garzik 
811c6fd2807SJeff Garzik 	scsi_eh_flush_done_q(&ap->eh_done_q);
812c6fd2807SJeff Garzik 
813c6fd2807SJeff Garzik 	/* clean up */
814c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
815c6fd2807SJeff Garzik 
816c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_LOADING)
817c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_LOADING;
818c6fd2807SJeff Garzik 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
819ad72cf98STejun Heo 		schedule_delayed_work(&ap->hotplug_task, 0);
820c6fd2807SJeff Garzik 
821c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_RECOVERED)
822a9a79dfeSJoe Perches 		ata_port_info(ap, "EH complete\n");
823c6fd2807SJeff Garzik 
824c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
825c6fd2807SJeff Garzik 
826c6fd2807SJeff Garzik 	/* tell wait_eh that we're done */
827c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
828c6fd2807SJeff Garzik 	wake_up_all(&ap->eh_wait_q);
829c6fd2807SJeff Garzik 
830c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
831c6fd2807SJeff Garzik }
8320e0b494cSJames Bottomley EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
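
/*
 * Illustrative sketch only: the two exported halves above let an EH
 * strategy handler other than ata_scsi_error() (libsas is the in-tree
 * case) split command triage from port recovery.  This mirrors what
 * ata_scsi_error() itself does; error checking is omitted.
 */
static inline void ata_eh_sketch_strategy_handler(struct Scsi_Host *shost,
						  struct ata_port *ap)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	/* steal the failed commands from the SCSI midlayer */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);	/* triage the scmds/qcs */
	ata_scsi_port_error_handler(shost, ap);			/* then recover the port */
}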
833c6fd2807SJeff Garzik 
834c6fd2807SJeff Garzik /**
835c6fd2807SJeff Garzik  *	ata_port_wait_eh - Wait for the currently pending EH to complete
836c6fd2807SJeff Garzik  *	@ap: Port to wait EH for
837c6fd2807SJeff Garzik  *
838c6fd2807SJeff Garzik  *	Wait until the currently pending EH is complete.
839c6fd2807SJeff Garzik  *
840c6fd2807SJeff Garzik  *	LOCKING:
841c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
842c6fd2807SJeff Garzik  */
843c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap)
844c6fd2807SJeff Garzik {
845c6fd2807SJeff Garzik 	unsigned long flags;
846c6fd2807SJeff Garzik 	DEFINE_WAIT(wait);
847c6fd2807SJeff Garzik 
848c6fd2807SJeff Garzik  retry:
849c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
850c6fd2807SJeff Garzik 
851c6fd2807SJeff Garzik 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
852c6fd2807SJeff Garzik 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
853c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
854c6fd2807SJeff Garzik 		schedule();
855c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
856c6fd2807SJeff Garzik 	}
857c6fd2807SJeff Garzik 	finish_wait(&ap->eh_wait_q, &wait);
858c6fd2807SJeff Garzik 
859c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
860c6fd2807SJeff Garzik 
861c6fd2807SJeff Garzik 	/* make sure SCSI EH is complete */
862cca3974eSJeff Garzik 	if (scsi_host_in_recovery(ap->scsi_host)) {
86397750cebSTejun Heo 		ata_msleep(ap, 10);
864c6fd2807SJeff Garzik 		goto retry;
865c6fd2807SJeff Garzik 	}
866c6fd2807SJeff Garzik }
86781c757bcSDan Williams EXPORT_SYMBOL_GPL(ata_port_wait_eh);
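
/*
 * Illustrative sketch only: a common way to force and then flush an EH
 * pass, e.g. before tearing a port down -- schedule EH under the host
 * lock, then sleep in ata_port_wait_eh() until both libata EH and SCSI
 * EH have finished.
 */
static inline void ata_eh_sketch_flush_eh(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);	/* ends up in ->sched_eh(), see below */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
}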
868c6fd2807SJeff Garzik 
8695ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap)
8705ddf24c5STejun Heo {
8715ddf24c5STejun Heo 	unsigned int tag;
8725ddf24c5STejun Heo 	int nr = 0;
8735ddf24c5STejun Heo 
8745ddf24c5STejun Heo 	/* count only non-internal commands */
8755ddf24c5STejun Heo 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
8765ddf24c5STejun Heo 		if (ata_qc_from_tag(ap, tag))
8775ddf24c5STejun Heo 			nr++;
8785ddf24c5STejun Heo 
8795ddf24c5STejun Heo 	return nr;
8805ddf24c5STejun Heo }
8815ddf24c5STejun Heo 
8825ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg)
8835ddf24c5STejun Heo {
8845ddf24c5STejun Heo 	struct ata_port *ap = (void *)arg;
8855ddf24c5STejun Heo 	unsigned long flags;
8865ddf24c5STejun Heo 	int cnt;
8875ddf24c5STejun Heo 
8885ddf24c5STejun Heo 	spin_lock_irqsave(ap->lock, flags);
8895ddf24c5STejun Heo 
8905ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8915ddf24c5STejun Heo 
8925ddf24c5STejun Heo 	/* are we done? */
8935ddf24c5STejun Heo 	if (!cnt)
8945ddf24c5STejun Heo 		goto out_unlock;
8955ddf24c5STejun Heo 
8965ddf24c5STejun Heo 	if (cnt == ap->fastdrain_cnt) {
8975ddf24c5STejun Heo 		unsigned int tag;
8985ddf24c5STejun Heo 
8995ddf24c5STejun Heo 		/* No progress during the last interval, tag all
9005ddf24c5STejun Heo 		 * in-flight qcs as timed out and freeze the port.
9015ddf24c5STejun Heo 		 */
9025ddf24c5STejun Heo 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
9035ddf24c5STejun Heo 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
9045ddf24c5STejun Heo 			if (qc)
9055ddf24c5STejun Heo 				qc->err_mask |= AC_ERR_TIMEOUT;
9065ddf24c5STejun Heo 		}
9075ddf24c5STejun Heo 
9085ddf24c5STejun Heo 		ata_port_freeze(ap);
9095ddf24c5STejun Heo 	} else {
9105ddf24c5STejun Heo 		/* some qcs have finished, give it another chance */
9115ddf24c5STejun Heo 		ap->fastdrain_cnt = cnt;
9125ddf24c5STejun Heo 		ap->fastdrain_timer.expires =
913341c2c95STejun Heo 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
9145ddf24c5STejun Heo 		add_timer(&ap->fastdrain_timer);
9155ddf24c5STejun Heo 	}
9165ddf24c5STejun Heo 
9175ddf24c5STejun Heo  out_unlock:
9185ddf24c5STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
9195ddf24c5STejun Heo }
9205ddf24c5STejun Heo 
9215ddf24c5STejun Heo /**
9225ddf24c5STejun Heo  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
9235ddf24c5STejun Heo  *	@ap: target ATA port
9245ddf24c5STejun Heo  *	@fastdrain: activate fast drain
9255ddf24c5STejun Heo  *
9265ddf24c5STejun Heo  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
9275ddf24c5STejun Heo  *	is non-zero and EH wasn't pending before.  Fast drain ensures
9285ddf24c5STejun Heo  *	that EH kicks in in a timely manner.
9295ddf24c5STejun Heo  *
9305ddf24c5STejun Heo  *	LOCKING:
9315ddf24c5STejun Heo  *	spin_lock_irqsave(host lock)
9325ddf24c5STejun Heo  */
9335ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
9345ddf24c5STejun Heo {
9355ddf24c5STejun Heo 	int cnt;
9365ddf24c5STejun Heo 
9375ddf24c5STejun Heo 	/* already scheduled? */
9385ddf24c5STejun Heo 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
9395ddf24c5STejun Heo 		return;
9405ddf24c5STejun Heo 
9415ddf24c5STejun Heo 	ap->pflags |= ATA_PFLAG_EH_PENDING;
9425ddf24c5STejun Heo 
9435ddf24c5STejun Heo 	if (!fastdrain)
9445ddf24c5STejun Heo 		return;
9455ddf24c5STejun Heo 
9465ddf24c5STejun Heo 	/* do we have in-flight qcs? */
9475ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
9485ddf24c5STejun Heo 	if (!cnt)
9495ddf24c5STejun Heo 		return;
9505ddf24c5STejun Heo 
9515ddf24c5STejun Heo 	/* activate fast drain */
9525ddf24c5STejun Heo 	ap->fastdrain_cnt = cnt;
953341c2c95STejun Heo 	ap->fastdrain_timer.expires =
954341c2c95STejun Heo 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
9555ddf24c5STejun Heo 	add_timer(&ap->fastdrain_timer);
9565ddf24c5STejun Heo }
9575ddf24c5STejun Heo 
958c6fd2807SJeff Garzik /**
959c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
960c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
961c6fd2807SJeff Garzik  *
962c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
963c6fd2807SJeff Garzik  *	other commands are drained.
964c6fd2807SJeff Garzik  *
965c6fd2807SJeff Garzik  *	LOCKING:
966cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
967c6fd2807SJeff Garzik  */
968c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
969c6fd2807SJeff Garzik {
970c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
971fa41efdaSTejun Heo 	struct request_queue *q = qc->scsicmd->device->request_queue;
972fa41efdaSTejun Heo 	unsigned long flags;
973c6fd2807SJeff Garzik 
974c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
975c6fd2807SJeff Garzik 
976c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_FAILED;
9775ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
978c6fd2807SJeff Garzik 
979c6fd2807SJeff Garzik 	/* The following will fail if timeout has already expired.
980c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
981c6fd2807SJeff Garzik 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
982c6fd2807SJeff Garzik 	 * this function completes.
983c6fd2807SJeff Garzik 	 */
984fa41efdaSTejun Heo 	spin_lock_irqsave(q->queue_lock, flags);
985242f9dcbSJens Axboe 	blk_abort_request(qc->scsicmd->request);
986fa41efdaSTejun Heo 	spin_unlock_irqrestore(q->queue_lock, flags);
987c6fd2807SJeff Garzik }
988c6fd2807SJeff Garzik 
989c6fd2807SJeff Garzik /**
990e4a9c373SDan Williams  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
991e4a9c373SDan Williams  * @ap: ATA port to schedule EH for
992e4a9c373SDan Williams  *
993e4a9c373SDan Williams  *	LOCKING: inherited from ata_port_schedule_eh
994e4a9c373SDan Williams  *	spin_lock_irqsave(host lock)
995e4a9c373SDan Williams  */
996e4a9c373SDan Williams void ata_std_sched_eh(struct ata_port *ap)
997e4a9c373SDan Williams {
998e4a9c373SDan Williams 	WARN_ON(!ap->ops->error_handler);
999e4a9c373SDan Williams 
1000e4a9c373SDan Williams 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
1001e4a9c373SDan Williams 		return;
1002e4a9c373SDan Williams 
1003e4a9c373SDan Williams 	ata_eh_set_pending(ap, 1);
1004e4a9c373SDan Williams 	scsi_schedule_eh(ap->scsi_host);
1005e4a9c373SDan Williams 
1006e4a9c373SDan Williams 	DPRINTK("port EH scheduled\n");
1007e4a9c373SDan Williams }
1008e4a9c373SDan Williams EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1009e4a9c373SDan Williams 
1010e4a9c373SDan Williams /**
1011e4a9c373SDan Williams  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1012e4a9c373SDan Williams  * @ap: ATA port to end EH for
1013e4a9c373SDan Williams  *
1014e4a9c373SDan Williams  * In the libata object model there is a 1:1 mapping of ata_port to
1015e4a9c373SDan Williams  * shost, so host fields can be directly manipulated under ap->lock.  In
1016e4a9c373SDan Williams  * the libsas case we need to hold a lock at the ha (SAS host) level to
1017e4a9c373SDan Williams  * coordinate these events.
1018e4a9c373SDan Williams  *
1019e4a9c373SDan Williams  *	LOCKING:
1020e4a9c373SDan Williams  *	spin_lock_irqsave(host lock)
1021e4a9c373SDan Williams  */
1022e4a9c373SDan Williams void ata_std_end_eh(struct ata_port *ap)
1023e4a9c373SDan Williams {
1024e4a9c373SDan Williams 	struct Scsi_Host *host = ap->scsi_host;
1025e4a9c373SDan Williams 
1026e4a9c373SDan Williams 	host->host_eh_scheduled = 0;
1027e4a9c373SDan Williams }
1028e4a9c373SDan Williams EXPORT_SYMBOL(ata_std_end_eh);
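
/*
 * Illustrative sketch only: non-libsas drivers normally inherit these
 * two callbacks from the base port operations, but wiring them up
 * explicitly looks like this.  "ata_eh_sketch_port_ops" is an example
 * name, not a structure libata defines.
 */
static struct ata_port_operations ata_eh_sketch_port_ops __maybe_unused = {
	.sched_eh	= ata_std_sched_eh,	/* schedule EH via scsi_schedule_eh() */
	.end_eh		= ata_std_end_eh,	/* clear host_eh_scheduled when EH ends */
};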
1029e4a9c373SDan Williams 
1030e4a9c373SDan Williams 
1031e4a9c373SDan Williams /**
1032c6fd2807SJeff Garzik  *	ata_port_schedule_eh - schedule error handling without a qc
1033c6fd2807SJeff Garzik  *	@ap: ATA port to schedule EH for
1034c6fd2807SJeff Garzik  *
1035c6fd2807SJeff Garzik  *	Schedule error handling for @ap.  EH will kick in as soon as
1036c6fd2807SJeff Garzik  *	all commands are drained.
1037c6fd2807SJeff Garzik  *
1038c6fd2807SJeff Garzik  *	LOCKING:
1039cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1040c6fd2807SJeff Garzik  */
1041c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap)
1042c6fd2807SJeff Garzik {
1043e4a9c373SDan Williams 	/* see: ata_std_sched_eh, unless you know better */
1044e4a9c373SDan Williams 	ap->ops->sched_eh(ap);
1045c6fd2807SJeff Garzik }
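
/*
 * Illustrative sketch only: how an interrupt handler might report an
 * asynchronous event such as a hotplug PHY change -- record it in the
 * link's eh_info and schedule port EH, all while holding ap->lock.
 * ata_ehi_hotplugged() is the <linux/libata.h> helper that marks the
 * EHI as hotplug-originated.
 */
static inline void ata_eh_sketch_note_hotplug(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	/* caller is assumed to hold ap->lock (the host lock) */
	ata_ehi_push_desc(ehi, "hotplug event");
	ata_ehi_hotplugged(ehi);
	ata_port_schedule_eh(ap);
}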
1046c6fd2807SJeff Garzik 
1047dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1048c6fd2807SJeff Garzik {
1049c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
1050c6fd2807SJeff Garzik 
1051c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1052c6fd2807SJeff Garzik 
10535ddf24c5STejun Heo 	/* we're gonna abort all commands, no need for fast drain */
10545ddf24c5STejun Heo 	ata_eh_set_pending(ap, 0);
10555ddf24c5STejun Heo 
1056c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1057c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1058c6fd2807SJeff Garzik 
1059dbd82616STejun Heo 		if (qc && (!link || qc->dev->link == link)) {
1060c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
1061c6fd2807SJeff Garzik 			ata_qc_complete(qc);
1062c6fd2807SJeff Garzik 			nr_aborted++;
1063c6fd2807SJeff Garzik 		}
1064c6fd2807SJeff Garzik 	}
1065c6fd2807SJeff Garzik 
1066c6fd2807SJeff Garzik 	if (!nr_aborted)
1067c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
1068c6fd2807SJeff Garzik 
1069c6fd2807SJeff Garzik 	return nr_aborted;
1070c6fd2807SJeff Garzik }
1071c6fd2807SJeff Garzik 
1072c6fd2807SJeff Garzik /**
1073dbd82616STejun Heo  *	ata_link_abort - abort all qc's on the link
1074dbd82616STejun Heo  *	@link: ATA link to abort qc's for
1075dbd82616STejun Heo  *
1076dbd82616STejun Heo  *	Abort all active qc's on @link and schedule EH.
1077dbd82616STejun Heo  *
1078dbd82616STejun Heo  *	LOCKING:
1079dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
1080dbd82616STejun Heo  *
1081dbd82616STejun Heo  *	RETURNS:
1082dbd82616STejun Heo  *	Number of aborted qc's.
1083dbd82616STejun Heo  */
1084dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
1085dbd82616STejun Heo {
1086dbd82616STejun Heo 	return ata_do_link_abort(link->ap, link);
1087dbd82616STejun Heo }
1088dbd82616STejun Heo 
1089dbd82616STejun Heo /**
1090dbd82616STejun Heo  *	ata_port_abort - abort all qc's on the port
1091dbd82616STejun Heo  *	@ap: ATA port to abort qc's for
1092dbd82616STejun Heo  *
1093dbd82616STejun Heo  *	Abort all active qc's of @ap and schedule EH.
1094dbd82616STejun Heo  *
1095dbd82616STejun Heo  *	LOCKING:
1096dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
1097dbd82616STejun Heo  *
1098dbd82616STejun Heo  *	RETURNS:
1099dbd82616STejun Heo  *	Number of aborted qc's.
1100dbd82616STejun Heo  */
1101dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
1102dbd82616STejun Heo {
1103dbd82616STejun Heo 	return ata_do_link_abort(ap, NULL);
1104dbd82616STejun Heo }
1105dbd82616STejun Heo 
1106dbd82616STejun Heo /**
1107c6fd2807SJeff Garzik  *	__ata_port_freeze - freeze port
1108c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1109c6fd2807SJeff Garzik  *
1110c6fd2807SJeff Garzik  *	This function is called when HSM violation or some other
1111c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  Frozen port
1112c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  A frozen port
1113c6fd2807SJeff Garzik  *	thawed, which usually follows a successful reset.
1114c6fd2807SJeff Garzik  *
1115c6fd2807SJeff Garzik  *	ap->ops->freeze() callback can be used for freezing the port
1116c6fd2807SJeff Garzik  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1117c6fd2807SJeff Garzik  *	port cannot be frozen hardware-wise, the interrupt handler
1118c6fd2807SJeff Garzik  *	must ack and clear interrupts unconditionally while the port
1119c6fd2807SJeff Garzik  *	is frozen.
1120c6fd2807SJeff Garzik  *
1121c6fd2807SJeff Garzik  *	LOCKING:
1122cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1123c6fd2807SJeff Garzik  */
1124c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
1125c6fd2807SJeff Garzik {
1126c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1127c6fd2807SJeff Garzik 
1128c6fd2807SJeff Garzik 	if (ap->ops->freeze)
1129c6fd2807SJeff Garzik 		ap->ops->freeze(ap);
1130c6fd2807SJeff Garzik 
1131c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FROZEN;
1132c6fd2807SJeff Garzik 
113344877b4eSTejun Heo 	DPRINTK("ata%u port frozen\n", ap->print_id);
1134c6fd2807SJeff Garzik }
1135c6fd2807SJeff Garzik 
1136c6fd2807SJeff Garzik /**
1137c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
1138c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1139c6fd2807SJeff Garzik  *
114054c38444SJeff Garzik  *	Abort and freeze @ap.  The freeze operation must be called
114154c38444SJeff Garzik  *	first, because some hardware requires special operations
114254c38444SJeff Garzik  *	before the taskfile registers are accessible.
1143c6fd2807SJeff Garzik  *
1144c6fd2807SJeff Garzik  *	LOCKING:
1145cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1146c6fd2807SJeff Garzik  *
1147c6fd2807SJeff Garzik  *	RETURNS:
1148c6fd2807SJeff Garzik  *	Number of aborted commands.
1149c6fd2807SJeff Garzik  */
1150c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1151c6fd2807SJeff Garzik {
1152c6fd2807SJeff Garzik 	int nr_aborted;
1153c6fd2807SJeff Garzik 
1154c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1155c6fd2807SJeff Garzik 
1156c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
115754c38444SJeff Garzik 	nr_aborted = ata_port_abort(ap);
1158c6fd2807SJeff Garzik 
1159c6fd2807SJeff Garzik 	return nr_aborted;
1160c6fd2807SJeff Garzik }
1161c6fd2807SJeff Garzik 
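/*
 * Illustrative sketch, not part of libata-eh.c: a driver's interrupt
 * handler reacting to an HSM violation by freezing the port.  The
 * my_drv_* name is hypothetical; ap->lock (the host lock) is assumed to
 * be held by the caller, as ata_port_freeze() requires.
 */
static void my_drv_hsm_violation(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_RESET;

	/* freeze first (masks interrupts via ->freeze), then abort all qc's */
	ata_port_freeze(ap);
}
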
1162c6fd2807SJeff Garzik /**
11637d77b247STejun Heo  *	sata_async_notification - SATA async notification handler
11647d77b247STejun Heo  *	@ap: ATA port where async notification is received
11657d77b247STejun Heo  *
11667d77b247STejun Heo  *	Handler to be called when async notification via SDB FIS is
11677d77b247STejun Heo  *	received.  This function schedules EH if necessary.
11687d77b247STejun Heo  *
11697d77b247STejun Heo  *	LOCKING:
11707d77b247STejun Heo  *	spin_lock_irqsave(host lock)
11717d77b247STejun Heo  *
11727d77b247STejun Heo  *	RETURNS:
11737d77b247STejun Heo  *	1 if EH is scheduled, 0 otherwise.
11747d77b247STejun Heo  */
11757d77b247STejun Heo int sata_async_notification(struct ata_port *ap)
11767d77b247STejun Heo {
11777d77b247STejun Heo 	u32 sntf;
11787d77b247STejun Heo 	int rc;
11797d77b247STejun Heo 
11807d77b247STejun Heo 	if (!(ap->flags & ATA_FLAG_AN))
11817d77b247STejun Heo 		return 0;
11827d77b247STejun Heo 
11837d77b247STejun Heo 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
11847d77b247STejun Heo 	if (rc == 0)
11857d77b247STejun Heo 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
11867d77b247STejun Heo 
1187071f44b1STejun Heo 	if (!sata_pmp_attached(ap) || rc) {
11887d77b247STejun Heo 		/* PMP is not attached or SNTF is not available */
1189071f44b1STejun Heo 		if (!sata_pmp_attached(ap)) {
11907d77b247STejun Heo 			/* PMP is not attached.  Check whether ATAPI
11917d77b247STejun Heo 			 * AN is configured.  If so, notify media
11927d77b247STejun Heo 			 * change.
11937d77b247STejun Heo 			 */
11947d77b247STejun Heo 			struct ata_device *dev = ap->link.device;
11957d77b247STejun Heo 
11967d77b247STejun Heo 			if ((dev->class == ATA_DEV_ATAPI) &&
11977d77b247STejun Heo 			    (dev->flags & ATA_DFLAG_AN))
11987d77b247STejun Heo 				ata_scsi_media_change_notify(dev);
11997d77b247STejun Heo 			return 0;
12007d77b247STejun Heo 		} else {
12017d77b247STejun Heo 			/* PMP is attached but SNTF is not available.
12027d77b247STejun Heo 			 * ATAPI async media change notification is
12037d77b247STejun Heo 			 * not used.  The PMP must be reporting PHY
12047d77b247STejun Heo 			 * status change, schedule EH.
12057d77b247STejun Heo 			 */
12067d77b247STejun Heo 			ata_port_schedule_eh(ap);
12077d77b247STejun Heo 			return 1;
12087d77b247STejun Heo 		}
12097d77b247STejun Heo 	} else {
12107d77b247STejun Heo 		/* PMP is attached and SNTF is available */
12117d77b247STejun Heo 		struct ata_link *link;
12127d77b247STejun Heo 
12137d77b247STejun Heo 		/* check and notify ATAPI AN */
12141eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
12157d77b247STejun Heo 			if (!(sntf & (1 << link->pmp)))
12167d77b247STejun Heo 				continue;
12177d77b247STejun Heo 
12187d77b247STejun Heo 			if ((link->device->class == ATA_DEV_ATAPI) &&
12197d77b247STejun Heo 			    (link->device->flags & ATA_DFLAG_AN))
12207d77b247STejun Heo 				ata_scsi_media_change_notify(link->device);
12217d77b247STejun Heo 		}
12227d77b247STejun Heo 
12237d77b247STejun Heo 		/* If PMP is reporting that PHY status of some
12247d77b247STejun Heo 		 * downstream ports has changed, schedule EH.
12257d77b247STejun Heo 		 */
12267d77b247STejun Heo 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
12277d77b247STejun Heo 			ata_port_schedule_eh(ap);
12287d77b247STejun Heo 			return 1;
12297d77b247STejun Heo 		}
12307d77b247STejun Heo 
12317d77b247STejun Heo 		return 0;
12327d77b247STejun Heo 	}
12337d77b247STejun Heo }
12347d77b247STejun Heo 
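/*
 * Illustrative sketch, not part of libata-eh.c: an LLDD that sets
 * ATA_FLAG_AN forwards a Set Device Bits FIS carrying the notification
 * bit to libata like this.  my_drv_sdb_fis_irq() is a hypothetical
 * helper called under the host lock from the interrupt handler.
 */
static void my_drv_sdb_fis_irq(struct ata_port *ap, bool notify_bit_set)
{
	if (notify_bit_set)
		sata_async_notification(ap);	/* may schedule EH */
}
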
12357d77b247STejun Heo /**
1236c6fd2807SJeff Garzik  *	ata_eh_freeze_port - EH helper to freeze port
1237c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1238c6fd2807SJeff Garzik  *
1239c6fd2807SJeff Garzik  *	Freeze @ap.
1240c6fd2807SJeff Garzik  *
1241c6fd2807SJeff Garzik  *	LOCKING:
1242c6fd2807SJeff Garzik  *	None.
1243c6fd2807SJeff Garzik  */
1244c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1245c6fd2807SJeff Garzik {
1246c6fd2807SJeff Garzik 	unsigned long flags;
1247c6fd2807SJeff Garzik 
1248c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1249c6fd2807SJeff Garzik 		return;
1250c6fd2807SJeff Garzik 
1251c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1252c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1253c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1254c6fd2807SJeff Garzik }
1255c6fd2807SJeff Garzik 
1256c6fd2807SJeff Garzik /**
1257c6fd2807SJeff Garzik  *	ata_eh_thaw_port - EH helper to thaw port
1258c6fd2807SJeff Garzik  *	@ap: ATA port to thaw
1259c6fd2807SJeff Garzik  *
1260c6fd2807SJeff Garzik  *	Thaw frozen port @ap.
1261c6fd2807SJeff Garzik  *
1262c6fd2807SJeff Garzik  *	LOCKING:
1263c6fd2807SJeff Garzik  *	None.
1264c6fd2807SJeff Garzik  */
1265c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1266c6fd2807SJeff Garzik {
1267c6fd2807SJeff Garzik 	unsigned long flags;
1268c6fd2807SJeff Garzik 
1269c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1270c6fd2807SJeff Garzik 		return;
1271c6fd2807SJeff Garzik 
1272c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1273c6fd2807SJeff Garzik 
1274c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1275c6fd2807SJeff Garzik 
1276c6fd2807SJeff Garzik 	if (ap->ops->thaw)
1277c6fd2807SJeff Garzik 		ap->ops->thaw(ap);
1278c6fd2807SJeff Garzik 
1279c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1280c6fd2807SJeff Garzik 
128144877b4eSTejun Heo 	DPRINTK("ata%u port thawed\n", ap->print_id);
1282c6fd2807SJeff Garzik }
1283c6fd2807SJeff Garzik 
1284c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1285c6fd2807SJeff Garzik {
1286c6fd2807SJeff Garzik 	/* nada */
1287c6fd2807SJeff Garzik }
1288c6fd2807SJeff Garzik 
1289c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1290c6fd2807SJeff Garzik {
1291c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1292c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1293c6fd2807SJeff Garzik 	unsigned long flags;
1294c6fd2807SJeff Garzik 
1295c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1296c6fd2807SJeff Garzik 	qc->scsidone = ata_eh_scsidone;
1297c6fd2807SJeff Garzik 	__ata_qc_complete(qc);
1298c6fd2807SJeff Garzik 	WARN_ON(ata_tag_valid(qc->tag));
1299c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1300c6fd2807SJeff Garzik 
1301c6fd2807SJeff Garzik 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1302c6fd2807SJeff Garzik }
1303c6fd2807SJeff Garzik 
1304c6fd2807SJeff Garzik /**
1305c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
1306c6fd2807SJeff Garzik  *	@qc: Command to complete
1307c6fd2807SJeff Garzik  *
1308c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
1309c6fd2807SJeff Garzik  *	completed.  To be used from EH.
1310c6fd2807SJeff Garzik  */
1311c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1312c6fd2807SJeff Garzik {
1313c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1314c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
1315c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1316c6fd2807SJeff Garzik }
1317c6fd2807SJeff Garzik 
1318c6fd2807SJeff Garzik /**
1319c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1320c6fd2807SJeff Garzik  *	@qc: Command to retry
1321c6fd2807SJeff Garzik  *
1322c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
1323c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
1324c6fd2807SJeff Garzik  *
1325c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
1326f13e2201SGwendal Grignou  *	scmd->allowed is incremented for commands which get retried
1327c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
1328c6fd2807SJeff Garzik  */
1329c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1330c6fd2807SJeff Garzik {
1331c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1332f13e2201SGwendal Grignou 	if (!qc->err_mask)
1333f13e2201SGwendal Grignou 		scmd->allowed++;
1334c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1335c6fd2807SJeff Garzik }
1336c6fd2807SJeff Garzik 
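/*
 * Illustrative sketch, not part of libata-eh.c: a minimal end-of-EH
 * sweep showing how the two helpers above split the work.  Failed
 * commands marked for retry go back to the SCSI midlayer via
 * ata_eh_qc_retry(); everything else is completed as-is.  The selection
 * on ATA_QCFLAG_RETRY is only for illustration; libata's real logic in
 * ata_eh_finish() applies more detailed rules.
 */
static void my_finish_failed_qcs(struct ata_port *ap)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->flags & ATA_QCFLAG_RETRY)
			ata_eh_qc_retry(qc);
		else
			ata_eh_qc_complete(qc);
	}
}
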
1337c6fd2807SJeff Garzik /**
1338678afac6STejun Heo  *	ata_dev_disable - disable ATA device
1339678afac6STejun Heo  *	@dev: ATA device to disable
1340678afac6STejun Heo  *
1341678afac6STejun Heo  *	Disable @dev.
1342678afac6STejun Heo  *
1343678afac6STejun Heo  *	LOCKING:
1344678afac6STejun Heo  *	EH context.
1345678afac6STejun Heo  */
1346678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1347678afac6STejun Heo {
1348678afac6STejun Heo 	if (!ata_dev_enabled(dev))
1349678afac6STejun Heo 		return;
1350678afac6STejun Heo 
1351678afac6STejun Heo 	if (ata_msg_drv(dev->link->ap))
1352a9a79dfeSJoe Perches 		ata_dev_warn(dev, "disabled\n");
1353678afac6STejun Heo 	ata_acpi_on_disable(dev);
1354678afac6STejun Heo 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1355678afac6STejun Heo 	dev->class++;
135699cf610aSTejun Heo 
135799cf610aSTejun Heo 	/* From now till the next successful probe, ering is used to
135899cf610aSTejun Heo 	 * track probe failures.  Clear accumulated device error info.
135999cf610aSTejun Heo 	 */
136099cf610aSTejun Heo 	ata_ering_clear(&dev->ering);
1361678afac6STejun Heo }
1362678afac6STejun Heo 
1363678afac6STejun Heo /**
1364c6fd2807SJeff Garzik  *	ata_eh_detach_dev - detach ATA device
1365c6fd2807SJeff Garzik  *	@dev: ATA device to detach
1366c6fd2807SJeff Garzik  *
1367c6fd2807SJeff Garzik  *	Detach @dev.
1368c6fd2807SJeff Garzik  *
1369c6fd2807SJeff Garzik  *	LOCKING:
1370c6fd2807SJeff Garzik  *	None.
1371c6fd2807SJeff Garzik  */
1372fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1373c6fd2807SJeff Garzik {
1374f58229f8STejun Heo 	struct ata_link *link = dev->link;
1375f58229f8STejun Heo 	struct ata_port *ap = link->ap;
137690484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1377c6fd2807SJeff Garzik 	unsigned long flags;
1378c6fd2807SJeff Garzik 
1379c6fd2807SJeff Garzik 	ata_dev_disable(dev);
1380c6fd2807SJeff Garzik 
1381c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1382c6fd2807SJeff Garzik 
1383c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1384c6fd2807SJeff Garzik 
1385c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1386c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1387c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1388c6fd2807SJeff Garzik 	}
1389c6fd2807SJeff Garzik 
139090484ebfSTejun Heo 	/* clear per-dev EH info */
1391f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1392f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
139390484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
139490484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1395c6fd2807SJeff Garzik 
1396c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1397c6fd2807SJeff Garzik }
1398c6fd2807SJeff Garzik 
1399c6fd2807SJeff Garzik /**
1400c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1401955e57dfSTejun Heo  *	@link: target ATA link
1402c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1403c6fd2807SJeff Garzik  *	@action: action about to be performed
1404c6fd2807SJeff Garzik  *
1405c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1406955e57dfSTejun Heo  *	in @link->eh_info such that eh actions are not unnecessarily
1407955e57dfSTejun Heo  *	repeated.
1408c6fd2807SJeff Garzik  *
1409c6fd2807SJeff Garzik  *	LOCKING:
1410c6fd2807SJeff Garzik  *	None.
1411c6fd2807SJeff Garzik  */
1412fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1413c6fd2807SJeff Garzik 			unsigned int action)
1414c6fd2807SJeff Garzik {
1415955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1416955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1417955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1418c6fd2807SJeff Garzik 	unsigned long flags;
1419c6fd2807SJeff Garzik 
1420c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1421c6fd2807SJeff Garzik 
1422955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1423c6fd2807SJeff Garzik 
1424a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED.  Ignore actions on
1425a568d1d2STejun Heo 	 * slave links as master will do them again.
1426a568d1d2STejun Heo 	 */
1427a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1428c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1429c6fd2807SJeff Garzik 
1430c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1431c6fd2807SJeff Garzik }
1432c6fd2807SJeff Garzik 
1433c6fd2807SJeff Garzik /**
1434c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
1435c6fd2807SJeff Garzik  *	@link: target ATA link
1436c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1437c6fd2807SJeff Garzik  *	@action: action just completed
1438c6fd2807SJeff Garzik  *
1439c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1440955e57dfSTejun Heo  *	in @link->eh_context.
1441c6fd2807SJeff Garzik  *
1442c6fd2807SJeff Garzik  *	LOCKING:
1443c6fd2807SJeff Garzik  *	None.
1444c6fd2807SJeff Garzik  */
1445fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1446c6fd2807SJeff Garzik 		 unsigned int action)
1447c6fd2807SJeff Garzik {
1448955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
14499af5c9c9STejun Heo 
1450955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, &ehc->i, action);
1451c6fd2807SJeff Garzik }
1452c6fd2807SJeff Garzik 
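/*
 * Illustrative sketch, not part of libata-eh.c: ata_eh_about_to_do() and
 * ata_eh_done() are meant to bracket an EH action so it is not repeated
 * needlessly.  my_eh_reset_link() and my_do_reset are hypothetical
 * stand-ins for the recovery path that wraps a reset this way.
 */
static int my_eh_reset_link(struct ata_link *link,
			    int (*my_do_reset)(struct ata_link *link))
{
	int rc;

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
	rc = my_do_reset(link);
	if (rc == 0)
		ata_eh_done(link, NULL, ATA_EH_RESET);

	return rc;
}
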
1453c6fd2807SJeff Garzik /**
1454c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
1455c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
1456c6fd2807SJeff Garzik  *
1457c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
1458c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
1459c6fd2807SJeff Garzik  *	error is reported.
1460c6fd2807SJeff Garzik  *
1461c6fd2807SJeff Garzik  *	LOCKING:
1462c6fd2807SJeff Garzik  *	None.
1463c6fd2807SJeff Garzik  *
1464c6fd2807SJeff Garzik  *	RETURNS:
1465c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
1466c6fd2807SJeff Garzik  */
1467c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1468c6fd2807SJeff Garzik {
1469c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
1470c6fd2807SJeff Garzik 		return "host bus error";
1471c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
1472c6fd2807SJeff Garzik 		return "ATA bus error";
1473c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
1474c6fd2807SJeff Garzik 		return "timeout";
1475c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
1476c6fd2807SJeff Garzik 		return "HSM violation";
1477c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
1478c6fd2807SJeff Garzik 		return "internal error";
1479c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
1480c6fd2807SJeff Garzik 		return "media error";
1481c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
1482c6fd2807SJeff Garzik 		return "invalid argument";
1483c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
1484c6fd2807SJeff Garzik 		return "device error";
1485c6fd2807SJeff Garzik 	return "unknown error";
1486c6fd2807SJeff Garzik }
1487c6fd2807SJeff Garzik 
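/*
 * Illustrative sketch, not part of libata-eh.c: ata_err_string() is
 * convenient when logging a failed command; only the most severe bit of
 * err_mask is described.  my_log_qc_failure() is a hypothetical helper.
 */
static void my_log_qc_failure(struct ata_queued_cmd *qc)
{
	ata_dev_warn(qc->dev, "cmd 0x%02x failed (%s)\n",
		     qc->tf.command, ata_err_string(qc->err_mask));
}
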
1488c6fd2807SJeff Garzik /**
1489c6fd2807SJeff Garzik  *	ata_read_log_page - read a specific log page
1490c6fd2807SJeff Garzik  *	@dev: target device
149165fe1f0fSShane Huang  *	@log: log to read
1492c6fd2807SJeff Garzik  *	@page: page to read
1493c6fd2807SJeff Garzik  *	@buf: buffer to store read page
1494c6fd2807SJeff Garzik  *	@sectors: number of sectors to read
1495c6fd2807SJeff Garzik  *
1496c6fd2807SJeff Garzik  *	Read log page using READ_LOG_EXT command.
1497c6fd2807SJeff Garzik  *
1498c6fd2807SJeff Garzik  *	LOCKING:
1499c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1500c6fd2807SJeff Garzik  *
1501c6fd2807SJeff Garzik  *	RETURNS:
1502c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
1503c6fd2807SJeff Garzik  */
150465fe1f0fSShane Huang unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1505c6fd2807SJeff Garzik 			       u8 page, void *buf, unsigned int sectors)
1506c6fd2807SJeff Garzik {
1507c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1508c6fd2807SJeff Garzik 	unsigned int err_mask;
1509c6fd2807SJeff Garzik 
151065fe1f0fSShane Huang 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1511c6fd2807SJeff Garzik 
1512c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1513c6fd2807SJeff Garzik 	tf.command = ATA_CMD_READ_LOG_EXT;
151465fe1f0fSShane Huang 	tf.lbal = log;
151565fe1f0fSShane Huang 	tf.lbam = page;
1516c6fd2807SJeff Garzik 	tf.nsect = sectors;
1517c6fd2807SJeff Garzik 	tf.hob_nsect = sectors >> 8;
1518c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1519c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
1520c6fd2807SJeff Garzik 
1521c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
15222b789108STejun Heo 				     buf, sectors * ATA_SECT_SIZE, 0);
1523c6fd2807SJeff Garzik 
1524c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1525c6fd2807SJeff Garzik 	return err_mask;
1526c6fd2807SJeff Garzik }
1527c6fd2807SJeff Garzik 
1528c6fd2807SJeff Garzik /**
1529c6fd2807SJeff Garzik  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1530c6fd2807SJeff Garzik  *	@dev: Device to read log page 10h from
1531c6fd2807SJeff Garzik  *	@tag: Resulting tag of the failed command
1532c6fd2807SJeff Garzik  *	@tf: Resulting taskfile registers of the failed command
1533c6fd2807SJeff Garzik  *
1534c6fd2807SJeff Garzik  *	Read log page 10h to obtain NCQ error details and clear error
1535c6fd2807SJeff Garzik  *	condition.
1536c6fd2807SJeff Garzik  *
1537c6fd2807SJeff Garzik  *	LOCKING:
1538c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1539c6fd2807SJeff Garzik  *
1540c6fd2807SJeff Garzik  *	RETURNS:
1541c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1542c6fd2807SJeff Garzik  */
1543c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev,
1544c6fd2807SJeff Garzik 			       int *tag, struct ata_taskfile *tf)
1545c6fd2807SJeff Garzik {
15469af5c9c9STejun Heo 	u8 *buf = dev->link->ap->sector_buf;
1547c6fd2807SJeff Garzik 	unsigned int err_mask;
1548c6fd2807SJeff Garzik 	u8 csum;
1549c6fd2807SJeff Garzik 	int i;
1550c6fd2807SJeff Garzik 
155165fe1f0fSShane Huang 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1552c6fd2807SJeff Garzik 	if (err_mask)
1553c6fd2807SJeff Garzik 		return -EIO;
1554c6fd2807SJeff Garzik 
1555c6fd2807SJeff Garzik 	csum = 0;
1556c6fd2807SJeff Garzik 	for (i = 0; i < ATA_SECT_SIZE; i++)
1557c6fd2807SJeff Garzik 		csum += buf[i];
1558c6fd2807SJeff Garzik 	if (csum)
1559a9a79dfeSJoe Perches 		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1560a9a79dfeSJoe Perches 			     csum);
1561c6fd2807SJeff Garzik 
1562c6fd2807SJeff Garzik 	if (buf[0] & 0x80)
1563c6fd2807SJeff Garzik 		return -ENOENT;
1564c6fd2807SJeff Garzik 
1565c6fd2807SJeff Garzik 	*tag = buf[0] & 0x1f;
1566c6fd2807SJeff Garzik 
1567c6fd2807SJeff Garzik 	tf->command = buf[2];
1568c6fd2807SJeff Garzik 	tf->feature = buf[3];
1569c6fd2807SJeff Garzik 	tf->lbal = buf[4];
1570c6fd2807SJeff Garzik 	tf->lbam = buf[5];
1571c6fd2807SJeff Garzik 	tf->lbah = buf[6];
1572c6fd2807SJeff Garzik 	tf->device = buf[7];
1573c6fd2807SJeff Garzik 	tf->hob_lbal = buf[8];
1574c6fd2807SJeff Garzik 	tf->hob_lbam = buf[9];
1575c6fd2807SJeff Garzik 	tf->hob_lbah = buf[10];
1576c6fd2807SJeff Garzik 	tf->nsect = buf[12];
1577c6fd2807SJeff Garzik 	tf->hob_nsect = buf[13];
1578c6fd2807SJeff Garzik 
1579c6fd2807SJeff Garzik 	return 0;
1580c6fd2807SJeff Garzik }
1581c6fd2807SJeff Garzik 
1582c6fd2807SJeff Garzik /**
158311fc33daSTejun Heo  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
158411fc33daSTejun Heo  *	@dev: target ATAPI device
158511fc33daSTejun Heo  *	@r_sense_key: out parameter for sense_key
158611fc33daSTejun Heo  *
158711fc33daSTejun Heo  *	Perform ATAPI TEST_UNIT_READY.
158811fc33daSTejun Heo  *
158911fc33daSTejun Heo  *	LOCKING:
159011fc33daSTejun Heo  *	EH context (may sleep).
159111fc33daSTejun Heo  *
159211fc33daSTejun Heo  *	RETURNS:
159311fc33daSTejun Heo  *	0 on success, AC_ERR_* mask on failure.
159411fc33daSTejun Heo  */
15953dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
159611fc33daSTejun Heo {
159711fc33daSTejun Heo 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
159811fc33daSTejun Heo 	struct ata_taskfile tf;
159911fc33daSTejun Heo 	unsigned int err_mask;
160011fc33daSTejun Heo 
160111fc33daSTejun Heo 	ata_tf_init(dev, &tf);
160211fc33daSTejun Heo 
160311fc33daSTejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
160411fc33daSTejun Heo 	tf.command = ATA_CMD_PACKET;
160511fc33daSTejun Heo 	tf.protocol = ATAPI_PROT_NODATA;
160611fc33daSTejun Heo 
160711fc33daSTejun Heo 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
160811fc33daSTejun Heo 	if (err_mask == AC_ERR_DEV)
160911fc33daSTejun Heo 		*r_sense_key = tf.feature >> 4;
161011fc33daSTejun Heo 	return err_mask;
161111fc33daSTejun Heo }
161211fc33daSTejun Heo 
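/*
 * Illustrative sketch, not part of libata-eh.c: atapi_eh_tur() can be
 * used to poll for a pending UNIT ATTENTION condition, much as libata's
 * own post-reset handling does.  my_dev_has_unit_attention() is a
 * hypothetical helper running in EH context.
 */
static bool my_dev_has_unit_attention(struct ata_device *dev)
{
	u8 sense_key = 0;
	unsigned int err_mask = atapi_eh_tur(dev, &sense_key);

	return err_mask == AC_ERR_DEV && sense_key == UNIT_ATTENTION;
}
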
161311fc33daSTejun Heo /**
1614c6fd2807SJeff Garzik  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1615c6fd2807SJeff Garzik  *	@dev: device to perform REQUEST_SENSE to
1616c6fd2807SJeff Garzik  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
16173eabddb8STejun Heo  *	@dfl_sense_key: default sense key to use
1618c6fd2807SJeff Garzik  *
1619c6fd2807SJeff Garzik  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1620c6fd2807SJeff Garzik  *	SENSE.  This function is EH helper.
1621c6fd2807SJeff Garzik  *
1622c6fd2807SJeff Garzik  *	LOCKING:
1623c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1624c6fd2807SJeff Garzik  *
1625c6fd2807SJeff Garzik  *	RETURNS:
1626c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask on failure
1627c6fd2807SJeff Garzik  */
16283dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev,
16293eabddb8STejun Heo 					   u8 *sense_buf, u8 dfl_sense_key)
1630c6fd2807SJeff Garzik {
16313eabddb8STejun Heo 	u8 cdb[ATAPI_CDB_LEN] =
16323eabddb8STejun Heo 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
16339af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1634c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1635c6fd2807SJeff Garzik 
1636c6fd2807SJeff Garzik 	DPRINTK("ATAPI request sense\n");
1637c6fd2807SJeff Garzik 
1638c6fd2807SJeff Garzik 	/* FIXME: is this needed? */
1639c6fd2807SJeff Garzik 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1640c6fd2807SJeff Garzik 
164156287768SAlbert Lee 	/* initialize sense_buf with the error register,
164256287768SAlbert Lee 	 * for the case where they are -not- overwritten
164356287768SAlbert Lee 	 */
1644c6fd2807SJeff Garzik 	sense_buf[0] = 0x70;
16453eabddb8STejun Heo 	sense_buf[2] = dfl_sense_key;
164656287768SAlbert Lee 
164756287768SAlbert Lee 	/* some devices time out if garbage left in tf */
164856287768SAlbert Lee 	ata_tf_init(dev, &tf);
1649c6fd2807SJeff Garzik 
1650c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1651c6fd2807SJeff Garzik 	tf.command = ATA_CMD_PACKET;
1652c6fd2807SJeff Garzik 
1653c6fd2807SJeff Garzik 	/* is it pointless to prefer PIO for "safety reasons"? */
1654c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_DMA) {
16550dc36888STejun Heo 		tf.protocol = ATAPI_PROT_DMA;
1656c6fd2807SJeff Garzik 		tf.feature |= ATAPI_PKT_DMA;
1657c6fd2807SJeff Garzik 	} else {
16580dc36888STejun Heo 		tf.protocol = ATAPI_PROT_PIO;
1659f2dfc1a1STejun Heo 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1660f2dfc1a1STejun Heo 		tf.lbah = 0;
1661c6fd2807SJeff Garzik 	}
1662c6fd2807SJeff Garzik 
1663c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
16642b789108STejun Heo 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1665c6fd2807SJeff Garzik }
1666c6fd2807SJeff Garzik 
1667c6fd2807SJeff Garzik /**
1668c6fd2807SJeff Garzik  *	ata_eh_analyze_serror - analyze SError for a failed port
16690260731fSTejun Heo  *	@link: ATA link to analyze SError for
1670c6fd2807SJeff Garzik  *
1671c6fd2807SJeff Garzik  *	Analyze SError if available and further determine cause of
1672c6fd2807SJeff Garzik  *	failure.
1673c6fd2807SJeff Garzik  *
1674c6fd2807SJeff Garzik  *	LOCKING:
1675c6fd2807SJeff Garzik  *	None.
1676c6fd2807SJeff Garzik  */
16770260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1678c6fd2807SJeff Garzik {
16790260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1680c6fd2807SJeff Garzik 	u32 serror = ehc->i.serror;
1681c6fd2807SJeff Garzik 	unsigned int err_mask = 0, action = 0;
1682f9df58cbSTejun Heo 	u32 hotplug_mask;
1683c6fd2807SJeff Garzik 
1684e0614db2STejun Heo 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1685c6fd2807SJeff Garzik 		err_mask |= AC_ERR_ATA_BUS;
1686cf480626STejun Heo 		action |= ATA_EH_RESET;
1687c6fd2807SJeff Garzik 	}
1688c6fd2807SJeff Garzik 	if (serror & SERR_PROTOCOL) {
1689c6fd2807SJeff Garzik 		err_mask |= AC_ERR_HSM;
1690cf480626STejun Heo 		action |= ATA_EH_RESET;
1691c6fd2807SJeff Garzik 	}
1692c6fd2807SJeff Garzik 	if (serror & SERR_INTERNAL) {
1693c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1694cf480626STejun Heo 		action |= ATA_EH_RESET;
1695c6fd2807SJeff Garzik 	}
1696f9df58cbSTejun Heo 
1697f9df58cbSTejun Heo 	/* Determine whether a hotplug event has occurred.  Both
1698f9df58cbSTejun Heo 	 * SError.N/X are considered hotplug events for enabled or
1699f9df58cbSTejun Heo 	 * host links.  For disabled PMP links, only N bit is
1700f9df58cbSTejun Heo 	 * considered as X bit is left at 1 for link plugging.
1701f9df58cbSTejun Heo 	 */
1702eb0e85e3STejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
17036b7ae954STejun Heo 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
17046b7ae954STejun Heo 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1705f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1706f9df58cbSTejun Heo 	else
1707f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG;
1708f9df58cbSTejun Heo 
1709f9df58cbSTejun Heo 	if (serror & hotplug_mask)
1710c6fd2807SJeff Garzik 		ata_ehi_hotplugged(&ehc->i);
1711c6fd2807SJeff Garzik 
1712c6fd2807SJeff Garzik 	ehc->i.err_mask |= err_mask;
1713c6fd2807SJeff Garzik 	ehc->i.action |= action;
1714c6fd2807SJeff Garzik }
1715c6fd2807SJeff Garzik 
1716c6fd2807SJeff Garzik /**
1717c6fd2807SJeff Garzik  *	ata_eh_analyze_ncq_error - analyze NCQ error
17180260731fSTejun Heo  *	@link: ATA link to analyze NCQ error for
1719c6fd2807SJeff Garzik  *
1720c6fd2807SJeff Garzik  *	Read log page 10h, determine the offending qc and acquire
1721c6fd2807SJeff Garzik  *	error status TF.  For NCQ device errors, all LLDDs have to do
1722c6fd2807SJeff Garzik  *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
1723c6fd2807SJeff Garzik  *	care of the rest.
1724c6fd2807SJeff Garzik  *
1725c6fd2807SJeff Garzik  *	LOCKING:
1726c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1727c6fd2807SJeff Garzik  */
172810acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1729c6fd2807SJeff Garzik {
17300260731fSTejun Heo 	struct ata_port *ap = link->ap;
17310260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
17320260731fSTejun Heo 	struct ata_device *dev = link->device;
1733c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1734c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1735c6fd2807SJeff Garzik 	int tag, rc;
1736c6fd2807SJeff Garzik 
1737c6fd2807SJeff Garzik 	/* if frozen, we can't do much */
1738c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1739c6fd2807SJeff Garzik 		return;
1740c6fd2807SJeff Garzik 
1741c6fd2807SJeff Garzik 	/* is it NCQ device error? */
17420260731fSTejun Heo 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1743c6fd2807SJeff Garzik 		return;
1744c6fd2807SJeff Garzik 
1745c6fd2807SJeff Garzik 	/* has LLDD analyzed already? */
1746c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1747c6fd2807SJeff Garzik 		qc = __ata_qc_from_tag(ap, tag);
1748c6fd2807SJeff Garzik 
1749c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1750c6fd2807SJeff Garzik 			continue;
1751c6fd2807SJeff Garzik 
1752c6fd2807SJeff Garzik 		if (qc->err_mask)
1753c6fd2807SJeff Garzik 			return;
1754c6fd2807SJeff Garzik 	}
1755c6fd2807SJeff Garzik 
1756c6fd2807SJeff Garzik 	/* okay, this error is ours */
1757a09bf4cdSJeff Garzik 	memset(&tf, 0, sizeof(tf));
1758c6fd2807SJeff Garzik 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1759c6fd2807SJeff Garzik 	if (rc) {
1760a9a79dfeSJoe Perches 		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1761a9a79dfeSJoe Perches 			     rc);
1762c6fd2807SJeff Garzik 		return;
1763c6fd2807SJeff Garzik 	}
1764c6fd2807SJeff Garzik 
17650260731fSTejun Heo 	if (!(link->sactive & (1 << tag))) {
1766a9a79dfeSJoe Perches 		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1767a9a79dfeSJoe Perches 			     tag);
1768c6fd2807SJeff Garzik 		return;
1769c6fd2807SJeff Garzik 	}
1770c6fd2807SJeff Garzik 
1771c6fd2807SJeff Garzik 	/* we've got the perpetrator, condemn it */
1772c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1773c6fd2807SJeff Garzik 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1774a6116c9eSMark Lord 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
17755335b729STejun Heo 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1776c6fd2807SJeff Garzik 	ehc->i.err_mask &= ~AC_ERR_DEV;
1777c6fd2807SJeff Garzik }
1778c6fd2807SJeff Garzik 
1779c6fd2807SJeff Garzik /**
1780c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1781c6fd2807SJeff Garzik  *	@qc: qc to analyze
1782c6fd2807SJeff Garzik  *	@tf: Taskfile registers to analyze
1783c6fd2807SJeff Garzik  *
1784c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1785c6fd2807SJeff Garzik  *	failure.  This function also requests ATAPI sense data if
178625985edcSLucas De Marchi  *	available.
1787c6fd2807SJeff Garzik  *
1788c6fd2807SJeff Garzik  *	LOCKING:
1789c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1790c6fd2807SJeff Garzik  *
1791c6fd2807SJeff Garzik  *	RETURNS:
1792c6fd2807SJeff Garzik  *	Determined recovery action
1793c6fd2807SJeff Garzik  */
1794c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1795c6fd2807SJeff Garzik 				      const struct ata_taskfile *tf)
1796c6fd2807SJeff Garzik {
1797c6fd2807SJeff Garzik 	unsigned int tmp, action = 0;
1798c6fd2807SJeff Garzik 	u8 stat = tf->command, err = tf->feature;
1799c6fd2807SJeff Garzik 
1800c6fd2807SJeff Garzik 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1801c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_HSM;
1802cf480626STejun Heo 		return ATA_EH_RESET;
1803c6fd2807SJeff Garzik 	}
1804c6fd2807SJeff Garzik 
1805a51d644aSTejun Heo 	if (stat & (ATA_ERR | ATA_DF))
1806a51d644aSTejun Heo 		qc->err_mask |= AC_ERR_DEV;
1807a51d644aSTejun Heo 	else
1808c6fd2807SJeff Garzik 		return 0;
1809c6fd2807SJeff Garzik 
1810c6fd2807SJeff Garzik 	switch (qc->dev->class) {
1811c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
18129162c657SHannes Reinecke 	case ATA_DEV_ZAC:
1813c6fd2807SJeff Garzik 		if (err & ATA_ICRC)
1814c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_ATA_BUS;
1815eec7e1c1SAlexey Asemov 		if (err & (ATA_UNC | ATA_AMNF))
1816c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_MEDIA;
1817c6fd2807SJeff Garzik 		if (err & ATA_IDNF)
1818c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_INVALID;
1819c6fd2807SJeff Garzik 		break;
1820c6fd2807SJeff Garzik 
1821c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1822a569a30dSTejun Heo 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
18233eabddb8STejun Heo 			tmp = atapi_eh_request_sense(qc->dev,
18243eabddb8STejun Heo 						qc->scsicmd->sense_buffer,
18253eabddb8STejun Heo 						qc->result_tf.feature >> 4);
1826c6fd2807SJeff Garzik 			if (!tmp) {
1827a569a30dSTejun Heo 				/* ATA_QCFLAG_SENSE_VALID is used to
1828a569a30dSTejun Heo 				 * tell atapi_qc_complete() that sense
1829a569a30dSTejun Heo 				 * data is already valid.
1830c6fd2807SJeff Garzik 				 *
1831c6fd2807SJeff Garzik 				 * TODO: interpret sense data and set
1832c6fd2807SJeff Garzik 				 * appropriate err_mask.
1833c6fd2807SJeff Garzik 				 */
1834c6fd2807SJeff Garzik 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1835c6fd2807SJeff Garzik 			} else
1836c6fd2807SJeff Garzik 				qc->err_mask |= tmp;
1837c6fd2807SJeff Garzik 		}
1838a569a30dSTejun Heo 	}
1839c6fd2807SJeff Garzik 
1840c6fd2807SJeff Garzik 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1841cf480626STejun Heo 		action |= ATA_EH_RESET;
1842c6fd2807SJeff Garzik 
1843c6fd2807SJeff Garzik 	return action;
1844c6fd2807SJeff Garzik }
1845c6fd2807SJeff Garzik 
184676326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
184776326ac1STejun Heo 				   int *xfer_ok)
1848c6fd2807SJeff Garzik {
184976326ac1STejun Heo 	int base = 0;
185076326ac1STejun Heo 
185176326ac1STejun Heo 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
185276326ac1STejun Heo 		*xfer_ok = 1;
185376326ac1STejun Heo 
185476326ac1STejun Heo 	if (!*xfer_ok)
185575f9cafcSTejun Heo 		base = ATA_ECAT_DUBIOUS_NONE;
185676326ac1STejun Heo 
18577d47e8d4STejun Heo 	if (err_mask & AC_ERR_ATA_BUS)
185876326ac1STejun Heo 		return base + ATA_ECAT_ATA_BUS;
1859c6fd2807SJeff Garzik 
18607d47e8d4STejun Heo 	if (err_mask & AC_ERR_TIMEOUT)
186176326ac1STejun Heo 		return base + ATA_ECAT_TOUT_HSM;
18627d47e8d4STejun Heo 
18633884f7b0STejun Heo 	if (eflags & ATA_EFLAG_IS_IO) {
18647d47e8d4STejun Heo 		if (err_mask & AC_ERR_HSM)
186576326ac1STejun Heo 			return base + ATA_ECAT_TOUT_HSM;
18667d47e8d4STejun Heo 		if ((err_mask &
18677d47e8d4STejun Heo 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
186876326ac1STejun Heo 			return base + ATA_ECAT_UNK_DEV;
1869c6fd2807SJeff Garzik 	}
1870c6fd2807SJeff Garzik 
1871c6fd2807SJeff Garzik 	return 0;
1872c6fd2807SJeff Garzik }
1873c6fd2807SJeff Garzik 
18747d47e8d4STejun Heo struct speed_down_verdict_arg {
1875c6fd2807SJeff Garzik 	u64 since;
187676326ac1STejun Heo 	int xfer_ok;
18773884f7b0STejun Heo 	int nr_errors[ATA_ECAT_NR];
1878c6fd2807SJeff Garzik };
1879c6fd2807SJeff Garzik 
18807d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1881c6fd2807SJeff Garzik {
18827d47e8d4STejun Heo 	struct speed_down_verdict_arg *arg = void_arg;
188376326ac1STejun Heo 	int cat;
1884c6fd2807SJeff Garzik 
1885d9027470SGwendal Grignou 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1886c6fd2807SJeff Garzik 		return -1;
1887c6fd2807SJeff Garzik 
188876326ac1STejun Heo 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
188976326ac1STejun Heo 				      &arg->xfer_ok);
18907d47e8d4STejun Heo 	arg->nr_errors[cat]++;
189176326ac1STejun Heo 
1892c6fd2807SJeff Garzik 	return 0;
1893c6fd2807SJeff Garzik }
1894c6fd2807SJeff Garzik 
1895c6fd2807SJeff Garzik /**
18967d47e8d4STejun Heo  *	ata_eh_speed_down_verdict - Determine speed down verdict
1897c6fd2807SJeff Garzik  *	@dev: Device of interest
1898c6fd2807SJeff Garzik  *
1899c6fd2807SJeff Garzik  *	This function examines the error ring of @dev and determines
19007d47e8d4STejun Heo  *	whether NCQ needs to be turned off, whether the transfer speed
19017d47e8d4STejun Heo  *	should be stepped down, or whether a fallback to PIO is necessary.
1902c6fd2807SJeff Garzik  *
19033884f7b0STejun Heo  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1904c6fd2807SJeff Garzik  *
19053884f7b0STejun Heo  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
19063884f7b0STejun Heo  *			  IO commands
19077d47e8d4STejun Heo  *
19083884f7b0STejun Heo  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1909c6fd2807SJeff Garzik  *
191076326ac1STejun Heo  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
191176326ac1STejun Heo  *			  data transfer hasn't been verified.
191276326ac1STejun Heo  *
19133884f7b0STejun Heo  *	Verdicts are
19147d47e8d4STejun Heo  *
19153884f7b0STejun Heo  *	NCQ_OFF		: Turn off NCQ.
19167d47e8d4STejun Heo  *
19173884f7b0STejun Heo  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
19183884f7b0STejun Heo  *			  to PIO.
19193884f7b0STejun Heo  *
19203884f7b0STejun Heo  *	FALLBACK_TO_PIO	: Fall back to PIO.
19213884f7b0STejun Heo  *
19223884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
192376326ac1STejun Heo  *	taken per error.  An action triggered by non-DUBIOUS errors
192476326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
192576326ac1STejun Heo  *	This is to expedite speed down decisions right after a device is
192676326ac1STejun Heo  *	initially configured.
19273884f7b0STejun Heo  *
192876326ac1STejun Heo  *	The following are the speed down rules.  #1 and #2 deal with
192976326ac1STejun Heo  *	DUBIOUS errors.
193076326ac1STejun Heo  *
193176326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
193276326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
193376326ac1STejun Heo  *
193476326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
193576326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
193676326ac1STejun Heo  *
193776326ac1STejun Heo  *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
193825985edcSLucas De Marchi  *	   occurred during last 5 mins, FALLBACK_TO_PIO
19393884f7b0STejun Heo  *
194076326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
19413884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
19423884f7b0STejun Heo  *
194376326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
19443884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
19457d47e8d4STejun Heo  *
1946c6fd2807SJeff Garzik  *	LOCKING:
1947c6fd2807SJeff Garzik  *	Inherited from caller.
1948c6fd2807SJeff Garzik  *
1949c6fd2807SJeff Garzik  *	RETURNS:
19507d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
1951c6fd2807SJeff Garzik  */
19527d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1953c6fd2807SJeff Garzik {
19547d47e8d4STejun Heo 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
19557d47e8d4STejun Heo 	u64 j64 = get_jiffies_64();
19567d47e8d4STejun Heo 	struct speed_down_verdict_arg arg;
19577d47e8d4STejun Heo 	unsigned int verdict = 0;
1958c6fd2807SJeff Garzik 
19593884f7b0STejun Heo 	/* scan past 5 mins of error history */
19603884f7b0STejun Heo 	memset(&arg, 0, sizeof(arg));
19613884f7b0STejun Heo 	arg.since = j64 - min(j64, j5mins);
19623884f7b0STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
19633884f7b0STejun Heo 
196476326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
196576326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
196676326ac1STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
196776326ac1STejun Heo 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
196876326ac1STejun Heo 
196976326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
197076326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
197176326ac1STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
197276326ac1STejun Heo 
19733884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
19743884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1975663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
19763884f7b0STejun Heo 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
19773884f7b0STejun Heo 
19787d47e8d4STejun Heo 	/* scan past 10 mins of error history */
1979c6fd2807SJeff Garzik 	memset(&arg, 0, sizeof(arg));
19807d47e8d4STejun Heo 	arg.since = j64 - min(j64, j10mins);
19817d47e8d4STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1982c6fd2807SJeff Garzik 
19833884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
19843884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
19857d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF;
19863884f7b0STejun Heo 
19873884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
19883884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1989663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
19907d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1991c6fd2807SJeff Garzik 
19927d47e8d4STejun Heo 	return verdict;
1993c6fd2807SJeff Garzik }
1994c6fd2807SJeff Garzik 
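/*
 * Illustrative worked example, not part of libata-eh.c: with the rules
 * documented above, two DUBIOUS_UNK_DEV errors within the last 5
 * minutes produce ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS (rule
 * #2 only), while seven plain TOUT_HSM errors in the same window trip
 * rules #3, #4 and #5 at once, producing ATA_EH_SPDN_FALLBACK_TO_PIO |
 * ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_SPEED_DOWN.
 */
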
1995c6fd2807SJeff Garzik /**
1996c6fd2807SJeff Garzik  *	ata_eh_speed_down - record error and speed down if necessary
1997c6fd2807SJeff Garzik  *	@dev: Failed device
19983884f7b0STejun Heo  *	@eflags: mask of ATA_EFLAG_* flags
1999c6fd2807SJeff Garzik  *	@err_mask: err_mask of the error
2000c6fd2807SJeff Garzik  *
2001c6fd2807SJeff Garzik  *	Record error and examine error history to determine whether
2002c6fd2807SJeff Garzik  *	adjusting transmission speed is necessary.  It also sets
2003c6fd2807SJeff Garzik  *	transmission limits appropriately if such adjustment is
2004c6fd2807SJeff Garzik  *	necessary.
2005c6fd2807SJeff Garzik  *
2006c6fd2807SJeff Garzik  *	LOCKING:
2007c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2008c6fd2807SJeff Garzik  *
2009c6fd2807SJeff Garzik  *	RETURNS:
20107d47e8d4STejun Heo  *	Determined recovery action.
2011c6fd2807SJeff Garzik  */
20123884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
20133884f7b0STejun Heo 				unsigned int eflags, unsigned int err_mask)
2014c6fd2807SJeff Garzik {
2015b1c72916STejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
201676326ac1STejun Heo 	int xfer_ok = 0;
20177d47e8d4STejun Heo 	unsigned int verdict;
20187d47e8d4STejun Heo 	unsigned int action = 0;
20197d47e8d4STejun Heo 
20207d47e8d4STejun Heo 	/* don't bother if Cat-0 error */
202176326ac1STejun Heo 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2022c6fd2807SJeff Garzik 		return 0;
2023c6fd2807SJeff Garzik 
2024c6fd2807SJeff Garzik 	/* record error and determine whether speed down is necessary */
20253884f7b0STejun Heo 	ata_ering_record(&dev->ering, eflags, err_mask);
20267d47e8d4STejun Heo 	verdict = ata_eh_speed_down_verdict(dev);
2027c6fd2807SJeff Garzik 
20287d47e8d4STejun Heo 	/* turn off NCQ? */
20297d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
20307d47e8d4STejun Heo 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
20317d47e8d4STejun Heo 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
20327d47e8d4STejun Heo 		dev->flags |= ATA_DFLAG_NCQ_OFF;
2033a9a79dfeSJoe Perches 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
20347d47e8d4STejun Heo 		goto done;
20357d47e8d4STejun Heo 	}
2036c6fd2807SJeff Garzik 
20377d47e8d4STejun Heo 	/* speed down? */
20387d47e8d4STejun Heo 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2039c6fd2807SJeff Garzik 		/* speed down SATA link speed if possible */
2040a07d499bSTejun Heo 		if (sata_down_spd_limit(link, 0) == 0) {
2041cf480626STejun Heo 			action |= ATA_EH_RESET;
20427d47e8d4STejun Heo 			goto done;
20437d47e8d4STejun Heo 		}
2044c6fd2807SJeff Garzik 
2045c6fd2807SJeff Garzik 		/* lower transfer mode */
20467d47e8d4STejun Heo 		if (dev->spdn_cnt < 2) {
20477d47e8d4STejun Heo 			static const int dma_dnxfer_sel[] =
20487d47e8d4STejun Heo 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
20497d47e8d4STejun Heo 			static const int pio_dnxfer_sel[] =
20507d47e8d4STejun Heo 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
20517d47e8d4STejun Heo 			int sel;
2052c6fd2807SJeff Garzik 
20537d47e8d4STejun Heo 			if (dev->xfer_shift != ATA_SHIFT_PIO)
20547d47e8d4STejun Heo 				sel = dma_dnxfer_sel[dev->spdn_cnt];
20557d47e8d4STejun Heo 			else
20567d47e8d4STejun Heo 				sel = pio_dnxfer_sel[dev->spdn_cnt];
20577d47e8d4STejun Heo 
20587d47e8d4STejun Heo 			dev->spdn_cnt++;
20597d47e8d4STejun Heo 
20607d47e8d4STejun Heo 			if (ata_down_xfermask_limit(dev, sel) == 0) {
2061cf480626STejun Heo 				action |= ATA_EH_RESET;
20627d47e8d4STejun Heo 				goto done;
20637d47e8d4STejun Heo 			}
20647d47e8d4STejun Heo 		}
20657d47e8d4STejun Heo 	}
20667d47e8d4STejun Heo 
20677d47e8d4STejun Heo 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
2068663f99b8STejun Heo 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
20697d47e8d4STejun Heo 	 */
20707d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2071663f99b8STejun Heo 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
20727d47e8d4STejun Heo 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
20737d47e8d4STejun Heo 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
20747d47e8d4STejun Heo 			dev->spdn_cnt = 0;
2075cf480626STejun Heo 			action |= ATA_EH_RESET;
20767d47e8d4STejun Heo 			goto done;
20777d47e8d4STejun Heo 		}
20787d47e8d4STejun Heo 	}
20797d47e8d4STejun Heo 
2080c6fd2807SJeff Garzik 	return 0;
20817d47e8d4STejun Heo  done:
20827d47e8d4STejun Heo 	/* device has been slowed down, blow error history */
208376326ac1STejun Heo 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
20847d47e8d4STejun Heo 		ata_ering_clear(&dev->ering);
20857d47e8d4STejun Heo 	return action;
2086c6fd2807SJeff Garzik }
2087c6fd2807SJeff Garzik 
2088c6fd2807SJeff Garzik /**
20898d899e70SMark Lord  *	ata_eh_worth_retry - analyze error and decide whether to retry
20908d899e70SMark Lord  *	@qc: qc to possibly retry
20918d899e70SMark Lord  *
20928d899e70SMark Lord  *	Look at the cause of the error and decide if a retry
20938d899e70SMark Lord  * 	might be useful or not.  We don't want to retry media errors
20948d899e70SMark Lord  *	because the drive itself has probably already taken 10-30 seconds
20958d899e70SMark Lord  *	doing its own internal retries before reporting the failure.
20968d899e70SMark Lord  */
20978d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
20988d899e70SMark Lord {
20991eaca39aSBian Yu 	if (qc->err_mask & AC_ERR_MEDIA)
21008d899e70SMark Lord 		return 0;	/* don't retry media errors */
21018d899e70SMark Lord 	if (qc->flags & ATA_QCFLAG_IO)
21028d899e70SMark Lord 		return 1;	/* otherwise retry anything from fs stack */
21038d899e70SMark Lord 	if (qc->err_mask & AC_ERR_INVALID)
21048d899e70SMark Lord 		return 0;	/* don't retry these */
21058d899e70SMark Lord 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
21068d899e70SMark Lord }
21078d899e70SMark Lord 
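/*
 * Illustrative examples, not part of libata-eh.c: an fs-originated READ
 * that failed with AC_ERR_ATA_BUS (ATA_QCFLAG_IO set, no media error)
 * is considered worth retrying, whereas any command that failed with
 * AC_ERR_MEDIA is not, since the drive already exhausted its internal
 * retries.
 */
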
21088d899e70SMark Lord /**
21099b1e2658STejun Heo  *	ata_eh_link_autopsy - analyze error and determine recovery action
21109b1e2658STejun Heo  *	@link: host link to perform autopsy on
2111c6fd2807SJeff Garzik  *
21120260731fSTejun Heo  *	Analyze why @link failed and determine which recovery actions
21130260731fSTejun Heo  *	are needed.  This function also sets more detailed AC_ERR_*
21140260731fSTejun Heo  *	values and fills sense data for ATAPI CHECK SENSE.
2115c6fd2807SJeff Garzik  *
2116c6fd2807SJeff Garzik  *	LOCKING:
2117c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2118c6fd2807SJeff Garzik  */
21199b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
2120c6fd2807SJeff Garzik {
21210260731fSTejun Heo 	struct ata_port *ap = link->ap;
2122936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2123dfcc173dSTejun Heo 	struct ata_device *dev;
21243884f7b0STejun Heo 	unsigned int all_err_mask = 0, eflags = 0;
21253884f7b0STejun Heo 	int tag;
2126c6fd2807SJeff Garzik 	u32 serror;
2127c6fd2807SJeff Garzik 	int rc;
2128c6fd2807SJeff Garzik 
2129c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
2130c6fd2807SJeff Garzik 
2131c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2132c6fd2807SJeff Garzik 		return;
2133c6fd2807SJeff Garzik 
2134c6fd2807SJeff Garzik 	/* obtain and analyze SError */
2135936fd732STejun Heo 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2136c6fd2807SJeff Garzik 	if (rc == 0) {
2137c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
21380260731fSTejun Heo 		ata_eh_analyze_serror(link);
21394e57c517STejun Heo 	} else if (rc != -EOPNOTSUPP) {
2140cf480626STejun Heo 		/* SError read failed, force reset and probing */
2141b558edddSTejun Heo 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2142cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21434e57c517STejun Heo 		ehc->i.err_mask |= AC_ERR_OTHER;
21444e57c517STejun Heo 	}
2145c6fd2807SJeff Garzik 
2146c6fd2807SJeff Garzik 	/* analyze NCQ failure */
21470260731fSTejun Heo 	ata_eh_analyze_ncq_error(link);
2148c6fd2807SJeff Garzik 
2149c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
2150c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2151c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2152c6fd2807SJeff Garzik 
2153c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
2154c6fd2807SJeff Garzik 
2155c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2156c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2157c6fd2807SJeff Garzik 
2158b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2159b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link)
2160c6fd2807SJeff Garzik 			continue;
2161c6fd2807SJeff Garzik 
2162c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
2163c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
2164c6fd2807SJeff Garzik 
2165c6fd2807SJeff Garzik 		/* analyze TF */
2166c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2167c6fd2807SJeff Garzik 
2168c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
2169c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
2170c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2171c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
2172c6fd2807SJeff Garzik 
2173c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
2174c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
2175c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
2176c6fd2807SJeff Garzik 
2177c6fd2807SJeff Garzik 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2178f90f0828STejun Heo 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2179c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2180c6fd2807SJeff Garzik 
218103faab78STejun Heo 		/* determine whether the command is worth retrying */
21828d899e70SMark Lord 		if (ata_eh_worth_retry(qc))
218303faab78STejun Heo 			qc->flags |= ATA_QCFLAG_RETRY;
218403faab78STejun Heo 
2185c6fd2807SJeff Garzik 		/* accumulate error info */
2186c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
2187c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
2188c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
21893884f7b0STejun Heo 			eflags |= ATA_EFLAG_IS_IO;
2190c6fd2807SJeff Garzik 	}
2191c6fd2807SJeff Garzik 
2192c6fd2807SJeff Garzik 	/* enforce default EH actions */
2193c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2194c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2195cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21963884f7b0STejun Heo 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
21973884f7b0STejun Heo 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2198c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
2199c6fd2807SJeff Garzik 
2200dfcc173dSTejun Heo 	/* If we have offending qcs and the associated failed device,
2201dfcc173dSTejun Heo 	 * perform per-dev EH action only on the offending device.
2202dfcc173dSTejun Heo 	 */
2203c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2204c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
2205c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
2206c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2207c6fd2807SJeff Garzik 	}
2208c6fd2807SJeff Garzik 
22092695e366STejun Heo 	/* propagate timeout to host link */
22102695e366STejun Heo 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
22112695e366STejun Heo 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
22122695e366STejun Heo 
22132695e366STejun Heo 	/* record error and consider speeding down */
2214dfcc173dSTejun Heo 	dev = ehc->i.dev;
22152695e366STejun Heo 	if (!dev && ((ata_link_max_devices(link) == 1 &&
22162695e366STejun Heo 		      ata_dev_enabled(link->device))))
2217dfcc173dSTejun Heo 	    dev = link->device;
2218dfcc173dSTejun Heo 
221976326ac1STejun Heo 	if (dev) {
222076326ac1STejun Heo 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
222176326ac1STejun Heo 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
22223884f7b0STejun Heo 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
222376326ac1STejun Heo 	}
2224dfcc173dSTejun Heo 
2225c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
2226c6fd2807SJeff Garzik }
2227c6fd2807SJeff Garzik 
2228c6fd2807SJeff Garzik /**
22299b1e2658STejun Heo  *	ata_eh_autopsy - analyze error and determine recovery action
22309b1e2658STejun Heo  *	@ap: host port to perform autopsy on
22319b1e2658STejun Heo  *
22329b1e2658STejun Heo  *	Analyze all links of @ap and determine why they failed and
22339b1e2658STejun Heo  *	which recovery actions are needed.
22349b1e2658STejun Heo  *
22359b1e2658STejun Heo  *	LOCKING:
22369b1e2658STejun Heo  *	Kernel thread context (may sleep).
22379b1e2658STejun Heo  */
2238fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
22399b1e2658STejun Heo {
22409b1e2658STejun Heo 	struct ata_link *link;
22419b1e2658STejun Heo 
22421eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE)
22439b1e2658STejun Heo 		ata_eh_link_autopsy(link);
22442695e366STejun Heo 
2245b1c72916STejun Heo 	/* Handle the frigging slave link.  Autopsy is done similarly
2246b1c72916STejun Heo 	 * but actions and flags are transferred over to the master
2247b1c72916STejun Heo 	 * link and handled from there.
2248b1c72916STejun Heo 	 */
2249b1c72916STejun Heo 	if (ap->slave_link) {
2250b1c72916STejun Heo 		struct ata_eh_context *mehc = &ap->link.eh_context;
2251b1c72916STejun Heo 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2252b1c72916STejun Heo 
2253848e4c68STejun Heo 		/* transfer control flags from master to slave */
2254848e4c68STejun Heo 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2255848e4c68STejun Heo 
2256848e4c68STejun Heo 		/* perform autopsy on the slave link */
2257b1c72916STejun Heo 		ata_eh_link_autopsy(ap->slave_link);
2258b1c72916STejun Heo 
2259848e4c68STejun Heo 		/* transfer actions from slave to master and clear slave */
2260b1c72916STejun Heo 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2261b1c72916STejun Heo 		mehc->i.action		|= sehc->i.action;
2262b1c72916STejun Heo 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2263b1c72916STejun Heo 		mehc->i.flags		|= sehc->i.flags;
2264b1c72916STejun Heo 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2265b1c72916STejun Heo 	}
2266b1c72916STejun Heo 
22672695e366STejun Heo 	/* Autopsy of fanout ports can affect host link autopsy.
22682695e366STejun Heo 	 * Perform host link autopsy last.
22692695e366STejun Heo 	 */
2270071f44b1STejun Heo 	if (sata_pmp_attached(ap))
22712695e366STejun Heo 		ata_eh_link_autopsy(&ap->link);
22729b1e2658STejun Heo }
22739b1e2658STejun Heo 
22749b1e2658STejun Heo /**
22756521148cSRobert Hancock  *	ata_get_cmd_descript - get description for ATA command
22766521148cSRobert Hancock  *	@command: ATA command code to get description for
22776521148cSRobert Hancock  *
22786521148cSRobert Hancock  *	Return a textual description of the given command, or NULL if the
22796521148cSRobert Hancock  *	command is not known.
22806521148cSRobert Hancock  *
22816521148cSRobert Hancock  *	LOCKING:
22826521148cSRobert Hancock  *	None
22836521148cSRobert Hancock  */
22846521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command)
22856521148cSRobert Hancock {
22866521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
22876521148cSRobert Hancock 	static const struct
22886521148cSRobert Hancock 	{
22896521148cSRobert Hancock 		u8 command;
22906521148cSRobert Hancock 		const char *text;
22916521148cSRobert Hancock 	} cmd_descr[] = {
22926521148cSRobert Hancock 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
22936521148cSRobert Hancock 		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
22946521148cSRobert Hancock 		{ ATA_CMD_STANDBY, 		"STANDBY" },
22956521148cSRobert Hancock 		{ ATA_CMD_IDLE, 		"IDLE" },
22966521148cSRobert Hancock 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
22976521148cSRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
22983915c3b5SRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
22996521148cSRobert Hancock 		{ ATA_CMD_NOP,			"NOP" },
23006521148cSRobert Hancock 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
23016521148cSRobert Hancock 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
23026521148cSRobert Hancock 		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
23036521148cSRobert Hancock 		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
23046521148cSRobert Hancock 		{ ATA_CMD_SERVICE, 		"SERVICE" },
23056521148cSRobert Hancock 		{ ATA_CMD_READ, 		"READ DMA" },
23066521148cSRobert Hancock 		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
23076521148cSRobert Hancock 		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
23086521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
23096521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
23106521148cSRobert Hancock 		{ ATA_CMD_WRITE, 		"WRITE DMA" },
23116521148cSRobert Hancock 		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
23126521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
23136521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
23146521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
23156521148cSRobert Hancock 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
23166521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
23176521148cSRobert Hancock 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
23186521148cSRobert Hancock 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
23193915c3b5SRobert Hancock 		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
23203915c3b5SRobert Hancock 		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
23216521148cSRobert Hancock 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
23226521148cSRobert Hancock 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
23236521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
23246521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
23256521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
23266521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
23276521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
23286521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
23296521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
23306521148cSRobert Hancock 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
23316521148cSRobert Hancock 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
23326521148cSRobert Hancock 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
23336521148cSRobert Hancock 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
23346521148cSRobert Hancock 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
23356521148cSRobert Hancock 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
23366521148cSRobert Hancock 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
23376521148cSRobert Hancock 		{ ATA_CMD_SLEEP,		"SLEEP" },
23386521148cSRobert Hancock 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
23396521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
23406521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
23416521148cSRobert Hancock 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
23426521148cSRobert Hancock 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
23436521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
23446521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
23456521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
23466521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
23473915c3b5SRobert Hancock 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
23486521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
23496521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
23506521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
23516521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
23526521148cSRobert Hancock 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
23533915c3b5SRobert Hancock 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
23546521148cSRobert Hancock 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
23553915c3b5SRobert Hancock 		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
23566521148cSRobert Hancock 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
23576521148cSRobert Hancock 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
23586521148cSRobert Hancock 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
23596521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
23606521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
23616521148cSRobert Hancock 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
23626521148cSRobert Hancock 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
23636521148cSRobert Hancock 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
23646521148cSRobert Hancock 		{ ATA_CMD_SMART,		"SMART" },
23656521148cSRobert Hancock 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
23666521148cSRobert Hancock 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2367acad7627SFUJITA Tomonori 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
23686521148cSRobert Hancock 		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
23696521148cSRobert Hancock 		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
23706521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
23716521148cSRobert Hancock 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
23726521148cSRobert Hancock 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
23736521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
23743915c3b5SRobert Hancock 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
23753915c3b5SRobert Hancock 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
23766521148cSRobert Hancock 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
23776521148cSRobert Hancock 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
23786521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
23796521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
23806521148cSRobert Hancock 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
23816521148cSRobert Hancock 		{ 0,				NULL } /* terminate list */
23826521148cSRobert Hancock 	};
23836521148cSRobert Hancock 
23846521148cSRobert Hancock 	unsigned int i;
23856521148cSRobert Hancock 	for (i = 0; cmd_descr[i].text; i++)
23866521148cSRobert Hancock 		if (cmd_descr[i].command == command)
23876521148cSRobert Hancock 			return cmd_descr[i].text;
23886521148cSRobert Hancock #endif
23896521148cSRobert Hancock 
23906521148cSRobert Hancock 	return NULL;
23916521148cSRobert Hancock }
2392*36aae28eSAndy Shevchenko EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
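
/*
 * Minimal usage sketch, assumed caller (not from this file): translate
 * a taskfile opcode into text for logging, falling back to the raw
 * byte when the table above has no entry or when
 * CONFIG_ATA_VERBOSE_ERROR is disabled.
 */
static void example_log_failed_command(struct ata_device *dev, u8 command)
{
	const char *descr = ata_get_cmd_descript(command);

	if (descr)
		ata_dev_warn(dev, "failed command: %s\n", descr);
	else
		ata_dev_warn(dev, "failed command: 0x%02x\n", command);
}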
23936521148cSRobert Hancock 
23946521148cSRobert Hancock /**
23959b1e2658STejun Heo  *	ata_eh_link_report - report error handling to user
23960260731fSTejun Heo  *	@link: ATA link EH is going on
2397c6fd2807SJeff Garzik  *
2398c6fd2807SJeff Garzik  *	Report EH to user.
2399c6fd2807SJeff Garzik  *
2400c6fd2807SJeff Garzik  *	LOCKING:
2401c6fd2807SJeff Garzik  *	None.
2402c6fd2807SJeff Garzik  */
24039b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2404c6fd2807SJeff Garzik {
24050260731fSTejun Heo 	struct ata_port *ap = link->ap;
24060260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2407c6fd2807SJeff Garzik 	const char *frozen, *desc;
2408462098b0SLevente Kurusa 	char tries_buf[6] = "";
2409c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
2410c6fd2807SJeff Garzik 
241194ff3d54STejun Heo 	if (ehc->i.flags & ATA_EHI_QUIET)
241294ff3d54STejun Heo 		return;
241394ff3d54STejun Heo 
2414c6fd2807SJeff Garzik 	desc = NULL;
2415c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
2416c6fd2807SJeff Garzik 		desc = ehc->i.desc;
2417c6fd2807SJeff Garzik 
2418c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2419c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2420c6fd2807SJeff Garzik 
2421b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2422b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link ||
2423e027bd36STejun Heo 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2424e027bd36STejun Heo 		     qc->err_mask == AC_ERR_DEV))
2425c6fd2807SJeff Garzik 			continue;
2426c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2427c6fd2807SJeff Garzik 			continue;
2428c6fd2807SJeff Garzik 
2429c6fd2807SJeff Garzik 		nr_failed++;
2430c6fd2807SJeff Garzik 	}
2431c6fd2807SJeff Garzik 
2432c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
2433c6fd2807SJeff Garzik 		return;
2434c6fd2807SJeff Garzik 
2435c6fd2807SJeff Garzik 	frozen = "";
2436c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
2437c6fd2807SJeff Garzik 		frozen = " frozen";
2438c6fd2807SJeff Garzik 
2439a1e10f7eSTejun Heo 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2440462098b0SLevente Kurusa 		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2441a1e10f7eSTejun Heo 			 ap->eh_tries);
2442a1e10f7eSTejun Heo 
2443c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2444a9a79dfeSJoe Perches 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2445a1e10f7eSTejun Heo 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2446a1e10f7eSTejun Heo 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2447a1e10f7eSTejun Heo 			    ehc->i.action, frozen, tries_buf);
2448c6fd2807SJeff Garzik 		if (desc)
2449a9a79dfeSJoe Perches 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2450c6fd2807SJeff Garzik 	} else {
2451a9a79dfeSJoe Perches 		ata_link_err(link, "exception Emask 0x%x "
2452a1e10f7eSTejun Heo 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2453a1e10f7eSTejun Heo 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2454a1e10f7eSTejun Heo 			     ehc->i.action, frozen, tries_buf);
2455c6fd2807SJeff Garzik 		if (desc)
2456a9a79dfeSJoe Perches 			ata_link_err(link, "%s\n", desc);
2457c6fd2807SJeff Garzik 	}
2458c6fd2807SJeff Garzik 
24596521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
24601333e194SRobert Hancock 	if (ehc->i.serror)
2461a9a79dfeSJoe Perches 		ata_link_err(link,
24621333e194SRobert Hancock 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
24631333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
24641333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
24651333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
24661333e194SRobert Hancock 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
24671333e194SRobert Hancock 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
24681333e194SRobert Hancock 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
24691333e194SRobert Hancock 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
24701333e194SRobert Hancock 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
24711333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
24721333e194SRobert Hancock 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
24731333e194SRobert Hancock 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
24741333e194SRobert Hancock 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
24751333e194SRobert Hancock 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
24761333e194SRobert Hancock 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
24771333e194SRobert Hancock 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
24781333e194SRobert Hancock 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
24791333e194SRobert Hancock 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
24806521148cSRobert Hancock #endif
24811333e194SRobert Hancock 
2482c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2483c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
24848a937581STejun Heo 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2485abb6a889STejun Heo 		const u8 *cdb = qc->cdb;
2486abb6a889STejun Heo 		char data_buf[20] = "";
2487abb6a889STejun Heo 		char cdb_buf[70] = "";
2488c6fd2807SJeff Garzik 
24890260731fSTejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2490b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2491c6fd2807SJeff Garzik 			continue;
2492c6fd2807SJeff Garzik 
2493abb6a889STejun Heo 		if (qc->dma_dir != DMA_NONE) {
2494abb6a889STejun Heo 			static const char *dma_str[] = {
2495abb6a889STejun Heo 				[DMA_BIDIRECTIONAL]	= "bidi",
2496abb6a889STejun Heo 				[DMA_TO_DEVICE]		= "out",
2497abb6a889STejun Heo 				[DMA_FROM_DEVICE]	= "in",
2498abb6a889STejun Heo 			};
2499abb6a889STejun Heo 			static const char *prot_str[] = {
2500abb6a889STejun Heo 				[ATA_PROT_PIO]		= "pio",
2501abb6a889STejun Heo 				[ATA_PROT_DMA]		= "dma",
2502abb6a889STejun Heo 				[ATA_PROT_NCQ]		= "ncq",
25030dc36888STejun Heo 				[ATAPI_PROT_PIO]	= "pio",
25040dc36888STejun Heo 				[ATAPI_PROT_DMA]	= "dma",
2505abb6a889STejun Heo 			};
2506abb6a889STejun Heo 
2507abb6a889STejun Heo 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2508abb6a889STejun Heo 				 prot_str[qc->tf.protocol], qc->nbytes,
2509abb6a889STejun Heo 				 dma_str[qc->dma_dir]);
2510abb6a889STejun Heo 		}
2511abb6a889STejun Heo 
25126521148cSRobert Hancock 		if (ata_is_atapi(qc->tf.protocol)) {
25136521148cSRobert Hancock 			if (qc->scsicmd)
25146521148cSRobert Hancock 				scsi_print_command(qc->scsicmd);
25156521148cSRobert Hancock 			else
2516abb6a889STejun Heo 				snprintf(cdb_buf, sizeof(cdb_buf),
2517abb6a889STejun Heo 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2518abb6a889STejun Heo 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2519abb6a889STejun Heo 				 cdb[0], cdb[1], cdb[2], cdb[3],
2520abb6a889STejun Heo 				 cdb[4], cdb[5], cdb[6], cdb[7],
2521abb6a889STejun Heo 				 cdb[8], cdb[9], cdb[10], cdb[11],
2522abb6a889STejun Heo 				 cdb[12], cdb[13], cdb[14], cdb[15]);
25236521148cSRobert Hancock 		} else {
25246521148cSRobert Hancock 			const char *descr = ata_get_cmd_descript(cmd->command);
25256521148cSRobert Hancock 			if (descr)
2526a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "failed command: %s\n",
2527a9a79dfeSJoe Perches 					    descr);
25286521148cSRobert Hancock 		}
2529abb6a889STejun Heo 
2530a9a79dfeSJoe Perches 		ata_dev_err(qc->dev,
25318a937581STejun Heo 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2532abb6a889STejun Heo 			"tag %d%s\n         %s"
25338a937581STejun Heo 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
25345335b729STejun Heo 			"Emask 0x%x (%s)%s\n",
25358a937581STejun Heo 			cmd->command, cmd->feature, cmd->nsect,
25368a937581STejun Heo 			cmd->lbal, cmd->lbam, cmd->lbah,
25378a937581STejun Heo 			cmd->hob_feature, cmd->hob_nsect,
25388a937581STejun Heo 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2539abb6a889STejun Heo 			cmd->device, qc->tag, data_buf, cdb_buf,
25408a937581STejun Heo 			res->command, res->feature, res->nsect,
25418a937581STejun Heo 			res->lbal, res->lbam, res->lbah,
25428a937581STejun Heo 			res->hob_feature, res->hob_nsect,
25438a937581STejun Heo 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
25445335b729STejun Heo 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
25455335b729STejun Heo 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
25461333e194SRobert Hancock 
25476521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
25481333e194SRobert Hancock 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
25491333e194SRobert Hancock 				    ATA_ERR)) {
25501333e194SRobert Hancock 			if (res->command & ATA_BUSY)
2551a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "status: { Busy }\n");
25521333e194SRobert Hancock 			else
2553a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
25541333e194SRobert Hancock 				  res->command & ATA_DRDY ? "DRDY " : "",
25551333e194SRobert Hancock 				  res->command & ATA_DF ? "DF " : "",
25561333e194SRobert Hancock 				  res->command & ATA_DRQ ? "DRQ " : "",
25571333e194SRobert Hancock 				  res->command & ATA_ERR ? "ERR " : "");
25581333e194SRobert Hancock 		}
25591333e194SRobert Hancock 
25601333e194SRobert Hancock 		if (cmd->command != ATA_CMD_PACKET &&
2561eec7e1c1SAlexey Asemov 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2562eec7e1c1SAlexey Asemov 				     ATA_IDNF | ATA_ABORTED)))
2563eec7e1c1SAlexey Asemov 			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
25641333e194SRobert Hancock 			  res->feature & ATA_ICRC ? "ICRC " : "",
25651333e194SRobert Hancock 			  res->feature & ATA_UNC ? "UNC " : "",
2566eec7e1c1SAlexey Asemov 			  res->feature & ATA_AMNF ? "AMNF " : "",
25671333e194SRobert Hancock 			  res->feature & ATA_IDNF ? "IDNF " : "",
25681333e194SRobert Hancock 			  res->feature & ATA_ABORTED ? "ABRT " : "");
25696521148cSRobert Hancock #endif
2570c6fd2807SJeff Garzik 	}
2571c6fd2807SJeff Garzik }
2572c6fd2807SJeff Garzik 
25739b1e2658STejun Heo /**
25749b1e2658STejun Heo  *	ata_eh_report - report error handling to user
25759b1e2658STejun Heo  *	@ap: ATA port to report EH about
25769b1e2658STejun Heo  *
25779b1e2658STejun Heo  *	Report EH to user.
25789b1e2658STejun Heo  *
25799b1e2658STejun Heo  *	LOCKING:
25809b1e2658STejun Heo  *	None.
25819b1e2658STejun Heo  */
2582fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
25839b1e2658STejun Heo {
25849b1e2658STejun Heo 	struct ata_link *link;
25859b1e2658STejun Heo 
25861eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
25879b1e2658STejun Heo 		ata_eh_link_report(link);
25889b1e2658STejun Heo }
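
/*
 * Illustrative sketch of a hypothetical interrupt path (not from this
 * file): the description that ata_eh_link_report() prints is whatever
 * the driver pushed into the link's eh_info beforehand.  "irq_stat" is
 * a placeholder for a controller-specific status word.
 */
static void example_record_error(struct ata_link *link, u32 irq_stat)
{
	struct ata_eh_info *ehi = &link->eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(link->ap);
}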
25899b1e2658STejun Heo 
2590cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2591b1c72916STejun Heo 			unsigned int *classes, unsigned long deadline,
2592b1c72916STejun Heo 			bool clear_classes)
2593c6fd2807SJeff Garzik {
2594f58229f8STejun Heo 	struct ata_device *dev;
2595c6fd2807SJeff Garzik 
2596b1c72916STejun Heo 	if (clear_classes)
25971eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
2598f58229f8STejun Heo 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2599c6fd2807SJeff Garzik 
2600f046519fSTejun Heo 	return reset(link, classes, deadline);
2601c6fd2807SJeff Garzik }
2602c6fd2807SJeff Garzik 
2603e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2604c6fd2807SJeff Garzik {
260545db2f6cSTejun Heo 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2606ae791c05STejun Heo 		return 0;
26075dbfc9cbSTejun Heo 	if (rc == -EAGAIN)
2608c6fd2807SJeff Garzik 		return 1;
2609071f44b1STejun Heo 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
26103495de73STejun Heo 		return 1;
2611c6fd2807SJeff Garzik 	return 0;
2612c6fd2807SJeff Garzik }
2613c6fd2807SJeff Garzik 
2614fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2615c6fd2807SJeff Garzik 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2616c6fd2807SJeff Garzik 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2617c6fd2807SJeff Garzik {
2618afaa5c37STejun Heo 	struct ata_port *ap = link->ap;
2619b1c72916STejun Heo 	struct ata_link *slave = ap->slave_link;
2620936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2621705d2014SBartlomiej Zolnierkiewicz 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2622c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
2623416dc9edSTejun Heo 	unsigned int lflags = link->flags;
2624c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2625d8af0eb6STejun Heo 	int max_tries = 0, try = 0;
2626b1c72916STejun Heo 	struct ata_link *failed_link;
2627f58229f8STejun Heo 	struct ata_device *dev;
2628416dc9edSTejun Heo 	unsigned long deadline, now;
2629c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
2630afaa5c37STejun Heo 	unsigned long flags;
2631416dc9edSTejun Heo 	u32 sstatus;
2632b1c72916STejun Heo 	int nr_unknown, rc;
2633c6fd2807SJeff Garzik 
2634932648b0STejun Heo 	/*
2635932648b0STejun Heo 	 * Prepare to reset
2636932648b0STejun Heo 	 */
2637d8af0eb6STejun Heo 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2638d8af0eb6STejun Heo 		max_tries++;
2639ca6d43b0SDan Williams 	if (link->flags & ATA_LFLAG_RST_ONCE)
2640ca6d43b0SDan Williams 		max_tries = 1;
264105944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_HRST)
264205944bdfSTejun Heo 		hardreset = NULL;
264305944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_SRST)
264405944bdfSTejun Heo 		softreset = NULL;
2645d8af0eb6STejun Heo 
264625985edcSLucas De Marchi 	/* make sure each reset attempt is at least COOL_DOWN apart */
264719b72321STejun Heo 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
26480a2c0f56STejun Heo 		now = jiffies;
264919b72321STejun Heo 		WARN_ON(time_after(ehc->last_reset, now));
265019b72321STejun Heo 		deadline = ata_deadline(ehc->last_reset,
265119b72321STejun Heo 					ATA_EH_RESET_COOL_DOWN);
26520a2c0f56STejun Heo 		if (time_before(now, deadline))
26530a2c0f56STejun Heo 			schedule_timeout_uninterruptible(deadline - now);
265419b72321STejun Heo 	}
26550a2c0f56STejun Heo 
2656afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2657afaa5c37STejun Heo 	ap->pflags |= ATA_PFLAG_RESETTING;
2658afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2659afaa5c37STejun Heo 
2660cf480626STejun Heo 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2661c6fd2807SJeff Garzik 
26621eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2663cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2664cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2665cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2666cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2667cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2668cdeab114STejun Heo 		 * bus as we may be talking too fast.
2669cdeab114STejun Heo 		 */
2670cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
26715416912aSAaron Lu 		dev->dma_mode = 0xff;
2672cdeab114STejun Heo 
2673cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2674cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2675cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2676cdeab114STejun Heo 		 * configuring devices.
2677cdeab114STejun Heo 		 */
2678cdeab114STejun Heo 		if (ap->ops->set_piomode)
2679cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2680cdeab114STejun Heo 	}
2681cdeab114STejun Heo 
2682cf480626STejun Heo 	/* prefer hardreset */
2683932648b0STejun Heo 	reset = NULL;
2684cf480626STejun Heo 	ehc->i.action &= ~ATA_EH_RESET;
2685cf480626STejun Heo 	if (hardreset) {
2686cf480626STejun Heo 		reset = hardreset;
2687a674050eSTejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
26884f7faa3fSTejun Heo 	} else if (softreset) {
2689cf480626STejun Heo 		reset = softreset;
2690a674050eSTejun Heo 		ehc->i.action |= ATA_EH_SOFTRESET;
2691cf480626STejun Heo 	}
2692c6fd2807SJeff Garzik 
2693c6fd2807SJeff Garzik 	if (prereset) {
2694b1c72916STejun Heo 		unsigned long deadline = ata_deadline(jiffies,
2695b1c72916STejun Heo 						      ATA_EH_PRERESET_TIMEOUT);
2696b1c72916STejun Heo 
2697b1c72916STejun Heo 		if (slave) {
2698b1c72916STejun Heo 			sehc->i.action &= ~ATA_EH_RESET;
2699b1c72916STejun Heo 			sehc->i.action |= ehc->i.action;
2700b1c72916STejun Heo 		}
2701b1c72916STejun Heo 
2702b1c72916STejun Heo 		rc = prereset(link, deadline);
2703b1c72916STejun Heo 
2704b1c72916STejun Heo 		/* If present, do prereset on slave link too.  Reset
2705b1c72916STejun Heo 		 * is skipped iff both master and slave links report
2706b1c72916STejun Heo 		 * -ENOENT or clear ATA_EH_RESET.
2707b1c72916STejun Heo 		 */
2708b1c72916STejun Heo 		if (slave && (rc == 0 || rc == -ENOENT)) {
2709b1c72916STejun Heo 			int tmp;
2710b1c72916STejun Heo 
2711b1c72916STejun Heo 			tmp = prereset(slave, deadline);
2712b1c72916STejun Heo 			if (tmp != -ENOENT)
2713b1c72916STejun Heo 				rc = tmp;
2714b1c72916STejun Heo 
2715b1c72916STejun Heo 			ehc->i.action |= sehc->i.action;
2716b1c72916STejun Heo 		}
2717b1c72916STejun Heo 
2718c6fd2807SJeff Garzik 		if (rc) {
2719c961922bSAlan Cox 			if (rc == -ENOENT) {
2720a9a79dfeSJoe Perches 				ata_link_dbg(link, "port disabled--ignoring\n");
2721cf480626STejun Heo 				ehc->i.action &= ~ATA_EH_RESET;
27224aa9ab67STejun Heo 
27231eca4365STejun Heo 				ata_for_each_dev(dev, link, ALL)
2724f58229f8STejun Heo 					classes[dev->devno] = ATA_DEV_NONE;
27254aa9ab67STejun Heo 
27264aa9ab67STejun Heo 				rc = 0;
2727c961922bSAlan Cox 			} else
2728a9a79dfeSJoe Perches 				ata_link_err(link,
2729a9a79dfeSJoe Perches 					     "prereset failed (errno=%d)\n",
2730a9a79dfeSJoe Perches 					     rc);
2731fccb6ea5STejun Heo 			goto out;
2732c6fd2807SJeff Garzik 		}
2733c6fd2807SJeff Garzik 
2734932648b0STejun Heo 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2735d6515e6fSTejun Heo 		 * bang classes, thaw and return.
2736932648b0STejun Heo 		 */
2737932648b0STejun Heo 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
27381eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
2739f58229f8STejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2740d6515e6fSTejun Heo 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2741d6515e6fSTejun Heo 			    ata_is_host_link(link))
2742d6515e6fSTejun Heo 				ata_eh_thaw_port(ap);
2743fccb6ea5STejun Heo 			rc = 0;
2744fccb6ea5STejun Heo 			goto out;
2745c6fd2807SJeff Garzik 		}
2746932648b0STejun Heo 	}
2747c6fd2807SJeff Garzik 
2748c6fd2807SJeff Garzik  retry:
2749932648b0STejun Heo 	/*
2750932648b0STejun Heo 	 * Perform reset
2751932648b0STejun Heo 	 */
2752dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2753dc98c32cSTejun Heo 		ata_eh_freeze_port(ap);
2754dc98c32cSTejun Heo 
2755341c2c95STejun Heo 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
275631daabdaSTejun Heo 
2757932648b0STejun Heo 	if (reset) {
2758c6fd2807SJeff Garzik 		if (verbose)
2759a9a79dfeSJoe Perches 			ata_link_info(link, "%s resetting link\n",
2760c6fd2807SJeff Garzik 				      reset == softreset ? "soft" : "hard");
2761c6fd2807SJeff Garzik 
2762c6fd2807SJeff Garzik 		/* mark that this EH session started with reset */
276319b72321STejun Heo 		ehc->last_reset = jiffies;
27640d64a233STejun Heo 		if (reset == hardreset)
27650d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
27660d64a233STejun Heo 		else
27670d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2768c6fd2807SJeff Garzik 
2769b1c72916STejun Heo 		rc = ata_do_reset(link, reset, classes, deadline, true);
2770b1c72916STejun Heo 		if (rc && rc != -EAGAIN) {
2771b1c72916STejun Heo 			failed_link = link;
27725dbfc9cbSTejun Heo 			goto fail;
2773b1c72916STejun Heo 		}
2774c6fd2807SJeff Garzik 
2775b1c72916STejun Heo 		/* hardreset slave link if existent */
2776b1c72916STejun Heo 		if (slave && reset == hardreset) {
2777b1c72916STejun Heo 			int tmp;
2778b1c72916STejun Heo 
2779b1c72916STejun Heo 			if (verbose)
2780a9a79dfeSJoe Perches 				ata_link_info(slave, "hard resetting link\n");
2781b1c72916STejun Heo 
2782b1c72916STejun Heo 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2783b1c72916STejun Heo 			tmp = ata_do_reset(slave, reset, classes, deadline,
2784b1c72916STejun Heo 					   false);
2785b1c72916STejun Heo 			switch (tmp) {
2786b1c72916STejun Heo 			case -EAGAIN:
2787b1c72916STejun Heo 				rc = -EAGAIN;
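				/* fall through */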
2788b1c72916STejun Heo 			case 0:
2789b1c72916STejun Heo 				break;
2790b1c72916STejun Heo 			default:
2791b1c72916STejun Heo 				failed_link = slave;
2792b1c72916STejun Heo 				rc = tmp;
2793b1c72916STejun Heo 				goto fail;
2794b1c72916STejun Heo 			}
2795b1c72916STejun Heo 		}
2796b1c72916STejun Heo 
2797b1c72916STejun Heo 		/* perform follow-up SRST if necessary */
2798c6fd2807SJeff Garzik 		if (reset == hardreset &&
2799e8411fbaSSergei Shtylyov 		    ata_eh_followup_srst_needed(link, rc)) {
2800c6fd2807SJeff Garzik 			reset = softreset;
2801c6fd2807SJeff Garzik 
2802c6fd2807SJeff Garzik 			if (!reset) {
2803a9a79dfeSJoe Perches 				ata_link_err(link,
2804a9a79dfeSJoe Perches 	     "follow-up softreset required but no softreset available\n");
2805b1c72916STejun Heo 				failed_link = link;
2806fccb6ea5STejun Heo 				rc = -EINVAL;
280708cf69d0STejun Heo 				goto fail;
2808c6fd2807SJeff Garzik 			}
2809c6fd2807SJeff Garzik 
2810cf480626STejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2811b1c72916STejun Heo 			rc = ata_do_reset(link, reset, classes, deadline, true);
2812fe2c4d01STejun Heo 			if (rc) {
2813fe2c4d01STejun Heo 				failed_link = link;
2814fe2c4d01STejun Heo 				goto fail;
2815fe2c4d01STejun Heo 			}
2816c6fd2807SJeff Garzik 		}
2817932648b0STejun Heo 	} else {
2818932648b0STejun Heo 		if (verbose)
2819a9a79dfeSJoe Perches 			ata_link_info(link,
2820a9a79dfeSJoe Perches 	"no reset method available, skipping reset\n");
2821932648b0STejun Heo 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2822932648b0STejun Heo 			lflags |= ATA_LFLAG_ASSUME_ATA;
2823932648b0STejun Heo 	}
2824008a7896STejun Heo 
2825932648b0STejun Heo 	/*
2826932648b0STejun Heo 	 * Post-reset processing
2827932648b0STejun Heo 	 */
28281eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2829416dc9edSTejun Heo 		/* After the reset, the device state is PIO 0 and the
2830416dc9edSTejun Heo 		 * controller state is undefined.  Reset also wakes up
2831416dc9edSTejun Heo 		 * drives from sleeping mode.
2832c6fd2807SJeff Garzik 		 */
2833f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2834054a5fbaSTejun Heo 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2835c6fd2807SJeff Garzik 
28363b761d3dSTejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
28373b761d3dSTejun Heo 			continue;
28383b761d3dSTejun Heo 
28394ccd3329STejun Heo 		/* apply class override */
2840416dc9edSTejun Heo 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2841ae791c05STejun Heo 			classes[dev->devno] = ATA_DEV_ATA;
2842416dc9edSTejun Heo 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2843816ab897STejun Heo 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2844ae791c05STejun Heo 	}
2845ae791c05STejun Heo 
2846008a7896STejun Heo 	/* record current link speed */
2847936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2848936fd732STejun Heo 		link->sata_spd = (sstatus >> 4) & 0xf;
2849b1c72916STejun Heo 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2850b1c72916STejun Heo 		slave->sata_spd = (sstatus >> 4) & 0xf;
2851008a7896STejun Heo 
2852dc98c32cSTejun Heo 	/* thaw the port */
2853dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2854dc98c32cSTejun Heo 		ata_eh_thaw_port(ap);
2855dc98c32cSTejun Heo 
2856f046519fSTejun Heo 	/* postreset() should clear hardware SError.  Although SError
2857f046519fSTejun Heo 	 * is cleared during link resume, clearing SError here is
2858f046519fSTejun Heo 	 * necessary as some PHYs raise hotplug events after SRST.
2859f046519fSTejun Heo 	 * This introduces a race condition where hotplug occurs between
2860f046519fSTejun Heo 	 * reset and here.  This race is mitigated by cross checking
2861f046519fSTejun Heo 	 * link onlineness and classification result later.
2862f046519fSTejun Heo 	 */
2863b1c72916STejun Heo 	if (postreset) {
2864cc0680a5STejun Heo 		postreset(link, classes);
2865b1c72916STejun Heo 		if (slave)
2866b1c72916STejun Heo 			postreset(slave, classes);
2867b1c72916STejun Heo 	}
2868c6fd2807SJeff Garzik 
28691e641060STejun Heo 	/*
28708c56caccSTejun Heo 	 * Some controllers can't be frozen very well and may set spurious
28718c56caccSTejun Heo 	 * error conditions during reset.  Clear accumulated error
28728c56caccSTejun Heo 	 * information and re-thaw the port if frozen.  As reset is the
28738c56caccSTejun Heo 	 * final recovery action and we cross check link onlineness against
28748c56caccSTejun Heo 	 * device classification later, no hotplug event is lost by this.
28751e641060STejun Heo 	 */
2876f046519fSTejun Heo 	spin_lock_irqsave(link->ap->lock, flags);
28771e641060STejun Heo 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2878b1c72916STejun Heo 	if (slave)
28791e641060STejun Heo 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
28801e641060STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2881f046519fSTejun Heo 	spin_unlock_irqrestore(link->ap->lock, flags);
2882f046519fSTejun Heo 
28838c56caccSTejun Heo 	if (ap->pflags & ATA_PFLAG_FROZEN)
28848c56caccSTejun Heo 		ata_eh_thaw_port(ap);
28858c56caccSTejun Heo 
28863b761d3dSTejun Heo 	/*
28873b761d3dSTejun Heo 	 * Make sure onlineness and classification result correspond.
2888f046519fSTejun Heo 	 * Hotplug could have happened during reset and some
2889f046519fSTejun Heo 	 * controllers fail to wait while a drive is spinning up after
2890f046519fSTejun Heo 	 * being hotplugged causing misdetection.  By cross checking
28913b761d3dSTejun Heo 	 * link on/offlineness and classification result, those
28923b761d3dSTejun Heo 	 * conditions can be reliably detected and retried.
2893f046519fSTejun Heo 	 */
2894b1c72916STejun Heo 	nr_unknown = 0;
28951eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
28963b761d3dSTejun Heo 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2897b1c72916STejun Heo 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2898a9a79dfeSJoe Perches 				ata_dev_dbg(dev, "link online but device misclassified\n");
2899f046519fSTejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2900b1c72916STejun Heo 				nr_unknown++;
2901b1c72916STejun Heo 			}
29023b761d3dSTejun Heo 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
29033b761d3dSTejun Heo 			if (ata_class_enabled(classes[dev->devno]))
2904a9a79dfeSJoe Perches 				ata_dev_dbg(dev,
2905a9a79dfeSJoe Perches 					    "link offline, clearing class %d to NONE\n",
29063b761d3dSTejun Heo 					    classes[dev->devno]);
29073b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
29083b761d3dSTejun Heo 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2909a9a79dfeSJoe Perches 			ata_dev_dbg(dev,
2910a9a79dfeSJoe Perches 				    "link status unknown, clearing UNKNOWN to NONE\n");
29113b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
29123b761d3dSTejun Heo 		}
2913f046519fSTejun Heo 	}
2914f046519fSTejun Heo 
2915b1c72916STejun Heo 	if (classify && nr_unknown) {
2916f046519fSTejun Heo 		if (try < max_tries) {
2917a9a79dfeSJoe Perches 			ata_link_warn(link,
2918a9a79dfeSJoe Perches 				      "link online but %d devices misclassified, retrying\n",
29193b761d3dSTejun Heo 				      nr_unknown);
2920b1c72916STejun Heo 			failed_link = link;
2921f046519fSTejun Heo 			rc = -EAGAIN;
2922f046519fSTejun Heo 			goto fail;
2923f046519fSTejun Heo 		}
2924a9a79dfeSJoe Perches 		ata_link_warn(link,
29253b761d3dSTejun Heo 			      "link online but %d devices misclassified, "
29263b761d3dSTejun Heo 			      "device detection might fail\n", nr_unknown);
2927f046519fSTejun Heo 	}
2928f046519fSTejun Heo 
2929c6fd2807SJeff Garzik 	/* reset successful, schedule revalidation */
2930cf480626STejun Heo 	ata_eh_done(link, NULL, ATA_EH_RESET);
2931b1c72916STejun Heo 	if (slave)
2932b1c72916STejun Heo 		ata_eh_done(slave, NULL, ATA_EH_RESET);
293319b72321STejun Heo 	ehc->last_reset = jiffies;		/* update to completion time */
2934c6fd2807SJeff Garzik 	ehc->i.action |= ATA_EH_REVALIDATE;
29356b7ae954STejun Heo 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
2936416dc9edSTejun Heo 
2937416dc9edSTejun Heo 	rc = 0;
2938fccb6ea5STejun Heo  out:
2939fccb6ea5STejun Heo 	/* clear hotplug flag */
2940fccb6ea5STejun Heo 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2941b1c72916STejun Heo 	if (slave)
2942b1c72916STejun Heo 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2943afaa5c37STejun Heo 
2944afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2945afaa5c37STejun Heo 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2946afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2947afaa5c37STejun Heo 
2948c6fd2807SJeff Garzik 	return rc;
2949416dc9edSTejun Heo 
2950416dc9edSTejun Heo  fail:
29515958e302STejun Heo 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
29525958e302STejun Heo 	if (!ata_is_host_link(link) &&
29535958e302STejun Heo 	    sata_scr_read(link, SCR_STATUS, &sstatus))
29545958e302STejun Heo 		rc = -ERESTART;
29555958e302STejun Heo 
29567a46c078SGwendal Grignou 	if (try >= max_tries) {
29578ea7645cSTejun Heo 		/*
29588ea7645cSTejun Heo 		 * Thaw host port even if reset failed, so that the port
29598ea7645cSTejun Heo 		 * can be retried on the next phy event.  This risks
29608ea7645cSTejun Heo 		 * repeated EH runs but seems to be a better tradeoff than
29618ea7645cSTejun Heo 		 * shutting down a port after a botched hotplug attempt.
29628ea7645cSTejun Heo 		 */
29638ea7645cSTejun Heo 		if (ata_is_host_link(link))
29648ea7645cSTejun Heo 			ata_eh_thaw_port(ap);
2965416dc9edSTejun Heo 		goto out;
29668ea7645cSTejun Heo 	}
2967416dc9edSTejun Heo 
2968416dc9edSTejun Heo 	now = jiffies;
2969416dc9edSTejun Heo 	if (time_before(now, deadline)) {
2970416dc9edSTejun Heo 		unsigned long delta = deadline - now;
2971416dc9edSTejun Heo 
2972a9a79dfeSJoe Perches 		ata_link_warn(failed_link,
29730a2c0f56STejun Heo 			"reset failed (errno=%d), retrying in %u secs\n",
29740a2c0f56STejun Heo 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2975416dc9edSTejun Heo 
2976c0c362b6STejun Heo 		ata_eh_release(ap);
2977416dc9edSTejun Heo 		while (delta)
2978416dc9edSTejun Heo 			delta = schedule_timeout_uninterruptible(delta);
2979c0c362b6STejun Heo 		ata_eh_acquire(ap);
2980416dc9edSTejun Heo 	}
2981416dc9edSTejun Heo 
29827a46c078SGwendal Grignou 	/*
29837a46c078SGwendal Grignou 	 * While disks spin up behind a PMP, some controllers fail to send SRST.
29847a46c078SGwendal Grignou 	 * They need to be reset - as well as the PMP - before retrying.
29857a46c078SGwendal Grignou 	 */
29867a46c078SGwendal Grignou 	if (rc == -ERESTART) {
29877a46c078SGwendal Grignou 		if (ata_is_host_link(link))
29887a46c078SGwendal Grignou 			ata_eh_thaw_port(ap);
29897a46c078SGwendal Grignou 		goto out;
29907a46c078SGwendal Grignou 	}
29917a46c078SGwendal Grignou 
2992b1c72916STejun Heo 	if (try == max_tries - 1) {
2993a07d499bSTejun Heo 		sata_down_spd_limit(link, 0);
2994b1c72916STejun Heo 		if (slave)
2995a07d499bSTejun Heo 			sata_down_spd_limit(slave, 0);
2996b1c72916STejun Heo 	} else if (rc == -EPIPE)
2997a07d499bSTejun Heo 		sata_down_spd_limit(failed_link, 0);
2998b1c72916STejun Heo 
2999416dc9edSTejun Heo 	if (hardreset)
3000416dc9edSTejun Heo 		reset = hardreset;
3001416dc9edSTejun Heo 	goto retry;
3002c6fd2807SJeff Garzik }
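
/*
 * Usage sketch, assumed caller (not from this file): recovery code
 * hands ata_eh_reset() the reset methods from the port operations and
 * treats a negative return as a link that could not be revived.
 * Passing 1 for @classify requests that device classification be
 * cross-checked after the reset.
 */
static int example_reset_link(struct ata_link *link)
{
	struct ata_port_operations *ops = link->ap->ops;

	return ata_eh_reset(link, 1, ops->prereset, ops->softreset,
			    ops->hardreset, ops->postreset);
}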
3003c6fd2807SJeff Garzik 
300445fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
300545fabbb7SElias Oltmanns {
300645fabbb7SElias Oltmanns 	struct ata_link *link;
300745fabbb7SElias Oltmanns 	struct ata_device *dev;
300845fabbb7SElias Oltmanns 	unsigned long flags;
300945fabbb7SElias Oltmanns 
301045fabbb7SElias Oltmanns 	/*
301145fabbb7SElias Oltmanns 	 * This function can be thought of as an extended version of
301245fabbb7SElias Oltmanns 	 * ata_eh_about_to_do() specially crafted to accommodate the
301345fabbb7SElias Oltmanns 	 * requirements of ATA_EH_PARK handling. Since the EH thread
301445fabbb7SElias Oltmanns 	 * does not leave the do {} while () loop in ata_eh_recover as
301545fabbb7SElias Oltmanns 	 * long as the timeout for a park request to *one* device on
301645fabbb7SElias Oltmanns 	 * the port has not expired, and since we still want to pick
301745fabbb7SElias Oltmanns 	 * up park requests to other devices on the same port or
301845fabbb7SElias Oltmanns 	 * timeout updates for the same device, we have to pull
301945fabbb7SElias Oltmanns 	 * ATA_EH_PARK actions from eh_info into eh_context.i
302045fabbb7SElias Oltmanns 	 * ourselves at the beginning of each pass over the loop.
302145fabbb7SElias Oltmanns 	 *
302245fabbb7SElias Oltmanns 	 * Additionally, all write accesses to &ap->park_req_pending
302316735d02SWolfram Sang 	 * through reinit_completion() (see below) or complete_all()
302445fabbb7SElias Oltmanns 	 * (see ata_scsi_park_store()) are protected by the host lock.
302545fabbb7SElias Oltmanns 	 * As a result we have that park_req_pending.done is zero on
302645fabbb7SElias Oltmanns 	 * exit from this function, i.e. when ATA_EH_PARK actions for
302745fabbb7SElias Oltmanns 	 * *all* devices on port ap have been pulled into the
302845fabbb7SElias Oltmanns 	 * respective eh_context structs. If, and only if,
302945fabbb7SElias Oltmanns 	 * park_req_pending.done is non-zero by the time we reach
303045fabbb7SElias Oltmanns 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
303145fabbb7SElias Oltmanns 	 * has been scheduled for at least one of the devices on port
303245fabbb7SElias Oltmanns 	 * ap and we have to cycle over the do {} while () loop in
303345fabbb7SElias Oltmanns 	 * ata_eh_recover() again.
303445fabbb7SElias Oltmanns 	 */
303545fabbb7SElias Oltmanns 
303645fabbb7SElias Oltmanns 	spin_lock_irqsave(ap->lock, flags);
303716735d02SWolfram Sang 	reinit_completion(&ap->park_req_pending);
30381eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
30391eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
304045fabbb7SElias Oltmanns 			struct ata_eh_info *ehi = &link->eh_info;
304145fabbb7SElias Oltmanns 
304245fabbb7SElias Oltmanns 			link->eh_context.i.dev_action[dev->devno] |=
304345fabbb7SElias Oltmanns 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
304445fabbb7SElias Oltmanns 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
304545fabbb7SElias Oltmanns 		}
304645fabbb7SElias Oltmanns 	}
304745fabbb7SElias Oltmanns 	spin_unlock_irqrestore(ap->lock, flags);
304845fabbb7SElias Oltmanns }
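
/*
 * Hypothetical condensation of the park-wait loop in ata_eh_recover()
 * (details elided, not from this file): pull pending ATA_EH_PARK
 * actions, then sleep until either the earliest park timeout expires
 * or another park request / timeout update completes park_req_pending,
 * in which case the loop cycles again as described above.
 */
static void example_wait_for_park_requests(struct ata_port *ap,
					   unsigned long park_deadline)
{
	unsigned long now;

	do {
		ata_eh_pull_park_action(ap);
		now = jiffies;
		if (!time_before(now, park_deadline))
			break;
	} while (wait_for_completion_timeout(&ap->park_req_pending,
					     park_deadline - now));
}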
304945fabbb7SElias Oltmanns 
305045fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
305145fabbb7SElias Oltmanns {
305245fabbb7SElias Oltmanns 	struct ata_eh_context *ehc = &dev->link->eh_context;
305345fabbb7SElias Oltmanns 	struct ata_taskfile tf;
305445fabbb7SElias Oltmanns 	unsigned int err_mask;
305545fabbb7SElias Oltmanns 
305645fabbb7SElias Oltmanns 	ata_tf_init(dev, &tf);
305745fabbb7SElias Oltmanns 	if (park) {
305845fabbb7SElias Oltmanns 		ehc->unloaded_mask |= 1 << dev->devno;
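		/* IDLE IMMEDIATE with UNLOAD FEATURE (ATA8-ACS): feature 0x44
		 * plus the 0x4c/0x4e/0x55 LBA signature requests a head
		 * unload; the drive acknowledges by returning 0xc4 in LBA
		 * low, which the check below verifies.
		 */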
305945fabbb7SElias Oltmanns 		tf.command = ATA_CMD_IDLEIMMEDIATE;
306045fabbb7SElias Oltmanns 		tf.feature = 0x44;
306145fabbb7SElias Oltmanns 		tf.lbal = 0x4c;
306245fabbb7SElias Oltmanns 		tf.lbam = 0x4e;
306345fabbb7SElias Oltmanns 		tf.lbah = 0x55;
306445fabbb7SElias Oltmanns 	} else {
306545fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
306645fabbb7SElias Oltmanns 		tf.command = ATA_CMD_CHK_POWER;
306745fabbb7SElias Oltmanns 	}
306845fabbb7SElias Oltmanns 
306945fabbb7SElias Oltmanns 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
307045fabbb7SElias Oltmanns 	tf.protocol |= ATA_PROT_NODATA;
307145fabbb7SElias Oltmanns 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
307245fabbb7SElias Oltmanns 	if (park && (err_mask || tf.lbal != 0xc4)) {
3073a9a79dfeSJoe Perches 		ata_dev_err(dev, "head unload failed!\n");
307445fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
307545fabbb7SElias Oltmanns 	}
307645fabbb7SElias Oltmanns }
307745fabbb7SElias Oltmanns 
30780260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
3079c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
3080c6fd2807SJeff Garzik {
30810260731fSTejun Heo 	struct ata_port *ap = link->ap;
30820260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3083c6fd2807SJeff Garzik 	struct ata_device *dev;
30848c3c52a8STejun Heo 	unsigned int new_mask = 0;
3085c6fd2807SJeff Garzik 	unsigned long flags;
3086f58229f8STejun Heo 	int rc = 0;
3087c6fd2807SJeff Garzik 
3088c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3089c6fd2807SJeff Garzik 
30908c3c52a8STejun Heo 	/* For PATA drive side cable detection to work, IDENTIFY must
30918c3c52a8STejun Heo 	 * be done backwards such that PDIAG- is released by the slave
30928c3c52a8STejun Heo 	 * device before the master device is identified.
30938c3c52a8STejun Heo 	 */
30941eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3095f58229f8STejun Heo 		unsigned int action = ata_eh_dev_action(dev);
3096f58229f8STejun Heo 		unsigned int readid_flags = 0;
3097c6fd2807SJeff Garzik 
3098bff04647STejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3099bff04647STejun Heo 			readid_flags |= ATA_READID_POSTRESET;
3100bff04647STejun Heo 
31019666f400STejun Heo 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3102633273a3STejun Heo 			WARN_ON(dev->class == ATA_DEV_PMP);
3103633273a3STejun Heo 
3104b1c72916STejun Heo 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3105c6fd2807SJeff Garzik 				rc = -EIO;
31068c3c52a8STejun Heo 				goto err;
3107c6fd2807SJeff Garzik 			}
3108c6fd2807SJeff Garzik 
31090260731fSTejun Heo 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3110422c9daaSTejun Heo 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3111422c9daaSTejun Heo 						readid_flags);
3112c6fd2807SJeff Garzik 			if (rc)
31138c3c52a8STejun Heo 				goto err;
3114c6fd2807SJeff Garzik 
31150260731fSTejun Heo 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3116c6fd2807SJeff Garzik 
3117baa1e78aSTejun Heo 			/* Configuration may have changed, reconfigure
3118baa1e78aSTejun Heo 			 * transfer mode.
3119baa1e78aSTejun Heo 			 */
3120baa1e78aSTejun Heo 			ehc->i.flags |= ATA_EHI_SETMODE;
3121baa1e78aSTejun Heo 
3122c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
3123ad72cf98STejun Heo 			schedule_work(&(ap->scsi_rescan_task));
3124c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3125c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
3126c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
3127842faa6cSTejun Heo 			/* Temporarily set dev->class; it will be
3128842faa6cSTejun Heo 			 * permanently set once all configurations are
3129842faa6cSTejun Heo 			 * complete.  This is necessary because new
3130842faa6cSTejun Heo 			 * device configuration is done in two
3131842faa6cSTejun Heo 			 * separate loops.
3132842faa6cSTejun Heo 			 */
3133c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
3134c6fd2807SJeff Garzik 
3135633273a3STejun Heo 			if (dev->class == ATA_DEV_PMP)
3136633273a3STejun Heo 				rc = sata_pmp_attach(dev);
3137633273a3STejun Heo 			else
3138633273a3STejun Heo 				rc = ata_dev_read_id(dev, &dev->class,
3139633273a3STejun Heo 						     readid_flags, dev->id);
3140842faa6cSTejun Heo 
3141842faa6cSTejun Heo 			/* read_id might have changed class, store and reset */
3142842faa6cSTejun Heo 			ehc->classes[dev->devno] = dev->class;
3143842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
3144842faa6cSTejun Heo 
31458c3c52a8STejun Heo 			switch (rc) {
31468c3c52a8STejun Heo 			case 0:
314799cf610aSTejun Heo 				/* clear error info accumulated during probe */
314899cf610aSTejun Heo 				ata_ering_clear(&dev->ering);
3149f58229f8STejun Heo 				new_mask |= 1 << dev->devno;
31508c3c52a8STejun Heo 				break;
31518c3c52a8STejun Heo 			case -ENOENT:
315255a8e2c8STejun Heo 				/* IDENTIFY was issued to non-existent
315355a8e2c8STejun Heo 				 * device.  No need to reset.  Just
3154842faa6cSTejun Heo 				 * thaw and ignore the device.
315555a8e2c8STejun Heo 				 */
315655a8e2c8STejun Heo 				ata_eh_thaw_port(ap);
3157c6fd2807SJeff Garzik 				break;
31588c3c52a8STejun Heo 			default:
31598c3c52a8STejun Heo 				goto err;
31608c3c52a8STejun Heo 			}
31618c3c52a8STejun Heo 		}
3162c6fd2807SJeff Garzik 	}
3163c6fd2807SJeff Garzik 
3164c1c4e8d5STejun Heo 	/* PDIAG- should have been released, ask cable type if post-reset */
316533267325STejun Heo 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
316633267325STejun Heo 		if (ap->ops->cable_detect)
3167c1c4e8d5STejun Heo 			ap->cbl = ap->ops->cable_detect(ap);
316833267325STejun Heo 		ata_force_cbl(ap);
316933267325STejun Heo 	}
3170c1c4e8d5STejun Heo 
31718c3c52a8STejun Heo 	/* Configure new devices forward such that the user doesn't see
31728c3c52a8STejun Heo 	 * device detection messages backwards.
31738c3c52a8STejun Heo 	 */
31741eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
31754f7c2874STejun Heo 		if (!(new_mask & (1 << dev->devno)))
31768c3c52a8STejun Heo 			continue;
31778c3c52a8STejun Heo 
3178842faa6cSTejun Heo 		dev->class = ehc->classes[dev->devno];
3179842faa6cSTejun Heo 
31804f7c2874STejun Heo 		if (dev->class == ATA_DEV_PMP)
31814f7c2874STejun Heo 			continue;
31824f7c2874STejun Heo 
31838c3c52a8STejun Heo 		ehc->i.flags |= ATA_EHI_PRINTINFO;
31848c3c52a8STejun Heo 		rc = ata_dev_configure(dev);
31858c3c52a8STejun Heo 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3186842faa6cSTejun Heo 		if (rc) {
3187842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
31888c3c52a8STejun Heo 			goto err;
3189842faa6cSTejun Heo 		}
31908c3c52a8STejun Heo 
3191c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
3192c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3193c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3194baa1e78aSTejun Heo 
319555a8e2c8STejun Heo 		/* new device discovered, configure xfermode */
3196baa1e78aSTejun Heo 		ehc->i.flags |= ATA_EHI_SETMODE;
3197c6fd2807SJeff Garzik 	}
3198c6fd2807SJeff Garzik 
31998c3c52a8STejun Heo 	return 0;
32008c3c52a8STejun Heo 
32018c3c52a8STejun Heo  err:
3202c6fd2807SJeff Garzik 	*r_failed_dev = dev;
32038c3c52a8STejun Heo 	DPRINTK("EXIT rc=%d\n", rc);
3204c6fd2807SJeff Garzik 	return rc;
3205c6fd2807SJeff Garzik }
3206c6fd2807SJeff Garzik 
32076f1d1e3aSTejun Heo /**
32086f1d1e3aSTejun Heo  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
32096f1d1e3aSTejun Heo  *	@link: link on which timings will be programmed
321098a1708dSMartin Olsson  *	@r_failed_dev: out parameter for failed device
32116f1d1e3aSTejun Heo  *
32126f1d1e3aSTejun Heo  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
32136f1d1e3aSTejun Heo  *	ata_set_mode() fails, pointer to the failing device is
32146f1d1e3aSTejun Heo  *	returned in @r_failed_dev.
32156f1d1e3aSTejun Heo  *
32166f1d1e3aSTejun Heo  *	LOCKING:
32176f1d1e3aSTejun Heo  *	PCI/etc. bus probe sem.
32186f1d1e3aSTejun Heo  *
32196f1d1e3aSTejun Heo  *	RETURNS:
32206f1d1e3aSTejun Heo  *	0 on success, negative errno otherwise
32216f1d1e3aSTejun Heo  */
32226f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
32236f1d1e3aSTejun Heo {
32246f1d1e3aSTejun Heo 	struct ata_port *ap = link->ap;
322500115e0fSTejun Heo 	struct ata_device *dev;
322600115e0fSTejun Heo 	int rc;
32276f1d1e3aSTejun Heo 
322876326ac1STejun Heo 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
32291eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
323076326ac1STejun Heo 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
323176326ac1STejun Heo 			struct ata_ering_entry *ent;
323276326ac1STejun Heo 
323376326ac1STejun Heo 			ent = ata_ering_top(&dev->ering);
323476326ac1STejun Heo 			if (ent)
323576326ac1STejun Heo 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
323676326ac1STejun Heo 		}
323776326ac1STejun Heo 	}
323876326ac1STejun Heo 
32396f1d1e3aSTejun Heo 	/* has private set_mode? */
32406f1d1e3aSTejun Heo 	if (ap->ops->set_mode)
324100115e0fSTejun Heo 		rc = ap->ops->set_mode(link, r_failed_dev);
324200115e0fSTejun Heo 	else
324300115e0fSTejun Heo 		rc = ata_do_set_mode(link, r_failed_dev);
324400115e0fSTejun Heo 
324500115e0fSTejun Heo 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
32461eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
324700115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
324800115e0fSTejun Heo 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
324900115e0fSTejun Heo 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
325000115e0fSTejun Heo 
325100115e0fSTejun Heo 		if (dev->xfer_mode != saved_xfer_mode ||
325200115e0fSTejun Heo 		    ata_ncq_enabled(dev) != saved_ncq)
325300115e0fSTejun Heo 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
325400115e0fSTejun Heo 	}
325500115e0fSTejun Heo 
325600115e0fSTejun Heo 	return rc;
32576f1d1e3aSTejun Heo }
32586f1d1e3aSTejun Heo 
325911fc33daSTejun Heo /**
326011fc33daSTejun Heo  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
326111fc33daSTejun Heo  *	@dev: ATAPI device to clear UA for
326211fc33daSTejun Heo  *
326311fc33daSTejun Heo  *	Resets and other operations can make an ATAPI device raise
326411fc33daSTejun Heo  *	UNIT ATTENTION which causes the next operation to fail.  This
326511fc33daSTejun Heo  *	function clears UA.
326611fc33daSTejun Heo  *
326711fc33daSTejun Heo  *	LOCKING:
326811fc33daSTejun Heo  *	EH context (may sleep).
326911fc33daSTejun Heo  *
327011fc33daSTejun Heo  *	RETURNS:
327111fc33daSTejun Heo  *	0 on success, -errno on failure.
327211fc33daSTejun Heo  */
327311fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
327411fc33daSTejun Heo {
327511fc33daSTejun Heo 	int i;
327611fc33daSTejun Heo 
327711fc33daSTejun Heo 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3278b5357081STejun Heo 		u8 *sense_buffer = dev->link->ap->sector_buf;
327911fc33daSTejun Heo 		u8 sense_key = 0;
328011fc33daSTejun Heo 		unsigned int err_mask;
328111fc33daSTejun Heo 
328211fc33daSTejun Heo 		err_mask = atapi_eh_tur(dev, &sense_key);
328311fc33daSTejun Heo 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3284a9a79dfeSJoe Perches 			ata_dev_warn(dev,
3285a9a79dfeSJoe Perches 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3286a9a79dfeSJoe Perches 				     err_mask);
328711fc33daSTejun Heo 			return -EIO;
328811fc33daSTejun Heo 		}
328911fc33daSTejun Heo 
329011fc33daSTejun Heo 		if (!err_mask || sense_key != UNIT_ATTENTION)
329111fc33daSTejun Heo 			return 0;
329211fc33daSTejun Heo 
329311fc33daSTejun Heo 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
329411fc33daSTejun Heo 		if (err_mask) {
3295a9a79dfeSJoe Perches 			ata_dev_warn(dev, "failed to clear "
329611fc33daSTejun Heo 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
329711fc33daSTejun Heo 			return -EIO;
329811fc33daSTejun Heo 		}
329911fc33daSTejun Heo 	}
330011fc33daSTejun Heo 
3301a9a79dfeSJoe Perches 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3302a9a79dfeSJoe Perches 		     ATA_EH_UA_TRIES);
330311fc33daSTejun Heo 
330411fc33daSTejun Heo 	return 0;
330511fc33daSTejun Heo }
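
/*
 * Illustrative sketch, hypothetical caller (not from this file): after
 * a reset, recovery would clear UNIT ATTENTION on ATAPI devices so the
 * SCSI layer's next command does not fail spuriously.
 */
static int example_post_reset_cleanup(struct ata_device *dev)
{
	if (dev->class != ATA_DEV_ATAPI)
		return 0;
	return atapi_eh_clear_ua(dev);
}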
330611fc33daSTejun Heo 
33076013efd8STejun Heo /**
33086013efd8STejun Heo  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
33096013efd8STejun Heo  *	@dev: ATA device which may need FLUSH retry
33106013efd8STejun Heo  *
33116013efd8STejun Heo  *	If @dev failed FLUSH, it needs to be reported to the upper
33126013efd8STejun Heo  *	layer immediately, as it means that @dev failed to remap and
33136013efd8STejun Heo  *	has already lost at least a sector; further FLUSH retries
33146013efd8STejun Heo  *	won't make any difference to the lost sector.  However, if
33156013efd8STejun Heo  *	FLUSH failed for other reasons, for example a transmission
33166013efd8STejun Heo  *	error, FLUSH needs to be retried.
33176013efd8STejun Heo  *
33186013efd8STejun Heo  *	This function determines whether FLUSH failure retry is
33196013efd8STejun Heo  *	necessary and performs it if so.
33206013efd8STejun Heo  *
33216013efd8STejun Heo  *	RETURNS:
33226013efd8STejun Heo  *	0 if EH can continue, -errno if EH needs to be repeated.
33236013efd8STejun Heo  */
33246013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev)
33256013efd8STejun Heo {
33266013efd8STejun Heo 	struct ata_link *link = dev->link;
33276013efd8STejun Heo 	struct ata_port *ap = link->ap;
33286013efd8STejun Heo 	struct ata_queued_cmd *qc;
33296013efd8STejun Heo 	struct ata_taskfile tf;
33306013efd8STejun Heo 	unsigned int err_mask;
33316013efd8STejun Heo 	int rc = 0;
33326013efd8STejun Heo 
33336013efd8STejun Heo 	/* did flush fail for this device? */
33346013efd8STejun Heo 	if (!ata_tag_valid(link->active_tag))
33356013efd8STejun Heo 		return 0;
33366013efd8STejun Heo 
33376013efd8STejun Heo 	qc = __ata_qc_from_tag(ap, link->active_tag);
33386013efd8STejun Heo 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
33396013efd8STejun Heo 			       qc->tf.command != ATA_CMD_FLUSH))
33406013efd8STejun Heo 		return 0;
33416013efd8STejun Heo 
33426013efd8STejun Heo 	/* if the device failed it, it should be reported to upper layers */
33436013efd8STejun Heo 	if (qc->err_mask & AC_ERR_DEV)
33446013efd8STejun Heo 		return 0;
33456013efd8STejun Heo 
33466013efd8STejun Heo 	/* flush failed for some other reason, give it another shot */
33476013efd8STejun Heo 	ata_tf_init(dev, &tf);
33486013efd8STejun Heo 
33496013efd8STejun Heo 	tf.command = qc->tf.command;
33506013efd8STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE;
33516013efd8STejun Heo 	tf.protocol = ATA_PROT_NODATA;
33526013efd8STejun Heo 
3353a9a79dfeSJoe Perches 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
33546013efd8STejun Heo 		       tf.command, qc->err_mask);
33556013efd8STejun Heo 
33566013efd8STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
33576013efd8STejun Heo 	if (!err_mask) {
33586013efd8STejun Heo 		/*
33596013efd8STejun Heo 		 * FLUSH is complete but there's no way to
33606013efd8STejun Heo 		 * successfully complete a failed command from EH.
33616013efd8STejun Heo 		 * Make sure retry is allowed at least once and
33626013efd8STejun Heo 		 * retry it; whatever was in the cache is already
33636013efd8STejun Heo 		 * on the platter and this won't cause an infinite
33646013efd8STejun Heo 		 * loop.
33656013efd8STejun Heo 		 */
33666013efd8STejun Heo 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
33676013efd8STejun Heo 	} else {
3368a9a79dfeSJoe Perches 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
33696013efd8STejun Heo 			       err_mask);
33706013efd8STejun Heo 		rc = -EIO;
33716013efd8STejun Heo 
33726013efd8STejun Heo 		/* if device failed it, report it to upper layers */
33736013efd8STejun Heo 		if (err_mask & AC_ERR_DEV) {
33746013efd8STejun Heo 			qc->err_mask |= AC_ERR_DEV;
33756013efd8STejun Heo 			qc->result_tf = tf;
33766013efd8STejun Heo 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
33776013efd8STejun Heo 				rc = 0;
33786013efd8STejun Heo 		}
33796013efd8STejun Heo 	}
33806013efd8STejun Heo 	return rc;
33816013efd8STejun Heo }
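
/*
 * The retry rule above, reduced to a predicate for illustration
 * (hypothetical helper, kept separate from the actual EH flow): a FLUSH
 * that the device itself failed (AC_ERR_DEV) has already cost data and
 * must be reported, while any other failure is worth one more try.
 */
static bool example_flush_worth_retrying(struct ata_queued_cmd *qc)
{
	bool is_flush = qc->tf.command == ATA_CMD_FLUSH ||
			qc->tf.command == ATA_CMD_FLUSH_EXT;

	return is_flush && !(qc->err_mask & AC_ERR_DEV);
}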
33826013efd8STejun Heo 
33836b7ae954STejun Heo /**
33846b7ae954STejun Heo  *	ata_eh_set_lpm - configure SATA interface power management
33856b7ae954STejun Heo  *	@link: link to configure power management
33866b7ae954STejun Heo  *	@policy: the link power management policy
33876b7ae954STejun Heo  *	@r_failed_dev: out parameter for failed device
33886b7ae954STejun Heo  *
33896b7ae954STejun Heo  *	Enable SATA interface power management.  This will enable
33906b7ae954STejun Heo  *	Device Initiated Power Management (DIPM) for the min_power
33916b7ae954STejun Heo  *	policy, and then call driver-specific callbacks for enabling
33926b7ae954STejun Heo  *	Host Initiated Power Management (HIPM).
33936b7ae954STejun Heo  *
33946b7ae954STejun Heo  *	LOCKING:
33956b7ae954STejun Heo  *	EH context.
33966b7ae954STejun Heo  *
33976b7ae954STejun Heo  *	RETURNS:
33986b7ae954STejun Heo  *	0 on success, -errno on failure.
33996b7ae954STejun Heo  */
34006b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
34016b7ae954STejun Heo 			  struct ata_device **r_failed_dev)
34026b7ae954STejun Heo {
34036c8ea89cSTejun Heo 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
34046b7ae954STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
34056b7ae954STejun Heo 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3406e5005b15STejun Heo 	enum ata_lpm_policy old_policy = link->lpm_policy;
34075f6f12ccSTejun Heo 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
34086b7ae954STejun Heo 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
34096b7ae954STejun Heo 	unsigned int err_mask;
34106b7ae954STejun Heo 	int rc;
34116b7ae954STejun Heo 
34126b7ae954STejun Heo 	/* if the link or host doesn't do LPM, noop */
34136b7ae954STejun Heo 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
34146b7ae954STejun Heo 		return 0;
34156b7ae954STejun Heo 
34166b7ae954STejun Heo 	/*
34176b7ae954STejun Heo 	 * DIPM is enabled only for MIN_POWER as some devices
34186b7ae954STejun Heo 	 * misbehave when the host NACKs the transition to SLUMBER.  Order
34196b7ae954STejun Heo 	 * device and link configurations such that the host always
34206b7ae954STejun Heo 	 * allows DIPM requests.
34216b7ae954STejun Heo 	 */
34226b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
34236b7ae954STejun Heo 		bool hipm = ata_id_has_hipm(dev->id);
3424ae01b249STejun Heo 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
34256b7ae954STejun Heo 
34266b7ae954STejun Heo 		/* find the first enabled device and the first LPM-capable device */
34276b7ae954STejun Heo 		if (!link_dev)
34286b7ae954STejun Heo 			link_dev = dev;
34296b7ae954STejun Heo 
34306b7ae954STejun Heo 		if (!lpm_dev && (hipm || dipm))
34316b7ae954STejun Heo 			lpm_dev = dev;
34326b7ae954STejun Heo 
34336b7ae954STejun Heo 		hints &= ~ATA_LPM_EMPTY;
34346b7ae954STejun Heo 		if (!hipm)
34356b7ae954STejun Heo 			hints &= ~ATA_LPM_HIPM;
34366b7ae954STejun Heo 
34376b7ae954STejun Heo 		/* disable DIPM before changing link config */
34386b7ae954STejun Heo 		if (policy != ATA_LPM_MIN_POWER && dipm) {
34396b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
34406b7ae954STejun Heo 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
34416b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
3442a9a79dfeSJoe Perches 				ata_dev_warn(dev,
34436b7ae954STejun Heo 					     "failed to disable DIPM, Emask 0x%x\n",
34446b7ae954STejun Heo 					     err_mask);
34456b7ae954STejun Heo 				rc = -EIO;
34466b7ae954STejun Heo 				goto fail;
34476b7ae954STejun Heo 			}
34486b7ae954STejun Heo 		}
34496b7ae954STejun Heo 	}
34506b7ae954STejun Heo 
34516c8ea89cSTejun Heo 	if (ap) {
34526b7ae954STejun Heo 		rc = ap->ops->set_lpm(link, policy, hints);
34536b7ae954STejun Heo 		if (!rc && ap->slave_link)
34546b7ae954STejun Heo 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
34556c8ea89cSTejun Heo 	} else
34566c8ea89cSTejun Heo 		rc = sata_pmp_set_lpm(link, policy, hints);
34576b7ae954STejun Heo 
34586b7ae954STejun Heo 	/*
34596b7ae954STejun Heo 	 * Attribute link config failure to the first (LPM) enabled
34606b7ae954STejun Heo 	 * device on the link.
34616b7ae954STejun Heo 	 */
34626b7ae954STejun Heo 	if (rc) {
34636b7ae954STejun Heo 		if (rc == -EOPNOTSUPP) {
34646b7ae954STejun Heo 			link->flags |= ATA_LFLAG_NO_LPM;
34656b7ae954STejun Heo 			return 0;
34666b7ae954STejun Heo 		}
34676b7ae954STejun Heo 		dev = lpm_dev ? lpm_dev : link_dev;
34686b7ae954STejun Heo 		goto fail;
34696b7ae954STejun Heo 	}
34706b7ae954STejun Heo 
3471e5005b15STejun Heo 	/*
3472e5005b15STejun Heo 	 * Low level driver acked the transition.  Issue DIPM command
3473e5005b15STejun Heo 	 * with the new policy set.
3474e5005b15STejun Heo 	 */
3475e5005b15STejun Heo 	link->lpm_policy = policy;
3476e5005b15STejun Heo 	if (ap && ap->slave_link)
3477e5005b15STejun Heo 		ap->slave_link->lpm_policy = policy;
3478e5005b15STejun Heo 
34796b7ae954STejun Heo 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
34806b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
3481ae01b249STejun Heo 		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3482ae01b249STejun Heo 		    ata_id_has_dipm(dev->id)) {
34836b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
34846b7ae954STejun Heo 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
34856b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
3486a9a79dfeSJoe Perches 				ata_dev_warn(dev,
34876b7ae954STejun Heo 					"failed to enable DIPM, Emask 0x%x\n",
34886b7ae954STejun Heo 					err_mask);
34896b7ae954STejun Heo 				rc = -EIO;
34906b7ae954STejun Heo 				goto fail;
34916b7ae954STejun Heo 			}
34926b7ae954STejun Heo 		}
34936b7ae954STejun Heo 	}
34946b7ae954STejun Heo 
34956b7ae954STejun Heo 	return 0;
34966b7ae954STejun Heo 
34976b7ae954STejun Heo fail:
3498e5005b15STejun Heo 	/* restore the old policy */
3499e5005b15STejun Heo 	link->lpm_policy = old_policy;
3500e5005b15STejun Heo 	if (ap && ap->slave_link)
3501e5005b15STejun Heo 		ap->slave_link->lpm_policy = old_policy;
3502e5005b15STejun Heo 
35036b7ae954STejun Heo 	/* if no device or only one more chance is left, disable LPM */
35046b7ae954STejun Heo 	if (!dev || ehc->tries[dev->devno] <= 2) {
3505a9a79dfeSJoe Perches 		ata_link_warn(link, "disabling LPM on the link\n");
35066b7ae954STejun Heo 		link->flags |= ATA_LFLAG_NO_LPM;
35076b7ae954STejun Heo 	}
35086b7ae954STejun Heo 	if (r_failed_dev)
35096b7ae954STejun Heo 		*r_failed_dev = dev;
35106b7ae954STejun Heo 	return rc;
35116b7ae954STejun Heo }
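
/*
 * Sketch of the ->set_lpm() contract as seen from the code above, using
 * a hypothetical driver callback (the condition and the register comment
 * are illustrative, not from any real driver).  Returning -EOPNOTSUPP
 * tells EH this link can never do LPM and it is quietly disabled; any
 * other error is charged to the first LPM-capable (or else first
 * enabled) device on the link.
 */
static int example_drv_set_lpm(struct ata_link *link,
			       enum ata_lpm_policy policy, unsigned hints)
{
	/* pretend aggressive power saving needs HIPM-capable devices */
	if (policy == ATA_LPM_MIN_POWER && !(hints & ATA_LPM_HIPM))
		return -EOPNOTSUPP;

	/* a real driver would program its link power registers here */
	return 0;
}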
35126b7ae954STejun Heo 
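/**
 *	ata_link_nr_enabled - Return number of enabled devices on link
 *	@link: ATA link to examine
 *
 *	Return the number of enabled devices on @link.
 *
 *	LOCKING:
 *	None.
 */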
35138a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link)
3514c6fd2807SJeff Garzik {
3515f58229f8STejun Heo 	struct ata_device *dev;
3516f58229f8STejun Heo 	int cnt = 0;
3517c6fd2807SJeff Garzik 
35181eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED)
3519c6fd2807SJeff Garzik 		cnt++;
3520c6fd2807SJeff Garzik 	return cnt;
3521c6fd2807SJeff Garzik }
3522c6fd2807SJeff Garzik 
35230260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
3524c6fd2807SJeff Garzik {
3525f58229f8STejun Heo 	struct ata_device *dev;
3526f58229f8STejun Heo 	int cnt = 0;
3527c6fd2807SJeff Garzik 
35281eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL)
3529f58229f8STejun Heo 		if (dev->class == ATA_DEV_UNKNOWN)
3530c6fd2807SJeff Garzik 			cnt++;
3531c6fd2807SJeff Garzik 	return cnt;
3532c6fd2807SJeff Garzik }
3533c6fd2807SJeff Garzik 
35340260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
3535c6fd2807SJeff Garzik {
3536672b2d65STejun Heo 	struct ata_port *ap = link->ap;
35370260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3538f58229f8STejun Heo 	struct ata_device *dev;
3539c6fd2807SJeff Garzik 
3540f9df58cbSTejun Heo 	/* skip disabled links */
3541f9df58cbSTejun Heo 	if (link->flags & ATA_LFLAG_DISABLED)
3542f9df58cbSTejun Heo 		return 1;
3543f9df58cbSTejun Heo 
3544e2f3d75fSTejun Heo 	/* skip if explicitly requested */
3545e2f3d75fSTejun Heo 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3546e2f3d75fSTejun Heo 		return 1;
3547e2f3d75fSTejun Heo 
3548672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
3549672b2d65STejun Heo 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3550672b2d65STejun Heo 		return 0;
3551672b2d65STejun Heo 
3552672b2d65STejun Heo 	/* reset at least once if reset is requested */
3553672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3554672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3555c6fd2807SJeff Garzik 		return 0;
3556c6fd2807SJeff Garzik 
3557c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
35581eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3559c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3560c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3561c6fd2807SJeff Garzik 			return 0;
3562c6fd2807SJeff Garzik 	}
3563c6fd2807SJeff Garzik 
3564c6fd2807SJeff Garzik 	return 1;
3565c6fd2807SJeff Garzik }
3566c6fd2807SJeff Garzik 
3567c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3568c2c7a89cSTejun Heo {
3569c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3570c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3571c2c7a89cSTejun Heo 	int *trials = void_arg;
3572c2c7a89cSTejun Heo 
35736868225eSLin Ming 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
35746868225eSLin Ming 	    (ent->timestamp < now - min(now, interval)))
3575c2c7a89cSTejun Heo 		return -1;
3576c2c7a89cSTejun Heo 
3577c2c7a89cSTejun Heo 	(*trials)++;
3578c2c7a89cSTejun Heo 	return 0;
3579c2c7a89cSTejun Heo }
3580c2c7a89cSTejun Heo 
358102c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
358202c05a27STejun Heo {
358302c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3584c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3585c2c7a89cSTejun Heo 	int trials = 0;
358602c05a27STejun Heo 
358702c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
358802c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
358902c05a27STejun Heo 		return 0;
359002c05a27STejun Heo 
359102c05a27STejun Heo 	ata_eh_detach_dev(dev);
359202c05a27STejun Heo 	ata_dev_init(dev);
359302c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3594cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
359500115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
359600115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
359702c05a27STejun Heo 
35986b7ae954STejun Heo 	/* the link may be in deep sleep, wake it up */
35996c8ea89cSTejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
36006c8ea89cSTejun Heo 		if (ata_is_host_link(link))
36016c8ea89cSTejun Heo 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
36026c8ea89cSTejun Heo 					       ATA_LPM_EMPTY);
36036c8ea89cSTejun Heo 		else
36046c8ea89cSTejun Heo 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
36056c8ea89cSTejun Heo 					 ATA_LPM_EMPTY);
36066c8ea89cSTejun Heo 	}
36076b7ae954STejun Heo 
3608c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering.  The specific
3609c2c7a89cSTejun Heo 	 * error mask used is irrelevant.  Because a successful device
3610c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3611c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3612c2c7a89cSTejun Heo 	 *
3613c2c7a89cSTejun Heo 	 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3614c2c7a89cSTejun Heo 	 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3615c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
3616c2c7a89cSTejun Heo 	 *
3617c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3618c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3619c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3620c2c7a89cSTejun Heo 	 */
3621c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3622c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3623c2c7a89cSTejun Heo 
3624c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3625c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3626c2c7a89cSTejun Heo 
362702c05a27STejun Heo 	return 1;
362802c05a27STejun Heo }
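
/*
 * Sketch of the ata_ering_map() callback convention that the trial
 * counting above relies on (hypothetical callback shown): the walk
 * starts at the most recent entry and stops as soon as the callback
 * returns non-zero, so returning -1 at the first ATA_EFLAG_OLD_ER entry
 * confines the count to entries recorded since the ering was last
 * cleared.
 */
static int example_count_new_errors_cb(struct ata_ering_entry *ent, void *arg)
{
	int *cnt = arg;

	if (ent->eflags & ATA_EFLAG_OLD_ER)
		return -1;	/* stop: older entries were already cleared */

	(*cnt)++;
	return 0;		/* keep walking towards older entries */
}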
362902c05a27STejun Heo 
36309b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3631fee7ca72STejun Heo {
36329af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3633fee7ca72STejun Heo 
3634cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3635cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3636cf9a590aSTejun Heo 	 */
3637cf9a590aSTejun Heo 	if (err != -EAGAIN)
3638fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3639fee7ca72STejun Heo 
3640fee7ca72STejun Heo 	switch (err) {
3641fee7ca72STejun Heo 	case -ENODEV:
3642fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3643fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
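		/* fall through */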
3644fee7ca72STejun Heo 	case -EINVAL:
3645fee7ca72STejun Heo 		/* give it just one more chance */
3646fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
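		/* fall through */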
3647fee7ca72STejun Heo 	case -EIO:
3648d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3649fee7ca72STejun Heo 			/* This is the last chance, better to slow
3650fee7ca72STejun Heo 			 * down than lose it.
3651fee7ca72STejun Heo 			 */
3652a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3653d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3654fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3655fee7ca72STejun Heo 		}
3656fee7ca72STejun Heo 	}
3657fee7ca72STejun Heo 
3658fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3659fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3660fee7ca72STejun Heo 		ata_dev_disable(dev);
3661fee7ca72STejun Heo 
3662fee7ca72STejun Heo 		/* detach if offline */
3663b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3664fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3665fee7ca72STejun Heo 
366602c05a27STejun Heo 		/* schedule probe if necessary */
366787fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3668fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
366987fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
367087fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
367187fbc5a0STejun Heo 		}
36729b1e2658STejun Heo 
36739b1e2658STejun Heo 		return 1;
3674fee7ca72STejun Heo 	} else {
3675cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
36769b1e2658STejun Heo 		return 0;
3677fee7ca72STejun Heo 	}
3678fee7ca72STejun Heo }
3679fee7ca72STejun Heo 
3680c6fd2807SJeff Garzik /**
3681c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
3682c6fd2807SJeff Garzik  *	@ap: host port to recover
3683c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3684c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3685c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3686c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
36879b1e2658STejun Heo  *	@r_failed_link: out parameter for failed link
3688c6fd2807SJeff Garzik  *
3689c6fd2807SJeff Garzik  *	This is the alpha and omega, eum and yang, heart and soul of
3690c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
36919b1e2658STejun Heo  *	recover each link and hotplug requests are recorded in the
36929b1e2658STejun Heo  *	link's eh_context.  This function executes all the operations
36939b1e2658STejun Heo  *	with appropriate retries and fallbacks to resurrect failed
3694c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
3695c6fd2807SJeff Garzik  *
3696c6fd2807SJeff Garzik  *	LOCKING:
3697c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3698c6fd2807SJeff Garzik  *
3699c6fd2807SJeff Garzik  *	RETURNS:
3700c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3701c6fd2807SJeff Garzik  */
3702fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3703c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
37049b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
37059b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3706c6fd2807SJeff Garzik {
37079b1e2658STejun Heo 	struct ata_link *link;
3708c6fd2807SJeff Garzik 	struct ata_device *dev;
37096b7ae954STejun Heo 	int rc, nr_fails;
371045fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3711c6fd2807SJeff Garzik 
3712c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3713c6fd2807SJeff Garzik 
3714c6fd2807SJeff Garzik 	/* prep for recovery */
37151eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37169b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37179b1e2658STejun Heo 
3718f9df58cbSTejun Heo 		/* re-enable link? */
3719f9df58cbSTejun Heo 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3720f9df58cbSTejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3721f9df58cbSTejun Heo 			spin_lock_irqsave(ap->lock, flags);
3722f9df58cbSTejun Heo 			link->flags &= ~ATA_LFLAG_DISABLED;
3723f9df58cbSTejun Heo 			spin_unlock_irqrestore(ap->lock, flags);
3724f9df58cbSTejun Heo 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3725f9df58cbSTejun Heo 		}
3726f9df58cbSTejun Heo 
37271eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
3728fd995f70STejun Heo 			if (link->flags & ATA_LFLAG_NO_RETRY)
3729fd995f70STejun Heo 				ehc->tries[dev->devno] = 1;
3730fd995f70STejun Heo 			else
3731c6fd2807SJeff Garzik 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3732c6fd2807SJeff Garzik 
373379a55b72STejun Heo 			/* collect port action mask recorded in dev actions */
37349b1e2658STejun Heo 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
37359b1e2658STejun Heo 					 ~ATA_EH_PERDEV_MASK;
3736f58229f8STejun Heo 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
373779a55b72STejun Heo 
3738c6fd2807SJeff Garzik 			/* process hotplug request */
3739c6fd2807SJeff Garzik 			if (dev->flags & ATA_DFLAG_DETACH)
3740c6fd2807SJeff Garzik 				ata_eh_detach_dev(dev);
3741c6fd2807SJeff Garzik 
374202c05a27STejun Heo 			/* schedule probe if necessary */
374302c05a27STejun Heo 			if (!ata_dev_enabled(dev))
374402c05a27STejun Heo 				ata_eh_schedule_probe(dev);
3745c6fd2807SJeff Garzik 		}
37469b1e2658STejun Heo 	}
3747c6fd2807SJeff Garzik 
3748c6fd2807SJeff Garzik  retry:
3749c6fd2807SJeff Garzik 	rc = 0;
3750c6fd2807SJeff Garzik 
3751c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
3752c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3753c6fd2807SJeff Garzik 		goto out;
3754c6fd2807SJeff Garzik 
37559b1e2658STejun Heo 	/* prep for EH */
37561eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37579b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37589b1e2658STejun Heo 
3759c6fd2807SJeff Garzik 		/* skip EH if possible. */
37600260731fSTejun Heo 		if (ata_eh_skip_recovery(link))
3761c6fd2807SJeff Garzik 			ehc->i.action = 0;
3762c6fd2807SJeff Garzik 
37631eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
3764f58229f8STejun Heo 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
37659b1e2658STejun Heo 	}
3766c6fd2807SJeff Garzik 
3767c6fd2807SJeff Garzik 	/* reset */
37681eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37699b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37709b1e2658STejun Heo 
3771cf480626STejun Heo 		if (!(ehc->i.action & ATA_EH_RESET))
37729b1e2658STejun Heo 			continue;
37739b1e2658STejun Heo 
37749b1e2658STejun Heo 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3775dc98c32cSTejun Heo 				  prereset, softreset, hardreset, postreset);
3776c6fd2807SJeff Garzik 		if (rc) {
3777a9a79dfeSJoe Perches 			ata_link_err(link, "reset failed, giving up\n");
3778c6fd2807SJeff Garzik 			goto out;
3779c6fd2807SJeff Garzik 		}
37809b1e2658STejun Heo 	}
3781c6fd2807SJeff Garzik 
378245fabbb7SElias Oltmanns 	do {
378345fabbb7SElias Oltmanns 		unsigned long now;
378445fabbb7SElias Oltmanns 
378545fabbb7SElias Oltmanns 		/*
378645fabbb7SElias Oltmanns 		 * clears ATA_EH_PARK in eh_info and resets
378745fabbb7SElias Oltmanns 		 * ap->park_req_pending
378845fabbb7SElias Oltmanns 		 */
378945fabbb7SElias Oltmanns 		ata_eh_pull_park_action(ap);
379045fabbb7SElias Oltmanns 
379145fabbb7SElias Oltmanns 		deadline = jiffies;
37921eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
37931eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
379445fabbb7SElias Oltmanns 				struct ata_eh_context *ehc = &link->eh_context;
379545fabbb7SElias Oltmanns 				unsigned long tmp;
379645fabbb7SElias Oltmanns 
37979162c657SHannes Reinecke 				if (dev->class != ATA_DEV_ATA &&
37989162c657SHannes Reinecke 				    dev->class != ATA_DEV_ZAC)
379945fabbb7SElias Oltmanns 					continue;
380045fabbb7SElias Oltmanns 				if (!(ehc->i.dev_action[dev->devno] &
380145fabbb7SElias Oltmanns 				      ATA_EH_PARK))
380245fabbb7SElias Oltmanns 					continue;
380345fabbb7SElias Oltmanns 				tmp = dev->unpark_deadline;
380445fabbb7SElias Oltmanns 				if (time_before(deadline, tmp))
380545fabbb7SElias Oltmanns 					deadline = tmp;
380645fabbb7SElias Oltmanns 				else if (time_before_eq(tmp, jiffies))
380745fabbb7SElias Oltmanns 					continue;
380845fabbb7SElias Oltmanns 				if (ehc->unloaded_mask & (1 << dev->devno))
380945fabbb7SElias Oltmanns 					continue;
381045fabbb7SElias Oltmanns 
381145fabbb7SElias Oltmanns 				ata_eh_park_issue_cmd(dev, 1);
381245fabbb7SElias Oltmanns 			}
381345fabbb7SElias Oltmanns 		}
381445fabbb7SElias Oltmanns 
381545fabbb7SElias Oltmanns 		now = jiffies;
381645fabbb7SElias Oltmanns 		if (time_before_eq(deadline, now))
381745fabbb7SElias Oltmanns 			break;
381845fabbb7SElias Oltmanns 
3819c0c362b6STejun Heo 		ata_eh_release(ap);
382045fabbb7SElias Oltmanns 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
382145fabbb7SElias Oltmanns 						       deadline - now);
3822c0c362b6STejun Heo 		ata_eh_acquire(ap);
382345fabbb7SElias Oltmanns 	} while (deadline);
38241eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
38251eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
382645fabbb7SElias Oltmanns 			if (!(link->eh_context.unloaded_mask &
382745fabbb7SElias Oltmanns 			      (1 << dev->devno)))
382845fabbb7SElias Oltmanns 				continue;
382945fabbb7SElias Oltmanns 
383045fabbb7SElias Oltmanns 			ata_eh_park_issue_cmd(dev, 0);
383145fabbb7SElias Oltmanns 			ata_eh_done(link, dev, ATA_EH_PARK);
383245fabbb7SElias Oltmanns 		}
383345fabbb7SElias Oltmanns 	}
383445fabbb7SElias Oltmanns 
38359b1e2658STejun Heo 	/* the rest */
38366b7ae954STejun Heo 	nr_fails = 0;
38376b7ae954STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
38389b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
38399b1e2658STejun Heo 
38406b7ae954STejun Heo 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
38416b7ae954STejun Heo 			goto config_lpm;
38426b7ae954STejun Heo 
3843c6fd2807SJeff Garzik 		/* revalidate existing devices and attach new ones */
38440260731fSTejun Heo 		rc = ata_eh_revalidate_and_attach(link, &dev);
3845c6fd2807SJeff Garzik 		if (rc)
38466b7ae954STejun Heo 			goto rest_fail;
3847c6fd2807SJeff Garzik 
3848633273a3STejun Heo 		/* if a PMP got attached, return; PMP EH will take care of it */
3849633273a3STejun Heo 		if (link->device->class == ATA_DEV_PMP) {
3850633273a3STejun Heo 			ehc->i.action = 0;
3851633273a3STejun Heo 			return 0;
3852633273a3STejun Heo 		}
3853633273a3STejun Heo 
3854baa1e78aSTejun Heo 		/* configure transfer mode if necessary */
3855baa1e78aSTejun Heo 		if (ehc->i.flags & ATA_EHI_SETMODE) {
38560260731fSTejun Heo 			rc = ata_set_mode(link, &dev);
38574ae72a1eSTejun Heo 			if (rc)
38586b7ae954STejun Heo 				goto rest_fail;
3859baa1e78aSTejun Heo 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3860c6fd2807SJeff Garzik 		}
3861c6fd2807SJeff Garzik 
386211fc33daSTejun Heo 		/* If reset has been issued, clear UA to avoid
386311fc33daSTejun Heo 		 * disrupting the current users of the device.
386411fc33daSTejun Heo 		 */
386511fc33daSTejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
38661eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
386711fc33daSTejun Heo 				if (dev->class != ATA_DEV_ATAPI)
386811fc33daSTejun Heo 					continue;
386911fc33daSTejun Heo 				rc = atapi_eh_clear_ua(dev);
387011fc33daSTejun Heo 				if (rc)
38716b7ae954STejun Heo 					goto rest_fail;
387221334205SAaron Lu 				if (zpodd_dev_enabled(dev))
387321334205SAaron Lu 					zpodd_post_poweron(dev);
387411fc33daSTejun Heo 			}
387511fc33daSTejun Heo 		}
387611fc33daSTejun Heo 
38776013efd8STejun Heo 		/* retry flush if necessary */
38786013efd8STejun Heo 		ata_for_each_dev(dev, link, ALL) {
38799162c657SHannes Reinecke 			if (dev->class != ATA_DEV_ATA &&
38809162c657SHannes Reinecke 			    dev->class != ATA_DEV_ZAC)
38816013efd8STejun Heo 				continue;
38826013efd8STejun Heo 			rc = ata_eh_maybe_retry_flush(dev);
38836013efd8STejun Heo 			if (rc)
38846b7ae954STejun Heo 				goto rest_fail;
38856013efd8STejun Heo 		}
38866013efd8STejun Heo 
38876b7ae954STejun Heo 	config_lpm:
388811fc33daSTejun Heo 		/* configure link power saving */
38896b7ae954STejun Heo 		if (link->lpm_policy != ap->target_lpm_policy) {
38906b7ae954STejun Heo 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
38916b7ae954STejun Heo 			if (rc)
38926b7ae954STejun Heo 				goto rest_fail;
38936b7ae954STejun Heo 		}
3894ca77329fSKristen Carlson Accardi 
38959b1e2658STejun Heo 		/* this link is okay now */
38969b1e2658STejun Heo 		ehc->i.flags = 0;
38979b1e2658STejun Heo 		continue;
3898c6fd2807SJeff Garzik 
38996b7ae954STejun Heo 	rest_fail:
39006b7ae954STejun Heo 		nr_fails++;
39016b7ae954STejun Heo 		if (dev)
39020a2c0f56STejun Heo 			ata_eh_handle_dev_fail(dev, rc);
3903c6fd2807SJeff Garzik 
3904b06ce3e5STejun Heo 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3905b06ce3e5STejun Heo 			/* PMP reset requires working host port.
3906b06ce3e5STejun Heo 			 * Can't retry if it's frozen.
3907b06ce3e5STejun Heo 			 */
3908071f44b1STejun Heo 			if (sata_pmp_attached(ap))
3909b06ce3e5STejun Heo 				goto out;
39109b1e2658STejun Heo 			break;
39119b1e2658STejun Heo 		}
3912b06ce3e5STejun Heo 	}
39139b1e2658STejun Heo 
39146b7ae954STejun Heo 	if (nr_fails)
3915c6fd2807SJeff Garzik 		goto retry;
3916c6fd2807SJeff Garzik 
3917c6fd2807SJeff Garzik  out:
39189b1e2658STejun Heo 	if (rc && r_failed_link)
39199b1e2658STejun Heo 		*r_failed_link = link;
3920c6fd2807SJeff Garzik 
3921c6fd2807SJeff Garzik 	DPRINTK("EXIT, rc=%d\n", rc);
3922c6fd2807SJeff Garzik 	return rc;
3923c6fd2807SJeff Garzik }
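
/*
 * Usage sketch for the out parameter documented above (hypothetical
 * caller; the in-tree callers are ata_do_eh() below and the PMP EH in
 * libata-pmp.c): when recovery fails, @r_failed_link identifies which
 * link gave up so the caller can decide how much of the port to write
 * off.
 */
static void example_recover_and_report(struct ata_port *ap)
{
	struct ata_link *failed_link = NULL;
	int rc;

	rc = ata_eh_recover(ap, ap->ops->prereset, ap->ops->softreset,
			    ap->ops->hardreset, ap->ops->postreset,
			    &failed_link);
	if (rc && failed_link)
		ata_link_err(failed_link, "recovery failed (rc=%d)\n", rc);
}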
3924c6fd2807SJeff Garzik 
3925c6fd2807SJeff Garzik /**
3926c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
3927c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
3928c6fd2807SJeff Garzik  *
3929c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
3930c6fd2807SJeff Garzik  *	failed qcs.
3931c6fd2807SJeff Garzik  *
3932c6fd2807SJeff Garzik  *	LOCKING:
3933c6fd2807SJeff Garzik  *	None.
3934c6fd2807SJeff Garzik  */
3935fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3936c6fd2807SJeff Garzik {
3937c6fd2807SJeff Garzik 	int tag;
3938c6fd2807SJeff Garzik 
3939c6fd2807SJeff Garzik 	/* retry or finish qcs */
3940c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3941c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3942c6fd2807SJeff Garzik 
3943c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3944c6fd2807SJeff Garzik 			continue;
3945c6fd2807SJeff Garzik 
3946c6fd2807SJeff Garzik 		if (qc->err_mask) {
3947c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
3948c6fd2807SJeff Garzik 			 * generate sense data in this function,
3949c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
3950c6fd2807SJeff Garzik 			 */
395103faab78STejun Heo 			if (qc->flags & ATA_QCFLAG_RETRY)
3952c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
395303faab78STejun Heo 			else
395403faab78STejun Heo 				ata_eh_qc_complete(qc);
3955c6fd2807SJeff Garzik 		} else {
3956c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3957c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
3958c6fd2807SJeff Garzik 			} else {
3959c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
3960c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3961c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
3962c6fd2807SJeff Garzik 			}
3963c6fd2807SJeff Garzik 		}
3964c6fd2807SJeff Garzik 	}
3965da917d69STejun Heo 
3966da917d69STejun Heo 	/* make sure nr_active_links is zero after EH */
3967da917d69STejun Heo 	WARN_ON(ap->nr_active_links);
3968da917d69STejun Heo 	ap->nr_active_links = 0;
3969c6fd2807SJeff Garzik }
3970c6fd2807SJeff Garzik 
3971c6fd2807SJeff Garzik /**
3972c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
3973c6fd2807SJeff Garzik  *	@ap: host port to handle error for
3975c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3976c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3977c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3978c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
3979c6fd2807SJeff Garzik  *
3980c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
3981c6fd2807SJeff Garzik  *
3982c6fd2807SJeff Garzik  *	LOCKING:
3983c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3984c6fd2807SJeff Garzik  */
3985c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3986c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3987c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
3988c6fd2807SJeff Garzik {
39899b1e2658STejun Heo 	struct ata_device *dev;
39909b1e2658STejun Heo 	int rc;
39919b1e2658STejun Heo 
39929b1e2658STejun Heo 	ata_eh_autopsy(ap);
39939b1e2658STejun Heo 	ata_eh_report(ap);
39949b1e2658STejun Heo 
39959b1e2658STejun Heo 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
39969b1e2658STejun Heo 			    NULL);
39979b1e2658STejun Heo 	if (rc) {
39981eca4365STejun Heo 		ata_for_each_dev(dev, &ap->link, ALL)
39999b1e2658STejun Heo 			ata_dev_disable(dev);
40009b1e2658STejun Heo 	}
40019b1e2658STejun Heo 
4002c6fd2807SJeff Garzik 	ata_eh_finish(ap);
4003c6fd2807SJeff Garzik }
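
/*
 * Sketch of how a low-level driver with controller-specific reset quirks
 * would typically drive the sequence above from its ->error_handler()
 * hook.  All "example_" names are hypothetical; the std helpers are the
 * real libata ones.
 */
static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline)
{
	/* a real driver would poke controller PHY bits around this */
	return sata_std_hardreset(link, class, deadline);
}

static void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, NULL, example_hardreset,
		  ata_std_postreset);
}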
4004c6fd2807SJeff Garzik 
4005a1efdabaSTejun Heo /**
4006a1efdabaSTejun Heo  *	ata_std_error_handler - standard error handler
4007a1efdabaSTejun Heo  *	@ap: host port to handle error for
4008a1efdabaSTejun Heo  *
4009a1efdabaSTejun Heo  *	Standard error handler
4010a1efdabaSTejun Heo  *
4011a1efdabaSTejun Heo  *	LOCKING:
4012a1efdabaSTejun Heo  *	Kernel thread context (may sleep).
4013a1efdabaSTejun Heo  */
4014a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
4015a1efdabaSTejun Heo {
4016a1efdabaSTejun Heo 	struct ata_port_operations *ops = ap->ops;
4017a1efdabaSTejun Heo 	ata_reset_fn_t hardreset = ops->hardreset;
4018a1efdabaSTejun Heo 
401957c9efdfSTejun Heo 	/* ignore built-in hardreset if SCR access is not available */
4020fe06e5f9STejun Heo 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4021a1efdabaSTejun Heo 		hardreset = NULL;
4022a1efdabaSTejun Heo 
4023a1efdabaSTejun Heo 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4024a1efdabaSTejun Heo }
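
/*
 * Sketch of how the hardreset fallback above plays out for a
 * hypothetical controller without SCR access: inheriting the generic
 * SATA ops brings in ata_std_error_handler() (wired up in
 * ata_base_port_ops), and because no .scr_read is provided,
 * sata_scr_valid() fails, the inherited sata_std_hardreset is skipped,
 * and reset is left to whatever .softreset the driver supplies.
 */
static struct ata_port_operations example_no_scr_port_ops = {
	.inherits	= &sata_port_ops,
	/* no .scr_read/.scr_write on purpose */
};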
4025a1efdabaSTejun Heo 
40266ffa01d8STejun Heo #ifdef CONFIG_PM
4027c6fd2807SJeff Garzik /**
4028c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
4029c6fd2807SJeff Garzik  *	@ap: port to suspend
4030c6fd2807SJeff Garzik  *
4031c6fd2807SJeff Garzik  *	Suspend @ap.
4032c6fd2807SJeff Garzik  *
4033c6fd2807SJeff Garzik  *	LOCKING:
4034c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4035c6fd2807SJeff Garzik  */
4036c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
4037c6fd2807SJeff Garzik {
4038c6fd2807SJeff Garzik 	unsigned long flags;
4039c6fd2807SJeff Garzik 	int rc = 0;
40403dc67440SAaron Lu 	struct ata_device *dev;
4041c6fd2807SJeff Garzik 
4042c6fd2807SJeff Garzik 	/* are we suspending? */
4043c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4044c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4045a7ff60dbSAaron Lu 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4046c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4047c6fd2807SJeff Garzik 		return;
4048c6fd2807SJeff Garzik 	}
4049c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4050c6fd2807SJeff Garzik 
4051c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4052c6fd2807SJeff Garzik 
40533dc67440SAaron Lu 	/*
40543dc67440SAaron Lu 	 * If we have a ZPODD attached, check its zero
40553dc67440SAaron Lu 	 * power ready status before the port is frozen.
4056a7ff60dbSAaron Lu 	 * Only needed for runtime suspend.
40573dc67440SAaron Lu 	 */
4058a7ff60dbSAaron Lu 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
40593dc67440SAaron Lu 		ata_for_each_dev(dev, &ap->link, ENABLED) {
40603dc67440SAaron Lu 			if (zpodd_dev_enabled(dev))
40613dc67440SAaron Lu 				zpodd_on_suspend(dev);
40623dc67440SAaron Lu 		}
4063a7ff60dbSAaron Lu 	}
40643dc67440SAaron Lu 
406564578a3dSTejun Heo 	/* tell ACPI we're suspending */
406664578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
406764578a3dSTejun Heo 	if (rc)
406864578a3dSTejun Heo 		goto out;
406964578a3dSTejun Heo 
4070c6fd2807SJeff Garzik 	/* suspend */
4071c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
4072c6fd2807SJeff Garzik 
4073c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
4074c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4075c6fd2807SJeff Garzik 
4076a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
407764578a3dSTejun Heo  out:
4078bc6e7c4bSDan Williams 	/* update the flags */
4079c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4080c6fd2807SJeff Garzik 
4081c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4082c6fd2807SJeff Garzik 	if (rc == 0)
4083c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
408464578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4085c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
4086c6fd2807SJeff Garzik 
4087c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4088c6fd2807SJeff Garzik 
4089c6fd2807SJeff Garzik 	return;
4090c6fd2807SJeff Garzik }
4091c6fd2807SJeff Garzik 
4092c6fd2807SJeff Garzik /**
4093c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
4094c6fd2807SJeff Garzik  *	@ap: port to resume
4095c6fd2807SJeff Garzik  *
4096c6fd2807SJeff Garzik  *	Resume @ap.
4097c6fd2807SJeff Garzik  *
4098c6fd2807SJeff Garzik  *	LOCKING:
4099c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4100c6fd2807SJeff Garzik  */
4101c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
4102c6fd2807SJeff Garzik {
41036f9c1ea2STejun Heo 	struct ata_link *link;
41046f9c1ea2STejun Heo 	struct ata_device *dev;
4105c6fd2807SJeff Garzik 	unsigned long flags;
41069666f400STejun Heo 	int rc = 0;
4107c6fd2807SJeff Garzik 
4108c6fd2807SJeff Garzik 	/* are we resuming? */
4109c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4110c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4111a7ff60dbSAaron Lu 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4112c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4113c6fd2807SJeff Garzik 		return;
4114c6fd2807SJeff Garzik 	}
4115c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4116c6fd2807SJeff Garzik 
41179666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4118c6fd2807SJeff Garzik 
41196f9c1ea2STejun Heo 	/*
41206f9c1ea2STejun Heo 	 * Error timestamps are in jiffies, which don't advance while
41216f9c1ea2STejun Heo 	 * suspended, and PHY events during resume aren't uncommon.
41226f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
41236f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
41246f9c1ea2STejun Heo 	 * Clear error history.
41256f9c1ea2STejun Heo 	 */
41266f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
41276f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
41286f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
41296f9c1ea2STejun Heo 
4130a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
4131bd3adca5SShaohua Li 
4132c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
4133c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
4134c6fd2807SJeff Garzik 
41356746544cSTejun Heo 	/* tell ACPI that we're resuming */
41366746544cSTejun Heo 	ata_acpi_on_resume(ap);
41376746544cSTejun Heo 
4138bc6e7c4bSDan Williams 	/* update the flags */
4139c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4140c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4141c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4142c6fd2807SJeff Garzik }
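
/*
 * Sketch of the driver-side callbacks invoked by the two handlers above
 * (hypothetical, minimal implementations).  ->port_suspend() runs on a
 * port that EH has already frozen; ->port_resume() runs from EH before
 * the links are reset and revalidated.
 */
static int example_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	/* quiesce the controller; PMSG_IS_AUTO(mesg) flags runtime PM */
	return 0;
}

static int example_port_resume(struct ata_port *ap)
{
	/* bring the controller back to a usable state */
	return 0;
}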
41436ffa01d8STejun Heo #endif /* CONFIG_PM */
4144