xref: /openbmc/linux/drivers/ata/libata-eh.c (revision 54c38444fad6a99b4b19512f8f0055d69115e69e)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9c6fd2807SJeff Garzik  *
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or
12c6fd2807SJeff Garzik  *  modify it under the terms of the GNU General Public License as
13c6fd2807SJeff Garzik  *  published by the Free Software Foundation; either version 2, or
14c6fd2807SJeff Garzik  *  (at your option) any later version.
15c6fd2807SJeff Garzik  *
16c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
17c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19c6fd2807SJeff Garzik  *  General Public License for more details.
20c6fd2807SJeff Garzik  *
21c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
22c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
23c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24c6fd2807SJeff Garzik  *  USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36242f9dcbSJens Axboe #include <linux/blkdev.h>
372855568bSJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <scsi/scsi.h>
39c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
40c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
41c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
42c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
43c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
44c6fd2807SJeff Garzik 
45c6fd2807SJeff Garzik #include <linux/libata.h>
46c6fd2807SJeff Garzik 
47c6fd2807SJeff Garzik #include "libata.h"
48c6fd2807SJeff Garzik 
497d47e8d4STejun Heo enum {
503884f7b0STejun Heo 	/* speed down verdicts */
517d47e8d4STejun Heo 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
527d47e8d4STejun Heo 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
537d47e8d4STejun Heo 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
5476326ac1STejun Heo 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
553884f7b0STejun Heo 
563884f7b0STejun Heo 	/* error flags */
573884f7b0STejun Heo 	ATA_EFLAG_IS_IO			= (1 << 0),
5876326ac1STejun Heo 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
593884f7b0STejun Heo 
603884f7b0STejun Heo 	/* error categories */
613884f7b0STejun Heo 	ATA_ECAT_NONE			= 0,
623884f7b0STejun Heo 	ATA_ECAT_ATA_BUS		= 1,
633884f7b0STejun Heo 	ATA_ECAT_TOUT_HSM		= 2,
643884f7b0STejun Heo 	ATA_ECAT_UNK_DEV		= 3,
6575f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_NONE		= 4,
6675f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
6775f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
6875f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
6975f9cafcSTejun Heo 	ATA_ECAT_NR			= 8,
707d47e8d4STejun Heo 
7187fbc5a0STejun Heo 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
7287fbc5a0STejun Heo 
730a2c0f56STejun Heo 	/* always put at least this amount of time between resets */
740a2c0f56STejun Heo 	ATA_EH_RESET_COOL_DOWN		=  5000,
750a2c0f56STejun Heo 
76341c2c95STejun Heo 	/* Waiting in ->prereset can never be reliable.  It's
77341c2c95STejun Heo 	 * sometimes nice to wait there but it can't be depended upon;
78341c2c95STejun Heo 	 * otherwise, we wouldn't be resetting.  Just give it enough
79341c2c95STejun Heo 	 * time for most drives to spin up.
8031daabdaSTejun Heo 	 */
81341c2c95STejun Heo 	ATA_EH_PRERESET_TIMEOUT		= 10000,
82341c2c95STejun Heo 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
8311fc33daSTejun Heo 
8411fc33daSTejun Heo 	ATA_EH_UA_TRIES			= 5,
85c2c7a89cSTejun Heo 
86c2c7a89cSTejun Heo 	/* probe speed down parameters, see ata_eh_schedule_probe() */
87c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
88c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIALS		= 2,
8931daabdaSTejun Heo };
9031daabdaSTejun Heo 
9131daabdaSTejun Heo /* The following table determines how we sequence resets.  Each entry
9231daabdaSTejun Heo  * represents timeout for that try.  The first try can be soft or
9331daabdaSTejun Heo  * hardreset.  All others are hardreset if available.  In most cases
9431daabdaSTejun Heo  * the first reset w/ 10sec timeout should succeed.  Following entries
9531daabdaSTejun Heo  * are mostly for error handling, hotplug and retarded devices.
9631daabdaSTejun Heo  */
9731daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = {
98341c2c95STejun Heo 	10000,	/* most drives spin up by 10sec */
99341c2c95STejun Heo 	10000,	/* > 99% working drives spin up before 20sec */
100341c2c95STejun Heo 	35000,	/* give > 30 secs of idleness for retarded devices */
101341c2c95STejun Heo 	 5000,	/* and sweet one last chance */
102d8af0eb6STejun Heo 	ULONG_MAX, /* > 1 min has elapsed, give up */
10331daabdaSTejun Heo };
10431daabdaSTejun Heo 
10587fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = {
10687fbc5a0STejun Heo 	 5000,	/* covers > 99% of successes and not too boring on failures */
10787fbc5a0STejun Heo 	10000,  /* combined time till here is enough even for media access */
10887fbc5a0STejun Heo 	30000,	/* for true idiots */
10987fbc5a0STejun Heo 	ULONG_MAX,
11087fbc5a0STejun Heo };
11187fbc5a0STejun Heo 
11287fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = {
11387fbc5a0STejun Heo 	 5000,	/* same rationale as identify timeout */
11487fbc5a0STejun Heo 	10000,	/* ditto */
11587fbc5a0STejun Heo 	/* but no merciful 30sec for other commands, it just isn't worth it */
11687fbc5a0STejun Heo 	ULONG_MAX,
11787fbc5a0STejun Heo };
11887fbc5a0STejun Heo 
11987fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent {
12087fbc5a0STejun Heo 	const u8		*commands;
12187fbc5a0STejun Heo 	const unsigned long	*timeouts;
12287fbc5a0STejun Heo };
12387fbc5a0STejun Heo 
12487fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal
12587fbc5a0STejun Heo  * commands.  Each table entry is a command class and matches the
12687fbc5a0STejun Heo  * commands the entry applies to and the timeout table to use.
12787fbc5a0STejun Heo  *
12887fbc5a0STejun Heo  * On the retry after a command timed out, the next timeout value from
12987fbc5a0STejun Heo  * the table is used.  If the table doesn't contain further entries,
13087fbc5a0STejun Heo  * the last value is used.
13187fbc5a0STejun Heo  *
13287fbc5a0STejun Heo  * ehc->cmd_timeout_idx keeps track of which timeout to use per
13387fbc5a0STejun Heo  * command class, so if SET_FEATURES times out on the first try, the
13487fbc5a0STejun Heo  * next try will use the second timeout value only for that class.
13587fbc5a0STejun Heo  */
13687fbc5a0STejun Heo #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
13787fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent
13887fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
13987fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
14087fbc5a0STejun Heo 	  .timeouts = ata_eh_identify_timeouts, },
14187fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
14287fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14387fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
14487fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14587fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
14687fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14787fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
14887fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
14987fbc5a0STejun Heo };
15087fbc5a0STejun Heo #undef CMDS
15187fbc5a0STejun Heo 
152c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
1536ffa01d8STejun Heo #ifdef CONFIG_PM
154c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
155c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
1566ffa01d8STejun Heo #else /* CONFIG_PM */
1576ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap)
1586ffa01d8STejun Heo { }
1596ffa01d8STejun Heo 
1606ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap)
1616ffa01d8STejun Heo { }
1626ffa01d8STejun Heo #endif /* CONFIG_PM */
163c6fd2807SJeff Garzik 
164b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
165b64bbc39STejun Heo 				 va_list args)
166b64bbc39STejun Heo {
167b64bbc39STejun Heo 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
168b64bbc39STejun Heo 				     ATA_EH_DESC_LEN - ehi->desc_len,
169b64bbc39STejun Heo 				     fmt, args);
170b64bbc39STejun Heo }
171b64bbc39STejun Heo 
172b64bbc39STejun Heo /**
173b64bbc39STejun Heo  *	__ata_ehi_push_desc - push error description without adding separator
174b64bbc39STejun Heo  *	@ehi: target EHI
175b64bbc39STejun Heo  *	@fmt: printf format string
176b64bbc39STejun Heo  *
177b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
178b64bbc39STejun Heo  *
179b64bbc39STejun Heo  *	LOCKING:
180b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
181b64bbc39STejun Heo  */
182b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
183b64bbc39STejun Heo {
184b64bbc39STejun Heo 	va_list args;
185b64bbc39STejun Heo 
186b64bbc39STejun Heo 	va_start(args, fmt);
187b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
188b64bbc39STejun Heo 	va_end(args);
189b64bbc39STejun Heo }
190b64bbc39STejun Heo 
191b64bbc39STejun Heo /**
192b64bbc39STejun Heo  *	ata_ehi_push_desc - push error description with separator
193b64bbc39STejun Heo  *	@ehi: target EHI
194b64bbc39STejun Heo  *	@fmt: printf format string
195b64bbc39STejun Heo  *
196b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
197b64bbc39STejun Heo  *	If @ehi->desc is not empty, ", " is added in-between.
198b64bbc39STejun Heo  *
199b64bbc39STejun Heo  *	LOCKING:
200b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
201b64bbc39STejun Heo  */
202b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
203b64bbc39STejun Heo {
204b64bbc39STejun Heo 	va_list args;
205b64bbc39STejun Heo 
206b64bbc39STejun Heo 	if (ehi->desc_len)
207b64bbc39STejun Heo 		__ata_ehi_push_desc(ehi, ", ");
208b64bbc39STejun Heo 
209b64bbc39STejun Heo 	va_start(args, fmt);
210b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
211b64bbc39STejun Heo 	va_end(args);
212b64bbc39STejun Heo }
213b64bbc39STejun Heo 
214b64bbc39STejun Heo /**
215b64bbc39STejun Heo  *	ata_ehi_clear_desc - clean error description
216b64bbc39STejun Heo  *	@ehi: target EHI
217b64bbc39STejun Heo  *
218b64bbc39STejun Heo  *	Clear @ehi->desc.
219b64bbc39STejun Heo  *
220b64bbc39STejun Heo  *	LOCKING:
221b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
222b64bbc39STejun Heo  */
223b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi)
224b64bbc39STejun Heo {
225b64bbc39STejun Heo 	ehi->desc[0] = '\0';
226b64bbc39STejun Heo 	ehi->desc_len = 0;
227b64bbc39STejun Heo }
228b64bbc39STejun Heo 
229cbcdd875STejun Heo /**
230cbcdd875STejun Heo  *	ata_port_desc - append port description
231cbcdd875STejun Heo  *	@ap: target ATA port
232cbcdd875STejun Heo  *	@fmt: printf format string
233cbcdd875STejun Heo  *
234cbcdd875STejun Heo  *	Format string according to @fmt and append it to port
235cbcdd875STejun Heo  *	description.  If port description is not empty, " " is added
236cbcdd875STejun Heo  *	in-between.  This function is to be used while initializing
237cbcdd875STejun Heo  *	ata_host.  The description is printed on host registration.
238cbcdd875STejun Heo  *
239cbcdd875STejun Heo  *	LOCKING:
240cbcdd875STejun Heo  *	None.
241cbcdd875STejun Heo  */
242cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
243cbcdd875STejun Heo {
244cbcdd875STejun Heo 	va_list args;
245cbcdd875STejun Heo 
246cbcdd875STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
247cbcdd875STejun Heo 
248cbcdd875STejun Heo 	if (ap->link.eh_info.desc_len)
249cbcdd875STejun Heo 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
250cbcdd875STejun Heo 
251cbcdd875STejun Heo 	va_start(args, fmt);
252cbcdd875STejun Heo 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
253cbcdd875STejun Heo 	va_end(args);
254cbcdd875STejun Heo }
255cbcdd875STejun Heo 
256cbcdd875STejun Heo #ifdef CONFIG_PCI
257cbcdd875STejun Heo 
258cbcdd875STejun Heo /**
259cbcdd875STejun Heo  *	ata_port_pbar_desc - append PCI BAR description
260cbcdd875STejun Heo  *	@ap: target ATA port
261cbcdd875STejun Heo  *	@bar: target PCI BAR
262cbcdd875STejun Heo  *	@offset: offset into PCI BAR
263cbcdd875STejun Heo  *	@name: name of the area
264cbcdd875STejun Heo  *
265cbcdd875STejun Heo  *	If @offset is negative, this function formats a string which
266cbcdd875STejun Heo  *	contains the name, address, size and type of the BAR and
267cbcdd875STejun Heo  *	appends it to the port description.  If @offset is zero or
268cbcdd875STejun Heo  *	positive, only name and offsetted address is appended.
269cbcdd875STejun Heo  *
270cbcdd875STejun Heo  *	LOCKING:
271cbcdd875STejun Heo  *	None.
272cbcdd875STejun Heo  */
273cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
274cbcdd875STejun Heo 			const char *name)
275cbcdd875STejun Heo {
276cbcdd875STejun Heo 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
277cbcdd875STejun Heo 	char *type = "";
278cbcdd875STejun Heo 	unsigned long long start, len;
279cbcdd875STejun Heo 
280cbcdd875STejun Heo 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
281cbcdd875STejun Heo 		type = "m";
282cbcdd875STejun Heo 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
283cbcdd875STejun Heo 		type = "i";
284cbcdd875STejun Heo 
285cbcdd875STejun Heo 	start = (unsigned long long)pci_resource_start(pdev, bar);
286cbcdd875STejun Heo 	len = (unsigned long long)pci_resource_len(pdev, bar);
287cbcdd875STejun Heo 
288cbcdd875STejun Heo 	if (offset < 0)
289cbcdd875STejun Heo 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
290cbcdd875STejun Heo 	else
291e6a73ab1SAndrew Morton 		ata_port_desc(ap, "%s 0x%llx", name,
292e6a73ab1SAndrew Morton 				start + (unsigned long long)offset);
293cbcdd875STejun Heo }
294cbcdd875STejun Heo 
295cbcdd875STejun Heo #endif /* CONFIG_PCI */
296cbcdd875STejun Heo 
29787fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd)
29887fbc5a0STejun Heo {
29987fbc5a0STejun Heo 	int i;
30087fbc5a0STejun Heo 
30187fbc5a0STejun Heo 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
30287fbc5a0STejun Heo 		const u8 *cur;
30387fbc5a0STejun Heo 
30487fbc5a0STejun Heo 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
30587fbc5a0STejun Heo 			if (*cur == cmd)
30687fbc5a0STejun Heo 				return i;
30787fbc5a0STejun Heo 	}
30887fbc5a0STejun Heo 
30987fbc5a0STejun Heo 	return -1;
31087fbc5a0STejun Heo }
31187fbc5a0STejun Heo 
31287fbc5a0STejun Heo /**
31387fbc5a0STejun Heo  *	ata_internal_cmd_timeout - determine timeout for an internal command
31487fbc5a0STejun Heo  *	@dev: target device
31587fbc5a0STejun Heo  *	@cmd: internal command to be issued
31687fbc5a0STejun Heo  *
31787fbc5a0STejun Heo  *	Determine timeout for internal command @cmd for @dev.
31887fbc5a0STejun Heo  *
31987fbc5a0STejun Heo  *	LOCKING:
32087fbc5a0STejun Heo  *	EH context.
32187fbc5a0STejun Heo  *
32287fbc5a0STejun Heo  *	RETURNS:
32387fbc5a0STejun Heo  *	Determined timeout.
32487fbc5a0STejun Heo  */
32587fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
32687fbc5a0STejun Heo {
32787fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
32887fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
32987fbc5a0STejun Heo 	int idx;
33087fbc5a0STejun Heo 
33187fbc5a0STejun Heo 	if (ent < 0)
33287fbc5a0STejun Heo 		return ATA_EH_CMD_DFL_TIMEOUT;
33387fbc5a0STejun Heo 
33487fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
33587fbc5a0STejun Heo 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
33687fbc5a0STejun Heo }
33787fbc5a0STejun Heo 
33887fbc5a0STejun Heo /**
33987fbc5a0STejun Heo  *	ata_internal_cmd_timed_out - notification for internal command timeout
34087fbc5a0STejun Heo  *	@dev: target device
34187fbc5a0STejun Heo  *	@cmd: internal command which timed out
34287fbc5a0STejun Heo  *
34387fbc5a0STejun Heo  *	Notify EH that internal command @cmd for @dev timed out.  This
34487fbc5a0STejun Heo  *	function should be called only for commands whose timeouts are
34587fbc5a0STejun Heo  *	determined using ata_internal_cmd_timeout().
34687fbc5a0STejun Heo  *
34787fbc5a0STejun Heo  *	LOCKING:
34887fbc5a0STejun Heo  *	EH context.
34987fbc5a0STejun Heo  */
35087fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
35187fbc5a0STejun Heo {
35287fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
35387fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
35487fbc5a0STejun Heo 	int idx;
35587fbc5a0STejun Heo 
35687fbc5a0STejun Heo 	if (ent < 0)
35787fbc5a0STejun Heo 		return;
35887fbc5a0STejun Heo 
35987fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
36087fbc5a0STejun Heo 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
36187fbc5a0STejun Heo 		ehc->cmd_timeout_idx[dev->devno][ent]++;
36287fbc5a0STejun Heo }
36387fbc5a0STejun Heo 
3643884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
365c6fd2807SJeff Garzik 			     unsigned int err_mask)
366c6fd2807SJeff Garzik {
367c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
368c6fd2807SJeff Garzik 
369c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
370c6fd2807SJeff Garzik 
371c6fd2807SJeff Garzik 	ering->cursor++;
372c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
373c6fd2807SJeff Garzik 
374c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
3753884f7b0STejun Heo 	ent->eflags = eflags;
376c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
377c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
378c6fd2807SJeff Garzik }
379c6fd2807SJeff Garzik 
38076326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
38176326ac1STejun Heo {
38276326ac1STejun Heo 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
38376326ac1STejun Heo 
38476326ac1STejun Heo 	if (ent->err_mask)
38576326ac1STejun Heo 		return ent;
38676326ac1STejun Heo 	return NULL;
38776326ac1STejun Heo }
38876326ac1STejun Heo 
3897d47e8d4STejun Heo static void ata_ering_clear(struct ata_ering *ering)
390c6fd2807SJeff Garzik {
3917d47e8d4STejun Heo 	memset(ering, 0, sizeof(*ering));
392c6fd2807SJeff Garzik }
393c6fd2807SJeff Garzik 
394c6fd2807SJeff Garzik static int ata_ering_map(struct ata_ering *ering,
395c6fd2807SJeff Garzik 			 int (*map_fn)(struct ata_ering_entry *, void *),
396c6fd2807SJeff Garzik 			 void *arg)
397c6fd2807SJeff Garzik {
398c6fd2807SJeff Garzik 	int idx, rc = 0;
399c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
400c6fd2807SJeff Garzik 
401c6fd2807SJeff Garzik 	idx = ering->cursor;
402c6fd2807SJeff Garzik 	do {
403c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
404c6fd2807SJeff Garzik 		if (!ent->err_mask)
405c6fd2807SJeff Garzik 			break;
406c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
407c6fd2807SJeff Garzik 		if (rc)
408c6fd2807SJeff Garzik 			break;
409c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
410c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
411c6fd2807SJeff Garzik 
412c6fd2807SJeff Garzik 	return rc;
413c6fd2807SJeff Garzik }
414c6fd2807SJeff Garzik 
415c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
416c6fd2807SJeff Garzik {
4179af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
418c6fd2807SJeff Garzik 
419c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
420c6fd2807SJeff Garzik }
421c6fd2807SJeff Garzik 
422f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
423c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
424c6fd2807SJeff Garzik {
425f58229f8STejun Heo 	struct ata_device *tdev;
426c6fd2807SJeff Garzik 
427c6fd2807SJeff Garzik 	if (!dev) {
428c6fd2807SJeff Garzik 		ehi->action &= ~action;
4291eca4365STejun Heo 		ata_for_each_dev(tdev, link, ALL)
430f58229f8STejun Heo 			ehi->dev_action[tdev->devno] &= ~action;
431c6fd2807SJeff Garzik 	} else {
432c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
433c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
434c6fd2807SJeff Garzik 
435c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
436c6fd2807SJeff Garzik 		if (ehi->action & action) {
4371eca4365STejun Heo 			ata_for_each_dev(tdev, link, ALL)
438f58229f8STejun Heo 				ehi->dev_action[tdev->devno] |=
439f58229f8STejun Heo 					ehi->action & action;
440c6fd2807SJeff Garzik 			ehi->action &= ~action;
441c6fd2807SJeff Garzik 		}
442c6fd2807SJeff Garzik 
443c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
444c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
445c6fd2807SJeff Garzik 	}
446c6fd2807SJeff Garzik }
447c6fd2807SJeff Garzik 
448c6fd2807SJeff Garzik /**
449c6fd2807SJeff Garzik  *	ata_scsi_timed_out - SCSI layer time out callback
450c6fd2807SJeff Garzik  *	@cmd: timed out SCSI command
451c6fd2807SJeff Garzik  *
452c6fd2807SJeff Garzik  *	Handles SCSI layer timeout.  We race with normal completion of
453c6fd2807SJeff Garzik  *	the qc for @cmd.  If the qc is already gone, we lose and let
454c6fd2807SJeff Garzik  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
455c6fd2807SJeff Garzik  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
456c6fd2807SJeff Garzik  *	from finishing it by setting EH_SCHEDULED and return
457c6fd2807SJeff Garzik  *	EH_NOT_HANDLED.
458c6fd2807SJeff Garzik  *
459c6fd2807SJeff Garzik  *	TODO: kill this function once old EH is gone.
460c6fd2807SJeff Garzik  *
461c6fd2807SJeff Garzik  *	LOCKING:
462c6fd2807SJeff Garzik  *	Called from timer context
463c6fd2807SJeff Garzik  *
464c6fd2807SJeff Garzik  *	RETURNS:
465c6fd2807SJeff Garzik  *	EH_HANDLED or EH_NOT_HANDLED
466c6fd2807SJeff Garzik  */
467242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
468c6fd2807SJeff Garzik {
469c6fd2807SJeff Garzik 	struct Scsi_Host *host = cmd->device->host;
470c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
471c6fd2807SJeff Garzik 	unsigned long flags;
472c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
473242f9dcbSJens Axboe 	enum blk_eh_timer_return ret;
474c6fd2807SJeff Garzik 
475c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
476c6fd2807SJeff Garzik 
477c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
478242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
479c6fd2807SJeff Garzik 		goto out;
480c6fd2807SJeff Garzik 	}
481c6fd2807SJeff Garzik 
482242f9dcbSJens Axboe 	ret = BLK_EH_HANDLED;
483c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4849af5c9c9STejun Heo 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
485c6fd2807SJeff Garzik 	if (qc) {
486c6fd2807SJeff Garzik 		WARN_ON(qc->scsicmd != cmd);
487c6fd2807SJeff Garzik 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
488c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_TIMEOUT;
489242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
490c6fd2807SJeff Garzik 	}
491c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
492c6fd2807SJeff Garzik 
493c6fd2807SJeff Garzik  out:
494c6fd2807SJeff Garzik 	DPRINTK("EXIT, ret=%d\n", ret);
495c6fd2807SJeff Garzik 	return ret;
496c6fd2807SJeff Garzik }
497c6fd2807SJeff Garzik 
498ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap)
499ece180d1STejun Heo {
500ece180d1STejun Heo 	struct ata_link *link;
501ece180d1STejun Heo 	struct ata_device *dev;
502ece180d1STejun Heo 	unsigned long flags;
503ece180d1STejun Heo 
504ece180d1STejun Heo 	/* Restore SControl IPM and SPD for the next driver and
505ece180d1STejun Heo 	 * disable attached devices.
506ece180d1STejun Heo 	 */
507ece180d1STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
508ece180d1STejun Heo 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
509ece180d1STejun Heo 		ata_for_each_dev(dev, link, ALL)
510ece180d1STejun Heo 			ata_dev_disable(dev);
511ece180d1STejun Heo 	}
512ece180d1STejun Heo 
513ece180d1STejun Heo 	/* freeze and set UNLOADED */
514ece180d1STejun Heo 	spin_lock_irqsave(ap->lock, flags);
515ece180d1STejun Heo 
516ece180d1STejun Heo 	ata_port_freeze(ap);			/* won't be thawed */
517ece180d1STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
518ece180d1STejun Heo 	ap->pflags |= ATA_PFLAG_UNLOADED;
519ece180d1STejun Heo 
520ece180d1STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
521ece180d1STejun Heo }
522ece180d1STejun Heo 
523c6fd2807SJeff Garzik /**
524c6fd2807SJeff Garzik  *	ata_scsi_error - SCSI layer error handler callback
525c6fd2807SJeff Garzik  *	@host: SCSI host on which error occurred
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	Handles SCSI-layer-thrown error events.
528c6fd2807SJeff Garzik  *
529c6fd2807SJeff Garzik  *	LOCKING:
530c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
531c6fd2807SJeff Garzik  *
532c6fd2807SJeff Garzik  *	RETURNS:
533c6fd2807SJeff Garzik  *	Zero.
534c6fd2807SJeff Garzik  */
535c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
536c6fd2807SJeff Garzik {
537c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
538a1e10f7eSTejun Heo 	int i;
539c6fd2807SJeff Garzik 	unsigned long flags;
540c6fd2807SJeff Garzik 
541c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
542c6fd2807SJeff Garzik 
543c6fd2807SJeff Garzik 	/* synchronize with port task */
544c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
545c6fd2807SJeff Garzik 
546cca3974eSJeff Garzik 	/* synchronize with host lock and sort out timeouts */
547c6fd2807SJeff Garzik 
548c6fd2807SJeff Garzik 	/* For new EH, all qcs are finished in one of three ways -
549c6fd2807SJeff Garzik 	 * normal completion, error completion, and SCSI timeout.
550c96f1732SAlan Cox 	 * Both completions can race against SCSI timeout.  When normal
551c6fd2807SJeff Garzik 	 * completion wins, the qc never reaches EH.  When error
552c6fd2807SJeff Garzik 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
553c6fd2807SJeff Garzik 	 *
554c6fd2807SJeff Garzik 	 * When SCSI timeout wins, things are a bit more complex.
555c6fd2807SJeff Garzik 	 * Normal or error completion can occur after the timeout but
556c6fd2807SJeff Garzik 	 * before this point.  In such cases, both types of
557c6fd2807SJeff Garzik 	 * completions are honored.  A scmd is determined to have
558c6fd2807SJeff Garzik 	 * timed out iff its associated qc is active and not failed.
559c6fd2807SJeff Garzik 	 */
560c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
561c6fd2807SJeff Garzik 		struct scsi_cmnd *scmd, *tmp;
562c6fd2807SJeff Garzik 		int nr_timedout = 0;
563c6fd2807SJeff Garzik 
564c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
565c6fd2807SJeff Garzik 
566c96f1732SAlan Cox 		/* This must occur under the ap->lock as we don't want
567c96f1732SAlan Cox 		   a polled recovery to race the real interrupt handler
568c96f1732SAlan Cox 
569c96f1732SAlan Cox 		   The lost_interrupt handler checks for any completed but
570c96f1732SAlan Cox 		   non-notified command and completes much like an IRQ handler.
571c96f1732SAlan Cox 
572c96f1732SAlan Cox 		   We then fall into the error recovery code which will treat
573c96f1732SAlan Cox 		   this as if normal completion won the race */
574c96f1732SAlan Cox 
575c96f1732SAlan Cox 		if (ap->ops->lost_interrupt)
576c96f1732SAlan Cox 			ap->ops->lost_interrupt(ap);
577c96f1732SAlan Cox 
578c6fd2807SJeff Garzik 		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
579c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
580c6fd2807SJeff Garzik 
581c6fd2807SJeff Garzik 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
582c6fd2807SJeff Garzik 				qc = __ata_qc_from_tag(ap, i);
583c6fd2807SJeff Garzik 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
584c6fd2807SJeff Garzik 				    qc->scsicmd == scmd)
585c6fd2807SJeff Garzik 					break;
586c6fd2807SJeff Garzik 			}
587c6fd2807SJeff Garzik 
588c6fd2807SJeff Garzik 			if (i < ATA_MAX_QUEUE) {
589c6fd2807SJeff Garzik 				/* the scmd has an associated qc */
590c6fd2807SJeff Garzik 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
591c6fd2807SJeff Garzik 					/* which hasn't failed yet, timeout */
592c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_TIMEOUT;
593c6fd2807SJeff Garzik 					qc->flags |= ATA_QCFLAG_FAILED;
594c6fd2807SJeff Garzik 					nr_timedout++;
595c6fd2807SJeff Garzik 				}
596c6fd2807SJeff Garzik 			} else {
597c6fd2807SJeff Garzik 				/* Normal completion occurred after
598c6fd2807SJeff Garzik 				 * SCSI timeout but before this point.
599c6fd2807SJeff Garzik 				 * Successfully complete it.
600c6fd2807SJeff Garzik 				 */
601c6fd2807SJeff Garzik 				scmd->retries = scmd->allowed;
602c6fd2807SJeff Garzik 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
603c6fd2807SJeff Garzik 			}
604c6fd2807SJeff Garzik 		}
605c6fd2807SJeff Garzik 
606c6fd2807SJeff Garzik 		/* If we have timed out qcs.  They belong to EH from
607c6fd2807SJeff Garzik 		 * this point but the state of the controller is
608c6fd2807SJeff Garzik 		 * unknown.  Freeze the port to make sure the IRQ
609c6fd2807SJeff Garzik 		 * handler doesn't diddle with those qcs.  This must
610c6fd2807SJeff Garzik 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
611c6fd2807SJeff Garzik 		 */
612c6fd2807SJeff Garzik 		if (nr_timedout)
613c6fd2807SJeff Garzik 			__ata_port_freeze(ap);
614c6fd2807SJeff Garzik 
615c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
616a1e10f7eSTejun Heo 
617a1e10f7eSTejun Heo 		/* initialize eh_tries */
618a1e10f7eSTejun Heo 		ap->eh_tries = ATA_EH_MAX_TRIES;
619c6fd2807SJeff Garzik 	} else
620c6fd2807SJeff Garzik 		spin_unlock_wait(ap->lock);
621c6fd2807SJeff Garzik 
622c96f1732SAlan Cox 	/* If we timed raced normal completion and there is nothing to
623c96f1732SAlan Cox 	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
624c96f1732SAlan Cox 
625c6fd2807SJeff Garzik  repeat:
626c6fd2807SJeff Garzik 	/* invoke error handler */
627c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
628cf1b86c8STejun Heo 		struct ata_link *link;
629cf1b86c8STejun Heo 
6305ddf24c5STejun Heo 		/* kill fast drain timer */
6315ddf24c5STejun Heo 		del_timer_sync(&ap->fastdrain_timer);
6325ddf24c5STejun Heo 
633c6fd2807SJeff Garzik 		/* process port resume request */
634c6fd2807SJeff Garzik 		ata_eh_handle_port_resume(ap);
635c6fd2807SJeff Garzik 
636c6fd2807SJeff Garzik 		/* fetch & clear EH info */
637c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
638c6fd2807SJeff Garzik 
6391eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST) {
64000115e0fSTejun Heo 			struct ata_eh_context *ehc = &link->eh_context;
64100115e0fSTejun Heo 			struct ata_device *dev;
64200115e0fSTejun Heo 
643cf1b86c8STejun Heo 			memset(&link->eh_context, 0, sizeof(link->eh_context));
644cf1b86c8STejun Heo 			link->eh_context.i = link->eh_info;
645cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
64600115e0fSTejun Heo 
6471eca4365STejun Heo 			ata_for_each_dev(dev, link, ENABLED) {
64800115e0fSTejun Heo 				int devno = dev->devno;
64900115e0fSTejun Heo 
65000115e0fSTejun Heo 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
65100115e0fSTejun Heo 				if (ata_ncq_enabled(dev))
65200115e0fSTejun Heo 					ehc->saved_ncq_enabled |= 1 << devno;
65300115e0fSTejun Heo 			}
654cf1b86c8STejun Heo 		}
655c6fd2807SJeff Garzik 
656c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
657c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
658da917d69STejun Heo 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
659c6fd2807SJeff Garzik 
660c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
661c6fd2807SJeff Garzik 
662c6fd2807SJeff Garzik 		/* invoke EH, skip if unloading or suspended */
663c6fd2807SJeff Garzik 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
664c6fd2807SJeff Garzik 			ap->ops->error_handler(ap);
665ece180d1STejun Heo 		else {
666ece180d1STejun Heo 			/* if unloading, commence suicide */
667ece180d1STejun Heo 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
668ece180d1STejun Heo 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
669ece180d1STejun Heo 				ata_eh_unload(ap);
670c6fd2807SJeff Garzik 			ata_eh_finish(ap);
671ece180d1STejun Heo 		}
672c6fd2807SJeff Garzik 
673c6fd2807SJeff Garzik 		/* process port suspend request */
674c6fd2807SJeff Garzik 		ata_eh_handle_port_suspend(ap);
675c6fd2807SJeff Garzik 
676c6fd2807SJeff Garzik 		/* Exception might have happend after ->error_handler
677c6fd2807SJeff Garzik 		 * recovered the port but before this point.  Repeat
678c6fd2807SJeff Garzik 		 * EH in such case.
679c6fd2807SJeff Garzik 		 */
680c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
681c6fd2807SJeff Garzik 
682c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
683a1e10f7eSTejun Heo 			if (--ap->eh_tries) {
684c6fd2807SJeff Garzik 				spin_unlock_irqrestore(ap->lock, flags);
685c6fd2807SJeff Garzik 				goto repeat;
686c6fd2807SJeff Garzik 			}
687c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
688a1e10f7eSTejun Heo 					"tries, giving up\n", ATA_EH_MAX_TRIES);
689914616a3STejun Heo 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
690c6fd2807SJeff Garzik 		}
691c6fd2807SJeff Garzik 
692c6fd2807SJeff Garzik 		/* this run is complete, make sure EH info is clear */
6931eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST)
694cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
695c6fd2807SJeff Garzik 
696c6fd2807SJeff Garzik 		/* Clear host_eh_scheduled while holding ap->lock such
697c6fd2807SJeff Garzik 		 * that if exception occurs after this point but
698c6fd2807SJeff Garzik 		 * before EH completion, SCSI midlayer will
699c6fd2807SJeff Garzik 		 * re-initiate EH.
700c6fd2807SJeff Garzik 		 */
701c6fd2807SJeff Garzik 		host->host_eh_scheduled = 0;
702c6fd2807SJeff Garzik 
703c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
704c6fd2807SJeff Garzik 	} else {
7059af5c9c9STejun Heo 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
706c6fd2807SJeff Garzik 		ap->ops->eng_timeout(ap);
707c6fd2807SJeff Garzik 	}
708c6fd2807SJeff Garzik 
709c6fd2807SJeff Garzik 	/* finish or retry handled scmd's and clean up */
710c6fd2807SJeff Garzik 	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
711c6fd2807SJeff Garzik 
712c6fd2807SJeff Garzik 	scsi_eh_flush_done_q(&ap->eh_done_q);
713c6fd2807SJeff Garzik 
714c6fd2807SJeff Garzik 	/* clean up */
715c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
716c6fd2807SJeff Garzik 
717c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_LOADING)
718c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_LOADING;
719c6fd2807SJeff Garzik 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
72052bad64dSDavid Howells 		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
721c6fd2807SJeff Garzik 
722c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_RECOVERED)
723c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
724c6fd2807SJeff Garzik 
725c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
726c6fd2807SJeff Garzik 
727c6fd2807SJeff Garzik 	/* tell wait_eh that we're done */
728c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
729c6fd2807SJeff Garzik 	wake_up_all(&ap->eh_wait_q);
730c6fd2807SJeff Garzik 
731c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
732c6fd2807SJeff Garzik 
733c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
734c6fd2807SJeff Garzik }
735c6fd2807SJeff Garzik 
736c6fd2807SJeff Garzik /**
737c6fd2807SJeff Garzik  *	ata_port_wait_eh - Wait for the currently pending EH to complete
738c6fd2807SJeff Garzik  *	@ap: Port to wait EH for
739c6fd2807SJeff Garzik  *
740c6fd2807SJeff Garzik  *	Wait until the currently pending EH is complete.
741c6fd2807SJeff Garzik  *
742c6fd2807SJeff Garzik  *	LOCKING:
743c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
744c6fd2807SJeff Garzik  */
745c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap)
746c6fd2807SJeff Garzik {
747c6fd2807SJeff Garzik 	unsigned long flags;
748c6fd2807SJeff Garzik 	DEFINE_WAIT(wait);
749c6fd2807SJeff Garzik 
750c6fd2807SJeff Garzik  retry:
751c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
752c6fd2807SJeff Garzik 
753c6fd2807SJeff Garzik 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
754c6fd2807SJeff Garzik 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
755c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
756c6fd2807SJeff Garzik 		schedule();
757c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
758c6fd2807SJeff Garzik 	}
759c6fd2807SJeff Garzik 	finish_wait(&ap->eh_wait_q, &wait);
760c6fd2807SJeff Garzik 
761c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
762c6fd2807SJeff Garzik 
763c6fd2807SJeff Garzik 	/* make sure SCSI EH is complete */
764cca3974eSJeff Garzik 	if (scsi_host_in_recovery(ap->scsi_host)) {
765c6fd2807SJeff Garzik 		msleep(10);
766c6fd2807SJeff Garzik 		goto retry;
767c6fd2807SJeff Garzik 	}
768c6fd2807SJeff Garzik }
769c6fd2807SJeff Garzik 
7705ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap)
7715ddf24c5STejun Heo {
7725ddf24c5STejun Heo 	unsigned int tag;
7735ddf24c5STejun Heo 	int nr = 0;
7745ddf24c5STejun Heo 
7755ddf24c5STejun Heo 	/* count only non-internal commands */
7765ddf24c5STejun Heo 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
7775ddf24c5STejun Heo 		if (ata_qc_from_tag(ap, tag))
7785ddf24c5STejun Heo 			nr++;
7795ddf24c5STejun Heo 
7805ddf24c5STejun Heo 	return nr;
7815ddf24c5STejun Heo }
7825ddf24c5STejun Heo 
7835ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg)
7845ddf24c5STejun Heo {
7855ddf24c5STejun Heo 	struct ata_port *ap = (void *)arg;
7865ddf24c5STejun Heo 	unsigned long flags;
7875ddf24c5STejun Heo 	int cnt;
7885ddf24c5STejun Heo 
7895ddf24c5STejun Heo 	spin_lock_irqsave(ap->lock, flags);
7905ddf24c5STejun Heo 
7915ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
7925ddf24c5STejun Heo 
7935ddf24c5STejun Heo 	/* are we done? */
7945ddf24c5STejun Heo 	if (!cnt)
7955ddf24c5STejun Heo 		goto out_unlock;
7965ddf24c5STejun Heo 
7975ddf24c5STejun Heo 	if (cnt == ap->fastdrain_cnt) {
7985ddf24c5STejun Heo 		unsigned int tag;
7995ddf24c5STejun Heo 
8005ddf24c5STejun Heo 		/* No progress during the last interval, tag all
8015ddf24c5STejun Heo 		 * in-flight qcs as timed out and freeze the port.
8025ddf24c5STejun Heo 		 */
8035ddf24c5STejun Heo 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
8045ddf24c5STejun Heo 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
8055ddf24c5STejun Heo 			if (qc)
8065ddf24c5STejun Heo 				qc->err_mask |= AC_ERR_TIMEOUT;
8075ddf24c5STejun Heo 		}
8085ddf24c5STejun Heo 
8095ddf24c5STejun Heo 		ata_port_freeze(ap);
8105ddf24c5STejun Heo 	} else {
8115ddf24c5STejun Heo 		/* some qcs have finished, give it another chance */
8125ddf24c5STejun Heo 		ap->fastdrain_cnt = cnt;
8135ddf24c5STejun Heo 		ap->fastdrain_timer.expires =
814341c2c95STejun Heo 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
8155ddf24c5STejun Heo 		add_timer(&ap->fastdrain_timer);
8165ddf24c5STejun Heo 	}
8175ddf24c5STejun Heo 
8185ddf24c5STejun Heo  out_unlock:
8195ddf24c5STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
8205ddf24c5STejun Heo }
8215ddf24c5STejun Heo 
8225ddf24c5STejun Heo /**
8235ddf24c5STejun Heo  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
8245ddf24c5STejun Heo  *	@ap: target ATA port
8255ddf24c5STejun Heo  *	@fastdrain: activate fast drain
8265ddf24c5STejun Heo  *
8275ddf24c5STejun Heo  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
8285ddf24c5STejun Heo  *	is non-zero and EH wasn't pending before.  Fast drain ensures
8295ddf24c5STejun Heo  *	that EH kicks in in timely manner.
8305ddf24c5STejun Heo  *
8315ddf24c5STejun Heo  *	LOCKING:
8325ddf24c5STejun Heo  *	spin_lock_irqsave(host lock)
8335ddf24c5STejun Heo  */
8345ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
8355ddf24c5STejun Heo {
8365ddf24c5STejun Heo 	int cnt;
8375ddf24c5STejun Heo 
8385ddf24c5STejun Heo 	/* already scheduled? */
8395ddf24c5STejun Heo 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
8405ddf24c5STejun Heo 		return;
8415ddf24c5STejun Heo 
8425ddf24c5STejun Heo 	ap->pflags |= ATA_PFLAG_EH_PENDING;
8435ddf24c5STejun Heo 
8445ddf24c5STejun Heo 	if (!fastdrain)
8455ddf24c5STejun Heo 		return;
8465ddf24c5STejun Heo 
8475ddf24c5STejun Heo 	/* do we have in-flight qcs? */
8485ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8495ddf24c5STejun Heo 	if (!cnt)
8505ddf24c5STejun Heo 		return;
8515ddf24c5STejun Heo 
8525ddf24c5STejun Heo 	/* activate fast drain */
8535ddf24c5STejun Heo 	ap->fastdrain_cnt = cnt;
854341c2c95STejun Heo 	ap->fastdrain_timer.expires =
855341c2c95STejun Heo 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
8565ddf24c5STejun Heo 	add_timer(&ap->fastdrain_timer);
8575ddf24c5STejun Heo }
8585ddf24c5STejun Heo 
859c6fd2807SJeff Garzik /**
860c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
861c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
862c6fd2807SJeff Garzik  *
863c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
864c6fd2807SJeff Garzik  *	other commands are drained.
865c6fd2807SJeff Garzik  *
866c6fd2807SJeff Garzik  *	LOCKING:
867cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
868c6fd2807SJeff Garzik  */
869c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
870c6fd2807SJeff Garzik {
871c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
872c6fd2807SJeff Garzik 
873c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
874c6fd2807SJeff Garzik 
875c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_FAILED;
8765ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
877c6fd2807SJeff Garzik 
878c6fd2807SJeff Garzik 	/* The following will fail if timeout has already expired.
879c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
880c6fd2807SJeff Garzik 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
881c6fd2807SJeff Garzik 	 * this function completes.
882c6fd2807SJeff Garzik 	 */
883242f9dcbSJens Axboe 	blk_abort_request(qc->scsicmd->request);
884c6fd2807SJeff Garzik }
885c6fd2807SJeff Garzik 
886c6fd2807SJeff Garzik /**
887c6fd2807SJeff Garzik  *	ata_port_schedule_eh - schedule error handling without a qc
888c6fd2807SJeff Garzik  *	@ap: ATA port to schedule EH for
889c6fd2807SJeff Garzik  *
890c6fd2807SJeff Garzik  *	Schedule error handling for @ap.  EH will kick in as soon as
891c6fd2807SJeff Garzik  *	all commands are drained.
892c6fd2807SJeff Garzik  *
893c6fd2807SJeff Garzik  *	LOCKING:
894cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
895c6fd2807SJeff Garzik  */
896c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap)
897c6fd2807SJeff Garzik {
898c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
899c6fd2807SJeff Garzik 
900f4d6d004STejun Heo 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
901f4d6d004STejun Heo 		return;
902f4d6d004STejun Heo 
9035ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
904cca3974eSJeff Garzik 	scsi_schedule_eh(ap->scsi_host);
905c6fd2807SJeff Garzik 
906c6fd2807SJeff Garzik 	DPRINTK("port EH scheduled\n");
907c6fd2807SJeff Garzik }
908c6fd2807SJeff Garzik 
909dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
910c6fd2807SJeff Garzik {
911c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
912c6fd2807SJeff Garzik 
913c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
914c6fd2807SJeff Garzik 
9155ddf24c5STejun Heo 	/* we're gonna abort all commands, no need for fast drain */
9165ddf24c5STejun Heo 	ata_eh_set_pending(ap, 0);
9175ddf24c5STejun Heo 
918c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
919c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
920c6fd2807SJeff Garzik 
921dbd82616STejun Heo 		if (qc && (!link || qc->dev->link == link)) {
922c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
923c6fd2807SJeff Garzik 			ata_qc_complete(qc);
924c6fd2807SJeff Garzik 			nr_aborted++;
925c6fd2807SJeff Garzik 		}
926c6fd2807SJeff Garzik 	}
927c6fd2807SJeff Garzik 
928c6fd2807SJeff Garzik 	if (!nr_aborted)
929c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
930c6fd2807SJeff Garzik 
931c6fd2807SJeff Garzik 	return nr_aborted;
932c6fd2807SJeff Garzik }
933c6fd2807SJeff Garzik 
934c6fd2807SJeff Garzik /**
935dbd82616STejun Heo  *	ata_link_abort - abort all qc's on the link
936dbd82616STejun Heo  *	@link: ATA link to abort qc's for
937dbd82616STejun Heo  *
938dbd82616STejun Heo  *	Abort all active qc's active on @link and schedule EH.
939dbd82616STejun Heo  *
940dbd82616STejun Heo  *	LOCKING:
941dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
942dbd82616STejun Heo  *
943dbd82616STejun Heo  *	RETURNS:
944dbd82616STejun Heo  *	Number of aborted qc's.
945dbd82616STejun Heo  */
946dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
947dbd82616STejun Heo {
948dbd82616STejun Heo 	return ata_do_link_abort(link->ap, link);
949dbd82616STejun Heo }
950dbd82616STejun Heo 
951dbd82616STejun Heo /**
952dbd82616STejun Heo  *	ata_port_abort - abort all qc's on the port
953dbd82616STejun Heo  *	@ap: ATA port to abort qc's for
954dbd82616STejun Heo  *
955dbd82616STejun Heo  *	Abort all active qc's of @ap and schedule EH.
956dbd82616STejun Heo  *
957dbd82616STejun Heo  *	LOCKING:
958dbd82616STejun Heo  *	spin_lock_irqsave(host_set lock)
959dbd82616STejun Heo  *
960dbd82616STejun Heo  *	RETURNS:
961dbd82616STejun Heo  *	Number of aborted qc's.
962dbd82616STejun Heo  */
963dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
964dbd82616STejun Heo {
965dbd82616STejun Heo 	return ata_do_link_abort(ap, NULL);
966dbd82616STejun Heo }
967dbd82616STejun Heo 
968dbd82616STejun Heo /**
969c6fd2807SJeff Garzik  *	__ata_port_freeze - freeze port
970c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
971c6fd2807SJeff Garzik  *
972c6fd2807SJeff Garzik  *	This function is called when HSM violation or some other
973c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  Frozen port
974c6fd2807SJeff Garzik  *	is not allowed to perform any operation until the port is
975c6fd2807SJeff Garzik  *	thawed, which usually follows a successful reset.
976c6fd2807SJeff Garzik  *
977c6fd2807SJeff Garzik  *	ap->ops->freeze() callback can be used for freezing the port
978c6fd2807SJeff Garzik  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
979c6fd2807SJeff Garzik  *	port cannot be frozen hardware-wise, the interrupt handler
980c6fd2807SJeff Garzik  *	must ack and clear interrupts unconditionally while the port
981c6fd2807SJeff Garzik  *	is frozen.
982c6fd2807SJeff Garzik  *
983c6fd2807SJeff Garzik  *	LOCKING:
984cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
985c6fd2807SJeff Garzik  */
986c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
987c6fd2807SJeff Garzik {
988c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
989c6fd2807SJeff Garzik 
990c6fd2807SJeff Garzik 	if (ap->ops->freeze)
991c6fd2807SJeff Garzik 		ap->ops->freeze(ap);
992c6fd2807SJeff Garzik 
993c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FROZEN;
994c6fd2807SJeff Garzik 
99544877b4eSTejun Heo 	DPRINTK("ata%u port frozen\n", ap->print_id);
996c6fd2807SJeff Garzik }
997c6fd2807SJeff Garzik 
998c6fd2807SJeff Garzik /**
999c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
1000c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1001c6fd2807SJeff Garzik  *
1002*54c38444SJeff Garzik  *	Abort and freeze @ap.  The freeze operation must be called
1003*54c38444SJeff Garzik  *	first, because some hardware requires special operations
1004*54c38444SJeff Garzik  *	before the taskfile registers are accessible.
1005c6fd2807SJeff Garzik  *
1006c6fd2807SJeff Garzik  *	LOCKING:
1007cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1008c6fd2807SJeff Garzik  *
1009c6fd2807SJeff Garzik  *	RETURNS:
1010c6fd2807SJeff Garzik  *	Number of aborted commands.
1011c6fd2807SJeff Garzik  */
1012c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1013c6fd2807SJeff Garzik {
1014c6fd2807SJeff Garzik 	int nr_aborted;
1015c6fd2807SJeff Garzik 
1016c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1017c6fd2807SJeff Garzik 
1018c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1019*54c38444SJeff Garzik 	nr_aborted = ata_port_abort(ap);
1020c6fd2807SJeff Garzik 
1021c6fd2807SJeff Garzik 	return nr_aborted;
1022c6fd2807SJeff Garzik }
1023c6fd2807SJeff Garzik 
1024c6fd2807SJeff Garzik /**
10257d77b247STejun Heo  *	sata_async_notification - SATA async notification handler
10267d77b247STejun Heo  *	@ap: ATA port where async notification is received
10277d77b247STejun Heo  *
10287d77b247STejun Heo  *	Handler to be called when async notification via SDB FIS is
10297d77b247STejun Heo  *	received.  This function schedules EH if necessary.
10307d77b247STejun Heo  *
10317d77b247STejun Heo  *	LOCKING:
10327d77b247STejun Heo  *	spin_lock_irqsave(host lock)
10337d77b247STejun Heo  *
10347d77b247STejun Heo  *	RETURNS:
10357d77b247STejun Heo  *	1 if EH is scheduled, 0 otherwise.
10367d77b247STejun Heo  */
10377d77b247STejun Heo int sata_async_notification(struct ata_port *ap)
10387d77b247STejun Heo {
10397d77b247STejun Heo 	u32 sntf;
10407d77b247STejun Heo 	int rc;
10417d77b247STejun Heo 
10427d77b247STejun Heo 	if (!(ap->flags & ATA_FLAG_AN))
10437d77b247STejun Heo 		return 0;
10447d77b247STejun Heo 
10457d77b247STejun Heo 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
10467d77b247STejun Heo 	if (rc == 0)
10477d77b247STejun Heo 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
10487d77b247STejun Heo 
1049071f44b1STejun Heo 	if (!sata_pmp_attached(ap) || rc) {
10507d77b247STejun Heo 		/* PMP is not attached or SNTF is not available */
1051071f44b1STejun Heo 		if (!sata_pmp_attached(ap)) {
10527d77b247STejun Heo 			/* PMP is not attached.  Check whether ATAPI
10537d77b247STejun Heo 			 * AN is configured.  If so, notify media
10547d77b247STejun Heo 			 * change.
10557d77b247STejun Heo 			 */
10567d77b247STejun Heo 			struct ata_device *dev = ap->link.device;
10577d77b247STejun Heo 
10587d77b247STejun Heo 			if ((dev->class == ATA_DEV_ATAPI) &&
10597d77b247STejun Heo 			    (dev->flags & ATA_DFLAG_AN))
10607d77b247STejun Heo 				ata_scsi_media_change_notify(dev);
10617d77b247STejun Heo 			return 0;
10627d77b247STejun Heo 		} else {
10637d77b247STejun Heo 			/* PMP is attached but SNTF is not available.
10647d77b247STejun Heo 			 * ATAPI async media change notification is
10657d77b247STejun Heo 			 * not used.  The PMP must be reporting PHY
10667d77b247STejun Heo 			 * status change, schedule EH.
10677d77b247STejun Heo 			 */
10687d77b247STejun Heo 			ata_port_schedule_eh(ap);
10697d77b247STejun Heo 			return 1;
10707d77b247STejun Heo 		}
10717d77b247STejun Heo 	} else {
10727d77b247STejun Heo 		/* PMP is attached and SNTF is available */
10737d77b247STejun Heo 		struct ata_link *link;
10747d77b247STejun Heo 
10757d77b247STejun Heo 		/* check and notify ATAPI AN */
10761eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
10777d77b247STejun Heo 			if (!(sntf & (1 << link->pmp)))
10787d77b247STejun Heo 				continue;
10797d77b247STejun Heo 
10807d77b247STejun Heo 			if ((link->device->class == ATA_DEV_ATAPI) &&
10817d77b247STejun Heo 			    (link->device->flags & ATA_DFLAG_AN))
10827d77b247STejun Heo 				ata_scsi_media_change_notify(link->device);
10837d77b247STejun Heo 		}
10847d77b247STejun Heo 
10857d77b247STejun Heo 		/* If PMP is reporting that PHY status of some
10867d77b247STejun Heo 		 * downstream ports has changed, schedule EH.
10877d77b247STejun Heo 		 */
10887d77b247STejun Heo 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
10897d77b247STejun Heo 			ata_port_schedule_eh(ap);
10907d77b247STejun Heo 			return 1;
10917d77b247STejun Heo 		}
10927d77b247STejun Heo 
10937d77b247STejun Heo 		return 0;
10947d77b247STejun Heo 	}
10957d77b247STejun Heo }
10967d77b247STejun Heo 
10977d77b247STejun Heo /**
1098c6fd2807SJeff Garzik  *	ata_eh_freeze_port - EH helper to freeze port
1099c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1100c6fd2807SJeff Garzik  *
1101c6fd2807SJeff Garzik  *	Freeze @ap.
1102c6fd2807SJeff Garzik  *
1103c6fd2807SJeff Garzik  *	LOCKING:
1104c6fd2807SJeff Garzik  *	None.
1105c6fd2807SJeff Garzik  */
1106c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1107c6fd2807SJeff Garzik {
1108c6fd2807SJeff Garzik 	unsigned long flags;
1109c6fd2807SJeff Garzik 
1110c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1111c6fd2807SJeff Garzik 		return;
1112c6fd2807SJeff Garzik 
1113c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1114c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1115c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1116c6fd2807SJeff Garzik }
1117c6fd2807SJeff Garzik 
1118c6fd2807SJeff Garzik /**
1119c6fd2807SJeff Garzik  *	ata_port_thaw_port - EH helper to thaw port
1120c6fd2807SJeff Garzik  *	@ap: ATA port to thaw
1121c6fd2807SJeff Garzik  *
1122c6fd2807SJeff Garzik  *	Thaw frozen port @ap.
1123c6fd2807SJeff Garzik  *
1124c6fd2807SJeff Garzik  *	LOCKING:
1125c6fd2807SJeff Garzik  *	None.
1126c6fd2807SJeff Garzik  */
1127c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1128c6fd2807SJeff Garzik {
1129c6fd2807SJeff Garzik 	unsigned long flags;
1130c6fd2807SJeff Garzik 
1131c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1132c6fd2807SJeff Garzik 		return;
1133c6fd2807SJeff Garzik 
1134c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1135c6fd2807SJeff Garzik 
1136c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1137c6fd2807SJeff Garzik 
1138c6fd2807SJeff Garzik 	if (ap->ops->thaw)
1139c6fd2807SJeff Garzik 		ap->ops->thaw(ap);
1140c6fd2807SJeff Garzik 
1141c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1142c6fd2807SJeff Garzik 
114344877b4eSTejun Heo 	DPRINTK("ata%u port thawed\n", ap->print_id);
1144c6fd2807SJeff Garzik }
1145c6fd2807SJeff Garzik 
1146c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1147c6fd2807SJeff Garzik {
1148c6fd2807SJeff Garzik 	/* nada */
1149c6fd2807SJeff Garzik }
1150c6fd2807SJeff Garzik 
1151c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1152c6fd2807SJeff Garzik {
1153c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1154c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1155c6fd2807SJeff Garzik 	unsigned long flags;
1156c6fd2807SJeff Garzik 
1157c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1158c6fd2807SJeff Garzik 	qc->scsidone = ata_eh_scsidone;
1159c6fd2807SJeff Garzik 	__ata_qc_complete(qc);
1160c6fd2807SJeff Garzik 	WARN_ON(ata_tag_valid(qc->tag));
1161c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1162c6fd2807SJeff Garzik 
1163c6fd2807SJeff Garzik 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1164c6fd2807SJeff Garzik }
1165c6fd2807SJeff Garzik 
1166c6fd2807SJeff Garzik /**
1167c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
1168c6fd2807SJeff Garzik  *	@qc: Command to complete
1169c6fd2807SJeff Garzik  *
1170c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
1171c6fd2807SJeff Garzik  *	completed.  To be used from EH.
1172c6fd2807SJeff Garzik  */
1173c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1174c6fd2807SJeff Garzik {
1175c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1176c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
1177c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1178c6fd2807SJeff Garzik }
1179c6fd2807SJeff Garzik 
1180c6fd2807SJeff Garzik /**
1181c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1182c6fd2807SJeff Garzik  *	@qc: Command to retry
1183c6fd2807SJeff Garzik  *
1184c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
1185c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
1186c6fd2807SJeff Garzik  *
1187c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
1188c6fd2807SJeff Garzik  *	scmd->retries is decremented for commands which get retried
1189c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
1190c6fd2807SJeff Garzik  */
1191c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1192c6fd2807SJeff Garzik {
1193c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1194c6fd2807SJeff Garzik 	if (!qc->err_mask && scmd->retries)
1195c6fd2807SJeff Garzik 		scmd->retries--;
1196c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1197c6fd2807SJeff Garzik }
1198c6fd2807SJeff Garzik 
1199c6fd2807SJeff Garzik /**
1200678afac6STejun Heo  *	ata_dev_disable - disable ATA device
1201678afac6STejun Heo  *	@dev: ATA device to disable
1202678afac6STejun Heo  *
1203678afac6STejun Heo  *	Disable @dev.
1204678afac6STejun Heo  *
1205678afac6STejun Heo  *	Locking:
1206678afac6STejun Heo  *	EH context.
1207678afac6STejun Heo  */
1208678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1209678afac6STejun Heo {
1210678afac6STejun Heo 	if (!ata_dev_enabled(dev))
1211678afac6STejun Heo 		return;
1212678afac6STejun Heo 
1213678afac6STejun Heo 	if (ata_msg_drv(dev->link->ap))
1214678afac6STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
1215678afac6STejun Heo 	ata_acpi_on_disable(dev);
1216678afac6STejun Heo 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1217678afac6STejun Heo 	dev->class++;
121899cf610aSTejun Heo 
121999cf610aSTejun Heo 	/* From now till the next successful probe, ering is used to
122099cf610aSTejun Heo 	 * track probe failures.  Clear accumulated device error info.
122199cf610aSTejun Heo 	 */
122299cf610aSTejun Heo 	ata_ering_clear(&dev->ering);
1223678afac6STejun Heo }
1224678afac6STejun Heo 
1225678afac6STejun Heo /**
1226c6fd2807SJeff Garzik  *	ata_eh_detach_dev - detach ATA device
1227c6fd2807SJeff Garzik  *	@dev: ATA device to detach
1228c6fd2807SJeff Garzik  *
1229c6fd2807SJeff Garzik  *	Detach @dev.
1230c6fd2807SJeff Garzik  *
1231c6fd2807SJeff Garzik  *	LOCKING:
1232c6fd2807SJeff Garzik  *	None.
1233c6fd2807SJeff Garzik  */
1234fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1235c6fd2807SJeff Garzik {
1236f58229f8STejun Heo 	struct ata_link *link = dev->link;
1237f58229f8STejun Heo 	struct ata_port *ap = link->ap;
123890484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1239c6fd2807SJeff Garzik 	unsigned long flags;
1240c6fd2807SJeff Garzik 
1241c6fd2807SJeff Garzik 	ata_dev_disable(dev);
1242c6fd2807SJeff Garzik 
1243c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1244c6fd2807SJeff Garzik 
1245c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1246c6fd2807SJeff Garzik 
1247c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1248c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1249c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1250c6fd2807SJeff Garzik 	}
1251c6fd2807SJeff Garzik 
125290484ebfSTejun Heo 	/* clear per-dev EH info */
1253f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1254f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
125590484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
125690484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1257c6fd2807SJeff Garzik 
1258c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1259c6fd2807SJeff Garzik }
1260c6fd2807SJeff Garzik 
1261c6fd2807SJeff Garzik /**
1262c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1263955e57dfSTejun Heo  *	@link: target ATA link
1264c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1265c6fd2807SJeff Garzik  *	@action: action about to be performed
1266c6fd2807SJeff Garzik  *
1267c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1268955e57dfSTejun Heo  *	in @link->eh_info such that eh actions are not unnecessarily
1269955e57dfSTejun Heo  *	repeated.
1270c6fd2807SJeff Garzik  *
1271c6fd2807SJeff Garzik  *	LOCKING:
1272c6fd2807SJeff Garzik  *	None.
1273c6fd2807SJeff Garzik  */
1274fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1275c6fd2807SJeff Garzik 			unsigned int action)
1276c6fd2807SJeff Garzik {
1277955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1278955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1279955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1280c6fd2807SJeff Garzik 	unsigned long flags;
1281c6fd2807SJeff Garzik 
1282c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1283c6fd2807SJeff Garzik 
1284955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1285c6fd2807SJeff Garzik 
1286a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED.  Ignore actions on
1287a568d1d2STejun Heo 	 * slave links as master will do them again.
1288a568d1d2STejun Heo 	 */
1289a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1290c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1291c6fd2807SJeff Garzik 
1292c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1293c6fd2807SJeff Garzik }
1294c6fd2807SJeff Garzik 
1295c6fd2807SJeff Garzik /**
1296c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
1297c6fd2807SJeff Garzik *	@ap: target ATA port
1298c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1299c6fd2807SJeff Garzik  *	@action: action just completed
1300c6fd2807SJeff Garzik  *
1301c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1302955e57dfSTejun Heo  *	in @link->eh_context.
1303c6fd2807SJeff Garzik  *
1304c6fd2807SJeff Garzik  *	LOCKING:
1305c6fd2807SJeff Garzik  *	None.
1306c6fd2807SJeff Garzik  */
1307fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1308c6fd2807SJeff Garzik 		 unsigned int action)
1309c6fd2807SJeff Garzik {
1310955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
13119af5c9c9STejun Heo 
1312955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, &ehc->i, action);
1313c6fd2807SJeff Garzik }
1314c6fd2807SJeff Garzik 
1315c6fd2807SJeff Garzik /**
1316c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
1317c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
1318c6fd2807SJeff Garzik  *
1319c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
1320c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
1321c6fd2807SJeff Garzik  *	error is reported.
1322c6fd2807SJeff Garzik  *
1323c6fd2807SJeff Garzik  *	LOCKING:
1324c6fd2807SJeff Garzik  *	None.
1325c6fd2807SJeff Garzik  *
1326c6fd2807SJeff Garzik  *	RETURNS:
1327c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
1328c6fd2807SJeff Garzik  */
1329c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1330c6fd2807SJeff Garzik {
1331c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
1332c6fd2807SJeff Garzik 		return "host bus error";
1333c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
1334c6fd2807SJeff Garzik 		return "ATA bus error";
1335c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
1336c6fd2807SJeff Garzik 		return "timeout";
1337c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
1338c6fd2807SJeff Garzik 		return "HSM violation";
1339c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
1340c6fd2807SJeff Garzik 		return "internal error";
1341c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
1342c6fd2807SJeff Garzik 		return "media error";
1343c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
1344c6fd2807SJeff Garzik 		return "invalid argument";
1345c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
1346c6fd2807SJeff Garzik 		return "device error";
1347c6fd2807SJeff Garzik 	return "unknown error";
1348c6fd2807SJeff Garzik }
1349c6fd2807SJeff Garzik 
1350c6fd2807SJeff Garzik /**
1351c6fd2807SJeff Garzik  *	ata_read_log_page - read a specific log page
1352c6fd2807SJeff Garzik  *	@dev: target device
1353c6fd2807SJeff Garzik  *	@page: page to read
1354c6fd2807SJeff Garzik  *	@buf: buffer to store read page
1355c6fd2807SJeff Garzik  *	@sectors: number of sectors to read
1356c6fd2807SJeff Garzik  *
1357c6fd2807SJeff Garzik  *	Read log page using READ_LOG_EXT command.
1358c6fd2807SJeff Garzik  *
1359c6fd2807SJeff Garzik  *	LOCKING:
1360c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1361c6fd2807SJeff Garzik  *
1362c6fd2807SJeff Garzik  *	RETURNS:
1363c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
1364c6fd2807SJeff Garzik  */
1365c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev,
1366c6fd2807SJeff Garzik 				      u8 page, void *buf, unsigned int sectors)
1367c6fd2807SJeff Garzik {
1368c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1369c6fd2807SJeff Garzik 	unsigned int err_mask;
1370c6fd2807SJeff Garzik 
1371c6fd2807SJeff Garzik 	DPRINTK("read log page - page %d\n", page);
1372c6fd2807SJeff Garzik 
1373c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1374c6fd2807SJeff Garzik 	tf.command = ATA_CMD_READ_LOG_EXT;
1375c6fd2807SJeff Garzik 	tf.lbal = page;
1376c6fd2807SJeff Garzik 	tf.nsect = sectors;
1377c6fd2807SJeff Garzik 	tf.hob_nsect = sectors >> 8;
1378c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1379c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
1380c6fd2807SJeff Garzik 
1381c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
13822b789108STejun Heo 				     buf, sectors * ATA_SECT_SIZE, 0);
1383c6fd2807SJeff Garzik 
1384c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1385c6fd2807SJeff Garzik 	return err_mask;
1386c6fd2807SJeff Garzik }
1387c6fd2807SJeff Garzik 
1388c6fd2807SJeff Garzik /**
1389c6fd2807SJeff Garzik  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1390c6fd2807SJeff Garzik  *	@dev: Device to read log page 10h from
1391c6fd2807SJeff Garzik  *	@tag: Resulting tag of the failed command
1392c6fd2807SJeff Garzik  *	@tf: Resulting taskfile registers of the failed command
1393c6fd2807SJeff Garzik  *
1394c6fd2807SJeff Garzik  *	Read log page 10h to obtain NCQ error details and clear error
1395c6fd2807SJeff Garzik  *	condition.
1396c6fd2807SJeff Garzik  *
1397c6fd2807SJeff Garzik  *	LOCKING:
1398c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1399c6fd2807SJeff Garzik  *
1400c6fd2807SJeff Garzik  *	RETURNS:
1401c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1402c6fd2807SJeff Garzik  */
1403c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev,
1404c6fd2807SJeff Garzik 			       int *tag, struct ata_taskfile *tf)
1405c6fd2807SJeff Garzik {
14069af5c9c9STejun Heo 	u8 *buf = dev->link->ap->sector_buf;
1407c6fd2807SJeff Garzik 	unsigned int err_mask;
1408c6fd2807SJeff Garzik 	u8 csum;
1409c6fd2807SJeff Garzik 	int i;
1410c6fd2807SJeff Garzik 
1411c6fd2807SJeff Garzik 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
1412c6fd2807SJeff Garzik 	if (err_mask)
1413c6fd2807SJeff Garzik 		return -EIO;
1414c6fd2807SJeff Garzik 
1415c6fd2807SJeff Garzik 	csum = 0;
1416c6fd2807SJeff Garzik 	for (i = 0; i < ATA_SECT_SIZE; i++)
1417c6fd2807SJeff Garzik 		csum += buf[i];
1418c6fd2807SJeff Garzik 	if (csum)
1419c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
1420c6fd2807SJeff Garzik 			       "invalid checksum 0x%x on log page 10h\n", csum);
1421c6fd2807SJeff Garzik 
1422c6fd2807SJeff Garzik 	if (buf[0] & 0x80)
1423c6fd2807SJeff Garzik 		return -ENOENT;
1424c6fd2807SJeff Garzik 
1425c6fd2807SJeff Garzik 	*tag = buf[0] & 0x1f;
1426c6fd2807SJeff Garzik 
1427c6fd2807SJeff Garzik 	tf->command = buf[2];
1428c6fd2807SJeff Garzik 	tf->feature = buf[3];
1429c6fd2807SJeff Garzik 	tf->lbal = buf[4];
1430c6fd2807SJeff Garzik 	tf->lbam = buf[5];
1431c6fd2807SJeff Garzik 	tf->lbah = buf[6];
1432c6fd2807SJeff Garzik 	tf->device = buf[7];
1433c6fd2807SJeff Garzik 	tf->hob_lbal = buf[8];
1434c6fd2807SJeff Garzik 	tf->hob_lbam = buf[9];
1435c6fd2807SJeff Garzik 	tf->hob_lbah = buf[10];
1436c6fd2807SJeff Garzik 	tf->nsect = buf[12];
1437c6fd2807SJeff Garzik 	tf->hob_nsect = buf[13];
1438c6fd2807SJeff Garzik 
1439c6fd2807SJeff Garzik 	return 0;
1440c6fd2807SJeff Garzik }
1441c6fd2807SJeff Garzik 
1442c6fd2807SJeff Garzik /**
144311fc33daSTejun Heo  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
144411fc33daSTejun Heo  *	@dev: target ATAPI device
144511fc33daSTejun Heo  *	@r_sense_key: out parameter for sense_key
144611fc33daSTejun Heo  *
144711fc33daSTejun Heo  *	Perform ATAPI TEST_UNIT_READY.
144811fc33daSTejun Heo  *
144911fc33daSTejun Heo  *	LOCKING:
145011fc33daSTejun Heo  *	EH context (may sleep).
145111fc33daSTejun Heo  *
145211fc33daSTejun Heo  *	RETURNS:
145311fc33daSTejun Heo  *	0 on success, AC_ERR_* mask on failure.
145411fc33daSTejun Heo  */
145511fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
145611fc33daSTejun Heo {
145711fc33daSTejun Heo 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
145811fc33daSTejun Heo 	struct ata_taskfile tf;
145911fc33daSTejun Heo 	unsigned int err_mask;
146011fc33daSTejun Heo 
146111fc33daSTejun Heo 	ata_tf_init(dev, &tf);
146211fc33daSTejun Heo 
146311fc33daSTejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
146411fc33daSTejun Heo 	tf.command = ATA_CMD_PACKET;
146511fc33daSTejun Heo 	tf.protocol = ATAPI_PROT_NODATA;
146611fc33daSTejun Heo 
146711fc33daSTejun Heo 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
146811fc33daSTejun Heo 	if (err_mask == AC_ERR_DEV)
146911fc33daSTejun Heo 		*r_sense_key = tf.feature >> 4;
147011fc33daSTejun Heo 	return err_mask;
147111fc33daSTejun Heo }
147211fc33daSTejun Heo 
147311fc33daSTejun Heo /**
1474c6fd2807SJeff Garzik  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1475c6fd2807SJeff Garzik  *	@dev: device to perform REQUEST_SENSE to
1476c6fd2807SJeff Garzik  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
14773eabddb8STejun Heo  *	@dfl_sense_key: default sense key to use
1478c6fd2807SJeff Garzik  *
1479c6fd2807SJeff Garzik  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1480c6fd2807SJeff Garzik  *	SENSE.  This function is EH helper.
1481c6fd2807SJeff Garzik  *
1482c6fd2807SJeff Garzik  *	LOCKING:
1483c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1484c6fd2807SJeff Garzik  *
1485c6fd2807SJeff Garzik  *	RETURNS:
1486c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask on failure
1487c6fd2807SJeff Garzik  */
14883eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev,
14893eabddb8STejun Heo 					   u8 *sense_buf, u8 dfl_sense_key)
1490c6fd2807SJeff Garzik {
14913eabddb8STejun Heo 	u8 cdb[ATAPI_CDB_LEN] =
14923eabddb8STejun Heo 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
14939af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1494c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1495c6fd2807SJeff Garzik 
1496c6fd2807SJeff Garzik 	DPRINTK("ATAPI request sense\n");
1497c6fd2807SJeff Garzik 
1498c6fd2807SJeff Garzik 	/* FIXME: is this needed? */
1499c6fd2807SJeff Garzik 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1500c6fd2807SJeff Garzik 
150156287768SAlbert Lee 	/* initialize sense_buf with the error register,
150256287768SAlbert Lee 	 * for the case where they are -not- overwritten
150356287768SAlbert Lee 	 */
1504c6fd2807SJeff Garzik 	sense_buf[0] = 0x70;
15053eabddb8STejun Heo 	sense_buf[2] = dfl_sense_key;
150656287768SAlbert Lee 
150756287768SAlbert Lee 	/* some devices time out if garbage left in tf */
150856287768SAlbert Lee 	ata_tf_init(dev, &tf);
1509c6fd2807SJeff Garzik 
1510c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1511c6fd2807SJeff Garzik 	tf.command = ATA_CMD_PACKET;
1512c6fd2807SJeff Garzik 
1513c6fd2807SJeff Garzik 	/* is it pointless to prefer PIO for "safety reasons"? */
1514c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_DMA) {
15150dc36888STejun Heo 		tf.protocol = ATAPI_PROT_DMA;
1516c6fd2807SJeff Garzik 		tf.feature |= ATAPI_PKT_DMA;
1517c6fd2807SJeff Garzik 	} else {
15180dc36888STejun Heo 		tf.protocol = ATAPI_PROT_PIO;
1519f2dfc1a1STejun Heo 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1520f2dfc1a1STejun Heo 		tf.lbah = 0;
1521c6fd2807SJeff Garzik 	}
1522c6fd2807SJeff Garzik 
1523c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
15242b789108STejun Heo 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1525c6fd2807SJeff Garzik }
1526c6fd2807SJeff Garzik 
1527c6fd2807SJeff Garzik /**
1528c6fd2807SJeff Garzik  *	ata_eh_analyze_serror - analyze SError for a failed port
15290260731fSTejun Heo  *	@link: ATA link to analyze SError for
1530c6fd2807SJeff Garzik  *
1531c6fd2807SJeff Garzik  *	Analyze SError if available and further determine cause of
1532c6fd2807SJeff Garzik  *	failure.
1533c6fd2807SJeff Garzik  *
1534c6fd2807SJeff Garzik  *	LOCKING:
1535c6fd2807SJeff Garzik  *	None.
1536c6fd2807SJeff Garzik  */
15370260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1538c6fd2807SJeff Garzik {
15390260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1540c6fd2807SJeff Garzik 	u32 serror = ehc->i.serror;
1541c6fd2807SJeff Garzik 	unsigned int err_mask = 0, action = 0;
1542f9df58cbSTejun Heo 	u32 hotplug_mask;
1543c6fd2807SJeff Garzik 
1544e0614db2STejun Heo 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1545c6fd2807SJeff Garzik 		err_mask |= AC_ERR_ATA_BUS;
1546cf480626STejun Heo 		action |= ATA_EH_RESET;
1547c6fd2807SJeff Garzik 	}
1548c6fd2807SJeff Garzik 	if (serror & SERR_PROTOCOL) {
1549c6fd2807SJeff Garzik 		err_mask |= AC_ERR_HSM;
1550cf480626STejun Heo 		action |= ATA_EH_RESET;
1551c6fd2807SJeff Garzik 	}
1552c6fd2807SJeff Garzik 	if (serror & SERR_INTERNAL) {
1553c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1554cf480626STejun Heo 		action |= ATA_EH_RESET;
1555c6fd2807SJeff Garzik 	}
1556f9df58cbSTejun Heo 
1557f9df58cbSTejun Heo 	/* Determine whether a hotplug event has occurred.  Both
1558f9df58cbSTejun Heo 	 * SError.N/X are considered hotplug events for enabled or
1559f9df58cbSTejun Heo 	 * host links.  For disabled PMP links, only N bit is
1560f9df58cbSTejun Heo 	 * considered as X bit is left at 1 for link plugging.
1561f9df58cbSTejun Heo 	 */
1562f9df58cbSTejun Heo 	hotplug_mask = 0;
1563f9df58cbSTejun Heo 
1564f9df58cbSTejun Heo 	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1565f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1566f9df58cbSTejun Heo 	else
1567f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG;
1568f9df58cbSTejun Heo 
1569f9df58cbSTejun Heo 	if (serror & hotplug_mask)
1570c6fd2807SJeff Garzik 		ata_ehi_hotplugged(&ehc->i);
1571c6fd2807SJeff Garzik 
1572c6fd2807SJeff Garzik 	ehc->i.err_mask |= err_mask;
1573c6fd2807SJeff Garzik 	ehc->i.action |= action;
1574c6fd2807SJeff Garzik }
1575c6fd2807SJeff Garzik 
1576c6fd2807SJeff Garzik /**
1577c6fd2807SJeff Garzik  *	ata_eh_analyze_ncq_error - analyze NCQ error
15780260731fSTejun Heo  *	@link: ATA link to analyze NCQ error for
1579c6fd2807SJeff Garzik  *
1580c6fd2807SJeff Garzik  *	Read log page 10h, determine the offending qc and acquire
1581c6fd2807SJeff Garzik  *	error status TF.  For NCQ device errors, all LLDDs have to do
1582c6fd2807SJeff Garzik  *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
1583c6fd2807SJeff Garzik  *	care of the rest.
1584c6fd2807SJeff Garzik  *
1585c6fd2807SJeff Garzik  *	LOCKING:
1586c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1587c6fd2807SJeff Garzik  */
158810acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1589c6fd2807SJeff Garzik {
15900260731fSTejun Heo 	struct ata_port *ap = link->ap;
15910260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
15920260731fSTejun Heo 	struct ata_device *dev = link->device;
1593c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1594c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1595c6fd2807SJeff Garzik 	int tag, rc;
1596c6fd2807SJeff Garzik 
1597c6fd2807SJeff Garzik 	/* if frozen, we can't do much */
1598c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1599c6fd2807SJeff Garzik 		return;
1600c6fd2807SJeff Garzik 
1601c6fd2807SJeff Garzik 	/* is it NCQ device error? */
16020260731fSTejun Heo 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1603c6fd2807SJeff Garzik 		return;
1604c6fd2807SJeff Garzik 
1605c6fd2807SJeff Garzik 	/* has LLDD analyzed already? */
1606c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1607c6fd2807SJeff Garzik 		qc = __ata_qc_from_tag(ap, tag);
1608c6fd2807SJeff Garzik 
1609c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1610c6fd2807SJeff Garzik 			continue;
1611c6fd2807SJeff Garzik 
1612c6fd2807SJeff Garzik 		if (qc->err_mask)
1613c6fd2807SJeff Garzik 			return;
1614c6fd2807SJeff Garzik 	}
1615c6fd2807SJeff Garzik 
1616c6fd2807SJeff Garzik 	/* okay, this error is ours */
1617c6fd2807SJeff Garzik 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1618c6fd2807SJeff Garzik 	if (rc) {
16190260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1620c6fd2807SJeff Garzik 				"(errno=%d)\n", rc);
1621c6fd2807SJeff Garzik 		return;
1622c6fd2807SJeff Garzik 	}
1623c6fd2807SJeff Garzik 
16240260731fSTejun Heo 	if (!(link->sactive & (1 << tag))) {
16250260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "log page 10h reported "
1626c6fd2807SJeff Garzik 				"inactive tag %d\n", tag);
1627c6fd2807SJeff Garzik 		return;
1628c6fd2807SJeff Garzik 	}
1629c6fd2807SJeff Garzik 
1630c6fd2807SJeff Garzik 	/* we've got the perpetrator, condemn it */
1631c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1632c6fd2807SJeff Garzik 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1633a6116c9eSMark Lord 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
16345335b729STejun Heo 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1635c6fd2807SJeff Garzik 	ehc->i.err_mask &= ~AC_ERR_DEV;
1636c6fd2807SJeff Garzik }
1637c6fd2807SJeff Garzik 
1638c6fd2807SJeff Garzik /**
1639c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1640c6fd2807SJeff Garzik  *	@qc: qc to analyze
1641c6fd2807SJeff Garzik  *	@tf: Taskfile registers to analyze
1642c6fd2807SJeff Garzik  *
1643c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1644c6fd2807SJeff Garzik  *	failure.  This function also requests ATAPI sense data if
1645c6fd2807SJeff Garzik  *	avaliable.
1646c6fd2807SJeff Garzik  *
1647c6fd2807SJeff Garzik  *	LOCKING:
1648c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1649c6fd2807SJeff Garzik  *
1650c6fd2807SJeff Garzik  *	RETURNS:
1651c6fd2807SJeff Garzik  *	Determined recovery action
1652c6fd2807SJeff Garzik  */
1653c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1654c6fd2807SJeff Garzik 				      const struct ata_taskfile *tf)
1655c6fd2807SJeff Garzik {
1656c6fd2807SJeff Garzik 	unsigned int tmp, action = 0;
1657c6fd2807SJeff Garzik 	u8 stat = tf->command, err = tf->feature;
1658c6fd2807SJeff Garzik 
1659c6fd2807SJeff Garzik 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1660c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_HSM;
1661cf480626STejun Heo 		return ATA_EH_RESET;
1662c6fd2807SJeff Garzik 	}
1663c6fd2807SJeff Garzik 
1664a51d644aSTejun Heo 	if (stat & (ATA_ERR | ATA_DF))
1665a51d644aSTejun Heo 		qc->err_mask |= AC_ERR_DEV;
1666a51d644aSTejun Heo 	else
1667c6fd2807SJeff Garzik 		return 0;
1668c6fd2807SJeff Garzik 
1669c6fd2807SJeff Garzik 	switch (qc->dev->class) {
1670c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1671c6fd2807SJeff Garzik 		if (err & ATA_ICRC)
1672c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_ATA_BUS;
1673c6fd2807SJeff Garzik 		if (err & ATA_UNC)
1674c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_MEDIA;
1675c6fd2807SJeff Garzik 		if (err & ATA_IDNF)
1676c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_INVALID;
1677c6fd2807SJeff Garzik 		break;
1678c6fd2807SJeff Garzik 
1679c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1680a569a30dSTejun Heo 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
16813eabddb8STejun Heo 			tmp = atapi_eh_request_sense(qc->dev,
16823eabddb8STejun Heo 						qc->scsicmd->sense_buffer,
16833eabddb8STejun Heo 						qc->result_tf.feature >> 4);
1684c6fd2807SJeff Garzik 			if (!tmp) {
1685a569a30dSTejun Heo 				/* ATA_QCFLAG_SENSE_VALID is used to
1686a569a30dSTejun Heo 				 * tell atapi_qc_complete() that sense
1687a569a30dSTejun Heo 				 * data is already valid.
1688c6fd2807SJeff Garzik 				 *
1689c6fd2807SJeff Garzik 				 * TODO: interpret sense data and set
1690c6fd2807SJeff Garzik 				 * appropriate err_mask.
1691c6fd2807SJeff Garzik 				 */
1692c6fd2807SJeff Garzik 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1693c6fd2807SJeff Garzik 			} else
1694c6fd2807SJeff Garzik 				qc->err_mask |= tmp;
1695c6fd2807SJeff Garzik 		}
1696a569a30dSTejun Heo 	}
1697c6fd2807SJeff Garzik 
1698c6fd2807SJeff Garzik 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1699cf480626STejun Heo 		action |= ATA_EH_RESET;
1700c6fd2807SJeff Garzik 
1701c6fd2807SJeff Garzik 	return action;
1702c6fd2807SJeff Garzik }
1703c6fd2807SJeff Garzik 
170476326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
170576326ac1STejun Heo 				   int *xfer_ok)
1706c6fd2807SJeff Garzik {
170776326ac1STejun Heo 	int base = 0;
170876326ac1STejun Heo 
170976326ac1STejun Heo 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
171076326ac1STejun Heo 		*xfer_ok = 1;
171176326ac1STejun Heo 
171276326ac1STejun Heo 	if (!*xfer_ok)
171375f9cafcSTejun Heo 		base = ATA_ECAT_DUBIOUS_NONE;
171476326ac1STejun Heo 
17157d47e8d4STejun Heo 	if (err_mask & AC_ERR_ATA_BUS)
171676326ac1STejun Heo 		return base + ATA_ECAT_ATA_BUS;
1717c6fd2807SJeff Garzik 
17187d47e8d4STejun Heo 	if (err_mask & AC_ERR_TIMEOUT)
171976326ac1STejun Heo 		return base + ATA_ECAT_TOUT_HSM;
17207d47e8d4STejun Heo 
17213884f7b0STejun Heo 	if (eflags & ATA_EFLAG_IS_IO) {
17227d47e8d4STejun Heo 		if (err_mask & AC_ERR_HSM)
172376326ac1STejun Heo 			return base + ATA_ECAT_TOUT_HSM;
17247d47e8d4STejun Heo 		if ((err_mask &
17257d47e8d4STejun Heo 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
172676326ac1STejun Heo 			return base + ATA_ECAT_UNK_DEV;
1727c6fd2807SJeff Garzik 	}
1728c6fd2807SJeff Garzik 
1729c6fd2807SJeff Garzik 	return 0;
1730c6fd2807SJeff Garzik }
1731c6fd2807SJeff Garzik 
17327d47e8d4STejun Heo struct speed_down_verdict_arg {
1733c6fd2807SJeff Garzik 	u64 since;
173476326ac1STejun Heo 	int xfer_ok;
17353884f7b0STejun Heo 	int nr_errors[ATA_ECAT_NR];
1736c6fd2807SJeff Garzik };
1737c6fd2807SJeff Garzik 
17387d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1739c6fd2807SJeff Garzik {
17407d47e8d4STejun Heo 	struct speed_down_verdict_arg *arg = void_arg;
174176326ac1STejun Heo 	int cat;
1742c6fd2807SJeff Garzik 
1743c6fd2807SJeff Garzik 	if (ent->timestamp < arg->since)
1744c6fd2807SJeff Garzik 		return -1;
1745c6fd2807SJeff Garzik 
174676326ac1STejun Heo 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
174776326ac1STejun Heo 				      &arg->xfer_ok);
17487d47e8d4STejun Heo 	arg->nr_errors[cat]++;
174976326ac1STejun Heo 
1750c6fd2807SJeff Garzik 	return 0;
1751c6fd2807SJeff Garzik }
1752c6fd2807SJeff Garzik 
1753c6fd2807SJeff Garzik /**
17547d47e8d4STejun Heo  *	ata_eh_speed_down_verdict - Determine speed down verdict
1755c6fd2807SJeff Garzik  *	@dev: Device of interest
1756c6fd2807SJeff Garzik  *
1757c6fd2807SJeff Garzik  *	This function examines error ring of @dev and determines
17587d47e8d4STejun Heo  *	whether NCQ needs to be turned off, transfer speed should be
17597d47e8d4STejun Heo  *	stepped down, or falling back to PIO is necessary.
1760c6fd2807SJeff Garzik  *
17613884f7b0STejun Heo  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1762c6fd2807SJeff Garzik  *
17633884f7b0STejun Heo  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
17643884f7b0STejun Heo  *			  IO commands
17657d47e8d4STejun Heo  *
17663884f7b0STejun Heo  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1767c6fd2807SJeff Garzik  *
176876326ac1STejun Heo  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
176976326ac1STejun Heo  *			  data transfer hasn't been verified.
177076326ac1STejun Heo  *
17713884f7b0STejun Heo  *	Verdicts are
17727d47e8d4STejun Heo  *
17733884f7b0STejun Heo  *	NCQ_OFF		: Turn off NCQ.
17747d47e8d4STejun Heo  *
17753884f7b0STejun Heo  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
17763884f7b0STejun Heo  *			  to PIO.
17773884f7b0STejun Heo  *
17783884f7b0STejun Heo  *	FALLBACK_TO_PIO	: Fall back to PIO.
17793884f7b0STejun Heo  *
17803884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
178176326ac1STejun Heo  *	taken per error.  An action triggered by non-DUBIOUS errors
178276326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
178376326ac1STejun Heo  *	This is to expedite speed down decisions right after device is
178476326ac1STejun Heo  *	initially configured.
17853884f7b0STejun Heo  *
178676326ac1STejun Heo  *	The followings are speed down rules.  #1 and #2 deal with
178776326ac1STejun Heo  *	DUBIOUS errors.
178876326ac1STejun Heo  *
178976326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
179076326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
179176326ac1STejun Heo  *
179276326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
179376326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
179476326ac1STejun Heo  *
179576326ac1STejun Heo  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
17963884f7b0STejun Heo  *	   ocurred during last 5 mins, FALLBACK_TO_PIO
17973884f7b0STejun Heo  *
179876326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
17993884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
18003884f7b0STejun Heo  *
180176326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
18023884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
18037d47e8d4STejun Heo  *
1804c6fd2807SJeff Garzik  *	LOCKING:
1805c6fd2807SJeff Garzik  *	Inherited from caller.
1806c6fd2807SJeff Garzik  *
1807c6fd2807SJeff Garzik  *	RETURNS:
18087d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
1809c6fd2807SJeff Garzik  */
18107d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1811c6fd2807SJeff Garzik {
18127d47e8d4STejun Heo 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
18137d47e8d4STejun Heo 	u64 j64 = get_jiffies_64();
18147d47e8d4STejun Heo 	struct speed_down_verdict_arg arg;
18157d47e8d4STejun Heo 	unsigned int verdict = 0;
1816c6fd2807SJeff Garzik 
18173884f7b0STejun Heo 	/* scan past 5 mins of error history */
18183884f7b0STejun Heo 	memset(&arg, 0, sizeof(arg));
18193884f7b0STejun Heo 	arg.since = j64 - min(j64, j5mins);
18203884f7b0STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
18213884f7b0STejun Heo 
182276326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
182376326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
182476326ac1STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
182576326ac1STejun Heo 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
182676326ac1STejun Heo 
182776326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
182876326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
182976326ac1STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
183076326ac1STejun Heo 
18313884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18323884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1833663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18343884f7b0STejun Heo 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
18353884f7b0STejun Heo 
18367d47e8d4STejun Heo 	/* scan past 10 mins of error history */
1837c6fd2807SJeff Garzik 	memset(&arg, 0, sizeof(arg));
18387d47e8d4STejun Heo 	arg.since = j64 - min(j64, j10mins);
18397d47e8d4STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1840c6fd2807SJeff Garzik 
18413884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
18423884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
18437d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF;
18443884f7b0STejun Heo 
18453884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
18463884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1847663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18487d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1849c6fd2807SJeff Garzik 
18507d47e8d4STejun Heo 	return verdict;
1851c6fd2807SJeff Garzik }
1852c6fd2807SJeff Garzik 
1853c6fd2807SJeff Garzik /**
1854c6fd2807SJeff Garzik  *	ata_eh_speed_down - record error and speed down if necessary
1855c6fd2807SJeff Garzik  *	@dev: Failed device
18563884f7b0STejun Heo  *	@eflags: mask of ATA_EFLAG_* flags
1857c6fd2807SJeff Garzik  *	@err_mask: err_mask of the error
1858c6fd2807SJeff Garzik  *
1859c6fd2807SJeff Garzik  *	Record error and examine error history to determine whether
1860c6fd2807SJeff Garzik  *	adjusting transmission speed is necessary.  It also sets
1861c6fd2807SJeff Garzik  *	transmission limits appropriately if such adjustment is
1862c6fd2807SJeff Garzik  *	necessary.
1863c6fd2807SJeff Garzik  *
1864c6fd2807SJeff Garzik  *	LOCKING:
1865c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1866c6fd2807SJeff Garzik  *
1867c6fd2807SJeff Garzik  *	RETURNS:
18687d47e8d4STejun Heo  *	Determined recovery action.
1869c6fd2807SJeff Garzik  */
18703884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
18713884f7b0STejun Heo 				unsigned int eflags, unsigned int err_mask)
1872c6fd2807SJeff Garzik {
1873b1c72916STejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
187476326ac1STejun Heo 	int xfer_ok = 0;
18757d47e8d4STejun Heo 	unsigned int verdict;
18767d47e8d4STejun Heo 	unsigned int action = 0;
18777d47e8d4STejun Heo 
18787d47e8d4STejun Heo 	/* don't bother if Cat-0 error */
187976326ac1STejun Heo 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1880c6fd2807SJeff Garzik 		return 0;
1881c6fd2807SJeff Garzik 
1882c6fd2807SJeff Garzik 	/* record error and determine whether speed down is necessary */
18833884f7b0STejun Heo 	ata_ering_record(&dev->ering, eflags, err_mask);
18847d47e8d4STejun Heo 	verdict = ata_eh_speed_down_verdict(dev);
1885c6fd2807SJeff Garzik 
18867d47e8d4STejun Heo 	/* turn off NCQ? */
18877d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
18887d47e8d4STejun Heo 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
18897d47e8d4STejun Heo 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
18907d47e8d4STejun Heo 		dev->flags |= ATA_DFLAG_NCQ_OFF;
18917d47e8d4STejun Heo 		ata_dev_printk(dev, KERN_WARNING,
18927d47e8d4STejun Heo 			       "NCQ disabled due to excessive errors\n");
18937d47e8d4STejun Heo 		goto done;
18947d47e8d4STejun Heo 	}
1895c6fd2807SJeff Garzik 
18967d47e8d4STejun Heo 	/* speed down? */
18977d47e8d4STejun Heo 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1898c6fd2807SJeff Garzik 		/* speed down SATA link speed if possible */
1899a07d499bSTejun Heo 		if (sata_down_spd_limit(link, 0) == 0) {
1900cf480626STejun Heo 			action |= ATA_EH_RESET;
19017d47e8d4STejun Heo 			goto done;
19027d47e8d4STejun Heo 		}
1903c6fd2807SJeff Garzik 
1904c6fd2807SJeff Garzik 		/* lower transfer mode */
19057d47e8d4STejun Heo 		if (dev->spdn_cnt < 2) {
19067d47e8d4STejun Heo 			static const int dma_dnxfer_sel[] =
19077d47e8d4STejun Heo 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
19087d47e8d4STejun Heo 			static const int pio_dnxfer_sel[] =
19097d47e8d4STejun Heo 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
19107d47e8d4STejun Heo 			int sel;
1911c6fd2807SJeff Garzik 
19127d47e8d4STejun Heo 			if (dev->xfer_shift != ATA_SHIFT_PIO)
19137d47e8d4STejun Heo 				sel = dma_dnxfer_sel[dev->spdn_cnt];
19147d47e8d4STejun Heo 			else
19157d47e8d4STejun Heo 				sel = pio_dnxfer_sel[dev->spdn_cnt];
19167d47e8d4STejun Heo 
19177d47e8d4STejun Heo 			dev->spdn_cnt++;
19187d47e8d4STejun Heo 
19197d47e8d4STejun Heo 			if (ata_down_xfermask_limit(dev, sel) == 0) {
1920cf480626STejun Heo 				action |= ATA_EH_RESET;
19217d47e8d4STejun Heo 				goto done;
19227d47e8d4STejun Heo 			}
19237d47e8d4STejun Heo 		}
19247d47e8d4STejun Heo 	}
19257d47e8d4STejun Heo 
19267d47e8d4STejun Heo 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1927663f99b8STejun Heo 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
19287d47e8d4STejun Heo 	 */
19297d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1930663f99b8STejun Heo 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
19317d47e8d4STejun Heo 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
19327d47e8d4STejun Heo 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
19337d47e8d4STejun Heo 			dev->spdn_cnt = 0;
1934cf480626STejun Heo 			action |= ATA_EH_RESET;
19357d47e8d4STejun Heo 			goto done;
19367d47e8d4STejun Heo 		}
19377d47e8d4STejun Heo 	}
19387d47e8d4STejun Heo 
1939c6fd2807SJeff Garzik 	return 0;
19407d47e8d4STejun Heo  done:
19417d47e8d4STejun Heo 	/* device has been slowed down, blow error history */
194276326ac1STejun Heo 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
19437d47e8d4STejun Heo 		ata_ering_clear(&dev->ering);
19447d47e8d4STejun Heo 	return action;
1945c6fd2807SJeff Garzik }
1946c6fd2807SJeff Garzik 
1947c6fd2807SJeff Garzik /**
19489b1e2658STejun Heo  *	ata_eh_link_autopsy - analyze error and determine recovery action
19499b1e2658STejun Heo  *	@link: host link to perform autopsy on
1950c6fd2807SJeff Garzik  *
19510260731fSTejun Heo  *	Analyze why @link failed and determine which recovery actions
19520260731fSTejun Heo  *	are needed.  This function also sets more detailed AC_ERR_*
19530260731fSTejun Heo  *	values and fills sense data for ATAPI CHECK SENSE.
1954c6fd2807SJeff Garzik  *
1955c6fd2807SJeff Garzik  *	LOCKING:
1956c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1957c6fd2807SJeff Garzik  */
19589b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
1959c6fd2807SJeff Garzik {
19600260731fSTejun Heo 	struct ata_port *ap = link->ap;
1961936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1962dfcc173dSTejun Heo 	struct ata_device *dev;
19633884f7b0STejun Heo 	unsigned int all_err_mask = 0, eflags = 0;
19643884f7b0STejun Heo 	int tag;
1965c6fd2807SJeff Garzik 	u32 serror;
1966c6fd2807SJeff Garzik 	int rc;
1967c6fd2807SJeff Garzik 
1968c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1969c6fd2807SJeff Garzik 
1970c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1971c6fd2807SJeff Garzik 		return;
1972c6fd2807SJeff Garzik 
1973c6fd2807SJeff Garzik 	/* obtain and analyze SError */
1974936fd732STejun Heo 	rc = sata_scr_read(link, SCR_ERROR, &serror);
1975c6fd2807SJeff Garzik 	if (rc == 0) {
1976c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
19770260731fSTejun Heo 		ata_eh_analyze_serror(link);
19784e57c517STejun Heo 	} else if (rc != -EOPNOTSUPP) {
1979cf480626STejun Heo 		/* SError read failed, force reset and probing */
1980b558edddSTejun Heo 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
1981cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
19824e57c517STejun Heo 		ehc->i.err_mask |= AC_ERR_OTHER;
19834e57c517STejun Heo 	}
1984c6fd2807SJeff Garzik 
1985c6fd2807SJeff Garzik 	/* analyze NCQ failure */
19860260731fSTejun Heo 	ata_eh_analyze_ncq_error(link);
1987c6fd2807SJeff Garzik 
1988c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
1989c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
1990c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
1991c6fd2807SJeff Garzik 
1992c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
1993c6fd2807SJeff Garzik 
1994c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1995c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1996c6fd2807SJeff Garzik 
1997b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
1998b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link)
1999c6fd2807SJeff Garzik 			continue;
2000c6fd2807SJeff Garzik 
2001c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
2002c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
2003c6fd2807SJeff Garzik 
2004c6fd2807SJeff Garzik 		/* analyze TF */
2005c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2006c6fd2807SJeff Garzik 
2007c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
2008c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
2009c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2010c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
2011c6fd2807SJeff Garzik 
2012c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
2013c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
2014c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
2015c6fd2807SJeff Garzik 
2016c6fd2807SJeff Garzik 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2017f90f0828STejun Heo 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2018c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2019c6fd2807SJeff Garzik 
202003faab78STejun Heo 		/* determine whether the command is worth retrying */
202103faab78STejun Heo 		if (!(qc->err_mask & AC_ERR_INVALID) &&
202203faab78STejun Heo 		    ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV))
202303faab78STejun Heo 			qc->flags |= ATA_QCFLAG_RETRY;
202403faab78STejun Heo 
2025c6fd2807SJeff Garzik 		/* accumulate error info */
2026c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
2027c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
2028c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
20293884f7b0STejun Heo 			eflags |= ATA_EFLAG_IS_IO;
2030c6fd2807SJeff Garzik 	}
2031c6fd2807SJeff Garzik 
2032c6fd2807SJeff Garzik 	/* enforce default EH actions */
2033c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2034c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2035cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
20363884f7b0STejun Heo 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
20373884f7b0STejun Heo 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2038c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
2039c6fd2807SJeff Garzik 
2040dfcc173dSTejun Heo 	/* If we have offending qcs and the associated failed device,
2041dfcc173dSTejun Heo 	 * perform per-dev EH action only on the offending device.
2042dfcc173dSTejun Heo 	 */
2043c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2044c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
2045c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
2046c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2047c6fd2807SJeff Garzik 	}
2048c6fd2807SJeff Garzik 
20492695e366STejun Heo 	/* propagate timeout to host link */
20502695e366STejun Heo 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
20512695e366STejun Heo 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
20522695e366STejun Heo 
20532695e366STejun Heo 	/* record error and consider speeding down */
2054dfcc173dSTejun Heo 	dev = ehc->i.dev;
20552695e366STejun Heo 	if (!dev && ((ata_link_max_devices(link) == 1 &&
20562695e366STejun Heo 		      ata_dev_enabled(link->device))))
2057dfcc173dSTejun Heo 	    dev = link->device;
2058dfcc173dSTejun Heo 
205976326ac1STejun Heo 	if (dev) {
206076326ac1STejun Heo 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
206176326ac1STejun Heo 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
20623884f7b0STejun Heo 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
206376326ac1STejun Heo 	}
2064dfcc173dSTejun Heo 
2065c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
2066c6fd2807SJeff Garzik }
2067c6fd2807SJeff Garzik 
2068c6fd2807SJeff Garzik /**
20699b1e2658STejun Heo  *	ata_eh_autopsy - analyze error and determine recovery action
20709b1e2658STejun Heo  *	@ap: host port to perform autopsy on
20719b1e2658STejun Heo  *
20729b1e2658STejun Heo  *	Analyze all links of @ap and determine why they failed and
20739b1e2658STejun Heo  *	which recovery actions are needed.
20749b1e2658STejun Heo  *
20759b1e2658STejun Heo  *	LOCKING:
20769b1e2658STejun Heo  *	Kernel thread context (may sleep).
20779b1e2658STejun Heo  */
2078fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
20799b1e2658STejun Heo {
20809b1e2658STejun Heo 	struct ata_link *link;
20819b1e2658STejun Heo 
20821eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE)
20839b1e2658STejun Heo 		ata_eh_link_autopsy(link);
20842695e366STejun Heo 
2085b1c72916STejun Heo 	/* Handle the frigging slave link.  Autopsy is done similarly
2086b1c72916STejun Heo 	 * but actions and flags are transferred over to the master
2087b1c72916STejun Heo 	 * link and handled from there.
2088b1c72916STejun Heo 	 */
2089b1c72916STejun Heo 	if (ap->slave_link) {
2090b1c72916STejun Heo 		struct ata_eh_context *mehc = &ap->link.eh_context;
2091b1c72916STejun Heo 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2092b1c72916STejun Heo 
2093848e4c68STejun Heo 		/* transfer control flags from master to slave */
2094848e4c68STejun Heo 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2095848e4c68STejun Heo 
2096848e4c68STejun Heo 		/* perform autopsy on the slave link */
2097b1c72916STejun Heo 		ata_eh_link_autopsy(ap->slave_link);
2098b1c72916STejun Heo 
2099848e4c68STejun Heo 		/* transfer actions from slave to master and clear slave */
2100b1c72916STejun Heo 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2101b1c72916STejun Heo 		mehc->i.action		|= sehc->i.action;
2102b1c72916STejun Heo 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2103b1c72916STejun Heo 		mehc->i.flags		|= sehc->i.flags;
2104b1c72916STejun Heo 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2105b1c72916STejun Heo 	}
2106b1c72916STejun Heo 
21072695e366STejun Heo 	/* Autopsy of fanout ports can affect host link autopsy.
21082695e366STejun Heo 	 * Perform host link autopsy last.
21092695e366STejun Heo 	 */
2110071f44b1STejun Heo 	if (sata_pmp_attached(ap))
21112695e366STejun Heo 		ata_eh_link_autopsy(&ap->link);
21129b1e2658STejun Heo }
21139b1e2658STejun Heo 
21149b1e2658STejun Heo /**
21159b1e2658STejun Heo  *	ata_eh_link_report - report error handling to user
21160260731fSTejun Heo  *	@link: ATA link EH is going on
2117c6fd2807SJeff Garzik  *
2118c6fd2807SJeff Garzik  *	Report EH to user.
2119c6fd2807SJeff Garzik  *
2120c6fd2807SJeff Garzik  *	LOCKING:
2121c6fd2807SJeff Garzik  *	None.
2122c6fd2807SJeff Garzik  */
21239b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2124c6fd2807SJeff Garzik {
21250260731fSTejun Heo 	struct ata_port *ap = link->ap;
21260260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2127c6fd2807SJeff Garzik 	const char *frozen, *desc;
2128a1e10f7eSTejun Heo 	char tries_buf[6];
2129c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
2130c6fd2807SJeff Garzik 
213194ff3d54STejun Heo 	if (ehc->i.flags & ATA_EHI_QUIET)
213294ff3d54STejun Heo 		return;
213394ff3d54STejun Heo 
2134c6fd2807SJeff Garzik 	desc = NULL;
2135c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
2136c6fd2807SJeff Garzik 		desc = ehc->i.desc;
2137c6fd2807SJeff Garzik 
2138c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2139c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2140c6fd2807SJeff Garzik 
2141b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2142b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link ||
2143e027bd36STejun Heo 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2144e027bd36STejun Heo 		     qc->err_mask == AC_ERR_DEV))
2145c6fd2807SJeff Garzik 			continue;
2146c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2147c6fd2807SJeff Garzik 			continue;
2148c6fd2807SJeff Garzik 
2149c6fd2807SJeff Garzik 		nr_failed++;
2150c6fd2807SJeff Garzik 	}
2151c6fd2807SJeff Garzik 
2152c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
2153c6fd2807SJeff Garzik 		return;
2154c6fd2807SJeff Garzik 
2155c6fd2807SJeff Garzik 	frozen = "";
2156c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
2157c6fd2807SJeff Garzik 		frozen = " frozen";
2158c6fd2807SJeff Garzik 
2159a1e10f7eSTejun Heo 	memset(tries_buf, 0, sizeof(tries_buf));
2160a1e10f7eSTejun Heo 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2161a1e10f7eSTejun Heo 		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2162a1e10f7eSTejun Heo 			 ap->eh_tries);
2163a1e10f7eSTejun Heo 
2164c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2165c6fd2807SJeff Garzik 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2166a1e10f7eSTejun Heo 			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2167a1e10f7eSTejun Heo 			       ehc->i.err_mask, link->sactive, ehc->i.serror,
2168a1e10f7eSTejun Heo 			       ehc->i.action, frozen, tries_buf);
2169c6fd2807SJeff Garzik 		if (desc)
2170b64bbc39STejun Heo 			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2171c6fd2807SJeff Garzik 	} else {
21720260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2173a1e10f7eSTejun Heo 				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2174a1e10f7eSTejun Heo 				ehc->i.err_mask, link->sactive, ehc->i.serror,
2175a1e10f7eSTejun Heo 				ehc->i.action, frozen, tries_buf);
2176c6fd2807SJeff Garzik 		if (desc)
21770260731fSTejun Heo 			ata_link_printk(link, KERN_ERR, "%s\n", desc);
2178c6fd2807SJeff Garzik 	}
2179c6fd2807SJeff Garzik 
21801333e194SRobert Hancock 	if (ehc->i.serror)
2181da0e21d3STejun Heo 		ata_link_printk(link, KERN_ERR,
21821333e194SRobert Hancock 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
21831333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
21841333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
21851333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
21861333e194SRobert Hancock 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
21871333e194SRobert Hancock 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
21881333e194SRobert Hancock 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
21891333e194SRobert Hancock 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
21901333e194SRobert Hancock 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
21911333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
21921333e194SRobert Hancock 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
21931333e194SRobert Hancock 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
21941333e194SRobert Hancock 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
21951333e194SRobert Hancock 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
21961333e194SRobert Hancock 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
21971333e194SRobert Hancock 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
21981333e194SRobert Hancock 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
21991333e194SRobert Hancock 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
22001333e194SRobert Hancock 
2201c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2202c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
22038a937581STejun Heo 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2204abb6a889STejun Heo 		const u8 *cdb = qc->cdb;
2205abb6a889STejun Heo 		char data_buf[20] = "";
2206abb6a889STejun Heo 		char cdb_buf[70] = "";
2207c6fd2807SJeff Garzik 
22080260731fSTejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2209b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2210c6fd2807SJeff Garzik 			continue;
2211c6fd2807SJeff Garzik 
2212abb6a889STejun Heo 		if (qc->dma_dir != DMA_NONE) {
2213abb6a889STejun Heo 			static const char *dma_str[] = {
2214abb6a889STejun Heo 				[DMA_BIDIRECTIONAL]	= "bidi",
2215abb6a889STejun Heo 				[DMA_TO_DEVICE]		= "out",
2216abb6a889STejun Heo 				[DMA_FROM_DEVICE]	= "in",
2217abb6a889STejun Heo 			};
2218abb6a889STejun Heo 			static const char *prot_str[] = {
2219abb6a889STejun Heo 				[ATA_PROT_PIO]		= "pio",
2220abb6a889STejun Heo 				[ATA_PROT_DMA]		= "dma",
2221abb6a889STejun Heo 				[ATA_PROT_NCQ]		= "ncq",
22220dc36888STejun Heo 				[ATAPI_PROT_PIO]	= "pio",
22230dc36888STejun Heo 				[ATAPI_PROT_DMA]	= "dma",
2224abb6a889STejun Heo 			};
2225abb6a889STejun Heo 
2226abb6a889STejun Heo 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2227abb6a889STejun Heo 				 prot_str[qc->tf.protocol], qc->nbytes,
2228abb6a889STejun Heo 				 dma_str[qc->dma_dir]);
2229abb6a889STejun Heo 		}
2230abb6a889STejun Heo 
2231e39eec13SJeff Garzik 		if (ata_is_atapi(qc->tf.protocol))
2232abb6a889STejun Heo 			snprintf(cdb_buf, sizeof(cdb_buf),
2233abb6a889STejun Heo 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2234abb6a889STejun Heo 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2235abb6a889STejun Heo 				 cdb[0], cdb[1], cdb[2], cdb[3],
2236abb6a889STejun Heo 				 cdb[4], cdb[5], cdb[6], cdb[7],
2237abb6a889STejun Heo 				 cdb[8], cdb[9], cdb[10], cdb[11],
2238abb6a889STejun Heo 				 cdb[12], cdb[13], cdb[14], cdb[15]);
2239abb6a889STejun Heo 
22408a937581STejun Heo 		ata_dev_printk(qc->dev, KERN_ERR,
22418a937581STejun Heo 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2242abb6a889STejun Heo 			"tag %d%s\n         %s"
22438a937581STejun Heo 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
22445335b729STejun Heo 			"Emask 0x%x (%s)%s\n",
22458a937581STejun Heo 			cmd->command, cmd->feature, cmd->nsect,
22468a937581STejun Heo 			cmd->lbal, cmd->lbam, cmd->lbah,
22478a937581STejun Heo 			cmd->hob_feature, cmd->hob_nsect,
22488a937581STejun Heo 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2249abb6a889STejun Heo 			cmd->device, qc->tag, data_buf, cdb_buf,
22508a937581STejun Heo 			res->command, res->feature, res->nsect,
22518a937581STejun Heo 			res->lbal, res->lbam, res->lbah,
22528a937581STejun Heo 			res->hob_feature, res->hob_nsect,
22538a937581STejun Heo 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
22545335b729STejun Heo 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
22555335b729STejun Heo 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
22561333e194SRobert Hancock 
22571333e194SRobert Hancock 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
22581333e194SRobert Hancock 				    ATA_ERR)) {
22591333e194SRobert Hancock 			if (res->command & ATA_BUSY)
22601333e194SRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
22611333e194SRobert Hancock 				  "status: { Busy }\n");
22621333e194SRobert Hancock 			else
22631333e194SRobert Hancock 				ata_dev_printk(qc->dev, KERN_ERR,
22641333e194SRobert Hancock 				  "status: { %s%s%s%s}\n",
22651333e194SRobert Hancock 				  res->command & ATA_DRDY ? "DRDY " : "",
22661333e194SRobert Hancock 				  res->command & ATA_DF ? "DF " : "",
22671333e194SRobert Hancock 				  res->command & ATA_DRQ ? "DRQ " : "",
22681333e194SRobert Hancock 				  res->command & ATA_ERR ? "ERR " : "");
22691333e194SRobert Hancock 		}
22701333e194SRobert Hancock 
22711333e194SRobert Hancock 		if (cmd->command != ATA_CMD_PACKET &&
22721333e194SRobert Hancock 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
22731333e194SRobert Hancock 				     ATA_ABORTED)))
22741333e194SRobert Hancock 			ata_dev_printk(qc->dev, KERN_ERR,
22751333e194SRobert Hancock 			  "error: { %s%s%s%s}\n",
22761333e194SRobert Hancock 			  res->feature & ATA_ICRC ? "ICRC " : "",
22771333e194SRobert Hancock 			  res->feature & ATA_UNC ? "UNC " : "",
22781333e194SRobert Hancock 			  res->feature & ATA_IDNF ? "IDNF " : "",
22791333e194SRobert Hancock 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2280c6fd2807SJeff Garzik 	}
2281c6fd2807SJeff Garzik }
2282c6fd2807SJeff Garzik 
22839b1e2658STejun Heo /**
22849b1e2658STejun Heo  *	ata_eh_report - report error handling to user
22859b1e2658STejun Heo  *	@ap: ATA port to report EH about
22869b1e2658STejun Heo  *
22879b1e2658STejun Heo  *	Report EH to user.
22889b1e2658STejun Heo  *
22899b1e2658STejun Heo  *	LOCKING:
22909b1e2658STejun Heo  *	None.
22919b1e2658STejun Heo  */
2292fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
22939b1e2658STejun Heo {
22949b1e2658STejun Heo 	struct ata_link *link;
22959b1e2658STejun Heo 
22961eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
22979b1e2658STejun Heo 		ata_eh_link_report(link);
22989b1e2658STejun Heo }
22999b1e2658STejun Heo 
2300cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2301b1c72916STejun Heo 			unsigned int *classes, unsigned long deadline,
2302b1c72916STejun Heo 			bool clear_classes)
2303c6fd2807SJeff Garzik {
2304f58229f8STejun Heo 	struct ata_device *dev;
2305c6fd2807SJeff Garzik 
2306b1c72916STejun Heo 	if (clear_classes)
23071eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
2308f58229f8STejun Heo 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2309c6fd2807SJeff Garzik 
2310f046519fSTejun Heo 	return reset(link, classes, deadline);
2311c6fd2807SJeff Garzik }
2312c6fd2807SJeff Garzik 
2313ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link,
23145dbfc9cbSTejun Heo 				       int rc, const unsigned int *classes)
2315c6fd2807SJeff Garzik {
231645db2f6cSTejun Heo 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2317ae791c05STejun Heo 		return 0;
23185dbfc9cbSTejun Heo 	if (rc == -EAGAIN)
2319c6fd2807SJeff Garzik 		return 1;
2320071f44b1STejun Heo 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
23213495de73STejun Heo 		return 1;
2322c6fd2807SJeff Garzik 	return 0;
2323c6fd2807SJeff Garzik }
2324c6fd2807SJeff Garzik 
2325fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2326c6fd2807SJeff Garzik 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2327c6fd2807SJeff Garzik 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2328c6fd2807SJeff Garzik {
2329afaa5c37STejun Heo 	struct ata_port *ap = link->ap;
2330b1c72916STejun Heo 	struct ata_link *slave = ap->slave_link;
2331936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2332705d2014SBartlomiej Zolnierkiewicz 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2333c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
2334416dc9edSTejun Heo 	unsigned int lflags = link->flags;
2335c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2336d8af0eb6STejun Heo 	int max_tries = 0, try = 0;
2337b1c72916STejun Heo 	struct ata_link *failed_link;
2338f58229f8STejun Heo 	struct ata_device *dev;
2339416dc9edSTejun Heo 	unsigned long deadline, now;
2340c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
2341afaa5c37STejun Heo 	unsigned long flags;
2342416dc9edSTejun Heo 	u32 sstatus;
2343b1c72916STejun Heo 	int nr_unknown, rc;
2344c6fd2807SJeff Garzik 
2345932648b0STejun Heo 	/*
2346932648b0STejun Heo 	 * Prepare to reset
2347932648b0STejun Heo 	 */
2348d8af0eb6STejun Heo 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2349d8af0eb6STejun Heo 		max_tries++;
235005944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_HRST)
235105944bdfSTejun Heo 		hardreset = NULL;
235205944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_SRST)
235305944bdfSTejun Heo 		softreset = NULL;
2354d8af0eb6STejun Heo 
235519b72321STejun Heo 	/* make sure each reset attemp is at least COOL_DOWN apart */
235619b72321STejun Heo 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
23570a2c0f56STejun Heo 		now = jiffies;
235819b72321STejun Heo 		WARN_ON(time_after(ehc->last_reset, now));
235919b72321STejun Heo 		deadline = ata_deadline(ehc->last_reset,
236019b72321STejun Heo 					ATA_EH_RESET_COOL_DOWN);
23610a2c0f56STejun Heo 		if (time_before(now, deadline))
23620a2c0f56STejun Heo 			schedule_timeout_uninterruptible(deadline - now);
236319b72321STejun Heo 	}
23640a2c0f56STejun Heo 
2365afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2366afaa5c37STejun Heo 	ap->pflags |= ATA_PFLAG_RESETTING;
2367afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2368afaa5c37STejun Heo 
2369cf480626STejun Heo 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2370c6fd2807SJeff Garzik 
23711eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2372cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2373cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2374cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2375cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2376cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2377cdeab114STejun Heo 		 * bus as we may be talking too fast.
2378cdeab114STejun Heo 		 */
2379cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2380cdeab114STejun Heo 
2381cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2382cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2383cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2384cdeab114STejun Heo 		 * configuring devices.
2385cdeab114STejun Heo 		 */
2386cdeab114STejun Heo 		if (ap->ops->set_piomode)
2387cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2388cdeab114STejun Heo 	}
2389cdeab114STejun Heo 
2390cf480626STejun Heo 	/* prefer hardreset */
2391932648b0STejun Heo 	reset = NULL;
2392cf480626STejun Heo 	ehc->i.action &= ~ATA_EH_RESET;
2393cf480626STejun Heo 	if (hardreset) {
2394cf480626STejun Heo 		reset = hardreset;
2395a674050eSTejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
23964f7faa3fSTejun Heo 	} else if (softreset) {
2397cf480626STejun Heo 		reset = softreset;
2398a674050eSTejun Heo 		ehc->i.action |= ATA_EH_SOFTRESET;
2399cf480626STejun Heo 	}
2400c6fd2807SJeff Garzik 
2401c6fd2807SJeff Garzik 	if (prereset) {
2402b1c72916STejun Heo 		unsigned long deadline = ata_deadline(jiffies,
2403b1c72916STejun Heo 						      ATA_EH_PRERESET_TIMEOUT);
2404b1c72916STejun Heo 
2405b1c72916STejun Heo 		if (slave) {
2406b1c72916STejun Heo 			sehc->i.action &= ~ATA_EH_RESET;
2407b1c72916STejun Heo 			sehc->i.action |= ehc->i.action;
2408b1c72916STejun Heo 		}
2409b1c72916STejun Heo 
2410b1c72916STejun Heo 		rc = prereset(link, deadline);
2411b1c72916STejun Heo 
2412b1c72916STejun Heo 		/* If present, do prereset on slave link too.  Reset
2413b1c72916STejun Heo 		 * is skipped iff both master and slave links report
2414b1c72916STejun Heo 		 * -ENOENT or clear ATA_EH_RESET.
2415b1c72916STejun Heo 		 */
2416b1c72916STejun Heo 		if (slave && (rc == 0 || rc == -ENOENT)) {
2417b1c72916STejun Heo 			int tmp;
2418b1c72916STejun Heo 
2419b1c72916STejun Heo 			tmp = prereset(slave, deadline);
2420b1c72916STejun Heo 			if (tmp != -ENOENT)
2421b1c72916STejun Heo 				rc = tmp;
2422b1c72916STejun Heo 
2423b1c72916STejun Heo 			ehc->i.action |= sehc->i.action;
2424b1c72916STejun Heo 		}
2425b1c72916STejun Heo 
2426c6fd2807SJeff Garzik 		if (rc) {
2427c961922bSAlan Cox 			if (rc == -ENOENT) {
2428cc0680a5STejun Heo 				ata_link_printk(link, KERN_DEBUG,
24294aa9ab67STejun Heo 						"port disabled. ignoring.\n");
2430cf480626STejun Heo 				ehc->i.action &= ~ATA_EH_RESET;
24314aa9ab67STejun Heo 
24321eca4365STejun Heo 				ata_for_each_dev(dev, link, ALL)
2433f58229f8STejun Heo 					classes[dev->devno] = ATA_DEV_NONE;
24344aa9ab67STejun Heo 
24354aa9ab67STejun Heo 				rc = 0;
2436c961922bSAlan Cox 			} else
2437cc0680a5STejun Heo 				ata_link_printk(link, KERN_ERR,
2438c6fd2807SJeff Garzik 					"prereset failed (errno=%d)\n", rc);
2439fccb6ea5STejun Heo 			goto out;
2440c6fd2807SJeff Garzik 		}
2441c6fd2807SJeff Garzik 
2442932648b0STejun Heo 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2443d6515e6fSTejun Heo 		 * bang classes, thaw and return.
2444932648b0STejun Heo 		 */
2445932648b0STejun Heo 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
24461eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
2447f58229f8STejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2448d6515e6fSTejun Heo 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2449d6515e6fSTejun Heo 			    ata_is_host_link(link))
2450d6515e6fSTejun Heo 				ata_eh_thaw_port(ap);
2451fccb6ea5STejun Heo 			rc = 0;
2452fccb6ea5STejun Heo 			goto out;
2453c6fd2807SJeff Garzik 		}
2454932648b0STejun Heo 	}
2455c6fd2807SJeff Garzik 
2456c6fd2807SJeff Garzik  retry:
2457932648b0STejun Heo 	/*
2458932648b0STejun Heo 	 * Perform reset
2459932648b0STejun Heo 	 */
2460dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2461dc98c32cSTejun Heo 		ata_eh_freeze_port(ap);
2462dc98c32cSTejun Heo 
2463341c2c95STejun Heo 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
246431daabdaSTejun Heo 
2465932648b0STejun Heo 	if (reset) {
2466c6fd2807SJeff Garzik 		if (verbose)
2467cc0680a5STejun Heo 			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2468c6fd2807SJeff Garzik 					reset == softreset ? "soft" : "hard");
2469c6fd2807SJeff Garzik 
2470c6fd2807SJeff Garzik 		/* mark that this EH session started with reset */
247119b72321STejun Heo 		ehc->last_reset = jiffies;
24720d64a233STejun Heo 		if (reset == hardreset)
24730d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
24740d64a233STejun Heo 		else
24750d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2476c6fd2807SJeff Garzik 
2477b1c72916STejun Heo 		rc = ata_do_reset(link, reset, classes, deadline, true);
2478b1c72916STejun Heo 		if (rc && rc != -EAGAIN) {
2479b1c72916STejun Heo 			failed_link = link;
24805dbfc9cbSTejun Heo 			goto fail;
2481b1c72916STejun Heo 		}
2482c6fd2807SJeff Garzik 
2483b1c72916STejun Heo 		/* hardreset slave link if existent */
2484b1c72916STejun Heo 		if (slave && reset == hardreset) {
2485b1c72916STejun Heo 			int tmp;
2486b1c72916STejun Heo 
2487b1c72916STejun Heo 			if (verbose)
2488b1c72916STejun Heo 				ata_link_printk(slave, KERN_INFO,
2489b1c72916STejun Heo 						"hard resetting link\n");
2490b1c72916STejun Heo 
2491b1c72916STejun Heo 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2492b1c72916STejun Heo 			tmp = ata_do_reset(slave, reset, classes, deadline,
2493b1c72916STejun Heo 					   false);
2494b1c72916STejun Heo 			switch (tmp) {
2495b1c72916STejun Heo 			case -EAGAIN:
2496b1c72916STejun Heo 				rc = -EAGAIN;
2497b1c72916STejun Heo 			case 0:
2498b1c72916STejun Heo 				break;
2499b1c72916STejun Heo 			default:
2500b1c72916STejun Heo 				failed_link = slave;
2501b1c72916STejun Heo 				rc = tmp;
2502b1c72916STejun Heo 				goto fail;
2503b1c72916STejun Heo 			}
2504b1c72916STejun Heo 		}
2505b1c72916STejun Heo 
2506b1c72916STejun Heo 		/* perform follow-up SRST if necessary */
2507c6fd2807SJeff Garzik 		if (reset == hardreset &&
25085dbfc9cbSTejun Heo 		    ata_eh_followup_srst_needed(link, rc, classes)) {
2509c6fd2807SJeff Garzik 			reset = softreset;
2510c6fd2807SJeff Garzik 
2511c6fd2807SJeff Garzik 			if (!reset) {
2512cc0680a5STejun Heo 				ata_link_printk(link, KERN_ERR,
2513c6fd2807SJeff Garzik 						"follow-up softreset required "
2514c6fd2807SJeff Garzik 						"but no softreset avaliable\n");
2515b1c72916STejun Heo 				failed_link = link;
2516fccb6ea5STejun Heo 				rc = -EINVAL;
251708cf69d0STejun Heo 				goto fail;
2518c6fd2807SJeff Garzik 			}
2519c6fd2807SJeff Garzik 
2520cf480626STejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2521b1c72916STejun Heo 			rc = ata_do_reset(link, reset, classes, deadline, true);
2522fe2c4d01STejun Heo 			if (rc) {
2523fe2c4d01STejun Heo 				failed_link = link;
2524fe2c4d01STejun Heo 				goto fail;
2525fe2c4d01STejun Heo 			}
2526c6fd2807SJeff Garzik 		}
2527932648b0STejun Heo 	} else {
2528932648b0STejun Heo 		if (verbose)
2529932648b0STejun Heo 			ata_link_printk(link, KERN_INFO, "no reset method "
2530932648b0STejun Heo 					"available, skipping reset\n");
2531932648b0STejun Heo 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2532932648b0STejun Heo 			lflags |= ATA_LFLAG_ASSUME_ATA;
2533932648b0STejun Heo 	}
2534008a7896STejun Heo 
2535932648b0STejun Heo 	/*
2536932648b0STejun Heo 	 * Post-reset processing
2537932648b0STejun Heo 	 */
25381eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2539416dc9edSTejun Heo 		/* After the reset, the device state is PIO 0 and the
2540416dc9edSTejun Heo 		 * controller state is undefined.  Reset also wakes up
2541416dc9edSTejun Heo 		 * drives from sleeping mode.
2542c6fd2807SJeff Garzik 		 */
2543f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2544054a5fbaSTejun Heo 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2545c6fd2807SJeff Garzik 
2546816ab897STejun Heo 		if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
25474ccd3329STejun Heo 			/* apply class override */
2548416dc9edSTejun Heo 			if (lflags & ATA_LFLAG_ASSUME_ATA)
2549ae791c05STejun Heo 				classes[dev->devno] = ATA_DEV_ATA;
2550416dc9edSTejun Heo 			else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2551816ab897STejun Heo 				classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2552816ab897STejun Heo 		} else
2553816ab897STejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
2554ae791c05STejun Heo 	}
2555ae791c05STejun Heo 
2556008a7896STejun Heo 	/* record current link speed */
2557936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2558936fd732STejun Heo 		link->sata_spd = (sstatus >> 4) & 0xf;
2559b1c72916STejun Heo 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2560b1c72916STejun Heo 		slave->sata_spd = (sstatus >> 4) & 0xf;
2561008a7896STejun Heo 
2562dc98c32cSTejun Heo 	/* thaw the port */
2563dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2564dc98c32cSTejun Heo 		ata_eh_thaw_port(ap);
2565dc98c32cSTejun Heo 
2566f046519fSTejun Heo 	/* postreset() should clear hardware SError.  Although SError
2567f046519fSTejun Heo 	 * is cleared during link resume, clearing SError here is
2568f046519fSTejun Heo 	 * necessary as some PHYs raise hotplug events after SRST.
2569f046519fSTejun Heo 	 * This introduces race condition where hotplug occurs between
2570f046519fSTejun Heo 	 * reset and here.  This race is mediated by cross checking
2571f046519fSTejun Heo 	 * link onlineness and classification result later.
2572f046519fSTejun Heo 	 */
2573b1c72916STejun Heo 	if (postreset) {
2574cc0680a5STejun Heo 		postreset(link, classes);
2575b1c72916STejun Heo 		if (slave)
2576b1c72916STejun Heo 			postreset(slave, classes);
2577b1c72916STejun Heo 	}
2578c6fd2807SJeff Garzik 
2579f046519fSTejun Heo 	/* clear cached SError */
2580f046519fSTejun Heo 	spin_lock_irqsave(link->ap->lock, flags);
2581f046519fSTejun Heo 	link->eh_info.serror = 0;
2582b1c72916STejun Heo 	if (slave)
2583b1c72916STejun Heo 		slave->eh_info.serror = 0;
2584f046519fSTejun Heo 	spin_unlock_irqrestore(link->ap->lock, flags);
2585f046519fSTejun Heo 
2586f046519fSTejun Heo 	/* Make sure onlineness and classification result correspond.
2587f046519fSTejun Heo 	 * Hotplug could have happened during reset and some
2588f046519fSTejun Heo 	 * controllers fail to wait while a drive is spinning up after
2589f046519fSTejun Heo 	 * being hotplugged causing misdetection.  By cross checking
2590f046519fSTejun Heo 	 * link onlineness and classification result, those conditions
2591f046519fSTejun Heo 	 * can be reliably detected and retried.
2592f046519fSTejun Heo 	 */
2593b1c72916STejun Heo 	nr_unknown = 0;
25941eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2595f046519fSTejun Heo 		/* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */
2596b1c72916STejun Heo 		if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2597f046519fSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
2598b1c72916STejun Heo 			if (ata_phys_link_online(ata_dev_phys_link(dev)))
2599b1c72916STejun Heo 				nr_unknown++;
2600b1c72916STejun Heo 		}
2601f046519fSTejun Heo 	}
2602f046519fSTejun Heo 
2603b1c72916STejun Heo 	if (classify && nr_unknown) {
2604f046519fSTejun Heo 		if (try < max_tries) {
2605f046519fSTejun Heo 			ata_link_printk(link, KERN_WARNING, "link online but "
2606f046519fSTejun Heo 				       "device misclassified, retrying\n");
2607b1c72916STejun Heo 			failed_link = link;
2608f046519fSTejun Heo 			rc = -EAGAIN;
2609f046519fSTejun Heo 			goto fail;
2610f046519fSTejun Heo 		}
2611f046519fSTejun Heo 		ata_link_printk(link, KERN_WARNING,
2612f046519fSTejun Heo 			       "link online but device misclassified, "
2613f046519fSTejun Heo 			       "device detection might fail\n");
2614f046519fSTejun Heo 	}
2615f046519fSTejun Heo 
2616c6fd2807SJeff Garzik 	/* reset successful, schedule revalidation */
2617cf480626STejun Heo 	ata_eh_done(link, NULL, ATA_EH_RESET);
2618b1c72916STejun Heo 	if (slave)
2619b1c72916STejun Heo 		ata_eh_done(slave, NULL, ATA_EH_RESET);
262019b72321STejun Heo 	ehc->last_reset = jiffies;	/* update to completion time */
2621c6fd2807SJeff Garzik 	ehc->i.action |= ATA_EH_REVALIDATE;
2622416dc9edSTejun Heo 
2623416dc9edSTejun Heo 	rc = 0;
2624fccb6ea5STejun Heo  out:
2625fccb6ea5STejun Heo 	/* clear hotplug flag */
2626fccb6ea5STejun Heo 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2627b1c72916STejun Heo 	if (slave)
2628b1c72916STejun Heo 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2629afaa5c37STejun Heo 
2630afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2631afaa5c37STejun Heo 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2632afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2633afaa5c37STejun Heo 
2634c6fd2807SJeff Garzik 	return rc;
2635416dc9edSTejun Heo 
2636416dc9edSTejun Heo  fail:
26375958e302STejun Heo 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
26385958e302STejun Heo 	if (!ata_is_host_link(link) &&
26395958e302STejun Heo 	    sata_scr_read(link, SCR_STATUS, &sstatus))
26405958e302STejun Heo 		rc = -ERESTART;
26415958e302STejun Heo 
2642416dc9edSTejun Heo 	if (rc == -ERESTART || try >= max_tries)
2643416dc9edSTejun Heo 		goto out;
2644416dc9edSTejun Heo 
2645416dc9edSTejun Heo 	now = jiffies;
2646416dc9edSTejun Heo 	if (time_before(now, deadline)) {
2647416dc9edSTejun Heo 		unsigned long delta = deadline - now;
2648416dc9edSTejun Heo 
2649b1c72916STejun Heo 		ata_link_printk(failed_link, KERN_WARNING,
26500a2c0f56STejun Heo 			"reset failed (errno=%d), retrying in %u secs\n",
26510a2c0f56STejun Heo 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2652416dc9edSTejun Heo 
2653416dc9edSTejun Heo 		while (delta)
2654416dc9edSTejun Heo 			delta = schedule_timeout_uninterruptible(delta);
2655416dc9edSTejun Heo 	}
2656416dc9edSTejun Heo 
2657b1c72916STejun Heo 	if (try == max_tries - 1) {
2658a07d499bSTejun Heo 		sata_down_spd_limit(link, 0);
2659b1c72916STejun Heo 		if (slave)
2660a07d499bSTejun Heo 			sata_down_spd_limit(slave, 0);
2661b1c72916STejun Heo 	} else if (rc == -EPIPE)
2662a07d499bSTejun Heo 		sata_down_spd_limit(failed_link, 0);
2663b1c72916STejun Heo 
2664416dc9edSTejun Heo 	if (hardreset)
2665416dc9edSTejun Heo 		reset = hardreset;
2666416dc9edSTejun Heo 	goto retry;
2667c6fd2807SJeff Garzik }
2668c6fd2807SJeff Garzik 
266945fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
267045fabbb7SElias Oltmanns {
267145fabbb7SElias Oltmanns 	struct ata_link *link;
267245fabbb7SElias Oltmanns 	struct ata_device *dev;
267345fabbb7SElias Oltmanns 	unsigned long flags;
267445fabbb7SElias Oltmanns 
267545fabbb7SElias Oltmanns 	/*
267645fabbb7SElias Oltmanns 	 * This function can be thought of as an extended version of
267745fabbb7SElias Oltmanns 	 * ata_eh_about_to_do() specially crafted to accommodate the
267845fabbb7SElias Oltmanns 	 * requirements of ATA_EH_PARK handling. Since the EH thread
267945fabbb7SElias Oltmanns 	 * does not leave the do {} while () loop in ata_eh_recover as
268045fabbb7SElias Oltmanns 	 * long as the timeout for a park request to *one* device on
268145fabbb7SElias Oltmanns 	 * the port has not expired, and since we still want to pick
268245fabbb7SElias Oltmanns 	 * up park requests to other devices on the same port or
268345fabbb7SElias Oltmanns 	 * timeout updates for the same device, we have to pull
268445fabbb7SElias Oltmanns 	 * ATA_EH_PARK actions from eh_info into eh_context.i
268545fabbb7SElias Oltmanns 	 * ourselves at the beginning of each pass over the loop.
268645fabbb7SElias Oltmanns 	 *
268745fabbb7SElias Oltmanns 	 * Additionally, all write accesses to &ap->park_req_pending
268845fabbb7SElias Oltmanns 	 * through INIT_COMPLETION() (see below) or complete_all()
268945fabbb7SElias Oltmanns 	 * (see ata_scsi_park_store()) are protected by the host lock.
269045fabbb7SElias Oltmanns 	 * As a result we have that park_req_pending.done is zero on
269145fabbb7SElias Oltmanns 	 * exit from this function, i.e. when ATA_EH_PARK actions for
269245fabbb7SElias Oltmanns 	 * *all* devices on port ap have been pulled into the
269345fabbb7SElias Oltmanns 	 * respective eh_context structs. If, and only if,
269445fabbb7SElias Oltmanns 	 * park_req_pending.done is non-zero by the time we reach
269545fabbb7SElias Oltmanns 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
269645fabbb7SElias Oltmanns 	 * has been scheduled for at least one of the devices on port
269745fabbb7SElias Oltmanns 	 * ap and we have to cycle over the do {} while () loop in
269845fabbb7SElias Oltmanns 	 * ata_eh_recover() again.
269945fabbb7SElias Oltmanns 	 */
270045fabbb7SElias Oltmanns 
270145fabbb7SElias Oltmanns 	spin_lock_irqsave(ap->lock, flags);
270245fabbb7SElias Oltmanns 	INIT_COMPLETION(ap->park_req_pending);
27031eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
27041eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
270545fabbb7SElias Oltmanns 			struct ata_eh_info *ehi = &link->eh_info;
270645fabbb7SElias Oltmanns 
270745fabbb7SElias Oltmanns 			link->eh_context.i.dev_action[dev->devno] |=
270845fabbb7SElias Oltmanns 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
270945fabbb7SElias Oltmanns 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
271045fabbb7SElias Oltmanns 		}
271145fabbb7SElias Oltmanns 	}
271245fabbb7SElias Oltmanns 	spin_unlock_irqrestore(ap->lock, flags);
271345fabbb7SElias Oltmanns }
271445fabbb7SElias Oltmanns 
271545fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
271645fabbb7SElias Oltmanns {
271745fabbb7SElias Oltmanns 	struct ata_eh_context *ehc = &dev->link->eh_context;
271845fabbb7SElias Oltmanns 	struct ata_taskfile tf;
271945fabbb7SElias Oltmanns 	unsigned int err_mask;
272045fabbb7SElias Oltmanns 
272145fabbb7SElias Oltmanns 	ata_tf_init(dev, &tf);
272245fabbb7SElias Oltmanns 	if (park) {
272345fabbb7SElias Oltmanns 		ehc->unloaded_mask |= 1 << dev->devno;
272445fabbb7SElias Oltmanns 		tf.command = ATA_CMD_IDLEIMMEDIATE;
272545fabbb7SElias Oltmanns 		tf.feature = 0x44;
272645fabbb7SElias Oltmanns 		tf.lbal = 0x4c;
272745fabbb7SElias Oltmanns 		tf.lbam = 0x4e;
272845fabbb7SElias Oltmanns 		tf.lbah = 0x55;
272945fabbb7SElias Oltmanns 	} else {
273045fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
273145fabbb7SElias Oltmanns 		tf.command = ATA_CMD_CHK_POWER;
273245fabbb7SElias Oltmanns 	}
273345fabbb7SElias Oltmanns 
273445fabbb7SElias Oltmanns 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
273545fabbb7SElias Oltmanns 	tf.protocol |= ATA_PROT_NODATA;
273645fabbb7SElias Oltmanns 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
273745fabbb7SElias Oltmanns 	if (park && (err_mask || tf.lbal != 0xc4)) {
273845fabbb7SElias Oltmanns 		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
273945fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
274045fabbb7SElias Oltmanns 	}
274145fabbb7SElias Oltmanns }
274245fabbb7SElias Oltmanns 
27430260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
2744c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
2745c6fd2807SJeff Garzik {
27460260731fSTejun Heo 	struct ata_port *ap = link->ap;
27470260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2748c6fd2807SJeff Garzik 	struct ata_device *dev;
27498c3c52a8STejun Heo 	unsigned int new_mask = 0;
2750c6fd2807SJeff Garzik 	unsigned long flags;
2751f58229f8STejun Heo 	int rc = 0;
2752c6fd2807SJeff Garzik 
2753c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
2754c6fd2807SJeff Garzik 
27558c3c52a8STejun Heo 	/* For PATA drive side cable detection to work, IDENTIFY must
27568c3c52a8STejun Heo 	 * be done backwards such that PDIAG- is released by the slave
27578c3c52a8STejun Heo 	 * device before the master device is identified.
27588c3c52a8STejun Heo 	 */
27591eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL_REVERSE) {
2760f58229f8STejun Heo 		unsigned int action = ata_eh_dev_action(dev);
2761f58229f8STejun Heo 		unsigned int readid_flags = 0;
2762c6fd2807SJeff Garzik 
2763bff04647STejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET)
2764bff04647STejun Heo 			readid_flags |= ATA_READID_POSTRESET;
2765bff04647STejun Heo 
27669666f400STejun Heo 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2767633273a3STejun Heo 			WARN_ON(dev->class == ATA_DEV_PMP);
2768633273a3STejun Heo 
2769b1c72916STejun Heo 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2770c6fd2807SJeff Garzik 				rc = -EIO;
27718c3c52a8STejun Heo 				goto err;
2772c6fd2807SJeff Garzik 			}
2773c6fd2807SJeff Garzik 
27740260731fSTejun Heo 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2775422c9daaSTejun Heo 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2776422c9daaSTejun Heo 						readid_flags);
2777c6fd2807SJeff Garzik 			if (rc)
27788c3c52a8STejun Heo 				goto err;
2779c6fd2807SJeff Garzik 
27800260731fSTejun Heo 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2781c6fd2807SJeff Garzik 
2782baa1e78aSTejun Heo 			/* Configuration may have changed, reconfigure
2783baa1e78aSTejun Heo 			 * transfer mode.
2784baa1e78aSTejun Heo 			 */
2785baa1e78aSTejun Heo 			ehc->i.flags |= ATA_EHI_SETMODE;
2786baa1e78aSTejun Heo 
2787c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
2788c6fd2807SJeff Garzik 			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
2789c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
2790c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
2791c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
2792842faa6cSTejun Heo 			/* Temporarily set dev->class, it will be
2793842faa6cSTejun Heo 			 * permanently set once all configurations are
2794842faa6cSTejun Heo 			 * complete.  This is necessary because new
2795842faa6cSTejun Heo 			 * device configuration is done in two
2796842faa6cSTejun Heo 			 * separate loops.
2797842faa6cSTejun Heo 			 */
2798c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
2799c6fd2807SJeff Garzik 
2800633273a3STejun Heo 			if (dev->class == ATA_DEV_PMP)
2801633273a3STejun Heo 				rc = sata_pmp_attach(dev);
2802633273a3STejun Heo 			else
2803633273a3STejun Heo 				rc = ata_dev_read_id(dev, &dev->class,
2804633273a3STejun Heo 						     readid_flags, dev->id);
2805842faa6cSTejun Heo 
2806842faa6cSTejun Heo 			/* read_id might have changed class, store and reset */
2807842faa6cSTejun Heo 			ehc->classes[dev->devno] = dev->class;
2808842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
2809842faa6cSTejun Heo 
28108c3c52a8STejun Heo 			switch (rc) {
28118c3c52a8STejun Heo 			case 0:
281299cf610aSTejun Heo 				/* clear error info accumulated during probe */
281399cf610aSTejun Heo 				ata_ering_clear(&dev->ering);
2814f58229f8STejun Heo 				new_mask |= 1 << dev->devno;
28158c3c52a8STejun Heo 				break;
28168c3c52a8STejun Heo 			case -ENOENT:
281755a8e2c8STejun Heo 				/* IDENTIFY was issued to non-existent
281855a8e2c8STejun Heo 				 * device.  No need to reset.  Just
2819842faa6cSTejun Heo 				 * thaw and ignore the device.
282055a8e2c8STejun Heo 				 */
282155a8e2c8STejun Heo 				ata_eh_thaw_port(ap);
2822c6fd2807SJeff Garzik 				break;
28238c3c52a8STejun Heo 			default:
28248c3c52a8STejun Heo 				goto err;
28258c3c52a8STejun Heo 			}
28268c3c52a8STejun Heo 		}
2827c6fd2807SJeff Garzik 	}
2828c6fd2807SJeff Garzik 
2829c1c4e8d5STejun Heo 	/* PDIAG- should have been released, ask cable type if post-reset */
283033267325STejun Heo 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
283133267325STejun Heo 		if (ap->ops->cable_detect)
2832c1c4e8d5STejun Heo 			ap->cbl = ap->ops->cable_detect(ap);
283333267325STejun Heo 		ata_force_cbl(ap);
283433267325STejun Heo 	}
2835c1c4e8d5STejun Heo 
28368c3c52a8STejun Heo 	/* Configure new devices forward such that user doesn't see
28378c3c52a8STejun Heo 	 * device detection messages backwards.
28388c3c52a8STejun Heo 	 */
28391eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2840633273a3STejun Heo 		if (!(new_mask & (1 << dev->devno)) ||
2841633273a3STejun Heo 		    dev->class == ATA_DEV_PMP)
28428c3c52a8STejun Heo 			continue;
28438c3c52a8STejun Heo 
2844842faa6cSTejun Heo 		dev->class = ehc->classes[dev->devno];
2845842faa6cSTejun Heo 
28468c3c52a8STejun Heo 		ehc->i.flags |= ATA_EHI_PRINTINFO;
28478c3c52a8STejun Heo 		rc = ata_dev_configure(dev);
28488c3c52a8STejun Heo 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
2849842faa6cSTejun Heo 		if (rc) {
2850842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
28518c3c52a8STejun Heo 			goto err;
2852842faa6cSTejun Heo 		}
28538c3c52a8STejun Heo 
2854c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
2855c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
2856c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
2857baa1e78aSTejun Heo 
285855a8e2c8STejun Heo 		/* new device discovered, configure xfermode */
2859baa1e78aSTejun Heo 		ehc->i.flags |= ATA_EHI_SETMODE;
2860c6fd2807SJeff Garzik 	}
2861c6fd2807SJeff Garzik 
28628c3c52a8STejun Heo 	return 0;
28638c3c52a8STejun Heo 
28648c3c52a8STejun Heo  err:
2865c6fd2807SJeff Garzik 	*r_failed_dev = dev;
28668c3c52a8STejun Heo 	DPRINTK("EXIT rc=%d\n", rc);
2867c6fd2807SJeff Garzik 	return rc;
2868c6fd2807SJeff Garzik }
2869c6fd2807SJeff Garzik 
28706f1d1e3aSTejun Heo /**
28716f1d1e3aSTejun Heo  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
28726f1d1e3aSTejun Heo  *	@link: link on which timings will be programmed
287398a1708dSMartin Olsson  *	@r_failed_dev: out parameter for failed device
28746f1d1e3aSTejun Heo  *
28756f1d1e3aSTejun Heo  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
28766f1d1e3aSTejun Heo  *	ata_set_mode() fails, pointer to the failing device is
28776f1d1e3aSTejun Heo  *	returned in @r_failed_dev.
28786f1d1e3aSTejun Heo  *
28796f1d1e3aSTejun Heo  *	LOCKING:
28806f1d1e3aSTejun Heo  *	PCI/etc. bus probe sem.
28816f1d1e3aSTejun Heo  *
28826f1d1e3aSTejun Heo  *	RETURNS:
28836f1d1e3aSTejun Heo  *	0 on success, negative errno otherwise
28846f1d1e3aSTejun Heo  */
28856f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
28866f1d1e3aSTejun Heo {
28876f1d1e3aSTejun Heo 	struct ata_port *ap = link->ap;
288800115e0fSTejun Heo 	struct ata_device *dev;
288900115e0fSTejun Heo 	int rc;
28906f1d1e3aSTejun Heo 
289176326ac1STejun Heo 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
28921eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
289376326ac1STejun Heo 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
289476326ac1STejun Heo 			struct ata_ering_entry *ent;
289576326ac1STejun Heo 
289676326ac1STejun Heo 			ent = ata_ering_top(&dev->ering);
289776326ac1STejun Heo 			if (ent)
289876326ac1STejun Heo 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
289976326ac1STejun Heo 		}
290076326ac1STejun Heo 	}
290176326ac1STejun Heo 
29026f1d1e3aSTejun Heo 	/* has private set_mode? */
29036f1d1e3aSTejun Heo 	if (ap->ops->set_mode)
290400115e0fSTejun Heo 		rc = ap->ops->set_mode(link, r_failed_dev);
290500115e0fSTejun Heo 	else
290600115e0fSTejun Heo 		rc = ata_do_set_mode(link, r_failed_dev);
290700115e0fSTejun Heo 
290800115e0fSTejun Heo 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
29091eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
291000115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
291100115e0fSTejun Heo 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
291200115e0fSTejun Heo 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
291300115e0fSTejun Heo 
291400115e0fSTejun Heo 		if (dev->xfer_mode != saved_xfer_mode ||
291500115e0fSTejun Heo 		    ata_ncq_enabled(dev) != saved_ncq)
291600115e0fSTejun Heo 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
291700115e0fSTejun Heo 	}
291800115e0fSTejun Heo 
291900115e0fSTejun Heo 	return rc;
29206f1d1e3aSTejun Heo }
29216f1d1e3aSTejun Heo 
292211fc33daSTejun Heo /**
292311fc33daSTejun Heo  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
292411fc33daSTejun Heo  *	@dev: ATAPI device to clear UA for
292511fc33daSTejun Heo  *
292611fc33daSTejun Heo  *	Resets and other operations can make an ATAPI device raise
292711fc33daSTejun Heo  *	UNIT ATTENTION which causes the next operation to fail.  This
292811fc33daSTejun Heo  *	function clears UA.
292911fc33daSTejun Heo  *
293011fc33daSTejun Heo  *	LOCKING:
293111fc33daSTejun Heo  *	EH context (may sleep).
293211fc33daSTejun Heo  *
293311fc33daSTejun Heo  *	RETURNS:
293411fc33daSTejun Heo  *	0 on success, -errno on failure.
293511fc33daSTejun Heo  */
293611fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
293711fc33daSTejun Heo {
293811fc33daSTejun Heo 	int i;
293911fc33daSTejun Heo 
294011fc33daSTejun Heo 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
2941b5357081STejun Heo 		u8 *sense_buffer = dev->link->ap->sector_buf;
294211fc33daSTejun Heo 		u8 sense_key = 0;
294311fc33daSTejun Heo 		unsigned int err_mask;
294411fc33daSTejun Heo 
294511fc33daSTejun Heo 		err_mask = atapi_eh_tur(dev, &sense_key);
294611fc33daSTejun Heo 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
294711fc33daSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
294811fc33daSTejun Heo 				"failed (err_mask=0x%x)\n", err_mask);
294911fc33daSTejun Heo 			return -EIO;
295011fc33daSTejun Heo 		}
295111fc33daSTejun Heo 
295211fc33daSTejun Heo 		if (!err_mask || sense_key != UNIT_ATTENTION)
295311fc33daSTejun Heo 			return 0;
295411fc33daSTejun Heo 
295511fc33daSTejun Heo 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
295611fc33daSTejun Heo 		if (err_mask) {
295711fc33daSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
295811fc33daSTejun Heo 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
295911fc33daSTejun Heo 			return -EIO;
296011fc33daSTejun Heo 		}
296111fc33daSTejun Heo 	}
296211fc33daSTejun Heo 
296311fc33daSTejun Heo 	ata_dev_printk(dev, KERN_WARNING,
296411fc33daSTejun Heo 		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
296511fc33daSTejun Heo 
296611fc33daSTejun Heo 	return 0;
296711fc33daSTejun Heo }
296811fc33daSTejun Heo 
29690260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link)
2970c6fd2807SJeff Garzik {
2971f58229f8STejun Heo 	struct ata_device *dev;
2972f58229f8STejun Heo 	int cnt = 0;
2973c6fd2807SJeff Garzik 
29741eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED)
2975c6fd2807SJeff Garzik 		cnt++;
2976c6fd2807SJeff Garzik 	return cnt;
2977c6fd2807SJeff Garzik }
2978c6fd2807SJeff Garzik 
29790260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
2980c6fd2807SJeff Garzik {
2981f58229f8STejun Heo 	struct ata_device *dev;
2982f58229f8STejun Heo 	int cnt = 0;
2983c6fd2807SJeff Garzik 
29841eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL)
2985f58229f8STejun Heo 		if (dev->class == ATA_DEV_UNKNOWN)
2986c6fd2807SJeff Garzik 			cnt++;
2987c6fd2807SJeff Garzik 	return cnt;
2988c6fd2807SJeff Garzik }
2989c6fd2807SJeff Garzik 
29900260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
2991c6fd2807SJeff Garzik {
2992672b2d65STejun Heo 	struct ata_port *ap = link->ap;
29930260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2994f58229f8STejun Heo 	struct ata_device *dev;
2995c6fd2807SJeff Garzik 
2996f9df58cbSTejun Heo 	/* skip disabled links */
2997f9df58cbSTejun Heo 	if (link->flags & ATA_LFLAG_DISABLED)
2998f9df58cbSTejun Heo 		return 1;
2999f9df58cbSTejun Heo 
3000672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
3001672b2d65STejun Heo 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3002672b2d65STejun Heo 		return 0;
3003672b2d65STejun Heo 
3004672b2d65STejun Heo 	/* reset at least once if reset is requested */
3005672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3006672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3007c6fd2807SJeff Garzik 		return 0;
3008c6fd2807SJeff Garzik 
3009c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
30101eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3011c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3012c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3013c6fd2807SJeff Garzik 			return 0;
3014c6fd2807SJeff Garzik 	}
3015c6fd2807SJeff Garzik 
3016c6fd2807SJeff Garzik 	return 1;
3017c6fd2807SJeff Garzik }
3018c6fd2807SJeff Garzik 
3019c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3020c2c7a89cSTejun Heo {
3021c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3022c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3023c2c7a89cSTejun Heo 	int *trials = void_arg;
3024c2c7a89cSTejun Heo 
3025c2c7a89cSTejun Heo 	if (ent->timestamp < now - min(now, interval))
3026c2c7a89cSTejun Heo 		return -1;
3027c2c7a89cSTejun Heo 
3028c2c7a89cSTejun Heo 	(*trials)++;
3029c2c7a89cSTejun Heo 	return 0;
3030c2c7a89cSTejun Heo }
3031c2c7a89cSTejun Heo 
303202c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
303302c05a27STejun Heo {
303402c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3035c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3036c2c7a89cSTejun Heo 	int trials = 0;
303702c05a27STejun Heo 
303802c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
303902c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
304002c05a27STejun Heo 		return 0;
304102c05a27STejun Heo 
304202c05a27STejun Heo 	ata_eh_detach_dev(dev);
304302c05a27STejun Heo 	ata_dev_init(dev);
304402c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3045cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
304600115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
304700115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
304802c05a27STejun Heo 
3049c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering.  The specific
3050c2c7a89cSTejun Heo 	 * error mask used is irrelevant.  Because a successful device
3051c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3052c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3053c2c7a89cSTejun Heo 	 *
3054c2c7a89cSTejun Heo 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3055c2c7a89cSTejun Heo 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3056c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
3057c2c7a89cSTejun Heo 	 *
3058c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3059c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3060c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3061c2c7a89cSTejun Heo 	 */
3062c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3063c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3064c2c7a89cSTejun Heo 
3065c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3066c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3067c2c7a89cSTejun Heo 
306802c05a27STejun Heo 	return 1;
306902c05a27STejun Heo }
307002c05a27STejun Heo 
30719b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3072fee7ca72STejun Heo {
30739af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3074fee7ca72STejun Heo 
3075cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3076cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3077cf9a590aSTejun Heo 	 */
3078cf9a590aSTejun Heo 	if (err != -EAGAIN)
3079fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3080fee7ca72STejun Heo 
3081fee7ca72STejun Heo 	switch (err) {
3082fee7ca72STejun Heo 	case -ENODEV:
3083fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3084fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
3085fee7ca72STejun Heo 	case -EINVAL:
3086fee7ca72STejun Heo 		/* give it just one more chance */
3087fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3088fee7ca72STejun Heo 	case -EIO:
3089d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3090fee7ca72STejun Heo 			/* This is the last chance, better to slow
3091fee7ca72STejun Heo 			 * down than lose it.
3092fee7ca72STejun Heo 			 */
3093a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3094d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3095fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3096fee7ca72STejun Heo 		}
3097fee7ca72STejun Heo 	}
3098fee7ca72STejun Heo 
3099fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3100fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3101fee7ca72STejun Heo 		ata_dev_disable(dev);
3102fee7ca72STejun Heo 
3103fee7ca72STejun Heo 		/* detach if offline */
3104b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3105fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3106fee7ca72STejun Heo 
310702c05a27STejun Heo 		/* schedule probe if necessary */
310887fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3109fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
311087fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
311187fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
311287fbc5a0STejun Heo 		}
31139b1e2658STejun Heo 
31149b1e2658STejun Heo 		return 1;
3115fee7ca72STejun Heo 	} else {
3116cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
31179b1e2658STejun Heo 		return 0;
3118fee7ca72STejun Heo 	}
3119fee7ca72STejun Heo }
3120fee7ca72STejun Heo 
3121c6fd2807SJeff Garzik /**
3122c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
3123c6fd2807SJeff Garzik  *	@ap: host port to recover
3124c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3125c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3126c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3127c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
31289b1e2658STejun Heo  *	@r_failed_link: out parameter for failed link
3129c6fd2807SJeff Garzik  *
3130c6fd2807SJeff Garzik  *	This is the alpha and omega, eum and yang, heart and soul of
3131c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
31329b1e2658STejun Heo  *	recover each link and hotplug requests are recorded in the
31339b1e2658STejun Heo  *	link's eh_context.  This function executes all the operations
31349b1e2658STejun Heo  *	with appropriate retrials and fallbacks to resurrect failed
3135c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
3136c6fd2807SJeff Garzik  *
3137c6fd2807SJeff Garzik  *	LOCKING:
3138c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3139c6fd2807SJeff Garzik  *
3140c6fd2807SJeff Garzik  *	RETURNS:
3141c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3142c6fd2807SJeff Garzik  */
3143fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3144c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
31459b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
31469b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3147c6fd2807SJeff Garzik {
31489b1e2658STejun Heo 	struct ata_link *link;
3149c6fd2807SJeff Garzik 	struct ata_device *dev;
31500a2c0f56STejun Heo 	int nr_failed_devs;
3151dc98c32cSTejun Heo 	int rc;
315245fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3153c6fd2807SJeff Garzik 
3154c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3155c6fd2807SJeff Garzik 
3156c6fd2807SJeff Garzik 	/* prep for recovery */
31571eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
31589b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
31599b1e2658STejun Heo 
3160f9df58cbSTejun Heo 		/* re-enable link? */
3161f9df58cbSTejun Heo 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3162f9df58cbSTejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3163f9df58cbSTejun Heo 			spin_lock_irqsave(ap->lock, flags);
3164f9df58cbSTejun Heo 			link->flags &= ~ATA_LFLAG_DISABLED;
3165f9df58cbSTejun Heo 			spin_unlock_irqrestore(ap->lock, flags);
3166f9df58cbSTejun Heo 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3167f9df58cbSTejun Heo 		}
3168f9df58cbSTejun Heo 
31691eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
3170fd995f70STejun Heo 			if (link->flags & ATA_LFLAG_NO_RETRY)
3171fd995f70STejun Heo 				ehc->tries[dev->devno] = 1;
3172fd995f70STejun Heo 			else
3173c6fd2807SJeff Garzik 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3174c6fd2807SJeff Garzik 
317579a55b72STejun Heo 			/* collect port action mask recorded in dev actions */
31769b1e2658STejun Heo 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
31779b1e2658STejun Heo 					 ~ATA_EH_PERDEV_MASK;
3178f58229f8STejun Heo 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
317979a55b72STejun Heo 
3180c6fd2807SJeff Garzik 			/* process hotplug request */
3181c6fd2807SJeff Garzik 			if (dev->flags & ATA_DFLAG_DETACH)
3182c6fd2807SJeff Garzik 				ata_eh_detach_dev(dev);
3183c6fd2807SJeff Garzik 
318402c05a27STejun Heo 			/* schedule probe if necessary */
318502c05a27STejun Heo 			if (!ata_dev_enabled(dev))
318602c05a27STejun Heo 				ata_eh_schedule_probe(dev);
3187c6fd2807SJeff Garzik 		}
31889b1e2658STejun Heo 	}
3189c6fd2807SJeff Garzik 
3190c6fd2807SJeff Garzik  retry:
3191c6fd2807SJeff Garzik 	rc = 0;
31929b1e2658STejun Heo 	nr_failed_devs = 0;
3193c6fd2807SJeff Garzik 
3194c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
3195c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3196c6fd2807SJeff Garzik 		goto out;
3197c6fd2807SJeff Garzik 
31989b1e2658STejun Heo 	/* prep for EH */
31991eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
32009b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
32019b1e2658STejun Heo 
3202c6fd2807SJeff Garzik 		/* skip EH if possible. */
32030260731fSTejun Heo 		if (ata_eh_skip_recovery(link))
3204c6fd2807SJeff Garzik 			ehc->i.action = 0;
3205c6fd2807SJeff Garzik 
32061eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
3207f58229f8STejun Heo 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
32089b1e2658STejun Heo 	}
3209c6fd2807SJeff Garzik 
3210c6fd2807SJeff Garzik 	/* reset */
32111eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
32129b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
32139b1e2658STejun Heo 
3214cf480626STejun Heo 		if (!(ehc->i.action & ATA_EH_RESET))
32159b1e2658STejun Heo 			continue;
32169b1e2658STejun Heo 
32179b1e2658STejun Heo 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3218dc98c32cSTejun Heo 				  prereset, softreset, hardreset, postreset);
3219c6fd2807SJeff Garzik 		if (rc) {
32200260731fSTejun Heo 			ata_link_printk(link, KERN_ERR,
3221c6fd2807SJeff Garzik 					"reset failed, giving up\n");
3222c6fd2807SJeff Garzik 			goto out;
3223c6fd2807SJeff Garzik 		}
32249b1e2658STejun Heo 	}
3225c6fd2807SJeff Garzik 
322645fabbb7SElias Oltmanns 	do {
322745fabbb7SElias Oltmanns 		unsigned long now;
322845fabbb7SElias Oltmanns 
322945fabbb7SElias Oltmanns 		/*
323045fabbb7SElias Oltmanns 		 * clears ATA_EH_PARK in eh_info and resets
323145fabbb7SElias Oltmanns 		 * ap->park_req_pending
323245fabbb7SElias Oltmanns 		 */
323345fabbb7SElias Oltmanns 		ata_eh_pull_park_action(ap);
323445fabbb7SElias Oltmanns 
323545fabbb7SElias Oltmanns 		deadline = jiffies;
32361eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
32371eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
323845fabbb7SElias Oltmanns 				struct ata_eh_context *ehc = &link->eh_context;
323945fabbb7SElias Oltmanns 				unsigned long tmp;
324045fabbb7SElias Oltmanns 
324145fabbb7SElias Oltmanns 				if (dev->class != ATA_DEV_ATA)
324245fabbb7SElias Oltmanns 					continue;
324345fabbb7SElias Oltmanns 				if (!(ehc->i.dev_action[dev->devno] &
324445fabbb7SElias Oltmanns 				      ATA_EH_PARK))
324545fabbb7SElias Oltmanns 					continue;
324645fabbb7SElias Oltmanns 				tmp = dev->unpark_deadline;
324745fabbb7SElias Oltmanns 				if (time_before(deadline, tmp))
324845fabbb7SElias Oltmanns 					deadline = tmp;
324945fabbb7SElias Oltmanns 				else if (time_before_eq(tmp, jiffies))
325045fabbb7SElias Oltmanns 					continue;
325145fabbb7SElias Oltmanns 				if (ehc->unloaded_mask & (1 << dev->devno))
325245fabbb7SElias Oltmanns 					continue;
325345fabbb7SElias Oltmanns 
325445fabbb7SElias Oltmanns 				ata_eh_park_issue_cmd(dev, 1);
325545fabbb7SElias Oltmanns 			}
325645fabbb7SElias Oltmanns 		}
325745fabbb7SElias Oltmanns 
325845fabbb7SElias Oltmanns 		now = jiffies;
325945fabbb7SElias Oltmanns 		if (time_before_eq(deadline, now))
326045fabbb7SElias Oltmanns 			break;
326145fabbb7SElias Oltmanns 
326245fabbb7SElias Oltmanns 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
326345fabbb7SElias Oltmanns 						       deadline - now);
326445fabbb7SElias Oltmanns 	} while (deadline);
32651eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
32661eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
326745fabbb7SElias Oltmanns 			if (!(link->eh_context.unloaded_mask &
326845fabbb7SElias Oltmanns 			      (1 << dev->devno)))
326945fabbb7SElias Oltmanns 				continue;
327045fabbb7SElias Oltmanns 
327145fabbb7SElias Oltmanns 			ata_eh_park_issue_cmd(dev, 0);
327245fabbb7SElias Oltmanns 			ata_eh_done(link, dev, ATA_EH_PARK);
327345fabbb7SElias Oltmanns 		}
327445fabbb7SElias Oltmanns 	}
327545fabbb7SElias Oltmanns 
32769b1e2658STejun Heo 	/* the rest */
32771eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
32789b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
32799b1e2658STejun Heo 
3280c6fd2807SJeff Garzik 		/* revalidate existing devices and attach new ones */
32810260731fSTejun Heo 		rc = ata_eh_revalidate_and_attach(link, &dev);
3282c6fd2807SJeff Garzik 		if (rc)
3283c6fd2807SJeff Garzik 			goto dev_fail;
3284c6fd2807SJeff Garzik 
3285633273a3STejun Heo 		/* if PMP got attached, return, pmp EH will take care of it */
3286633273a3STejun Heo 		if (link->device->class == ATA_DEV_PMP) {
3287633273a3STejun Heo 			ehc->i.action = 0;
3288633273a3STejun Heo 			return 0;
3289633273a3STejun Heo 		}
3290633273a3STejun Heo 
3291baa1e78aSTejun Heo 		/* configure transfer mode if necessary */
3292baa1e78aSTejun Heo 		if (ehc->i.flags & ATA_EHI_SETMODE) {
32930260731fSTejun Heo 			rc = ata_set_mode(link, &dev);
32944ae72a1eSTejun Heo 			if (rc)
3295c6fd2807SJeff Garzik 				goto dev_fail;
3296baa1e78aSTejun Heo 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3297c6fd2807SJeff Garzik 		}
3298c6fd2807SJeff Garzik 
329911fc33daSTejun Heo 		/* If reset has been issued, clear UA to avoid
330011fc33daSTejun Heo 		 * disrupting the current users of the device.
330111fc33daSTejun Heo 		 */
330211fc33daSTejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
33031eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
330411fc33daSTejun Heo 				if (dev->class != ATA_DEV_ATAPI)
330511fc33daSTejun Heo 					continue;
330611fc33daSTejun Heo 				rc = atapi_eh_clear_ua(dev);
330711fc33daSTejun Heo 				if (rc)
330811fc33daSTejun Heo 					goto dev_fail;
330911fc33daSTejun Heo 			}
331011fc33daSTejun Heo 		}
331111fc33daSTejun Heo 
331211fc33daSTejun Heo 		/* configure link power saving */
33133ec25ebdSTejun Heo 		if (ehc->i.action & ATA_EH_LPM)
33141eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
3315ca77329fSKristen Carlson Accardi 				ata_dev_enable_pm(dev, ap->pm_policy);
3316ca77329fSKristen Carlson Accardi 
33179b1e2658STejun Heo 		/* this link is okay now */
33189b1e2658STejun Heo 		ehc->i.flags = 0;
33199b1e2658STejun Heo 		continue;
3320c6fd2807SJeff Garzik 
3321c6fd2807SJeff Garzik dev_fail:
33229b1e2658STejun Heo 		nr_failed_devs++;
33230a2c0f56STejun Heo 		ata_eh_handle_dev_fail(dev, rc);
3324c6fd2807SJeff Garzik 
3325b06ce3e5STejun Heo 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3326b06ce3e5STejun Heo 			/* PMP reset requires working host port.
3327b06ce3e5STejun Heo 			 * Can't retry if it's frozen.
3328b06ce3e5STejun Heo 			 */
3329071f44b1STejun Heo 			if (sata_pmp_attached(ap))
3330b06ce3e5STejun Heo 				goto out;
33319b1e2658STejun Heo 			break;
33329b1e2658STejun Heo 		}
3333b06ce3e5STejun Heo 	}
33349b1e2658STejun Heo 
33350a2c0f56STejun Heo 	if (nr_failed_devs)
3336c6fd2807SJeff Garzik 		goto retry;
3337c6fd2807SJeff Garzik 
3338c6fd2807SJeff Garzik  out:
33399b1e2658STejun Heo 	if (rc && r_failed_link)
33409b1e2658STejun Heo 		*r_failed_link = link;
3341c6fd2807SJeff Garzik 
3342c6fd2807SJeff Garzik 	DPRINTK("EXIT, rc=%d\n", rc);
3343c6fd2807SJeff Garzik 	return rc;
3344c6fd2807SJeff Garzik }
3345c6fd2807SJeff Garzik 
3346c6fd2807SJeff Garzik /**
3347c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
3348c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
3349c6fd2807SJeff Garzik  *
3350c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
3351c6fd2807SJeff Garzik  *	failed qcs.
3352c6fd2807SJeff Garzik  *
3353c6fd2807SJeff Garzik  *	LOCKING:
3354c6fd2807SJeff Garzik  *	None.
3355c6fd2807SJeff Garzik  */
3356fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3357c6fd2807SJeff Garzik {
3358c6fd2807SJeff Garzik 	int tag;
3359c6fd2807SJeff Garzik 
3360c6fd2807SJeff Garzik 	/* retry or finish qcs */
3361c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3362c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3363c6fd2807SJeff Garzik 
3364c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3365c6fd2807SJeff Garzik 			continue;
3366c6fd2807SJeff Garzik 
3367c6fd2807SJeff Garzik 		if (qc->err_mask) {
3368c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
3369c6fd2807SJeff Garzik 			 * generate sense data in this function,
3370c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
3371c6fd2807SJeff Garzik 			 */
337203faab78STejun Heo 			if (qc->flags & ATA_QCFLAG_RETRY)
3373c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
337403faab78STejun Heo 			else
337503faab78STejun Heo 				ata_eh_qc_complete(qc);
3376c6fd2807SJeff Garzik 		} else {
3377c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3378c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
3379c6fd2807SJeff Garzik 			} else {
3380c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
3381c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3382c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
3383c6fd2807SJeff Garzik 			}
3384c6fd2807SJeff Garzik 		}
3385c6fd2807SJeff Garzik 	}
3386da917d69STejun Heo 
3387da917d69STejun Heo 	/* make sure nr_active_links is zero after EH */
3388da917d69STejun Heo 	WARN_ON(ap->nr_active_links);
3389da917d69STejun Heo 	ap->nr_active_links = 0;
3390c6fd2807SJeff Garzik }
3391c6fd2807SJeff Garzik 
3392c6fd2807SJeff Garzik /**
3393c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
3394c6fd2807SJeff Garzik  *	@ap: host port to handle error for
3395a1efdabaSTejun Heo  *
3396c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3397c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3398c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3399c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
3400c6fd2807SJeff Garzik  *
3401c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
3402c6fd2807SJeff Garzik  *
3403c6fd2807SJeff Garzik  *	LOCKING:
3404c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3405c6fd2807SJeff Garzik  */
3406c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3407c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3408c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
3409c6fd2807SJeff Garzik {
34109b1e2658STejun Heo 	struct ata_device *dev;
34119b1e2658STejun Heo 	int rc;
34129b1e2658STejun Heo 
34139b1e2658STejun Heo 	ata_eh_autopsy(ap);
34149b1e2658STejun Heo 	ata_eh_report(ap);
34159b1e2658STejun Heo 
34169b1e2658STejun Heo 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
34179b1e2658STejun Heo 			    NULL);
34189b1e2658STejun Heo 	if (rc) {
34191eca4365STejun Heo 		ata_for_each_dev(dev, &ap->link, ALL)
34209b1e2658STejun Heo 			ata_dev_disable(dev);
34219b1e2658STejun Heo 	}
34229b1e2658STejun Heo 
3423c6fd2807SJeff Garzik 	ata_eh_finish(ap);
3424c6fd2807SJeff Garzik }
3425c6fd2807SJeff Garzik 
3426a1efdabaSTejun Heo /**
3427a1efdabaSTejun Heo  *	ata_std_error_handler - standard error handler
3428a1efdabaSTejun Heo  *	@ap: host port to handle error for
3429a1efdabaSTejun Heo  *
3430a1efdabaSTejun Heo  *	Standard error handler
3431a1efdabaSTejun Heo  *
3432a1efdabaSTejun Heo  *	LOCKING:
3433a1efdabaSTejun Heo  *	Kernel thread context (may sleep).
3434a1efdabaSTejun Heo  */
3435a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
3436a1efdabaSTejun Heo {
3437a1efdabaSTejun Heo 	struct ata_port_operations *ops = ap->ops;
3438a1efdabaSTejun Heo 	ata_reset_fn_t hardreset = ops->hardreset;
3439a1efdabaSTejun Heo 
344057c9efdfSTejun Heo 	/* ignore built-in hardreset if SCR access is not available */
344157c9efdfSTejun Heo 	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
3442a1efdabaSTejun Heo 		hardreset = NULL;
3443a1efdabaSTejun Heo 
3444a1efdabaSTejun Heo 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3445a1efdabaSTejun Heo }
3446a1efdabaSTejun Heo 
34476ffa01d8STejun Heo #ifdef CONFIG_PM
3448c6fd2807SJeff Garzik /**
3449c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
3450c6fd2807SJeff Garzik  *	@ap: port to suspend
3451c6fd2807SJeff Garzik  *
3452c6fd2807SJeff Garzik  *	Suspend @ap.
3453c6fd2807SJeff Garzik  *
3454c6fd2807SJeff Garzik  *	LOCKING:
3455c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3456c6fd2807SJeff Garzik  */
3457c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
3458c6fd2807SJeff Garzik {
3459c6fd2807SJeff Garzik 	unsigned long flags;
3460c6fd2807SJeff Garzik 	int rc = 0;
3461c6fd2807SJeff Garzik 
3462c6fd2807SJeff Garzik 	/* are we suspending? */
3463c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3464c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3465c6fd2807SJeff Garzik 	    ap->pm_mesg.event == PM_EVENT_ON) {
3466c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3467c6fd2807SJeff Garzik 		return;
3468c6fd2807SJeff Garzik 	}
3469c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3470c6fd2807SJeff Garzik 
3471c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3472c6fd2807SJeff Garzik 
347364578a3dSTejun Heo 	/* tell ACPI we're suspending */
347464578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
347564578a3dSTejun Heo 	if (rc)
347664578a3dSTejun Heo 		goto out;
347764578a3dSTejun Heo 
3478c6fd2807SJeff Garzik 	/* suspend */
3479c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
3480c6fd2807SJeff Garzik 
3481c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
3482c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3483c6fd2807SJeff Garzik 
3484bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_SUSPEND);
348564578a3dSTejun Heo  out:
3486c6fd2807SJeff Garzik 	/* report result */
3487c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3488c6fd2807SJeff Garzik 
3489c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3490c6fd2807SJeff Garzik 	if (rc == 0)
3491c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
349264578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
3493c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
3494c6fd2807SJeff Garzik 
3495c6fd2807SJeff Garzik 	if (ap->pm_result) {
3496c6fd2807SJeff Garzik 		*ap->pm_result = rc;
3497c6fd2807SJeff Garzik 		ap->pm_result = NULL;
3498c6fd2807SJeff Garzik 	}
3499c6fd2807SJeff Garzik 
3500c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3501c6fd2807SJeff Garzik 
3502c6fd2807SJeff Garzik 	return;
3503c6fd2807SJeff Garzik }
3504c6fd2807SJeff Garzik 
3505c6fd2807SJeff Garzik /**
3506c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
3507c6fd2807SJeff Garzik  *	@ap: port to resume
3508c6fd2807SJeff Garzik  *
3509c6fd2807SJeff Garzik  *	Resume @ap.
3510c6fd2807SJeff Garzik  *
3511c6fd2807SJeff Garzik  *	LOCKING:
3512c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3513c6fd2807SJeff Garzik  */
3514c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
3515c6fd2807SJeff Garzik {
35166f9c1ea2STejun Heo 	struct ata_link *link;
35176f9c1ea2STejun Heo 	struct ata_device *dev;
3518c6fd2807SJeff Garzik 	unsigned long flags;
35199666f400STejun Heo 	int rc = 0;
3520c6fd2807SJeff Garzik 
3521c6fd2807SJeff Garzik 	/* are we resuming? */
3522c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3523c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3524c6fd2807SJeff Garzik 	    ap->pm_mesg.event != PM_EVENT_ON) {
3525c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3526c6fd2807SJeff Garzik 		return;
3527c6fd2807SJeff Garzik 	}
3528c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3529c6fd2807SJeff Garzik 
35309666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3531c6fd2807SJeff Garzik 
35326f9c1ea2STejun Heo 	/*
35336f9c1ea2STejun Heo 	 * Error timestamps are in jiffies which doesn't run while
35346f9c1ea2STejun Heo 	 * suspended and PHY events during resume isn't too uncommon.
35356f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
35366f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
35376f9c1ea2STejun Heo 	 * Clear error history.
35386f9c1ea2STejun Heo 	 */
35396f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
35406f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
35416f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
35426f9c1ea2STejun Heo 
3543bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_ON);
3544bd3adca5SShaohua Li 
3545c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
3546c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
3547c6fd2807SJeff Garzik 
35486746544cSTejun Heo 	/* tell ACPI that we're resuming */
35496746544cSTejun Heo 	ata_acpi_on_resume(ap);
35506746544cSTejun Heo 
35519666f400STejun Heo 	/* report result */
3552c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3553c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3554c6fd2807SJeff Garzik 	if (ap->pm_result) {
3555c6fd2807SJeff Garzik 		*ap->pm_result = rc;
3556c6fd2807SJeff Garzik 		ap->pm_result = NULL;
3557c6fd2807SJeff Garzik 	}
3558c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3559c6fd2807SJeff Garzik }
35606ffa01d8STejun Heo #endif /* CONFIG_PM */
3561