xref: /openbmc/linux/drivers/ata/libata-eh.c (revision a7ff60dbe0858496531c75b1544666c099a2b200)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9c6fd2807SJeff Garzik  *
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or
12c6fd2807SJeff Garzik  *  modify it under the terms of the GNU General Public License as
13c6fd2807SJeff Garzik  *  published by the Free Software Foundation; either version 2, or
14c6fd2807SJeff Garzik  *  (at your option) any later version.
15c6fd2807SJeff Garzik  *
16c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
17c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19c6fd2807SJeff Garzik  *  General Public License for more details.
20c6fd2807SJeff Garzik  *
21c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
22c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
23c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24c6fd2807SJeff Garzik  *  USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36242f9dcbSJens Axboe #include <linux/blkdev.h>
3738789fdaSPaul Gortmaker #include <linux/export.h>
382855568bSJeff Garzik #include <linux/pci.h>
39c6fd2807SJeff Garzik #include <scsi/scsi.h>
40c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
41c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
42c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
43c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
446521148cSRobert Hancock #include <scsi/scsi_dbg.h>
45c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
46c6fd2807SJeff Garzik 
47c6fd2807SJeff Garzik #include <linux/libata.h>
48c6fd2807SJeff Garzik 
49c6fd2807SJeff Garzik #include "libata.h"
50c6fd2807SJeff Garzik 
517d47e8d4STejun Heo enum {
523884f7b0STejun Heo 	/* speed down verdicts */
537d47e8d4STejun Heo 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
547d47e8d4STejun Heo 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
557d47e8d4STejun Heo 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
5676326ac1STejun Heo 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
573884f7b0STejun Heo 
583884f7b0STejun Heo 	/* error flags */
593884f7b0STejun Heo 	ATA_EFLAG_IS_IO			= (1 << 0),
6076326ac1STejun Heo 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
61d9027470SGwendal Grignou 	ATA_EFLAG_OLD_ER                = (1 << 31),
623884f7b0STejun Heo 
633884f7b0STejun Heo 	/* error categories */
643884f7b0STejun Heo 	ATA_ECAT_NONE			= 0,
653884f7b0STejun Heo 	ATA_ECAT_ATA_BUS		= 1,
663884f7b0STejun Heo 	ATA_ECAT_TOUT_HSM		= 2,
673884f7b0STejun Heo 	ATA_ECAT_UNK_DEV		= 3,
6875f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_NONE		= 4,
6975f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
7075f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
7175f9cafcSTejun Heo 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
7275f9cafcSTejun Heo 	ATA_ECAT_NR			= 8,
737d47e8d4STejun Heo 
7487fbc5a0STejun Heo 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
7587fbc5a0STejun Heo 
760a2c0f56STejun Heo 	/* always put at least this amount of time between resets */
770a2c0f56STejun Heo 	ATA_EH_RESET_COOL_DOWN		=  5000,
780a2c0f56STejun Heo 
79341c2c95STejun Heo 	/* Waiting in ->prereset can never be reliable.  It's
80341c2c95STejun Heo 	 * sometimes nice to wait there but it can't be depended upon;
81341c2c95STejun Heo 	 * otherwise, we wouldn't be resetting.  Just give it enough
82341c2c95STejun Heo 	 * time for most drives to spin up.
8331daabdaSTejun Heo 	 */
84341c2c95STejun Heo 	ATA_EH_PRERESET_TIMEOUT		= 10000,
85341c2c95STejun Heo 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
8611fc33daSTejun Heo 
8711fc33daSTejun Heo 	ATA_EH_UA_TRIES			= 5,
88c2c7a89cSTejun Heo 
89c2c7a89cSTejun Heo 	/* probe speed down parameters, see ata_eh_schedule_probe() */
90c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
91c2c7a89cSTejun Heo 	ATA_EH_PROBE_TRIALS		= 2,
9231daabdaSTejun Heo };
9331daabdaSTejun Heo 
9431daabdaSTejun Heo /* The following table determines how we sequence resets.  Each entry
9531daabdaSTejun Heo  * represents the timeout for that try.  The first try can be soft or
9631daabdaSTejun Heo  * hardreset.  All others are hardreset if available.  In most cases
9731daabdaSTejun Heo  * the first reset w/ 10sec timeout should succeed.  Following entries
9831daabdaSTejun Heo  * are mostly for error handling, hotplug and slow-to-respond devices.
9931daabdaSTejun Heo  */
10031daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = {
101341c2c95STejun Heo 	10000,	/* most drives spin up by 10sec */
102341c2c95STejun Heo 	10000,	/* > 99% of working drives spin up before 20sec */
103341c2c95STejun Heo 	35000,	/* give > 30 secs of idleness for slow-to-respond devices */
104341c2c95STejun Heo 	 5000,	/* and one last sweet chance */
105d8af0eb6STejun Heo 	ULONG_MAX, /* > 1 min has elapsed, give up */
10631daabdaSTejun Heo };
10731daabdaSTejun Heo 
10887fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = {
10987fbc5a0STejun Heo 	 5000,	/* covers > 99% of successes and isn't too boring on failures */
11087fbc5a0STejun Heo 	10000,  /* combined time till here is enough even for media access */
11187fbc5a0STejun Heo 	30000,	/* for exceptionally slow devices */
11287fbc5a0STejun Heo 	ULONG_MAX,
11387fbc5a0STejun Heo };
11487fbc5a0STejun Heo 
1156013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = {
1166013efd8STejun Heo 	15000,	/* be generous with flush */
1176013efd8STejun Heo 	15000,  /* ditto */
1186013efd8STejun Heo 	30000,	/* and even more generous */
1196013efd8STejun Heo 	ULONG_MAX,
1206013efd8STejun Heo };
1216013efd8STejun Heo 
12287fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = {
12387fbc5a0STejun Heo 	 5000,	/* same rationale as identify timeout */
12487fbc5a0STejun Heo 	10000,	/* ditto */
12587fbc5a0STejun Heo 	/* but no merciful 30sec for other commands, it just isn't worth it */
12687fbc5a0STejun Heo 	ULONG_MAX,
12787fbc5a0STejun Heo };
12887fbc5a0STejun Heo 
12987fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent {
13087fbc5a0STejun Heo 	const u8		*commands;
13187fbc5a0STejun Heo 	const unsigned long	*timeouts;
13287fbc5a0STejun Heo };
13387fbc5a0STejun Heo 
13487fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal
13587fbc5a0STejun Heo  * commands.  Each table entry is a command class; it lists the
13687fbc5a0STejun Heo  * commands the entry applies to and the timeout table to use for them.
13787fbc5a0STejun Heo  *
13887fbc5a0STejun Heo  * On a retry after a command has timed out, the next timeout value from
13987fbc5a0STejun Heo  * the table is used.  If the table doesn't contain further entries,
14087fbc5a0STejun Heo  * the last value is used.
14187fbc5a0STejun Heo  *
14287fbc5a0STejun Heo  * ehc->cmd_timeout_idx keeps track of which timeout to use per
14387fbc5a0STejun Heo  * command class, so if SET_FEATURES times out on the first try, the
14487fbc5a0STejun Heo  * next try will use the second timeout value only for that class.
14587fbc5a0STejun Heo  */
14687fbc5a0STejun Heo #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
14787fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent
14887fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
14987fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
15087fbc5a0STejun Heo 	  .timeouts = ata_eh_identify_timeouts, },
15187fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
15287fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15387fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
15487fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15587fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
15687fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
15787fbc5a0STejun Heo 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
15887fbc5a0STejun Heo 	  .timeouts = ata_eh_other_timeouts, },
1596013efd8STejun Heo 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
1606013efd8STejun Heo 	  .timeouts = ata_eh_flush_timeouts },
16187fbc5a0STejun Heo };
16287fbc5a0STejun Heo #undef CMDS
16387fbc5a0STejun Heo 
164c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
1656ffa01d8STejun Heo #ifdef CONFIG_PM
166c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
167c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
1686ffa01d8STejun Heo #else /* CONFIG_PM */
1696ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap)
1706ffa01d8STejun Heo { }
1716ffa01d8STejun Heo 
1726ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap)
1736ffa01d8STejun Heo { }
1746ffa01d8STejun Heo #endif /* CONFIG_PM */
175c6fd2807SJeff Garzik 
176b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
177b64bbc39STejun Heo 				 va_list args)
178b64bbc39STejun Heo {
179b64bbc39STejun Heo 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
180b64bbc39STejun Heo 				     ATA_EH_DESC_LEN - ehi->desc_len,
181b64bbc39STejun Heo 				     fmt, args);
182b64bbc39STejun Heo }
183b64bbc39STejun Heo 
184b64bbc39STejun Heo /**
185b64bbc39STejun Heo  *	__ata_ehi_push_desc - push error description without adding separator
186b64bbc39STejun Heo  *	@ehi: target EHI
187b64bbc39STejun Heo  *	@fmt: printf format string
188b64bbc39STejun Heo  *
189b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
190b64bbc39STejun Heo  *
191b64bbc39STejun Heo  *	LOCKING:
192b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
193b64bbc39STejun Heo  */
194b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
195b64bbc39STejun Heo {
196b64bbc39STejun Heo 	va_list args;
197b64bbc39STejun Heo 
198b64bbc39STejun Heo 	va_start(args, fmt);
199b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
200b64bbc39STejun Heo 	va_end(args);
201b64bbc39STejun Heo }
202b64bbc39STejun Heo 
203b64bbc39STejun Heo /**
204b64bbc39STejun Heo  *	ata_ehi_push_desc - push error description with separator
205b64bbc39STejun Heo  *	@ehi: target EHI
206b64bbc39STejun Heo  *	@fmt: printf format string
207b64bbc39STejun Heo  *
208b64bbc39STejun Heo  *	Format string according to @fmt and append it to @ehi->desc.
209b64bbc39STejun Heo  *	If @ehi->desc is not empty, ", " is added in-between.
210b64bbc39STejun Heo  *
211b64bbc39STejun Heo  *	LOCKING:
212b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
213b64bbc39STejun Heo  */
214b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
215b64bbc39STejun Heo {
216b64bbc39STejun Heo 	va_list args;
217b64bbc39STejun Heo 
218b64bbc39STejun Heo 	if (ehi->desc_len)
219b64bbc39STejun Heo 		__ata_ehi_push_desc(ehi, ", ");
220b64bbc39STejun Heo 
221b64bbc39STejun Heo 	va_start(args, fmt);
222b64bbc39STejun Heo 	__ata_ehi_pushv_desc(ehi, fmt, args);
223b64bbc39STejun Heo 	va_end(args);
224b64bbc39STejun Heo }
225b64bbc39STejun Heo 
226b64bbc39STejun Heo /**
227b64bbc39STejun Heo  *	ata_ehi_clear_desc - clean error description
228b64bbc39STejun Heo  *	@ehi: target EHI
229b64bbc39STejun Heo  *
230b64bbc39STejun Heo  *	Clear @ehi->desc.
231b64bbc39STejun Heo  *
232b64bbc39STejun Heo  *	LOCKING:
233b64bbc39STejun Heo  *	spin_lock_irqsave(host lock)
234b64bbc39STejun Heo  */
235b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi)
236b64bbc39STejun Heo {
237b64bbc39STejun Heo 	ehi->desc[0] = '\0';
238b64bbc39STejun Heo 	ehi->desc_len = 0;
239b64bbc39STejun Heo }
240b64bbc39STejun Heo 
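/*
 * Example (illustrative sketch, not taken from any particular driver): an
 * interrupt handler typically builds the EH description incrementally with
 * the helpers above, under the host lock.  The register values below are
 * hypothetical.
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *	u32 irq_stat = 0x02000000;	// hypothetical controller status
 *	u32 serror   = 0x00040000;	// hypothetical SError snapshot
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "SErr 0x%08x", serror);
 *	// ehi->desc now reads "irq_stat 0x02000000, SErr 0x00040000"
 */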
241cbcdd875STejun Heo /**
242cbcdd875STejun Heo  *	ata_port_desc - append port description
243cbcdd875STejun Heo  *	@ap: target ATA port
244cbcdd875STejun Heo  *	@fmt: printf format string
245cbcdd875STejun Heo  *
246cbcdd875STejun Heo  *	Format string according to @fmt and append it to port
247cbcdd875STejun Heo  *	description.  If port description is not empty, " " is added
248cbcdd875STejun Heo  *	in-between.  This function is to be used while initializing
249cbcdd875STejun Heo  *	ata_host.  The description is printed on host registration.
250cbcdd875STejun Heo  *
251cbcdd875STejun Heo  *	LOCKING:
252cbcdd875STejun Heo  *	None.
253cbcdd875STejun Heo  */
254cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
255cbcdd875STejun Heo {
256cbcdd875STejun Heo 	va_list args;
257cbcdd875STejun Heo 
258cbcdd875STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
259cbcdd875STejun Heo 
260cbcdd875STejun Heo 	if (ap->link.eh_info.desc_len)
261cbcdd875STejun Heo 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
262cbcdd875STejun Heo 
263cbcdd875STejun Heo 	va_start(args, fmt);
264cbcdd875STejun Heo 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
265cbcdd875STejun Heo 	va_end(args);
266cbcdd875STejun Heo }
267cbcdd875STejun Heo 
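/*
 * Example (illustrative sketch with hypothetical values): ata_port_desc()
 * is called from LLD init paths, before host registration, to record where
 * the port lives.  Successive calls are joined with " ".
 *
 *	ata_port_desc(ap, "irq %d", 14);
 *	ata_port_desc(ap, "cmd 0x%x ctl 0x%x", 0x1f0, 0x3f6);
 *	// port description: "irq 14 cmd 0x1f0 ctl 0x3f6"
 */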
268cbcdd875STejun Heo #ifdef CONFIG_PCI
269cbcdd875STejun Heo 
270cbcdd875STejun Heo /**
271cbcdd875STejun Heo  *	ata_port_pbar_desc - append PCI BAR description
272cbcdd875STejun Heo  *	@ap: target ATA port
273cbcdd875STejun Heo  *	@bar: target PCI BAR
274cbcdd875STejun Heo  *	@offset: offset into PCI BAR
275cbcdd875STejun Heo  *	@name: name of the area
276cbcdd875STejun Heo  *
277cbcdd875STejun Heo  *	If @offset is negative, this function formats a string which
278cbcdd875STejun Heo  *	contains the name, address, size and type of the BAR and
279cbcdd875STejun Heo  *	appends it to the port description.  If @offset is zero or
280cbcdd875STejun Heo  *	positive, only the name and the offset address are appended.
281cbcdd875STejun Heo  *
282cbcdd875STejun Heo  *	LOCKING:
283cbcdd875STejun Heo  *	None.
284cbcdd875STejun Heo  */
285cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
286cbcdd875STejun Heo 			const char *name)
287cbcdd875STejun Heo {
288cbcdd875STejun Heo 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
289cbcdd875STejun Heo 	char *type = "";
290cbcdd875STejun Heo 	unsigned long long start, len;
291cbcdd875STejun Heo 
292cbcdd875STejun Heo 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
293cbcdd875STejun Heo 		type = "m";
294cbcdd875STejun Heo 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
295cbcdd875STejun Heo 		type = "i";
296cbcdd875STejun Heo 
297cbcdd875STejun Heo 	start = (unsigned long long)pci_resource_start(pdev, bar);
298cbcdd875STejun Heo 	len = (unsigned long long)pci_resource_len(pdev, bar);
299cbcdd875STejun Heo 
300cbcdd875STejun Heo 	if (offset < 0)
301cbcdd875STejun Heo 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
302cbcdd875STejun Heo 	else
303e6a73ab1SAndrew Morton 		ata_port_desc(ap, "%s 0x%llx", name,
304e6a73ab1SAndrew Morton 				start + (unsigned long long)offset);
305cbcdd875STejun Heo }
306cbcdd875STejun Heo 
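/*
 * Example (illustrative sketch with hypothetical BAR values): for a memory
 * BAR 5 at 0xfebf1000 of size 4096, the two forms produce
 *
 *	ata_port_pbar_desc(ap, 5, -1, "abar");	   // "abar m4096@0xfebf1000"
 *	ata_port_pbar_desc(ap, 5, 0x100, "port");  // "port 0xfebf1100"
 */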
307cbcdd875STejun Heo #endif /* CONFIG_PCI */
308cbcdd875STejun Heo 
30987fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd)
31087fbc5a0STejun Heo {
31187fbc5a0STejun Heo 	int i;
31287fbc5a0STejun Heo 
31387fbc5a0STejun Heo 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
31487fbc5a0STejun Heo 		const u8 *cur;
31587fbc5a0STejun Heo 
31687fbc5a0STejun Heo 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
31787fbc5a0STejun Heo 			if (*cur == cmd)
31887fbc5a0STejun Heo 				return i;
31987fbc5a0STejun Heo 	}
32087fbc5a0STejun Heo 
32187fbc5a0STejun Heo 	return -1;
32287fbc5a0STejun Heo }
32387fbc5a0STejun Heo 
32487fbc5a0STejun Heo /**
32587fbc5a0STejun Heo  *	ata_internal_cmd_timeout - determine timeout for an internal command
32687fbc5a0STejun Heo  *	@dev: target device
32787fbc5a0STejun Heo  *	@cmd: internal command to be issued
32887fbc5a0STejun Heo  *
32987fbc5a0STejun Heo  *	Determine timeout for internal command @cmd for @dev.
33087fbc5a0STejun Heo  *
33187fbc5a0STejun Heo  *	LOCKING:
33287fbc5a0STejun Heo  *	EH context.
33387fbc5a0STejun Heo  *
33487fbc5a0STejun Heo  *	RETURNS:
33587fbc5a0STejun Heo  *	Determined timeout.
33687fbc5a0STejun Heo  */
33787fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
33887fbc5a0STejun Heo {
33987fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
34087fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
34187fbc5a0STejun Heo 	int idx;
34287fbc5a0STejun Heo 
34387fbc5a0STejun Heo 	if (ent < 0)
34487fbc5a0STejun Heo 		return ATA_EH_CMD_DFL_TIMEOUT;
34587fbc5a0STejun Heo 
34687fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
34787fbc5a0STejun Heo 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
34887fbc5a0STejun Heo }
34987fbc5a0STejun Heo 
35087fbc5a0STejun Heo /**
35187fbc5a0STejun Heo  *	ata_internal_cmd_timed_out - notification for internal command timeout
35287fbc5a0STejun Heo  *	@dev: target device
35387fbc5a0STejun Heo  *	@cmd: internal command which timed out
35487fbc5a0STejun Heo  *
35587fbc5a0STejun Heo  *	Notify EH that internal command @cmd for @dev timed out.  This
35687fbc5a0STejun Heo  *	function should be called only for commands whose timeouts are
35787fbc5a0STejun Heo  *	determined using ata_internal_cmd_timeout().
35887fbc5a0STejun Heo  *
35987fbc5a0STejun Heo  *	LOCKING:
36087fbc5a0STejun Heo  *	EH context.
36187fbc5a0STejun Heo  */
36287fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
36387fbc5a0STejun Heo {
36487fbc5a0STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
36587fbc5a0STejun Heo 	int ent = ata_lookup_timeout_table(cmd);
36687fbc5a0STejun Heo 	int idx;
36787fbc5a0STejun Heo 
36887fbc5a0STejun Heo 	if (ent < 0)
36987fbc5a0STejun Heo 		return;
37087fbc5a0STejun Heo 
37187fbc5a0STejun Heo 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
37287fbc5a0STejun Heo 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
37387fbc5a0STejun Heo 		ehc->cmd_timeout_idx[dev->devno][ent]++;
37487fbc5a0STejun Heo }
37587fbc5a0STejun Heo 
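/*
 * Example (illustrative walk-through of the tables above): SET_FEATURES
 * belongs to the ata_eh_other_timeouts class, so its timeout escalates
 * independently of the IDENTIFY class.
 *
 *	unsigned long t;
 *
 *	t = ata_internal_cmd_timeout(dev, ATA_CMD_SET_FEATURES);  // 5000 ms
 *	ata_internal_cmd_timed_out(dev, ATA_CMD_SET_FEATURES);	   // bump index
 *	t = ata_internal_cmd_timeout(dev, ATA_CMD_SET_FEATURES);  // 10000 ms
 *	t = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);	   // still 5000 ms
 */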
3763884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
377c6fd2807SJeff Garzik 			     unsigned int err_mask)
378c6fd2807SJeff Garzik {
379c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
380c6fd2807SJeff Garzik 
381c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
382c6fd2807SJeff Garzik 
383c6fd2807SJeff Garzik 	ering->cursor++;
384c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
385c6fd2807SJeff Garzik 
386c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
3873884f7b0STejun Heo 	ent->eflags = eflags;
388c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
389c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
390c6fd2807SJeff Garzik }
391c6fd2807SJeff Garzik 
39276326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
39376326ac1STejun Heo {
39476326ac1STejun Heo 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
39576326ac1STejun Heo 
39676326ac1STejun Heo 	if (ent->err_mask)
39776326ac1STejun Heo 		return ent;
39876326ac1STejun Heo 	return NULL;
39976326ac1STejun Heo }
40076326ac1STejun Heo 
401d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering,
402c6fd2807SJeff Garzik 		  int (*map_fn)(struct ata_ering_entry *, void *),
403c6fd2807SJeff Garzik 		  void *arg)
404c6fd2807SJeff Garzik {
405c6fd2807SJeff Garzik 	int idx, rc = 0;
406c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
407c6fd2807SJeff Garzik 
408c6fd2807SJeff Garzik 	idx = ering->cursor;
409c6fd2807SJeff Garzik 	do {
410c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
411c6fd2807SJeff Garzik 		if (!ent->err_mask)
412c6fd2807SJeff Garzik 			break;
413c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
414c6fd2807SJeff Garzik 		if (rc)
415c6fd2807SJeff Garzik 			break;
416c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
417c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
418c6fd2807SJeff Garzik 
419c6fd2807SJeff Garzik 	return rc;
420c6fd2807SJeff Garzik }
421c6fd2807SJeff Garzik 
42260428407SH Hartley Sweeten static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
423d9027470SGwendal Grignou {
424d9027470SGwendal Grignou 	ent->eflags |= ATA_EFLAG_OLD_ER;
425d9027470SGwendal Grignou 	return 0;
426d9027470SGwendal Grignou }
427d9027470SGwendal Grignou 
428d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering)
429d9027470SGwendal Grignou {
430d9027470SGwendal Grignou 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
431d9027470SGwendal Grignou }
432d9027470SGwendal Grignou 
433c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
434c6fd2807SJeff Garzik {
4359af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
436c6fd2807SJeff Garzik 
437c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
438c6fd2807SJeff Garzik }
439c6fd2807SJeff Garzik 
440f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
441c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
442c6fd2807SJeff Garzik {
443f58229f8STejun Heo 	struct ata_device *tdev;
444c6fd2807SJeff Garzik 
445c6fd2807SJeff Garzik 	if (!dev) {
446c6fd2807SJeff Garzik 		ehi->action &= ~action;
4471eca4365STejun Heo 		ata_for_each_dev(tdev, link, ALL)
448f58229f8STejun Heo 			ehi->dev_action[tdev->devno] &= ~action;
449c6fd2807SJeff Garzik 	} else {
450c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
451c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
452c6fd2807SJeff Garzik 
453c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
454c6fd2807SJeff Garzik 		if (ehi->action & action) {
4551eca4365STejun Heo 			ata_for_each_dev(tdev, link, ALL)
456f58229f8STejun Heo 				ehi->dev_action[tdev->devno] |=
457f58229f8STejun Heo 					ehi->action & action;
458c6fd2807SJeff Garzik 			ehi->action &= ~action;
459c6fd2807SJeff Garzik 		}
460c6fd2807SJeff Garzik 
461c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
462c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
463c6fd2807SJeff Garzik 	}
464c6fd2807SJeff Garzik }
465c6fd2807SJeff Garzik 
466c6fd2807SJeff Garzik /**
467c0c362b6STejun Heo  *	ata_eh_acquire - acquire EH ownership
468c0c362b6STejun Heo  *	@ap: ATA port to acquire EH ownership for
469c0c362b6STejun Heo  *
470c0c362b6STejun Heo  *	Acquire EH ownership for @ap.  This is the basic exclusion
471c0c362b6STejun Heo  *	mechanism for ports sharing a host.  Only one port hanging off
472c0c362b6STejun Heo  *	the same host can claim the ownership of EH.
473c0c362b6STejun Heo  *
474c0c362b6STejun Heo  *	LOCKING:
475c0c362b6STejun Heo  *	EH context.
476c0c362b6STejun Heo  */
477c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap)
478c0c362b6STejun Heo {
479c0c362b6STejun Heo 	mutex_lock(&ap->host->eh_mutex);
480c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner);
481c0c362b6STejun Heo 	ap->host->eh_owner = current;
482c0c362b6STejun Heo }
483c0c362b6STejun Heo 
484c0c362b6STejun Heo /**
485c0c362b6STejun Heo  *	ata_eh_release - release EH ownership
486c0c362b6STejun Heo  *	@ap: ATA port to release EH ownership for
487c0c362b6STejun Heo  *
488c0c362b6STejun Heo  *	Release EH ownership for @ap.  The caller must
489c0c362b6STejun Heo  *	have acquired EH ownership using ata_eh_acquire() previously.
490c0c362b6STejun Heo  *
491c0c362b6STejun Heo  *	LOCKING:
492c0c362b6STejun Heo  *	EH context.
493c0c362b6STejun Heo  */
494c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap)
495c0c362b6STejun Heo {
496c0c362b6STejun Heo 	WARN_ON_ONCE(ap->host->eh_owner != current);
497c0c362b6STejun Heo 	ap->host->eh_owner = NULL;
498c0c362b6STejun Heo 	mutex_unlock(&ap->host->eh_mutex);
499c0c362b6STejun Heo }
500c0c362b6STejun Heo 
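/*
 * Example (illustrative sketch of the ownership protocol, not a quote from
 * the core code): a long wait inside EH can temporarily drop ownership so
 * that a sibling port on the same host may run its own recovery meanwhile.
 *
 *	ata_eh_release(ap);	// give sibling ports a chance during the wait
 *	msleep(20);		// hypothetical delay
 *	ata_eh_acquire(ap);	// re-take ownership before touching hardware
 */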
501c0c362b6STejun Heo /**
502c6fd2807SJeff Garzik  *	ata_scsi_timed_out - SCSI layer time out callback
503c6fd2807SJeff Garzik  *	@cmd: timed out SCSI command
504c6fd2807SJeff Garzik  *
505c6fd2807SJeff Garzik  *	Handles SCSI layer timeout.  We race with normal completion of
506c6fd2807SJeff Garzik  *	the qc for @cmd.  If the qc is already gone, we lose and let
507c6fd2807SJeff Garzik  *	the scsi command finish (BLK_EH_HANDLED).  Otherwise, the qc has
508c6fd2807SJeff Garzik  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
509c6fd2807SJeff Garzik  *	from finishing it by setting EH_SCHEDULED and return
510c6fd2807SJeff Garzik  *	BLK_EH_NOT_HANDLED.
511c6fd2807SJeff Garzik  *
512c6fd2807SJeff Garzik  *	TODO: kill this function once old EH is gone.
513c6fd2807SJeff Garzik  *
514c6fd2807SJeff Garzik  *	LOCKING:
515c6fd2807SJeff Garzik  *	Called from timer context
516c6fd2807SJeff Garzik  *
517c6fd2807SJeff Garzik  *	RETURNS:
518c6fd2807SJeff Garzik  *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
519c6fd2807SJeff Garzik  */
520242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
521c6fd2807SJeff Garzik {
522c6fd2807SJeff Garzik 	struct Scsi_Host *host = cmd->device->host;
523c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
524c6fd2807SJeff Garzik 	unsigned long flags;
525c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
526242f9dcbSJens Axboe 	enum blk_eh_timer_return ret;
527c6fd2807SJeff Garzik 
528c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
529c6fd2807SJeff Garzik 
530c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
531242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
532c6fd2807SJeff Garzik 		goto out;
533c6fd2807SJeff Garzik 	}
534c6fd2807SJeff Garzik 
535242f9dcbSJens Axboe 	ret = BLK_EH_HANDLED;
536c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
5379af5c9c9STejun Heo 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
538c6fd2807SJeff Garzik 	if (qc) {
539c6fd2807SJeff Garzik 		WARN_ON(qc->scsicmd != cmd);
540c6fd2807SJeff Garzik 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
541c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_TIMEOUT;
542242f9dcbSJens Axboe 		ret = BLK_EH_NOT_HANDLED;
543c6fd2807SJeff Garzik 	}
544c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
545c6fd2807SJeff Garzik 
546c6fd2807SJeff Garzik  out:
547c6fd2807SJeff Garzik 	DPRINTK("EXIT, ret=%d\n", ret);
548c6fd2807SJeff Garzik 	return ret;
549c6fd2807SJeff Garzik }
550c6fd2807SJeff Garzik 
551ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap)
552ece180d1STejun Heo {
553ece180d1STejun Heo 	struct ata_link *link;
554ece180d1STejun Heo 	struct ata_device *dev;
555ece180d1STejun Heo 	unsigned long flags;
556ece180d1STejun Heo 
557ece180d1STejun Heo 	/* Restore SControl IPM and SPD for the next driver and
558ece180d1STejun Heo 	 * disable attached devices.
559ece180d1STejun Heo 	 */
560ece180d1STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
561ece180d1STejun Heo 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
562ece180d1STejun Heo 		ata_for_each_dev(dev, link, ALL)
563ece180d1STejun Heo 			ata_dev_disable(dev);
564ece180d1STejun Heo 	}
565ece180d1STejun Heo 
566ece180d1STejun Heo 	/* freeze and set UNLOADED */
567ece180d1STejun Heo 	spin_lock_irqsave(ap->lock, flags);
568ece180d1STejun Heo 
569ece180d1STejun Heo 	ata_port_freeze(ap);			/* won't be thawed */
570ece180d1STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
571ece180d1STejun Heo 	ap->pflags |= ATA_PFLAG_UNLOADED;
572ece180d1STejun Heo 
573ece180d1STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
574ece180d1STejun Heo }
575ece180d1STejun Heo 
576c6fd2807SJeff Garzik /**
577c6fd2807SJeff Garzik  *	ata_scsi_error - SCSI layer error handler callback
578c6fd2807SJeff Garzik  *	@host: SCSI host on which error occurred
579c6fd2807SJeff Garzik  *
580c6fd2807SJeff Garzik  *	Handles SCSI-layer-thrown error events.
581c6fd2807SJeff Garzik  *
582c6fd2807SJeff Garzik  *	LOCKING:
583c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
584c6fd2807SJeff Garzik  *
585c6fd2807SJeff Garzik  *	RETURNS:
586c6fd2807SJeff Garzik  *	Zero.
587c6fd2807SJeff Garzik  */
588c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
589c6fd2807SJeff Garzik {
590c6fd2807SJeff Garzik 	struct ata_port *ap = ata_shost_to_port(host);
591c6fd2807SJeff Garzik 	unsigned long flags;
592c34aeebcSJames Bottomley 	LIST_HEAD(eh_work_q);
593c6fd2807SJeff Garzik 
594c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
595c6fd2807SJeff Garzik 
596c34aeebcSJames Bottomley 	spin_lock_irqsave(host->host_lock, flags);
597c34aeebcSJames Bottomley 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
598c34aeebcSJames Bottomley 	spin_unlock_irqrestore(host->host_lock, flags);
599c34aeebcSJames Bottomley 
6000e0b494cSJames Bottomley 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
6010e0b494cSJames Bottomley 
6020e0b494cSJames Bottomley 	/* If the timeout raced normal completion and there is nothing to
6030e0b494cSJames Bottomley 	   recover (nr_timedout == 0), why exactly are we doing error recovery? */
6040e0b494cSJames Bottomley 	ata_scsi_port_error_handler(host, ap);
6050e0b494cSJames Bottomley 
6060e0b494cSJames Bottomley 	/* finish or retry handled scmd's and clean up */
6070e0b494cSJames Bottomley 	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
6080e0b494cSJames Bottomley 
6090e0b494cSJames Bottomley 	DPRINTK("EXIT\n");
6100e0b494cSJames Bottomley }
6110e0b494cSJames Bottomley 
6120e0b494cSJames Bottomley /**
6130e0b494cSJames Bottomley  * ata_scsi_cmd_error_handler - error callback for a list of commands
6140e0b494cSJames Bottomley  * @host:	scsi host containing the port
6150e0b494cSJames Bottomley  * @ap:		ATA port within the host
6160e0b494cSJames Bottomley  * @eh_work_q:	list of commands to process
6170e0b494cSJames Bottomley  *
6180e0b494cSJames Bottomley  * process the given list of commands and return those finished to the
6190e0b494cSJames Bottomley  * ap->eh_done_q.  This function is the first part of the libata error
6200e0b494cSJames Bottomley  * handler which processes a given list of failed commands.
6210e0b494cSJames Bottomley  */
6220e0b494cSJames Bottomley void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
6230e0b494cSJames Bottomley 				struct list_head *eh_work_q)
6240e0b494cSJames Bottomley {
6250e0b494cSJames Bottomley 	int i;
6260e0b494cSJames Bottomley 	unsigned long flags;
6270e0b494cSJames Bottomley 
628c429137aSTejun Heo 	/* make sure sff pio task is not running */
629c429137aSTejun Heo 	ata_sff_flush_pio_task(ap);
630c6fd2807SJeff Garzik 
631cca3974eSJeff Garzik 	/* synchronize with host lock and sort out timeouts */
632c6fd2807SJeff Garzik 
633c6fd2807SJeff Garzik 	/* For new EH, all qcs are finished in one of three ways -
634c6fd2807SJeff Garzik 	 * normal completion, error completion, and SCSI timeout.
635c96f1732SAlan Cox 	 * Both completions can race against SCSI timeout.  When normal
636c6fd2807SJeff Garzik 	 * completion wins, the qc never reaches EH.  When error
637c6fd2807SJeff Garzik 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
638c6fd2807SJeff Garzik 	 *
639c6fd2807SJeff Garzik 	 * When SCSI timeout wins, things are a bit more complex.
640c6fd2807SJeff Garzik 	 * Normal or error completion can occur after the timeout but
641c6fd2807SJeff Garzik 	 * before this point.  In such cases, both types of
642c6fd2807SJeff Garzik 	 * completions are honored.  A scmd is determined to have
643c6fd2807SJeff Garzik 	 * timed out iff its associated qc is active and not failed.
644c6fd2807SJeff Garzik 	 */
645c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
646c6fd2807SJeff Garzik 		struct scsi_cmnd *scmd, *tmp;
647c6fd2807SJeff Garzik 		int nr_timedout = 0;
648c6fd2807SJeff Garzik 
649c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
650c6fd2807SJeff Garzik 
651c96f1732SAlan Cox 		/* This must occur under the ap->lock as we don't want
652c96f1732SAlan Cox 		   a polled recovery to race the real interrupt handler.
653c96f1732SAlan Cox 
654c96f1732SAlan Cox 		   The lost_interrupt handler checks for any completed but
655c96f1732SAlan Cox 		   non-notified command and completes much like an IRQ handler.
656c96f1732SAlan Cox 
657c96f1732SAlan Cox 		   We then fall into the error recovery code which will treat
658c96f1732SAlan Cox 		   this as if normal completion won the race. */
659c96f1732SAlan Cox 
660c96f1732SAlan Cox 		if (ap->ops->lost_interrupt)
661c96f1732SAlan Cox 			ap->ops->lost_interrupt(ap);
662c96f1732SAlan Cox 
6630e0b494cSJames Bottomley 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
664c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
665c6fd2807SJeff Garzik 
666c6fd2807SJeff Garzik 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
667c6fd2807SJeff Garzik 				qc = __ata_qc_from_tag(ap, i);
668c6fd2807SJeff Garzik 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
669c6fd2807SJeff Garzik 				    qc->scsicmd == scmd)
670c6fd2807SJeff Garzik 					break;
671c6fd2807SJeff Garzik 			}
672c6fd2807SJeff Garzik 
673c6fd2807SJeff Garzik 			if (i < ATA_MAX_QUEUE) {
674c6fd2807SJeff Garzik 				/* the scmd has an associated qc */
675c6fd2807SJeff Garzik 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
676c6fd2807SJeff Garzik 					/* which hasn't failed yet, timeout */
677c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_TIMEOUT;
678c6fd2807SJeff Garzik 					qc->flags |= ATA_QCFLAG_FAILED;
679c6fd2807SJeff Garzik 					nr_timedout++;
680c6fd2807SJeff Garzik 				}
681c6fd2807SJeff Garzik 			} else {
682c6fd2807SJeff Garzik 				/* Normal completion occurred after
683c6fd2807SJeff Garzik 				 * SCSI timeout but before this point.
684c6fd2807SJeff Garzik 				 * Successfully complete it.
685c6fd2807SJeff Garzik 				 */
686c6fd2807SJeff Garzik 				scmd->retries = scmd->allowed;
687c6fd2807SJeff Garzik 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
688c6fd2807SJeff Garzik 			}
689c6fd2807SJeff Garzik 		}
690c6fd2807SJeff Garzik 
691c6fd2807SJeff Garzik 		/* If we have timed out qcs, they belong to EH from
692c6fd2807SJeff Garzik 		 * this point on, but the state of the controller is
693c6fd2807SJeff Garzik 		 * unknown.  Freeze the port to make sure the IRQ
694c6fd2807SJeff Garzik 		 * handler doesn't diddle with those qcs.  This must
695c6fd2807SJeff Garzik 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
696c6fd2807SJeff Garzik 		 */
697c6fd2807SJeff Garzik 		if (nr_timedout)
698c6fd2807SJeff Garzik 			__ata_port_freeze(ap);
699c6fd2807SJeff Garzik 
700c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
701a1e10f7eSTejun Heo 
702a1e10f7eSTejun Heo 		/* initialize eh_tries */
703a1e10f7eSTejun Heo 		ap->eh_tries = ATA_EH_MAX_TRIES;
704c6fd2807SJeff Garzik 	} else
705c6fd2807SJeff Garzik 		spin_unlock_wait(ap->lock);
706c6fd2807SJeff Garzik 
7070e0b494cSJames Bottomley }
7080e0b494cSJames Bottomley EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
7090e0b494cSJames Bottomley 
7100e0b494cSJames Bottomley /**
7110e0b494cSJames Bottomley  * ata_scsi_port_error_handler - recover the port after the commands
7120e0b494cSJames Bottomley  * @host:	SCSI host containing the port
7130e0b494cSJames Bottomley  * @ap:		the ATA port
7140e0b494cSJames Bottomley  *
7150e0b494cSJames Bottomley  * Handle the recovery of the port @ap after all the commands
7160e0b494cSJames Bottomley  * have been recovered.
7170e0b494cSJames Bottomley  */
7180e0b494cSJames Bottomley void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
7190e0b494cSJames Bottomley {
7200e0b494cSJames Bottomley 	unsigned long flags;
721c96f1732SAlan Cox 
722c6fd2807SJeff Garzik 	/* invoke error handler */
723c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
724cf1b86c8STejun Heo 		struct ata_link *link;
725cf1b86c8STejun Heo 
726c0c362b6STejun Heo 		/* acquire EH ownership */
727c0c362b6STejun Heo 		ata_eh_acquire(ap);
728c0c362b6STejun Heo  repeat:
7295ddf24c5STejun Heo 		/* kill fast drain timer */
7305ddf24c5STejun Heo 		del_timer_sync(&ap->fastdrain_timer);
7315ddf24c5STejun Heo 
732c6fd2807SJeff Garzik 		/* process port resume request */
733c6fd2807SJeff Garzik 		ata_eh_handle_port_resume(ap);
734c6fd2807SJeff Garzik 
735c6fd2807SJeff Garzik 		/* fetch & clear EH info */
736c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
737c6fd2807SJeff Garzik 
7381eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST) {
73900115e0fSTejun Heo 			struct ata_eh_context *ehc = &link->eh_context;
74000115e0fSTejun Heo 			struct ata_device *dev;
74100115e0fSTejun Heo 
742cf1b86c8STejun Heo 			memset(&link->eh_context, 0, sizeof(link->eh_context));
743cf1b86c8STejun Heo 			link->eh_context.i = link->eh_info;
744cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
74500115e0fSTejun Heo 
7461eca4365STejun Heo 			ata_for_each_dev(dev, link, ENABLED) {
74700115e0fSTejun Heo 				int devno = dev->devno;
74800115e0fSTejun Heo 
74900115e0fSTejun Heo 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
75000115e0fSTejun Heo 				if (ata_ncq_enabled(dev))
75100115e0fSTejun Heo 					ehc->saved_ncq_enabled |= 1 << devno;
75200115e0fSTejun Heo 			}
753cf1b86c8STejun Heo 		}
754c6fd2807SJeff Garzik 
755c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
756c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
757da917d69STejun Heo 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
758c6fd2807SJeff Garzik 
759c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
760c6fd2807SJeff Garzik 
761c6fd2807SJeff Garzik 		/* invoke EH, skip if unloading or suspended */
762c6fd2807SJeff Garzik 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
763c6fd2807SJeff Garzik 			ap->ops->error_handler(ap);
764ece180d1STejun Heo 		else {
765ece180d1STejun Heo 			/* if unloading, commence suicide */
766ece180d1STejun Heo 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
767ece180d1STejun Heo 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
768ece180d1STejun Heo 				ata_eh_unload(ap);
769c6fd2807SJeff Garzik 			ata_eh_finish(ap);
770ece180d1STejun Heo 		}
771c6fd2807SJeff Garzik 
772c6fd2807SJeff Garzik 		/* process port suspend request */
773c6fd2807SJeff Garzik 		ata_eh_handle_port_suspend(ap);
774c6fd2807SJeff Garzik 
77525985edcSLucas De Marchi 		/* An exception might have happened after ->error_handler
776c6fd2807SJeff Garzik 		 * recovered the port but before this point.  Repeat
777c6fd2807SJeff Garzik 		 * EH in such a case.
778c6fd2807SJeff Garzik 		 */
779c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
780c6fd2807SJeff Garzik 
781c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
782a1e10f7eSTejun Heo 			if (--ap->eh_tries) {
783c6fd2807SJeff Garzik 				spin_unlock_irqrestore(ap->lock, flags);
784c6fd2807SJeff Garzik 				goto repeat;
785c6fd2807SJeff Garzik 			}
786a9a79dfeSJoe Perches 			ata_port_err(ap,
787a9a79dfeSJoe Perches 				     "EH pending after %d tries, giving up\n",
788a9a79dfeSJoe Perches 				     ATA_EH_MAX_TRIES);
789914616a3STejun Heo 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
790c6fd2807SJeff Garzik 		}
791c6fd2807SJeff Garzik 
792c6fd2807SJeff Garzik 		/* this run is complete, make sure EH info is clear */
7931eca4365STejun Heo 		ata_for_each_link(link, ap, HOST_FIRST)
794cf1b86c8STejun Heo 			memset(&link->eh_info, 0, sizeof(link->eh_info));
795c6fd2807SJeff Garzik 
796e4a9c373SDan Williams 		/* end eh (clear host_eh_scheduled) while holding
797e4a9c373SDan Williams 		 * ap->lock such that if exception occurs after this
798e4a9c373SDan Williams 		 * point but before EH completion, SCSI midlayer will
799c6fd2807SJeff Garzik 		 * re-initiate EH.
800c6fd2807SJeff Garzik 		 */
801e4a9c373SDan Williams 		ap->ops->end_eh(ap);
802c6fd2807SJeff Garzik 
803c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
804c0c362b6STejun Heo 		ata_eh_release(ap);
805c6fd2807SJeff Garzik 	} else {
8069af5c9c9STejun Heo 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
807c6fd2807SJeff Garzik 		ap->ops->eng_timeout(ap);
808c6fd2807SJeff Garzik 	}
809c6fd2807SJeff Garzik 
810c6fd2807SJeff Garzik 	scsi_eh_flush_done_q(&ap->eh_done_q);
811c6fd2807SJeff Garzik 
812c6fd2807SJeff Garzik 	/* clean up */
813c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
814c6fd2807SJeff Garzik 
815c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_LOADING)
816c6fd2807SJeff Garzik 		ap->pflags &= ~ATA_PFLAG_LOADING;
817c6fd2807SJeff Garzik 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
818ad72cf98STejun Heo 		schedule_delayed_work(&ap->hotplug_task, 0);
819c6fd2807SJeff Garzik 
820c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_RECOVERED)
821a9a79dfeSJoe Perches 		ata_port_info(ap, "EH complete\n");
822c6fd2807SJeff Garzik 
823c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
824c6fd2807SJeff Garzik 
825c6fd2807SJeff Garzik 	/* tell wait_eh that we're done */
826c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
827c6fd2807SJeff Garzik 	wake_up_all(&ap->eh_wait_q);
828c6fd2807SJeff Garzik 
829c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
830c6fd2807SJeff Garzik }
8310e0b494cSJames Bottomley EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
832c6fd2807SJeff Garzik 
833c6fd2807SJeff Garzik /**
834c6fd2807SJeff Garzik  *	ata_port_wait_eh - Wait for the currently pending EH to complete
835c6fd2807SJeff Garzik  *	@ap: Port to wait EH for
836c6fd2807SJeff Garzik  *
837c6fd2807SJeff Garzik  *	Wait until the currently pending EH is complete.
838c6fd2807SJeff Garzik  *
839c6fd2807SJeff Garzik  *	LOCKING:
840c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
841c6fd2807SJeff Garzik  */
842c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap)
843c6fd2807SJeff Garzik {
844c6fd2807SJeff Garzik 	unsigned long flags;
845c6fd2807SJeff Garzik 	DEFINE_WAIT(wait);
846c6fd2807SJeff Garzik 
847c6fd2807SJeff Garzik  retry:
848c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
849c6fd2807SJeff Garzik 
850c6fd2807SJeff Garzik 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
851c6fd2807SJeff Garzik 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
852c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
853c6fd2807SJeff Garzik 		schedule();
854c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
855c6fd2807SJeff Garzik 	}
856c6fd2807SJeff Garzik 	finish_wait(&ap->eh_wait_q, &wait);
857c6fd2807SJeff Garzik 
858c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
859c6fd2807SJeff Garzik 
860c6fd2807SJeff Garzik 	/* make sure SCSI EH is complete */
861cca3974eSJeff Garzik 	if (scsi_host_in_recovery(ap->scsi_host)) {
86297750cebSTejun Heo 		ata_msleep(ap, 10);
863c6fd2807SJeff Garzik 		goto retry;
864c6fd2807SJeff Garzik 	}
865c6fd2807SJeff Garzik }
86681c757bcSDan Williams EXPORT_SYMBOL_GPL(ata_port_wait_eh);
867c6fd2807SJeff Garzik 
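/*
 * Example (illustrative sketch): callers that need recovery to finish
 * before proceeding schedule EH under the host lock and then wait for it
 * outside the lock.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 *	ata_port_wait_eh(ap);	// sleeps until the pending EH completes
 */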
8685ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap)
8695ddf24c5STejun Heo {
8705ddf24c5STejun Heo 	unsigned int tag;
8715ddf24c5STejun Heo 	int nr = 0;
8725ddf24c5STejun Heo 
8735ddf24c5STejun Heo 	/* count only non-internal commands */
8745ddf24c5STejun Heo 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
8755ddf24c5STejun Heo 		if (ata_qc_from_tag(ap, tag))
8765ddf24c5STejun Heo 			nr++;
8775ddf24c5STejun Heo 
8785ddf24c5STejun Heo 	return nr;
8795ddf24c5STejun Heo }
8805ddf24c5STejun Heo 
8815ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg)
8825ddf24c5STejun Heo {
8835ddf24c5STejun Heo 	struct ata_port *ap = (void *)arg;
8845ddf24c5STejun Heo 	unsigned long flags;
8855ddf24c5STejun Heo 	int cnt;
8865ddf24c5STejun Heo 
8875ddf24c5STejun Heo 	spin_lock_irqsave(ap->lock, flags);
8885ddf24c5STejun Heo 
8895ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
8905ddf24c5STejun Heo 
8915ddf24c5STejun Heo 	/* are we done? */
8925ddf24c5STejun Heo 	if (!cnt)
8935ddf24c5STejun Heo 		goto out_unlock;
8945ddf24c5STejun Heo 
8955ddf24c5STejun Heo 	if (cnt == ap->fastdrain_cnt) {
8965ddf24c5STejun Heo 		unsigned int tag;
8975ddf24c5STejun Heo 
8985ddf24c5STejun Heo 		/* No progress during the last interval, tag all
8995ddf24c5STejun Heo 		 * in-flight qcs as timed out and freeze the port.
9005ddf24c5STejun Heo 		 */
9015ddf24c5STejun Heo 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
9025ddf24c5STejun Heo 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
9035ddf24c5STejun Heo 			if (qc)
9045ddf24c5STejun Heo 				qc->err_mask |= AC_ERR_TIMEOUT;
9055ddf24c5STejun Heo 		}
9065ddf24c5STejun Heo 
9075ddf24c5STejun Heo 		ata_port_freeze(ap);
9085ddf24c5STejun Heo 	} else {
9095ddf24c5STejun Heo 		/* some qcs have finished, give it another chance */
9105ddf24c5STejun Heo 		ap->fastdrain_cnt = cnt;
9115ddf24c5STejun Heo 		ap->fastdrain_timer.expires =
912341c2c95STejun Heo 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
9135ddf24c5STejun Heo 		add_timer(&ap->fastdrain_timer);
9145ddf24c5STejun Heo 	}
9155ddf24c5STejun Heo 
9165ddf24c5STejun Heo  out_unlock:
9175ddf24c5STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
9185ddf24c5STejun Heo }
9195ddf24c5STejun Heo 
9205ddf24c5STejun Heo /**
9215ddf24c5STejun Heo  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
9225ddf24c5STejun Heo  *	@ap: target ATA port
9235ddf24c5STejun Heo  *	@fastdrain: activate fast drain
9245ddf24c5STejun Heo  *
9255ddf24c5STejun Heo  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
9265ddf24c5STejun Heo  *	is non-zero and EH wasn't pending before.  Fast drain ensures
9275ddf24c5STejun Heo  *	that EH kicks in in a timely manner.
9285ddf24c5STejun Heo  *
9295ddf24c5STejun Heo  *	LOCKING:
9305ddf24c5STejun Heo  *	spin_lock_irqsave(host lock)
9315ddf24c5STejun Heo  */
9325ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
9335ddf24c5STejun Heo {
9345ddf24c5STejun Heo 	int cnt;
9355ddf24c5STejun Heo 
9365ddf24c5STejun Heo 	/* already scheduled? */
9375ddf24c5STejun Heo 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
9385ddf24c5STejun Heo 		return;
9395ddf24c5STejun Heo 
9405ddf24c5STejun Heo 	ap->pflags |= ATA_PFLAG_EH_PENDING;
9415ddf24c5STejun Heo 
9425ddf24c5STejun Heo 	if (!fastdrain)
9435ddf24c5STejun Heo 		return;
9445ddf24c5STejun Heo 
9455ddf24c5STejun Heo 	/* do we have in-flight qcs? */
9465ddf24c5STejun Heo 	cnt = ata_eh_nr_in_flight(ap);
9475ddf24c5STejun Heo 	if (!cnt)
9485ddf24c5STejun Heo 		return;
9495ddf24c5STejun Heo 
9505ddf24c5STejun Heo 	/* activate fast drain */
9515ddf24c5STejun Heo 	ap->fastdrain_cnt = cnt;
952341c2c95STejun Heo 	ap->fastdrain_timer.expires =
953341c2c95STejun Heo 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
9545ddf24c5STejun Heo 	add_timer(&ap->fastdrain_timer);
9555ddf24c5STejun Heo }
9565ddf24c5STejun Heo 
957c6fd2807SJeff Garzik /**
958c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
959c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
960c6fd2807SJeff Garzik  *
961c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
962c6fd2807SJeff Garzik  *	other commands are drained.
963c6fd2807SJeff Garzik  *
964c6fd2807SJeff Garzik  *	LOCKING:
965cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
966c6fd2807SJeff Garzik  */
967c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
968c6fd2807SJeff Garzik {
969c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
970fa41efdaSTejun Heo 	struct request_queue *q = qc->scsicmd->device->request_queue;
971fa41efdaSTejun Heo 	unsigned long flags;
972c6fd2807SJeff Garzik 
973c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
974c6fd2807SJeff Garzik 
975c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_FAILED;
9765ddf24c5STejun Heo 	ata_eh_set_pending(ap, 1);
977c6fd2807SJeff Garzik 
978c6fd2807SJeff Garzik 	/* The following will fail if the timeout has already expired.
979c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
980c6fd2807SJeff Garzik 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
981c6fd2807SJeff Garzik 	 * this function completes.
982c6fd2807SJeff Garzik 	 */
983fa41efdaSTejun Heo 	spin_lock_irqsave(q->queue_lock, flags);
984242f9dcbSJens Axboe 	blk_abort_request(qc->scsicmd->request);
985fa41efdaSTejun Heo 	spin_unlock_irqrestore(q->queue_lock, flags);
986c6fd2807SJeff Garzik }
987c6fd2807SJeff Garzik 
988c6fd2807SJeff Garzik /**
989e4a9c373SDan Williams  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
990e4a9c373SDan Williams  * @ap: ATA port to schedule EH for
991e4a9c373SDan Williams  *
992e4a9c373SDan Williams  *	LOCKING: inherited from ata_port_schedule_eh
993e4a9c373SDan Williams  *	spin_lock_irqsave(host lock)
994e4a9c373SDan Williams  */
995e4a9c373SDan Williams void ata_std_sched_eh(struct ata_port *ap)
996e4a9c373SDan Williams {
997e4a9c373SDan Williams 	WARN_ON(!ap->ops->error_handler);
998e4a9c373SDan Williams 
999e4a9c373SDan Williams 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
1000e4a9c373SDan Williams 		return;
1001e4a9c373SDan Williams 
1002e4a9c373SDan Williams 	ata_eh_set_pending(ap, 1);
1003e4a9c373SDan Williams 	scsi_schedule_eh(ap->scsi_host);
1004e4a9c373SDan Williams 
1005e4a9c373SDan Williams 	DPRINTK("port EH scheduled\n");
1006e4a9c373SDan Williams }
1007e4a9c373SDan Williams EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1008e4a9c373SDan Williams 
1009e4a9c373SDan Williams /**
1010e4a9c373SDan Williams  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1011e4a9c373SDan Williams  * @ap: ATA port to end EH for
1012e4a9c373SDan Williams  *
1013e4a9c373SDan Williams  * In the libata object model there is a 1:1 mapping of ata_port to
1014e4a9c373SDan Williams  * shost, so host fields can be directly manipulated under ap->lock.  In
1015e4a9c373SDan Williams  * the libsas case we need to hold a lock at the ha level to coordinate
1016e4a9c373SDan Williams  * these events.
1017e4a9c373SDan Williams  *
1018e4a9c373SDan Williams  *	LOCKING:
1019e4a9c373SDan Williams  *	spin_lock_irqsave(host lock)
1020e4a9c373SDan Williams  */
1021e4a9c373SDan Williams void ata_std_end_eh(struct ata_port *ap)
1022e4a9c373SDan Williams {
1023e4a9c373SDan Williams 	struct Scsi_Host *host = ap->scsi_host;
1024e4a9c373SDan Williams 
1025e4a9c373SDan Williams 	host->host_eh_scheduled = 0;
1026e4a9c373SDan Williams }
1027e4a9c373SDan Williams EXPORT_SYMBOL(ata_std_end_eh);
1028e4a9c373SDan Williams 
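/*
 * Example (illustrative sketch of the assumed wiring, see libata-core):
 * non-libsas LLDs normally inherit these two hooks through the base port
 * operations rather than calling them directly.
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		// .sched_eh / .end_eh default to ata_std_sched_eh/_end_eh
 *	};
 */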
1029e4a9c373SDan Williams 
1030e4a9c373SDan Williams /**
1031c6fd2807SJeff Garzik  *	ata_port_schedule_eh - schedule error handling without a qc
1032c6fd2807SJeff Garzik  *	@ap: ATA port to schedule EH for
1033c6fd2807SJeff Garzik  *
1034c6fd2807SJeff Garzik  *	Schedule error handling for @ap.  EH will kick in as soon as
1035c6fd2807SJeff Garzik  *	all commands are drained.
1036c6fd2807SJeff Garzik  *
1037c6fd2807SJeff Garzik  *	LOCKING:
1038cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1039c6fd2807SJeff Garzik  */
1040c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap)
1041c6fd2807SJeff Garzik {
1042e4a9c373SDan Williams 	/* see: ata_std_sched_eh, unless you know better */
1043e4a9c373SDan Williams 	ap->ops->sched_eh(ap);
1044c6fd2807SJeff Garzik }
1045c6fd2807SJeff Garzik 
1046dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1047c6fd2807SJeff Garzik {
1048c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
1049c6fd2807SJeff Garzik 
1050c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1051c6fd2807SJeff Garzik 
10525ddf24c5STejun Heo 	/* we're gonna abort all commands, no need for fast drain */
10535ddf24c5STejun Heo 	ata_eh_set_pending(ap, 0);
10545ddf24c5STejun Heo 
1055c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1056c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1057c6fd2807SJeff Garzik 
1058dbd82616STejun Heo 		if (qc && (!link || qc->dev->link == link)) {
1059c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
1060c6fd2807SJeff Garzik 			ata_qc_complete(qc);
1061c6fd2807SJeff Garzik 			nr_aborted++;
1062c6fd2807SJeff Garzik 		}
1063c6fd2807SJeff Garzik 	}
1064c6fd2807SJeff Garzik 
1065c6fd2807SJeff Garzik 	if (!nr_aborted)
1066c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
1067c6fd2807SJeff Garzik 
1068c6fd2807SJeff Garzik 	return nr_aborted;
1069c6fd2807SJeff Garzik }
1070c6fd2807SJeff Garzik 
1071c6fd2807SJeff Garzik /**
1072dbd82616STejun Heo  *	ata_link_abort - abort all qc's on the link
1073dbd82616STejun Heo  *	@link: ATA link to abort qc's for
1074dbd82616STejun Heo  *
1075dbd82616STejun Heo  *	Abort all active qc's on @link and schedule EH.
1076dbd82616STejun Heo  *
1077dbd82616STejun Heo  *	LOCKING:
1078dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
1079dbd82616STejun Heo  *
1080dbd82616STejun Heo  *	RETURNS:
1081dbd82616STejun Heo  *	Number of aborted qc's.
1082dbd82616STejun Heo  */
1083dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
1084dbd82616STejun Heo {
1085dbd82616STejun Heo 	return ata_do_link_abort(link->ap, link);
1086dbd82616STejun Heo }
1087dbd82616STejun Heo 
1088dbd82616STejun Heo /**
1089dbd82616STejun Heo  *	ata_port_abort - abort all qc's on the port
1090dbd82616STejun Heo  *	@ap: ATA port to abort qc's for
1091dbd82616STejun Heo  *
1092dbd82616STejun Heo  *	Abort all active qc's of @ap and schedule EH.
1093dbd82616STejun Heo  *
1094dbd82616STejun Heo  *	LOCKING:
1095dbd82616STejun Heo  *	spin_lock_irqsave(host lock)
1096dbd82616STejun Heo  *
1097dbd82616STejun Heo  *	RETURNS:
1098dbd82616STejun Heo  *	Number of aborted qc's.
1099dbd82616STejun Heo  */
1100dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
1101dbd82616STejun Heo {
1102dbd82616STejun Heo 	return ata_do_link_abort(ap, NULL);
1103dbd82616STejun Heo }
1104dbd82616STejun Heo 
1105dbd82616STejun Heo /**
1106c6fd2807SJeff Garzik  *	__ata_port_freeze - freeze port
1107c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1108c6fd2807SJeff Garzik  *
1109c6fd2807SJeff Garzik  *	This function is called when HSM violation or some other
1110c6fd2807SJeff Garzik  *	condition disrupts normal operation of the port.  Frozen port
1111c6fd2807SJeff Garzik  *	is not allowed to perform any operation until the port is
1112c6fd2807SJeff Garzik  *	thawed, which usually follows a successful reset.
1113c6fd2807SJeff Garzik  *
1114c6fd2807SJeff Garzik  *	ap->ops->freeze() callback can be used for freezing the port
1115c6fd2807SJeff Garzik  *	hardware-wise (e.g. mask interrupts and stop the DMA engine).  If a
1116c6fd2807SJeff Garzik  *	port cannot be frozen hardware-wise, the interrupt handler
1117c6fd2807SJeff Garzik  *	must ack and clear interrupts unconditionally while the port
1118c6fd2807SJeff Garzik  *	is frozen.
1119c6fd2807SJeff Garzik  *
1120c6fd2807SJeff Garzik  *	LOCKING:
1121cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1122c6fd2807SJeff Garzik  */
1123c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
1124c6fd2807SJeff Garzik {
1125c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1126c6fd2807SJeff Garzik 
1127c6fd2807SJeff Garzik 	if (ap->ops->freeze)
1128c6fd2807SJeff Garzik 		ap->ops->freeze(ap);
1129c6fd2807SJeff Garzik 
1130c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FROZEN;
1131c6fd2807SJeff Garzik 
113244877b4eSTejun Heo 	DPRINTK("ata%u port frozen\n", ap->print_id);
1133c6fd2807SJeff Garzik }
1134c6fd2807SJeff Garzik 
1135c6fd2807SJeff Garzik /**
1136c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
1137c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1138c6fd2807SJeff Garzik  *
113954c38444SJeff Garzik  *	Abort and freeze @ap.  The freeze operation must be called
114054c38444SJeff Garzik  *	first, because some hardware requires special operations
114154c38444SJeff Garzik  *	before the taskfile registers are accessible.
1142c6fd2807SJeff Garzik  *
1143c6fd2807SJeff Garzik  *	LOCKING:
1144cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
1145c6fd2807SJeff Garzik  *
1146c6fd2807SJeff Garzik  *	RETURNS:
1147c6fd2807SJeff Garzik  *	Number of aborted commands.
1148c6fd2807SJeff Garzik  */
1149c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1150c6fd2807SJeff Garzik {
1151c6fd2807SJeff Garzik 	int nr_aborted;
1152c6fd2807SJeff Garzik 
1153c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
1154c6fd2807SJeff Garzik 
1155c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
115654c38444SJeff Garzik 	nr_aborted = ata_port_abort(ap);
1157c6fd2807SJeff Garzik 
1158c6fd2807SJeff Garzik 	return nr_aborted;
1159c6fd2807SJeff Garzik }
1160c6fd2807SJeff Garzik 
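/*
 * Illustrative sketch, not part of libata-eh.c: one way a low-level driver
 * could implement the freeze/thaw convention documented above.  The names
 * foo_port_base(), FOO_IRQ_MASK, FOO_IRQ_STAT, FOO_IRQ_DEFAULT and
 * FOO_IRQ_FATAL are hypothetical stand-ins for driver-specific MMIO
 * accessors and registers; <linux/io.h> and <linux/interrupt.h> are assumed.
 */
static void foo_freeze(struct ata_port *ap)
{
	void __iomem *mmio = foo_port_base(ap);	/* hypothetical helper */

	/* mask per-port interrupts so the frozen port stays quiet */
	writel(0, mmio + FOO_IRQ_MASK);
	readl(mmio + FOO_IRQ_MASK);	/* flush posted write */
}

static void foo_thaw(struct ata_port *ap)
{
	void __iomem *mmio = foo_port_base(ap);

	/* clear stale status, then unmask interrupts again */
	writel(~0, mmio + FOO_IRQ_STAT);
	writel(FOO_IRQ_DEFAULT, mmio + FOO_IRQ_MASK);
}

static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct ata_port *ap = dev_instance;
	irqreturn_t ret = IRQ_NONE;
	u32 stat;

	spin_lock(ap->lock);	/* abort/freeze expect the host lock held */
	stat = readl(foo_port_base(ap) + FOO_IRQ_STAT);
	if (stat & FOO_IRQ_FATAL) {
		/* abort everything in flight, stay frozen until EH resets us */
		ata_port_freeze(ap);
		ret = IRQ_HANDLED;
	}
	spin_unlock(ap->lock);

	return ret;
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &ata_base_port_ops,
	.freeze		= foo_freeze,
	.thaw		= foo_thaw,
};
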
1161c6fd2807SJeff Garzik /**
11627d77b247STejun Heo  *	sata_async_notification - SATA async notification handler
11637d77b247STejun Heo  *	@ap: ATA port where async notification is received
11647d77b247STejun Heo  *
11657d77b247STejun Heo  *	Handler to be called when async notification via SDB FIS is
11667d77b247STejun Heo  *	received.  This function schedules EH if necessary.
11677d77b247STejun Heo  *
11687d77b247STejun Heo  *	LOCKING:
11697d77b247STejun Heo  *	spin_lock_irqsave(host lock)
11707d77b247STejun Heo  *
11717d77b247STejun Heo  *	RETURNS:
11727d77b247STejun Heo  *	1 if EH is scheduled, 0 otherwise.
11737d77b247STejun Heo  */
11747d77b247STejun Heo int sata_async_notification(struct ata_port *ap)
11757d77b247STejun Heo {
11767d77b247STejun Heo 	u32 sntf;
11777d77b247STejun Heo 	int rc;
11787d77b247STejun Heo 
11797d77b247STejun Heo 	if (!(ap->flags & ATA_FLAG_AN))
11807d77b247STejun Heo 		return 0;
11817d77b247STejun Heo 
11827d77b247STejun Heo 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
11837d77b247STejun Heo 	if (rc == 0)
11847d77b247STejun Heo 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
11857d77b247STejun Heo 
1186071f44b1STejun Heo 	if (!sata_pmp_attached(ap) || rc) {
11877d77b247STejun Heo 		/* PMP is not attached or SNTF is not available */
1188071f44b1STejun Heo 		if (!sata_pmp_attached(ap)) {
11897d77b247STejun Heo 			/* PMP is not attached.  Check whether ATAPI
11907d77b247STejun Heo 			 * AN is configured.  If so, notify media
11917d77b247STejun Heo 			 * change.
11927d77b247STejun Heo 			 */
11937d77b247STejun Heo 			struct ata_device *dev = ap->link.device;
11947d77b247STejun Heo 
11957d77b247STejun Heo 			if ((dev->class == ATA_DEV_ATAPI) &&
11967d77b247STejun Heo 			    (dev->flags & ATA_DFLAG_AN))
11977d77b247STejun Heo 				ata_scsi_media_change_notify(dev);
11987d77b247STejun Heo 			return 0;
11997d77b247STejun Heo 		} else {
12007d77b247STejun Heo 			/* PMP is attached but SNTF is not available.
12017d77b247STejun Heo 			 * ATAPI async media change notification is
12027d77b247STejun Heo 			 * not used.  The PMP must be reporting PHY
12037d77b247STejun Heo 			 * status change, schedule EH.
12047d77b247STejun Heo 			 */
12057d77b247STejun Heo 			ata_port_schedule_eh(ap);
12067d77b247STejun Heo 			return 1;
12077d77b247STejun Heo 		}
12087d77b247STejun Heo 	} else {
12097d77b247STejun Heo 		/* PMP is attached and SNTF is available */
12107d77b247STejun Heo 		struct ata_link *link;
12117d77b247STejun Heo 
12127d77b247STejun Heo 		/* check and notify ATAPI AN */
12131eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
12147d77b247STejun Heo 			if (!(sntf & (1 << link->pmp)))
12157d77b247STejun Heo 				continue;
12167d77b247STejun Heo 
12177d77b247STejun Heo 			if ((link->device->class == ATA_DEV_ATAPI) &&
12187d77b247STejun Heo 			    (link->device->flags & ATA_DFLAG_AN))
12197d77b247STejun Heo 				ata_scsi_media_change_notify(link->device);
12207d77b247STejun Heo 		}
12217d77b247STejun Heo 
12227d77b247STejun Heo 		/* If PMP is reporting that PHY status of some
12237d77b247STejun Heo 		 * downstream ports has changed, schedule EH.
12247d77b247STejun Heo 		 */
12257d77b247STejun Heo 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
12267d77b247STejun Heo 			ata_port_schedule_eh(ap);
12277d77b247STejun Heo 			return 1;
12287d77b247STejun Heo 		}
12297d77b247STejun Heo 
12307d77b247STejun Heo 		return 0;
12317d77b247STejun Heo 	}
12327d77b247STejun Heo }
12337d77b247STejun Heo 
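/*
 * Illustrative sketch, not part of libata-eh.c: the caller side of the
 * asynchronous notification handling above.  An AHCI-like driver forwards
 * a Set Device Bits FIS carrying the notification bit by calling
 * sata_async_notification() from its port interrupt handler, with the host
 * lock already held as the LOCKING rule requires.  FOO_IRQ_SDB_FIS and
 * foo_port_intr() are hypothetical names used only for this example.
 */
static void foo_port_intr(struct ata_port *ap, u32 irq_stat)
{
	/* hardware latched an SDB FIS; let libata decide whether EH is needed */
	if (irq_stat & FOO_IRQ_SDB_FIS)
		sata_async_notification(ap);
}
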
12347d77b247STejun Heo /**
1235c6fd2807SJeff Garzik  *	ata_eh_freeze_port - EH helper to freeze port
1236c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
1237c6fd2807SJeff Garzik  *
1238c6fd2807SJeff Garzik  *	Freeze @ap.
1239c6fd2807SJeff Garzik  *
1240c6fd2807SJeff Garzik  *	LOCKING:
1241c6fd2807SJeff Garzik  *	None.
1242c6fd2807SJeff Garzik  */
1243c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1244c6fd2807SJeff Garzik {
1245c6fd2807SJeff Garzik 	unsigned long flags;
1246c6fd2807SJeff Garzik 
1247c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1248c6fd2807SJeff Garzik 		return;
1249c6fd2807SJeff Garzik 
1250c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1251c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
1252c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1253c6fd2807SJeff Garzik }
1254c6fd2807SJeff Garzik 
1255c6fd2807SJeff Garzik /**
1256c6fd2807SJeff Garzik  *	ata_eh_thaw_port - EH helper to thaw port
1257c6fd2807SJeff Garzik  *	@ap: ATA port to thaw
1258c6fd2807SJeff Garzik  *
1259c6fd2807SJeff Garzik  *	Thaw frozen port @ap.
1260c6fd2807SJeff Garzik  *
1261c6fd2807SJeff Garzik  *	LOCKING:
1262c6fd2807SJeff Garzik  *	None.
1263c6fd2807SJeff Garzik  */
1264c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1265c6fd2807SJeff Garzik {
1266c6fd2807SJeff Garzik 	unsigned long flags;
1267c6fd2807SJeff Garzik 
1268c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
1269c6fd2807SJeff Garzik 		return;
1270c6fd2807SJeff Garzik 
1271c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1272c6fd2807SJeff Garzik 
1273c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1274c6fd2807SJeff Garzik 
1275c6fd2807SJeff Garzik 	if (ap->ops->thaw)
1276c6fd2807SJeff Garzik 		ap->ops->thaw(ap);
1277c6fd2807SJeff Garzik 
1278c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1279c6fd2807SJeff Garzik 
128044877b4eSTejun Heo 	DPRINTK("ata%u port thawed\n", ap->print_id);
1281c6fd2807SJeff Garzik }
1282c6fd2807SJeff Garzik 
1283c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1284c6fd2807SJeff Garzik {
1285c6fd2807SJeff Garzik 	/* nada */
1286c6fd2807SJeff Garzik }
1287c6fd2807SJeff Garzik 
1288c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1289c6fd2807SJeff Garzik {
1290c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
1291c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1292c6fd2807SJeff Garzik 	unsigned long flags;
1293c6fd2807SJeff Garzik 
1294c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1295c6fd2807SJeff Garzik 	qc->scsidone = ata_eh_scsidone;
1296c6fd2807SJeff Garzik 	__ata_qc_complete(qc);
1297c6fd2807SJeff Garzik 	WARN_ON(ata_tag_valid(qc->tag));
1298c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1299c6fd2807SJeff Garzik 
1300c6fd2807SJeff Garzik 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1301c6fd2807SJeff Garzik }
1302c6fd2807SJeff Garzik 
1303c6fd2807SJeff Garzik /**
1304c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
1305c6fd2807SJeff Garzik  *	@qc: Command to complete
1306c6fd2807SJeff Garzik  *
1307c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
1308c6fd2807SJeff Garzik  *	completed.  To be used from EH.
1309c6fd2807SJeff Garzik  */
1310c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1311c6fd2807SJeff Garzik {
1312c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1313c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
1314c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1315c6fd2807SJeff Garzik }
1316c6fd2807SJeff Garzik 
1317c6fd2807SJeff Garzik /**
1318c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1319c6fd2807SJeff Garzik  *	@qc: Command to retry
1320c6fd2807SJeff Garzik  *
1321c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
1322c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
1323c6fd2807SJeff Garzik  *
1324c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
1325c6fd2807SJeff Garzik  *	scmd->retries is decremented for commands which get retried
1326c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
1327c6fd2807SJeff Garzik  */
1328c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1329c6fd2807SJeff Garzik {
1330c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
1331c6fd2807SJeff Garzik 	if (!qc->err_mask && scmd->retries)
1332c6fd2807SJeff Garzik 		scmd->retries--;
1333c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
1334c6fd2807SJeff Garzik }
1335c6fd2807SJeff Garzik 
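/*
 * Illustrative sketch, not part of the file proper: the pattern EH code
 * follows when disposing of failed commands with the two helpers above.
 * Commands that failed in their own right are completed so the error
 * reaches the SCSI layer; commands that were only aborted because EH had
 * to clean out the queue are retried without consuming scmd->retries.
 * This is a simplified rendering of what ata_eh_finish() does, not the
 * exact implementation; foo_finish_failed_qcs() is a hypothetical name.
 */
static void foo_finish_failed_qcs(struct ata_port *ap)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			ata_eh_qc_complete(qc);	/* genuine failure */
		else
			ata_eh_qc_retry(qc);	/* innocent bystander */
	}
}
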
1336c6fd2807SJeff Garzik /**
1337678afac6STejun Heo  *	ata_dev_disable - disable ATA device
1338678afac6STejun Heo  *	@dev: ATA device to disable
1339678afac6STejun Heo  *
1340678afac6STejun Heo  *	Disable @dev.
1341678afac6STejun Heo  *
1342678afac6STejun Heo  *	Locking:
1343678afac6STejun Heo  *	EH context.
1344678afac6STejun Heo  */
1345678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1346678afac6STejun Heo {
1347678afac6STejun Heo 	if (!ata_dev_enabled(dev))
1348678afac6STejun Heo 		return;
1349678afac6STejun Heo 
1350678afac6STejun Heo 	if (ata_msg_drv(dev->link->ap))
1351a9a79dfeSJoe Perches 		ata_dev_warn(dev, "disabled\n");
1352678afac6STejun Heo 	ata_acpi_on_disable(dev);
1353678afac6STejun Heo 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1354678afac6STejun Heo 	dev->class++;
135599cf610aSTejun Heo 
135699cf610aSTejun Heo 	/* From now till the next successful probe, ering is used to
135799cf610aSTejun Heo 	 * track probe failures.  Clear accumulated device error info.
135899cf610aSTejun Heo 	 */
135999cf610aSTejun Heo 	ata_ering_clear(&dev->ering);
1360678afac6STejun Heo }
1361678afac6STejun Heo 
1362678afac6STejun Heo /**
1363c6fd2807SJeff Garzik  *	ata_eh_detach_dev - detach ATA device
1364c6fd2807SJeff Garzik  *	@dev: ATA device to detach
1365c6fd2807SJeff Garzik  *
1366c6fd2807SJeff Garzik  *	Detach @dev.
1367c6fd2807SJeff Garzik  *
1368c6fd2807SJeff Garzik  *	LOCKING:
1369c6fd2807SJeff Garzik  *	None.
1370c6fd2807SJeff Garzik  */
1371fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1372c6fd2807SJeff Garzik {
1373f58229f8STejun Heo 	struct ata_link *link = dev->link;
1374f58229f8STejun Heo 	struct ata_port *ap = link->ap;
137590484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1376c6fd2807SJeff Garzik 	unsigned long flags;
1377c6fd2807SJeff Garzik 
1378c6fd2807SJeff Garzik 	ata_dev_disable(dev);
1379c6fd2807SJeff Garzik 
1380c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1381c6fd2807SJeff Garzik 
1382c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1383c6fd2807SJeff Garzik 
1384c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1385c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1386c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1387c6fd2807SJeff Garzik 	}
1388c6fd2807SJeff Garzik 
138990484ebfSTejun Heo 	/* clear per-dev EH info */
1390f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1391f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
139290484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
139390484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1394c6fd2807SJeff Garzik 
1395c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1396c6fd2807SJeff Garzik }
1397c6fd2807SJeff Garzik 
1398c6fd2807SJeff Garzik /**
1399c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1400955e57dfSTejun Heo  *	@link: target ATA link
1401c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1402c6fd2807SJeff Garzik  *	@action: action about to be performed
1403c6fd2807SJeff Garzik  *
1404c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1405955e57dfSTejun Heo  *	in @link->eh_info such that EH actions are not unnecessarily
1406955e57dfSTejun Heo  *	repeated.
1407c6fd2807SJeff Garzik  *
1408c6fd2807SJeff Garzik  *	LOCKING:
1409c6fd2807SJeff Garzik  *	None.
1410c6fd2807SJeff Garzik  */
1411fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1412c6fd2807SJeff Garzik 			unsigned int action)
1413c6fd2807SJeff Garzik {
1414955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1415955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1416955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1417c6fd2807SJeff Garzik 	unsigned long flags;
1418c6fd2807SJeff Garzik 
1419c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1420c6fd2807SJeff Garzik 
1421955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1422c6fd2807SJeff Garzik 
1423a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED.  Ignore actions on
1424a568d1d2STejun Heo 	 * slave links as master will do them again.
1425a568d1d2STejun Heo 	 */
1426a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1427c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1428c6fd2807SJeff Garzik 
1429c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1430c6fd2807SJeff Garzik }
1431c6fd2807SJeff Garzik 
1432c6fd2807SJeff Garzik /**
1433c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
1434c6fd2807SJeff Garzik  *	@link: target ATA link
1435c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1436c6fd2807SJeff Garzik  *	@action: action just completed
1437c6fd2807SJeff Garzik  *
1438c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1439955e57dfSTejun Heo  *	in @link->eh_context.
1440c6fd2807SJeff Garzik  *
1441c6fd2807SJeff Garzik  *	LOCKING:
1442c6fd2807SJeff Garzik  *	None.
1443c6fd2807SJeff Garzik  */
1444fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1445c6fd2807SJeff Garzik 		 unsigned int action)
1446c6fd2807SJeff Garzik {
1447955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
14489af5c9c9STejun Heo 
1449955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, &ehc->i, action);
1450c6fd2807SJeff Garzik }
1451c6fd2807SJeff Garzik 
1452c6fd2807SJeff Garzik /**
1453c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
1454c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
1455c6fd2807SJeff Garzik  *
1456c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
1457c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
1458c6fd2807SJeff Garzik  *	error is reported.
1459c6fd2807SJeff Garzik  *
1460c6fd2807SJeff Garzik  *	LOCKING:
1461c6fd2807SJeff Garzik  *	None.
1462c6fd2807SJeff Garzik  *
1463c6fd2807SJeff Garzik  *	RETURNS:
1464c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
1465c6fd2807SJeff Garzik  */
1466c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1467c6fd2807SJeff Garzik {
1468c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
1469c6fd2807SJeff Garzik 		return "host bus error";
1470c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
1471c6fd2807SJeff Garzik 		return "ATA bus error";
1472c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
1473c6fd2807SJeff Garzik 		return "timeout";
1474c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
1475c6fd2807SJeff Garzik 		return "HSM violation";
1476c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
1477c6fd2807SJeff Garzik 		return "internal error";
1478c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
1479c6fd2807SJeff Garzik 		return "media error";
1480c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
1481c6fd2807SJeff Garzik 		return "invalid argument";
1482c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
1483c6fd2807SJeff Garzik 		return "device error";
1484c6fd2807SJeff Garzik 	return "unknown error";
1485c6fd2807SJeff Garzik }
1486c6fd2807SJeff Garzik 
1487c6fd2807SJeff Garzik /**
1488c6fd2807SJeff Garzik  *	ata_read_log_page - read a specific log page
1489c6fd2807SJeff Garzik  *	@dev: target device
149065fe1f0fSShane Huang  *	@log: log to read
1491c6fd2807SJeff Garzik  *	@page: page to read
1492c6fd2807SJeff Garzik  *	@buf: buffer to store read page
1493c6fd2807SJeff Garzik  *	@sectors: number of sectors to read
1494c6fd2807SJeff Garzik  *
1495c6fd2807SJeff Garzik  *	Read log page using READ_LOG_EXT command.
1496c6fd2807SJeff Garzik  *
1497c6fd2807SJeff Garzik  *	LOCKING:
1498c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1499c6fd2807SJeff Garzik  *
1500c6fd2807SJeff Garzik  *	RETURNS:
1501c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
1502c6fd2807SJeff Garzik  */
150365fe1f0fSShane Huang unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1504c6fd2807SJeff Garzik 			       u8 page, void *buf, unsigned int sectors)
1505c6fd2807SJeff Garzik {
1506c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1507c6fd2807SJeff Garzik 	unsigned int err_mask;
1508c6fd2807SJeff Garzik 
150965fe1f0fSShane Huang 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1510c6fd2807SJeff Garzik 
1511c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1512c6fd2807SJeff Garzik 	tf.command = ATA_CMD_READ_LOG_EXT;
151365fe1f0fSShane Huang 	tf.lbal = log;
151465fe1f0fSShane Huang 	tf.lbam = page;
1515c6fd2807SJeff Garzik 	tf.nsect = sectors;
1516c6fd2807SJeff Garzik 	tf.hob_nsect = sectors >> 8;
1517c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1518c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
1519c6fd2807SJeff Garzik 
1520c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
15212b789108STejun Heo 				     buf, sectors * ATA_SECT_SIZE, 0);
1522c6fd2807SJeff Garzik 
1523c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1524c6fd2807SJeff Garzik 	return err_mask;
1525c6fd2807SJeff Garzik }
1526c6fd2807SJeff Garzik 
1527c6fd2807SJeff Garzik /**
1528c6fd2807SJeff Garzik  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1529c6fd2807SJeff Garzik  *	@dev: Device to read log page 10h from
1530c6fd2807SJeff Garzik  *	@tag: Resulting tag of the failed command
1531c6fd2807SJeff Garzik  *	@tf: Resulting taskfile registers of the failed command
1532c6fd2807SJeff Garzik  *
1533c6fd2807SJeff Garzik  *	Read log page 10h to obtain NCQ error details and clear error
1534c6fd2807SJeff Garzik  *	condition.
1535c6fd2807SJeff Garzik  *
1536c6fd2807SJeff Garzik  *	LOCKING:
1537c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1538c6fd2807SJeff Garzik  *
1539c6fd2807SJeff Garzik  *	RETURNS:
1540c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1541c6fd2807SJeff Garzik  */
1542c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev,
1543c6fd2807SJeff Garzik 			       int *tag, struct ata_taskfile *tf)
1544c6fd2807SJeff Garzik {
15459af5c9c9STejun Heo 	u8 *buf = dev->link->ap->sector_buf;
1546c6fd2807SJeff Garzik 	unsigned int err_mask;
1547c6fd2807SJeff Garzik 	u8 csum;
1548c6fd2807SJeff Garzik 	int i;
1549c6fd2807SJeff Garzik 
155065fe1f0fSShane Huang 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1551c6fd2807SJeff Garzik 	if (err_mask)
1552c6fd2807SJeff Garzik 		return -EIO;
1553c6fd2807SJeff Garzik 
1554c6fd2807SJeff Garzik 	csum = 0;
1555c6fd2807SJeff Garzik 	for (i = 0; i < ATA_SECT_SIZE; i++)
1556c6fd2807SJeff Garzik 		csum += buf[i];
1557c6fd2807SJeff Garzik 	if (csum)
1558a9a79dfeSJoe Perches 		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1559a9a79dfeSJoe Perches 			     csum);
1560c6fd2807SJeff Garzik 
1561c6fd2807SJeff Garzik 	if (buf[0] & 0x80)
1562c6fd2807SJeff Garzik 		return -ENOENT;
1563c6fd2807SJeff Garzik 
1564c6fd2807SJeff Garzik 	*tag = buf[0] & 0x1f;
1565c6fd2807SJeff Garzik 
1566c6fd2807SJeff Garzik 	tf->command = buf[2];
1567c6fd2807SJeff Garzik 	tf->feature = buf[3];
1568c6fd2807SJeff Garzik 	tf->lbal = buf[4];
1569c6fd2807SJeff Garzik 	tf->lbam = buf[5];
1570c6fd2807SJeff Garzik 	tf->lbah = buf[6];
1571c6fd2807SJeff Garzik 	tf->device = buf[7];
1572c6fd2807SJeff Garzik 	tf->hob_lbal = buf[8];
1573c6fd2807SJeff Garzik 	tf->hob_lbam = buf[9];
1574c6fd2807SJeff Garzik 	tf->hob_lbah = buf[10];
1575c6fd2807SJeff Garzik 	tf->nsect = buf[12];
1576c6fd2807SJeff Garzik 	tf->hob_nsect = buf[13];
1577c6fd2807SJeff Garzik 
1578c6fd2807SJeff Garzik 	return 0;
1579c6fd2807SJeff Garzik }
1580c6fd2807SJeff Garzik 
1581c6fd2807SJeff Garzik /**
158211fc33daSTejun Heo  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
158311fc33daSTejun Heo  *	@dev: target ATAPI device
158411fc33daSTejun Heo  *	@r_sense_key: out parameter for sense_key
158511fc33daSTejun Heo  *
158611fc33daSTejun Heo  *	Perform ATAPI TEST_UNIT_READY.
158711fc33daSTejun Heo  *
158811fc33daSTejun Heo  *	LOCKING:
158911fc33daSTejun Heo  *	EH context (may sleep).
159011fc33daSTejun Heo  *
159111fc33daSTejun Heo  *	RETURNS:
159211fc33daSTejun Heo  *	0 on success, AC_ERR_* mask on failure.
159311fc33daSTejun Heo  */
15943dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
159511fc33daSTejun Heo {
159611fc33daSTejun Heo 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
159711fc33daSTejun Heo 	struct ata_taskfile tf;
159811fc33daSTejun Heo 	unsigned int err_mask;
159911fc33daSTejun Heo 
160011fc33daSTejun Heo 	ata_tf_init(dev, &tf);
160111fc33daSTejun Heo 
160211fc33daSTejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
160311fc33daSTejun Heo 	tf.command = ATA_CMD_PACKET;
160411fc33daSTejun Heo 	tf.protocol = ATAPI_PROT_NODATA;
160511fc33daSTejun Heo 
160611fc33daSTejun Heo 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
160711fc33daSTejun Heo 	if (err_mask == AC_ERR_DEV)
160811fc33daSTejun Heo 		*r_sense_key = tf.feature >> 4;
160911fc33daSTejun Heo 	return err_mask;
161011fc33daSTejun Heo }
161111fc33daSTejun Heo 
161211fc33daSTejun Heo /**
1613c6fd2807SJeff Garzik  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1614c6fd2807SJeff Garzik  *	@dev: device to perform REQUEST_SENSE to
1615c6fd2807SJeff Garzik  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
16163eabddb8STejun Heo  *	@dfl_sense_key: default sense key to use
1617c6fd2807SJeff Garzik  *
1618c6fd2807SJeff Garzik  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1618c6fd2807SJeff Garzik  *	CONDITION.  This function is an EH helper.
1620c6fd2807SJeff Garzik  *
1621c6fd2807SJeff Garzik  *	LOCKING:
1622c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1623c6fd2807SJeff Garzik  *
1624c6fd2807SJeff Garzik  *	RETURNS:
1625c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask on failure
1626c6fd2807SJeff Garzik  */
16273dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev,
16283eabddb8STejun Heo 					   u8 *sense_buf, u8 dfl_sense_key)
1629c6fd2807SJeff Garzik {
16303eabddb8STejun Heo 	u8 cdb[ATAPI_CDB_LEN] =
16313eabddb8STejun Heo 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
16329af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1633c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1634c6fd2807SJeff Garzik 
1635c6fd2807SJeff Garzik 	DPRINTK("ATAPI request sense\n");
1636c6fd2807SJeff Garzik 
1637c6fd2807SJeff Garzik 	/* FIXME: is this needed? */
1638c6fd2807SJeff Garzik 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1639c6fd2807SJeff Garzik 
164056287768SAlbert Lee 	/* initialize sense_buf with the error register,
164156287768SAlbert Lee 	 * for the case where they are -not- overwritten
164256287768SAlbert Lee 	 */
1643c6fd2807SJeff Garzik 	sense_buf[0] = 0x70;
16443eabddb8STejun Heo 	sense_buf[2] = dfl_sense_key;
164556287768SAlbert Lee 
164656287768SAlbert Lee 	/* some devices time out if garbage left in tf */
164756287768SAlbert Lee 	ata_tf_init(dev, &tf);
1648c6fd2807SJeff Garzik 
1649c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1650c6fd2807SJeff Garzik 	tf.command = ATA_CMD_PACKET;
1651c6fd2807SJeff Garzik 
1652c6fd2807SJeff Garzik 	/* is it pointless to prefer PIO for "safety reasons"? */
1653c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_DMA) {
16540dc36888STejun Heo 		tf.protocol = ATAPI_PROT_DMA;
1655c6fd2807SJeff Garzik 		tf.feature |= ATAPI_PKT_DMA;
1656c6fd2807SJeff Garzik 	} else {
16570dc36888STejun Heo 		tf.protocol = ATAPI_PROT_PIO;
1658f2dfc1a1STejun Heo 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1659f2dfc1a1STejun Heo 		tf.lbah = 0;
1660c6fd2807SJeff Garzik 	}
1661c6fd2807SJeff Garzik 
1662c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
16632b789108STejun Heo 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1664c6fd2807SJeff Garzik }
1665c6fd2807SJeff Garzik 
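/*
 * Illustrative sketch, not part of the file proper: the two helpers above
 * combine naturally to drain UNIT ATTENTION conditions after a device has
 * been reset or revalidated.  This is a simplified version of the clear-UA
 * logic used elsewhere in libata EH; foo_clear_ua() and the retry count of
 * 5 are arbitrary choices for the example.
 */
static void foo_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < 5; i++) {
		u8 *sense_buffer = dev->link->ap->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != AC_ERR_DEV || sense_key != UNIT_ATTENTION)
			return;	/* either ready or a non-UA error */

		/* a REQUEST SENSE consumes the pending UNIT ATTENTION */
		if (atapi_eh_request_sense(dev, sense_buffer, sense_key))
			return;
	}
}
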
1666c6fd2807SJeff Garzik /**
1667c6fd2807SJeff Garzik  *	ata_eh_analyze_serror - analyze SError for a failed port
16680260731fSTejun Heo  *	@link: ATA link to analyze SError for
1669c6fd2807SJeff Garzik  *
1670c6fd2807SJeff Garzik  *	Analyze SError if available and further determine cause of
1671c6fd2807SJeff Garzik  *	failure.
1672c6fd2807SJeff Garzik  *
1673c6fd2807SJeff Garzik  *	LOCKING:
1674c6fd2807SJeff Garzik  *	None.
1675c6fd2807SJeff Garzik  */
16760260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1677c6fd2807SJeff Garzik {
16780260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1679c6fd2807SJeff Garzik 	u32 serror = ehc->i.serror;
1680c6fd2807SJeff Garzik 	unsigned int err_mask = 0, action = 0;
1681f9df58cbSTejun Heo 	u32 hotplug_mask;
1682c6fd2807SJeff Garzik 
1683e0614db2STejun Heo 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1684c6fd2807SJeff Garzik 		err_mask |= AC_ERR_ATA_BUS;
1685cf480626STejun Heo 		action |= ATA_EH_RESET;
1686c6fd2807SJeff Garzik 	}
1687c6fd2807SJeff Garzik 	if (serror & SERR_PROTOCOL) {
1688c6fd2807SJeff Garzik 		err_mask |= AC_ERR_HSM;
1689cf480626STejun Heo 		action |= ATA_EH_RESET;
1690c6fd2807SJeff Garzik 	}
1691c6fd2807SJeff Garzik 	if (serror & SERR_INTERNAL) {
1692c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1693cf480626STejun Heo 		action |= ATA_EH_RESET;
1694c6fd2807SJeff Garzik 	}
1695f9df58cbSTejun Heo 
1696f9df58cbSTejun Heo 	/* Determine whether a hotplug event has occurred.  Both
1697f9df58cbSTejun Heo 	 * SError.N/X are considered hotplug events for enabled or
1698f9df58cbSTejun Heo 	 * host links.  For disabled PMP links, only N bit is
1699f9df58cbSTejun Heo 	 * considered as X bit is left at 1 for link plugging.
1700f9df58cbSTejun Heo 	 */
1701eb0e85e3STejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
17026b7ae954STejun Heo 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
17036b7ae954STejun Heo 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1704f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1705f9df58cbSTejun Heo 	else
1706f9df58cbSTejun Heo 		hotplug_mask = SERR_PHYRDY_CHG;
1707f9df58cbSTejun Heo 
1708f9df58cbSTejun Heo 	if (serror & hotplug_mask)
1709c6fd2807SJeff Garzik 		ata_ehi_hotplugged(&ehc->i);
1710c6fd2807SJeff Garzik 
1711c6fd2807SJeff Garzik 	ehc->i.err_mask |= err_mask;
1712c6fd2807SJeff Garzik 	ehc->i.action |= action;
1713c6fd2807SJeff Garzik }
1714c6fd2807SJeff Garzik 
1715c6fd2807SJeff Garzik /**
1716c6fd2807SJeff Garzik  *	ata_eh_analyze_ncq_error - analyze NCQ error
17170260731fSTejun Heo  *	@link: ATA link to analyze NCQ error for
1718c6fd2807SJeff Garzik  *
1719c6fd2807SJeff Garzik  *	Read log page 10h, determine the offending qc and acquire
1720c6fd2807SJeff Garzik  *	error status TF.  For NCQ device errors, all LLDDs have to do
1721c6fd2807SJeff Garzik  *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
1722c6fd2807SJeff Garzik  *	care of the rest.
1723c6fd2807SJeff Garzik  *
1724c6fd2807SJeff Garzik  *	LOCKING:
1725c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1726c6fd2807SJeff Garzik  */
172710acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1728c6fd2807SJeff Garzik {
17290260731fSTejun Heo 	struct ata_port *ap = link->ap;
17300260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
17310260731fSTejun Heo 	struct ata_device *dev = link->device;
1732c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1733c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1734c6fd2807SJeff Garzik 	int tag, rc;
1735c6fd2807SJeff Garzik 
1736c6fd2807SJeff Garzik 	/* if frozen, we can't do much */
1737c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1738c6fd2807SJeff Garzik 		return;
1739c6fd2807SJeff Garzik 
1740c6fd2807SJeff Garzik 	/* is it NCQ device error? */
17410260731fSTejun Heo 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1742c6fd2807SJeff Garzik 		return;
1743c6fd2807SJeff Garzik 
1744c6fd2807SJeff Garzik 	/* has LLDD analyzed already? */
1745c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1746c6fd2807SJeff Garzik 		qc = __ata_qc_from_tag(ap, tag);
1747c6fd2807SJeff Garzik 
1748c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1749c6fd2807SJeff Garzik 			continue;
1750c6fd2807SJeff Garzik 
1751c6fd2807SJeff Garzik 		if (qc->err_mask)
1752c6fd2807SJeff Garzik 			return;
1753c6fd2807SJeff Garzik 	}
1754c6fd2807SJeff Garzik 
1755c6fd2807SJeff Garzik 	/* okay, this error is ours */
1756a09bf4cdSJeff Garzik 	memset(&tf, 0, sizeof(tf));
1757c6fd2807SJeff Garzik 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1758c6fd2807SJeff Garzik 	if (rc) {
1759a9a79dfeSJoe Perches 		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1760a9a79dfeSJoe Perches 			     rc);
1761c6fd2807SJeff Garzik 		return;
1762c6fd2807SJeff Garzik 	}
1763c6fd2807SJeff Garzik 
17640260731fSTejun Heo 	if (!(link->sactive & (1 << tag))) {
1765a9a79dfeSJoe Perches 		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1766a9a79dfeSJoe Perches 			     tag);
1767c6fd2807SJeff Garzik 		return;
1768c6fd2807SJeff Garzik 	}
1769c6fd2807SJeff Garzik 
1770c6fd2807SJeff Garzik 	/* we've got the perpetrator, condemn it */
1771c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1772c6fd2807SJeff Garzik 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1773a6116c9eSMark Lord 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
17745335b729STejun Heo 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1775c6fd2807SJeff Garzik 	ehc->i.err_mask &= ~AC_ERR_DEV;
1776c6fd2807SJeff Garzik }
1777c6fd2807SJeff Garzik 
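/*
 * Illustrative sketch, not part of the file proper: the LLDD side of the
 * NCQ error convention documented above.  On an NCQ device error the
 * driver only needs to flag AC_ERR_DEV on the link's eh_info and abort the
 * port; the autopsy path then reads log page 10h and pins the error on the
 * offending tag.  FOO_IRQ_TF_ERR and foo_error_intr() are hypothetical
 * names; this runs from the interrupt handler with the host lock held.
 */
static void foo_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	if (irq_stat & FOO_IRQ_TF_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "taskfile error, irq_stat 0x%08x",
				  irq_stat);
		ehi->err_mask |= AC_ERR_DEV;
		ata_port_abort(ap);
	}
}
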
1778c6fd2807SJeff Garzik /**
1779c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1780c6fd2807SJeff Garzik  *	@qc: qc to analyze
1781c6fd2807SJeff Garzik  *	@tf: Taskfile registers to analyze
1782c6fd2807SJeff Garzik  *
1783c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1784c6fd2807SJeff Garzik  *	failure.  This function also requests ATAPI sense data if
178525985edcSLucas De Marchi  *	available.
1786c6fd2807SJeff Garzik  *
1787c6fd2807SJeff Garzik  *	LOCKING:
1788c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1789c6fd2807SJeff Garzik  *
1790c6fd2807SJeff Garzik  *	RETURNS:
1791c6fd2807SJeff Garzik  *	Determined recovery action
1792c6fd2807SJeff Garzik  */
1793c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1794c6fd2807SJeff Garzik 				      const struct ata_taskfile *tf)
1795c6fd2807SJeff Garzik {
1796c6fd2807SJeff Garzik 	unsigned int tmp, action = 0;
1797c6fd2807SJeff Garzik 	u8 stat = tf->command, err = tf->feature;
1798c6fd2807SJeff Garzik 
1799c6fd2807SJeff Garzik 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1800c6fd2807SJeff Garzik 		qc->err_mask |= AC_ERR_HSM;
1801cf480626STejun Heo 		return ATA_EH_RESET;
1802c6fd2807SJeff Garzik 	}
1803c6fd2807SJeff Garzik 
1804a51d644aSTejun Heo 	if (stat & (ATA_ERR | ATA_DF))
1805a51d644aSTejun Heo 		qc->err_mask |= AC_ERR_DEV;
1806a51d644aSTejun Heo 	else
1807c6fd2807SJeff Garzik 		return 0;
1808c6fd2807SJeff Garzik 
1809c6fd2807SJeff Garzik 	switch (qc->dev->class) {
1810c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1811c6fd2807SJeff Garzik 		if (err & ATA_ICRC)
1812c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_ATA_BUS;
1813c6fd2807SJeff Garzik 		if (err & ATA_UNC)
1814c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_MEDIA;
1815c6fd2807SJeff Garzik 		if (err & ATA_IDNF)
1816c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_INVALID;
1817c6fd2807SJeff Garzik 		break;
1818c6fd2807SJeff Garzik 
1819c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1820a569a30dSTejun Heo 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
18213eabddb8STejun Heo 			tmp = atapi_eh_request_sense(qc->dev,
18223eabddb8STejun Heo 						qc->scsicmd->sense_buffer,
18233eabddb8STejun Heo 						qc->result_tf.feature >> 4);
1824c6fd2807SJeff Garzik 			if (!tmp) {
1825a569a30dSTejun Heo 				/* ATA_QCFLAG_SENSE_VALID is used to
1826a569a30dSTejun Heo 				 * tell atapi_qc_complete() that sense
1827a569a30dSTejun Heo 				 * data is already valid.
1828c6fd2807SJeff Garzik 				 *
1829c6fd2807SJeff Garzik 				 * TODO: interpret sense data and set
1830c6fd2807SJeff Garzik 				 * appropriate err_mask.
1831c6fd2807SJeff Garzik 				 */
1832c6fd2807SJeff Garzik 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1833c6fd2807SJeff Garzik 			} else
1834c6fd2807SJeff Garzik 				qc->err_mask |= tmp;
1835c6fd2807SJeff Garzik 		}
1836a569a30dSTejun Heo 	}
1837c6fd2807SJeff Garzik 
1838c6fd2807SJeff Garzik 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1839cf480626STejun Heo 		action |= ATA_EH_RESET;
1840c6fd2807SJeff Garzik 
1841c6fd2807SJeff Garzik 	return action;
1842c6fd2807SJeff Garzik }
1843c6fd2807SJeff Garzik 
184476326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
184576326ac1STejun Heo 				   int *xfer_ok)
1846c6fd2807SJeff Garzik {
184776326ac1STejun Heo 	int base = 0;
184876326ac1STejun Heo 
184976326ac1STejun Heo 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
185076326ac1STejun Heo 		*xfer_ok = 1;
185176326ac1STejun Heo 
185276326ac1STejun Heo 	if (!*xfer_ok)
185375f9cafcSTejun Heo 		base = ATA_ECAT_DUBIOUS_NONE;
185476326ac1STejun Heo 
18557d47e8d4STejun Heo 	if (err_mask & AC_ERR_ATA_BUS)
185676326ac1STejun Heo 		return base + ATA_ECAT_ATA_BUS;
1857c6fd2807SJeff Garzik 
18587d47e8d4STejun Heo 	if (err_mask & AC_ERR_TIMEOUT)
185976326ac1STejun Heo 		return base + ATA_ECAT_TOUT_HSM;
18607d47e8d4STejun Heo 
18613884f7b0STejun Heo 	if (eflags & ATA_EFLAG_IS_IO) {
18627d47e8d4STejun Heo 		if (err_mask & AC_ERR_HSM)
186376326ac1STejun Heo 			return base + ATA_ECAT_TOUT_HSM;
18647d47e8d4STejun Heo 		if ((err_mask &
18657d47e8d4STejun Heo 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
186676326ac1STejun Heo 			return base + ATA_ECAT_UNK_DEV;
1867c6fd2807SJeff Garzik 	}
1868c6fd2807SJeff Garzik 
1869c6fd2807SJeff Garzik 	return 0;
1870c6fd2807SJeff Garzik }
1871c6fd2807SJeff Garzik 
18727d47e8d4STejun Heo struct speed_down_verdict_arg {
1873c6fd2807SJeff Garzik 	u64 since;
187476326ac1STejun Heo 	int xfer_ok;
18753884f7b0STejun Heo 	int nr_errors[ATA_ECAT_NR];
1876c6fd2807SJeff Garzik };
1877c6fd2807SJeff Garzik 
18787d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1879c6fd2807SJeff Garzik {
18807d47e8d4STejun Heo 	struct speed_down_verdict_arg *arg = void_arg;
188176326ac1STejun Heo 	int cat;
1882c6fd2807SJeff Garzik 
1883d9027470SGwendal Grignou 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1884c6fd2807SJeff Garzik 		return -1;
1885c6fd2807SJeff Garzik 
188676326ac1STejun Heo 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
188776326ac1STejun Heo 				      &arg->xfer_ok);
18887d47e8d4STejun Heo 	arg->nr_errors[cat]++;
188976326ac1STejun Heo 
1890c6fd2807SJeff Garzik 	return 0;
1891c6fd2807SJeff Garzik }
1892c6fd2807SJeff Garzik 
1893c6fd2807SJeff Garzik /**
18947d47e8d4STejun Heo  *	ata_eh_speed_down_verdict - Determine speed down verdict
1895c6fd2807SJeff Garzik  *	@dev: Device of interest
1896c6fd2807SJeff Garzik  *
1897c6fd2807SJeff Garzik  *	This function examines error ring of @dev and determines
18987d47e8d4STejun Heo  *	whether NCQ needs to be turned off, transfer speed should be
18997d47e8d4STejun Heo  *	stepped down, or falling back to PIO is necessary.
1900c6fd2807SJeff Garzik  *
19013884f7b0STejun Heo  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1902c6fd2807SJeff Garzik  *
19033884f7b0STejun Heo  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
19043884f7b0STejun Heo  *			  IO commands
19057d47e8d4STejun Heo  *
19063884f7b0STejun Heo  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1907c6fd2807SJeff Garzik  *
190876326ac1STejun Heo  *	ECAT_DUBIOUS_*	: Identical to the above three but occurred before
190976326ac1STejun Heo  *			  the data transfer had been verified.
191076326ac1STejun Heo  *
19113884f7b0STejun Heo  *	Verdicts are
19127d47e8d4STejun Heo  *
19133884f7b0STejun Heo  *	NCQ_OFF		: Turn off NCQ.
19147d47e8d4STejun Heo  *
19153884f7b0STejun Heo  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
19163884f7b0STejun Heo  *			  to PIO.
19173884f7b0STejun Heo  *
19183884f7b0STejun Heo  *	FALLBACK_TO_PIO	: Fall back to PIO.
19193884f7b0STejun Heo  *
19203884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
192176326ac1STejun Heo  *	taken per error.  An action triggered by non-DUBIOUS errors
192276326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
192376326ac1STejun Heo  *	This is to expedite speed down decisions right after device is
192476326ac1STejun Heo  *	initially configured.
19253884f7b0STejun Heo  *
192676326ac1STejun Heo  *	The following are the speed down rules.  #1 and #2 deal with
192776326ac1STejun Heo  *	DUBIOUS errors.
192876326ac1STejun Heo  *
192976326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
193076326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
193176326ac1STejun Heo  *
193276326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
193376326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
193476326ac1STejun Heo  *
193576326ac1STejun Heo  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
193625985edcSLucas De Marchi  *	   occurred during last 5 mins, FALLBACK_TO_PIO
19373884f7b0STejun Heo  *
193876326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
19393884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
19403884f7b0STejun Heo  *
194176326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
19423884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
19437d47e8d4STejun Heo  *
1944c6fd2807SJeff Garzik  *	LOCKING:
1945c6fd2807SJeff Garzik  *	Inherited from caller.
1946c6fd2807SJeff Garzik  *
1947c6fd2807SJeff Garzik  *	RETURNS:
19487d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
1949c6fd2807SJeff Garzik  */
19507d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1951c6fd2807SJeff Garzik {
19527d47e8d4STejun Heo 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
19537d47e8d4STejun Heo 	u64 j64 = get_jiffies_64();
19547d47e8d4STejun Heo 	struct speed_down_verdict_arg arg;
19557d47e8d4STejun Heo 	unsigned int verdict = 0;
1956c6fd2807SJeff Garzik 
19573884f7b0STejun Heo 	/* scan past 5 mins of error history */
19583884f7b0STejun Heo 	memset(&arg, 0, sizeof(arg));
19593884f7b0STejun Heo 	arg.since = j64 - min(j64, j5mins);
19603884f7b0STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
19613884f7b0STejun Heo 
196276326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
196376326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
196476326ac1STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
196576326ac1STejun Heo 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
196676326ac1STejun Heo 
196776326ac1STejun Heo 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
196876326ac1STejun Heo 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
196976326ac1STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
197076326ac1STejun Heo 
19713884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
19723884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1973663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
19743884f7b0STejun Heo 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
19753884f7b0STejun Heo 
19767d47e8d4STejun Heo 	/* scan past 10 mins of error history */
1977c6fd2807SJeff Garzik 	memset(&arg, 0, sizeof(arg));
19787d47e8d4STejun Heo 	arg.since = j64 - min(j64, j10mins);
19797d47e8d4STejun Heo 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1980c6fd2807SJeff Garzik 
19813884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
19823884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
19837d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_NCQ_OFF;
19843884f7b0STejun Heo 
19853884f7b0STejun Heo 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
19863884f7b0STejun Heo 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1987663f99b8STejun Heo 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
19887d47e8d4STejun Heo 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1989c6fd2807SJeff Garzik 
19907d47e8d4STejun Heo 	return verdict;
1991c6fd2807SJeff Garzik }
1992c6fd2807SJeff Garzik 
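/*
 * Worked example for the rules above (illustrative numbers): suppose a
 * device's error ring holds four command timeouts on normal IO, all within
 * the last ten minutes but only two within the last five, and every data
 * transfer since configuration has verified OK (so nothing is DUBIOUS).
 * The 5-minute scan sees nr_errors[ATA_ECAT_TOUT_HSM] == 2, which trips
 * none of rules #1-#3.  The 10-minute scan sees 4 TOUT_HSM errors, so rule
 * #4 (more than 3 TOUT_HSM/UNK_DEV) adds NCQ_OFF and rule #5 (more than 3
 * ATA_BUS/TOUT_HSM) adds SPEED_DOWN.  ata_eh_speed_down() below then
 * applies only one of the two verdicts for this error, trying NCQ_OFF
 * first while NCQ is still enabled.
 */
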
1993c6fd2807SJeff Garzik /**
1994c6fd2807SJeff Garzik  *	ata_eh_speed_down - record error and speed down if necessary
1995c6fd2807SJeff Garzik  *	@dev: Failed device
19963884f7b0STejun Heo  *	@eflags: mask of ATA_EFLAG_* flags
1997c6fd2807SJeff Garzik  *	@err_mask: err_mask of the error
1998c6fd2807SJeff Garzik  *
1999c6fd2807SJeff Garzik  *	Record error and examine error history to determine whether
2000c6fd2807SJeff Garzik  *	adjusting transmission speed is necessary.  It also sets
2001c6fd2807SJeff Garzik  *	transmission limits appropriately if such adjustment is
2002c6fd2807SJeff Garzik  *	necessary.
2003c6fd2807SJeff Garzik  *
2004c6fd2807SJeff Garzik  *	LOCKING:
2005c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2006c6fd2807SJeff Garzik  *
2007c6fd2807SJeff Garzik  *	RETURNS:
20087d47e8d4STejun Heo  *	Determined recovery action.
2009c6fd2807SJeff Garzik  */
20103884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
20113884f7b0STejun Heo 				unsigned int eflags, unsigned int err_mask)
2012c6fd2807SJeff Garzik {
2013b1c72916STejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
201476326ac1STejun Heo 	int xfer_ok = 0;
20157d47e8d4STejun Heo 	unsigned int verdict;
20167d47e8d4STejun Heo 	unsigned int action = 0;
20177d47e8d4STejun Heo 
20187d47e8d4STejun Heo 	/* don't bother if Cat-0 error */
201976326ac1STejun Heo 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2020c6fd2807SJeff Garzik 		return 0;
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 	/* record error and determine whether speed down is necessary */
20233884f7b0STejun Heo 	ata_ering_record(&dev->ering, eflags, err_mask);
20247d47e8d4STejun Heo 	verdict = ata_eh_speed_down_verdict(dev);
2025c6fd2807SJeff Garzik 
20267d47e8d4STejun Heo 	/* turn off NCQ? */
20277d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
20287d47e8d4STejun Heo 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
20297d47e8d4STejun Heo 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
20307d47e8d4STejun Heo 		dev->flags |= ATA_DFLAG_NCQ_OFF;
2031a9a79dfeSJoe Perches 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
20327d47e8d4STejun Heo 		goto done;
20337d47e8d4STejun Heo 	}
2034c6fd2807SJeff Garzik 
20357d47e8d4STejun Heo 	/* speed down? */
20367d47e8d4STejun Heo 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2037c6fd2807SJeff Garzik 		/* speed down SATA link speed if possible */
2038a07d499bSTejun Heo 		if (sata_down_spd_limit(link, 0) == 0) {
2039cf480626STejun Heo 			action |= ATA_EH_RESET;
20407d47e8d4STejun Heo 			goto done;
20417d47e8d4STejun Heo 		}
2042c6fd2807SJeff Garzik 
2043c6fd2807SJeff Garzik 		/* lower transfer mode */
20447d47e8d4STejun Heo 		if (dev->spdn_cnt < 2) {
20457d47e8d4STejun Heo 			static const int dma_dnxfer_sel[] =
20467d47e8d4STejun Heo 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
20477d47e8d4STejun Heo 			static const int pio_dnxfer_sel[] =
20487d47e8d4STejun Heo 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
20497d47e8d4STejun Heo 			int sel;
2050c6fd2807SJeff Garzik 
20517d47e8d4STejun Heo 			if (dev->xfer_shift != ATA_SHIFT_PIO)
20527d47e8d4STejun Heo 				sel = dma_dnxfer_sel[dev->spdn_cnt];
20537d47e8d4STejun Heo 			else
20547d47e8d4STejun Heo 				sel = pio_dnxfer_sel[dev->spdn_cnt];
20557d47e8d4STejun Heo 
20567d47e8d4STejun Heo 			dev->spdn_cnt++;
20577d47e8d4STejun Heo 
20587d47e8d4STejun Heo 			if (ata_down_xfermask_limit(dev, sel) == 0) {
2059cf480626STejun Heo 				action |= ATA_EH_RESET;
20607d47e8d4STejun Heo 				goto done;
20617d47e8d4STejun Heo 			}
20627d47e8d4STejun Heo 		}
20637d47e8d4STejun Heo 	}
20647d47e8d4STejun Heo 
20657d47e8d4STejun Heo 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
2066663f99b8STejun Heo 	 * SATA ATA devices.  Consider it only for PATA and SATA ATAPI.
20677d47e8d4STejun Heo 	 */
20687d47e8d4STejun Heo 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2069663f99b8STejun Heo 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
20707d47e8d4STejun Heo 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
20717d47e8d4STejun Heo 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
20727d47e8d4STejun Heo 			dev->spdn_cnt = 0;
2073cf480626STejun Heo 			action |= ATA_EH_RESET;
20747d47e8d4STejun Heo 			goto done;
20757d47e8d4STejun Heo 		}
20767d47e8d4STejun Heo 	}
20777d47e8d4STejun Heo 
2078c6fd2807SJeff Garzik 	return 0;
20797d47e8d4STejun Heo  done:
20807d47e8d4STejun Heo 	/* device has been slowed down, blow error history */
208176326ac1STejun Heo 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
20827d47e8d4STejun Heo 		ata_ering_clear(&dev->ering);
20837d47e8d4STejun Heo 	return action;
2084c6fd2807SJeff Garzik }
2085c6fd2807SJeff Garzik 
2086c6fd2807SJeff Garzik /**
20878d899e70SMark Lord  *	ata_eh_worth_retry - analyze error and decide whether to retry
20888d899e70SMark Lord  *	@qc: qc to possibly retry
20898d899e70SMark Lord  *
20908d899e70SMark Lord  *	Look at the cause of the error and decide if a retry
20918d899e70SMark Lord  * 	might be useful or not.  We don't want to retry media errors
20928d899e70SMark Lord  *	because the drive itself has probably already taken 10-30 seconds
20938d899e70SMark Lord  *	doing its own internal retries before reporting the failure.
20948d899e70SMark Lord  */
20958d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
20968d899e70SMark Lord {
20971eaca39aSBian Yu 	if (qc->err_mask & AC_ERR_MEDIA)
20988d899e70SMark Lord 		return 0;	/* don't retry media errors */
20998d899e70SMark Lord 	if (qc->flags & ATA_QCFLAG_IO)
21008d899e70SMark Lord 		return 1;	/* otherwise retry anything from fs stack */
21018d899e70SMark Lord 	if (qc->err_mask & AC_ERR_INVALID)
21028d899e70SMark Lord 		return 0;	/* don't retry these */
21038d899e70SMark Lord 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
21048d899e70SMark Lord }
21058d899e70SMark Lord 
21068d899e70SMark Lord /**
21079b1e2658STejun Heo  *	ata_eh_link_autopsy - analyze error and determine recovery action
21089b1e2658STejun Heo  *	@link: host link to perform autopsy on
2109c6fd2807SJeff Garzik  *
21100260731fSTejun Heo  *	Analyze why @link failed and determine which recovery actions
21110260731fSTejun Heo  *	are needed.  This function also sets more detailed AC_ERR_*
21120260731fSTejun Heo  *	values and fills sense data for ATAPI CHECK SENSE.
2113c6fd2807SJeff Garzik  *
2114c6fd2807SJeff Garzik  *	LOCKING:
2115c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2116c6fd2807SJeff Garzik  */
21179b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
2118c6fd2807SJeff Garzik {
21190260731fSTejun Heo 	struct ata_port *ap = link->ap;
2120936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2121dfcc173dSTejun Heo 	struct ata_device *dev;
21223884f7b0STejun Heo 	unsigned int all_err_mask = 0, eflags = 0;
21233884f7b0STejun Heo 	int tag;
2124c6fd2807SJeff Garzik 	u32 serror;
2125c6fd2807SJeff Garzik 	int rc;
2126c6fd2807SJeff Garzik 
2127c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
2128c6fd2807SJeff Garzik 
2129c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2130c6fd2807SJeff Garzik 		return;
2131c6fd2807SJeff Garzik 
2132c6fd2807SJeff Garzik 	/* obtain and analyze SError */
2133936fd732STejun Heo 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2134c6fd2807SJeff Garzik 	if (rc == 0) {
2135c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
21360260731fSTejun Heo 		ata_eh_analyze_serror(link);
21374e57c517STejun Heo 	} else if (rc != -EOPNOTSUPP) {
2138cf480626STejun Heo 		/* SError read failed, force reset and probing */
2139b558edddSTejun Heo 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2140cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21414e57c517STejun Heo 		ehc->i.err_mask |= AC_ERR_OTHER;
21424e57c517STejun Heo 	}
2143c6fd2807SJeff Garzik 
2144c6fd2807SJeff Garzik 	/* analyze NCQ failure */
21450260731fSTejun Heo 	ata_eh_analyze_ncq_error(link);
2146c6fd2807SJeff Garzik 
2147c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
2148c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2149c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2150c6fd2807SJeff Garzik 
2151c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
2152c6fd2807SJeff Garzik 
2153c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2154c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2155c6fd2807SJeff Garzik 
2156b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2157b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link)
2158c6fd2807SJeff Garzik 			continue;
2159c6fd2807SJeff Garzik 
2160c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
2161c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
2162c6fd2807SJeff Garzik 
2163c6fd2807SJeff Garzik 		/* analyze TF */
2164c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2165c6fd2807SJeff Garzik 
2166c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
2167c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
2168c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2169c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
2170c6fd2807SJeff Garzik 
2171c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
2172c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
2173c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
2174c6fd2807SJeff Garzik 
2175c6fd2807SJeff Garzik 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2176f90f0828STejun Heo 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2177c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2178c6fd2807SJeff Garzik 
217903faab78STejun Heo 		/* determine whether the command is worth retrying */
21808d899e70SMark Lord 		if (ata_eh_worth_retry(qc))
218103faab78STejun Heo 			qc->flags |= ATA_QCFLAG_RETRY;
218203faab78STejun Heo 
2183c6fd2807SJeff Garzik 		/* accumulate error info */
2184c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
2185c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
2186c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
21873884f7b0STejun Heo 			eflags |= ATA_EFLAG_IS_IO;
2188c6fd2807SJeff Garzik 	}
2189c6fd2807SJeff Garzik 
2190c6fd2807SJeff Garzik 	/* enforce default EH actions */
2191c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2192c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2193cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
21943884f7b0STejun Heo 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
21953884f7b0STejun Heo 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2196c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
2197c6fd2807SJeff Garzik 
2198dfcc173dSTejun Heo 	/* If we have offending qcs and the associated failed device,
2199dfcc173dSTejun Heo 	 * perform per-dev EH action only on the offending device.
2200dfcc173dSTejun Heo 	 */
2201c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2202c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
2203c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
2204c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2205c6fd2807SJeff Garzik 	}
2206c6fd2807SJeff Garzik 
22072695e366STejun Heo 	/* propagate timeout to host link */
22082695e366STejun Heo 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
22092695e366STejun Heo 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
22102695e366STejun Heo 
22112695e366STejun Heo 	/* record error and consider speeding down */
2212dfcc173dSTejun Heo 	dev = ehc->i.dev;
22132695e366STejun Heo 	if (!dev && ((ata_link_max_devices(link) == 1 &&
22142695e366STejun Heo 		      ata_dev_enabled(link->device))))
2215dfcc173dSTejun Heo 	    dev = link->device;
2216dfcc173dSTejun Heo 
221776326ac1STejun Heo 	if (dev) {
221876326ac1STejun Heo 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
221976326ac1STejun Heo 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
22203884f7b0STejun Heo 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
222176326ac1STejun Heo 	}
2222dfcc173dSTejun Heo 
2223c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
2224c6fd2807SJeff Garzik }
2225c6fd2807SJeff Garzik 
2226c6fd2807SJeff Garzik /**
22279b1e2658STejun Heo  *	ata_eh_autopsy - analyze error and determine recovery action
22289b1e2658STejun Heo  *	@ap: host port to perform autopsy on
22299b1e2658STejun Heo  *
22309b1e2658STejun Heo  *	Analyze all links of @ap and determine why they failed and
22319b1e2658STejun Heo  *	which recovery actions are needed.
22329b1e2658STejun Heo  *
22339b1e2658STejun Heo  *	LOCKING:
22349b1e2658STejun Heo  *	Kernel thread context (may sleep).
22359b1e2658STejun Heo  */
2236fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
22379b1e2658STejun Heo {
22389b1e2658STejun Heo 	struct ata_link *link;
22399b1e2658STejun Heo 
22401eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE)
22419b1e2658STejun Heo 		ata_eh_link_autopsy(link);
22422695e366STejun Heo 
2243b1c72916STejun Heo 	/* Handle the frigging slave link.  Autopsy is done similarly
2244b1c72916STejun Heo 	 * but actions and flags are transferred over to the master
2245b1c72916STejun Heo 	 * link and handled from there.
2246b1c72916STejun Heo 	 */
2247b1c72916STejun Heo 	if (ap->slave_link) {
2248b1c72916STejun Heo 		struct ata_eh_context *mehc = &ap->link.eh_context;
2249b1c72916STejun Heo 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2250b1c72916STejun Heo 
2251848e4c68STejun Heo 		/* transfer control flags from master to slave */
2252848e4c68STejun Heo 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2253848e4c68STejun Heo 
2254848e4c68STejun Heo 		/* perform autopsy on the slave link */
2255b1c72916STejun Heo 		ata_eh_link_autopsy(ap->slave_link);
2256b1c72916STejun Heo 
2257848e4c68STejun Heo 		/* transfer actions from slave to master and clear slave */
2258b1c72916STejun Heo 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2259b1c72916STejun Heo 		mehc->i.action		|= sehc->i.action;
2260b1c72916STejun Heo 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2261b1c72916STejun Heo 		mehc->i.flags		|= sehc->i.flags;
2262b1c72916STejun Heo 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2263b1c72916STejun Heo 	}
2264b1c72916STejun Heo 
22652695e366STejun Heo 	/* Autopsy of fanout ports can affect host link autopsy.
22662695e366STejun Heo 	 * Perform host link autopsy last.
22672695e366STejun Heo 	 */
2268071f44b1STejun Heo 	if (sata_pmp_attached(ap))
22692695e366STejun Heo 		ata_eh_link_autopsy(&ap->link);
22709b1e2658STejun Heo }
22719b1e2658STejun Heo 
22729b1e2658STejun Heo /**
22736521148cSRobert Hancock  *	ata_get_cmd_descript - get description for ATA command
22746521148cSRobert Hancock  *	@command: ATA command code to get description for
22756521148cSRobert Hancock  *
22766521148cSRobert Hancock  *	Return a textual description of the given command, or NULL if the
22776521148cSRobert Hancock  *	command is not known.
22786521148cSRobert Hancock  *
22796521148cSRobert Hancock  *	LOCKING:
22806521148cSRobert Hancock  *	None
22816521148cSRobert Hancock  */
22826521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command)
22836521148cSRobert Hancock {
22846521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
22856521148cSRobert Hancock 	static const struct
22866521148cSRobert Hancock 	{
22876521148cSRobert Hancock 		u8 command;
22886521148cSRobert Hancock 		const char *text;
22896521148cSRobert Hancock 	} cmd_descr[] = {
22906521148cSRobert Hancock 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
22916521148cSRobert Hancock 		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
22926521148cSRobert Hancock 		{ ATA_CMD_STANDBY, 		"STANDBY" },
22936521148cSRobert Hancock 		{ ATA_CMD_IDLE, 		"IDLE" },
22946521148cSRobert Hancock 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
22956521148cSRobert Hancock 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
22966521148cSRobert Hancock 		{ ATA_CMD_NOP,			"NOP" },
22976521148cSRobert Hancock 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
22986521148cSRobert Hancock 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
22996521148cSRobert Hancock 		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
23006521148cSRobert Hancock 		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
23016521148cSRobert Hancock 		{ ATA_CMD_SERVICE, 		"SERVICE" },
23026521148cSRobert Hancock 		{ ATA_CMD_READ, 		"READ DMA" },
23036521148cSRobert Hancock 		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
23046521148cSRobert Hancock 		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
23056521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
23066521148cSRobert Hancock 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
23076521148cSRobert Hancock 		{ ATA_CMD_WRITE, 		"WRITE DMA" },
23086521148cSRobert Hancock 		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
23096521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
23106521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
23116521148cSRobert Hancock 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
23126521148cSRobert Hancock 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
23136521148cSRobert Hancock 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
23146521148cSRobert Hancock 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
23156521148cSRobert Hancock 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
23166521148cSRobert Hancock 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
23176521148cSRobert Hancock 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
23186521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
23196521148cSRobert Hancock 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
23206521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
23216521148cSRobert Hancock 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
23226521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
23236521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
23246521148cSRobert Hancock 		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
23256521148cSRobert Hancock 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
23266521148cSRobert Hancock 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
23276521148cSRobert Hancock 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
23286521148cSRobert Hancock 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
23296521148cSRobert Hancock 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
23306521148cSRobert Hancock 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
23316521148cSRobert Hancock 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
23326521148cSRobert Hancock 		{ ATA_CMD_SLEEP,		"SLEEP" },
23336521148cSRobert Hancock 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
23346521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
23356521148cSRobert Hancock 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
23366521148cSRobert Hancock 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
23376521148cSRobert Hancock 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
23386521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
23396521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
23406521148cSRobert Hancock 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
23416521148cSRobert Hancock 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
23426521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
23436521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
23446521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
23456521148cSRobert Hancock 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
23466521148cSRobert Hancock 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
23476521148cSRobert Hancock 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
23486521148cSRobert Hancock 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
23496521148cSRobert Hancock 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
23506521148cSRobert Hancock 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
23516521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
23526521148cSRobert Hancock 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
23536521148cSRobert Hancock 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
23546521148cSRobert Hancock 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
23556521148cSRobert Hancock 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
23566521148cSRobert Hancock 		{ ATA_CMD_SMART,		"SMART" },
23576521148cSRobert Hancock 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
23586521148cSRobert Hancock 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2359acad7627SFUJITA Tomonori 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
23606521148cSRobert Hancock 		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
23616521148cSRobert Hancock 		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
23626521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
23636521148cSRobert Hancock 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
23646521148cSRobert Hancock 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
23656521148cSRobert Hancock 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
23666521148cSRobert Hancock 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
23676521148cSRobert Hancock 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
23686521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
23696521148cSRobert Hancock 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
23706521148cSRobert Hancock 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
23716521148cSRobert Hancock 		{ 0,				NULL } /* terminate list */
23726521148cSRobert Hancock 	};
23736521148cSRobert Hancock 
23746521148cSRobert Hancock 	unsigned int i;
23756521148cSRobert Hancock 	for (i = 0; cmd_descr[i].text; i++)
23766521148cSRobert Hancock 		if (cmd_descr[i].command == command)
23776521148cSRobert Hancock 			return cmd_descr[i].text;
23786521148cSRobert Hancock #endif
23796521148cSRobert Hancock 
23806521148cSRobert Hancock 	return NULL;
23816521148cSRobert Hancock }
23826521148cSRobert Hancock 
23836521148cSRobert Hancock /**
23849b1e2658STejun Heo  *	ata_eh_link_report - report error handling to user
23850260731fSTejun Heo  *	@link: ATA link on which EH is being performed
2386c6fd2807SJeff Garzik  *
2387c6fd2807SJeff Garzik  *	Report EH to user.
2388c6fd2807SJeff Garzik  *
2389c6fd2807SJeff Garzik  *	LOCKING:
2390c6fd2807SJeff Garzik  *	None.
2391c6fd2807SJeff Garzik  */
23929b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2393c6fd2807SJeff Garzik {
23940260731fSTejun Heo 	struct ata_port *ap = link->ap;
23950260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2396c6fd2807SJeff Garzik 	const char *frozen, *desc;
2397a1e10f7eSTejun Heo 	char tries_buf[6];
2398c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
2399c6fd2807SJeff Garzik 
240094ff3d54STejun Heo 	if (ehc->i.flags & ATA_EHI_QUIET)
240194ff3d54STejun Heo 		return;
240294ff3d54STejun Heo 
2403c6fd2807SJeff Garzik 	desc = NULL;
2404c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
2405c6fd2807SJeff Garzik 		desc = ehc->i.desc;
2406c6fd2807SJeff Garzik 
2407c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2408c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2409c6fd2807SJeff Garzik 
2410b1c72916STejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2411b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link ||
2412e027bd36STejun Heo 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2413e027bd36STejun Heo 		     qc->err_mask == AC_ERR_DEV))
2414c6fd2807SJeff Garzik 			continue;
2415c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2416c6fd2807SJeff Garzik 			continue;
2417c6fd2807SJeff Garzik 
2418c6fd2807SJeff Garzik 		nr_failed++;
2419c6fd2807SJeff Garzik 	}
2420c6fd2807SJeff Garzik 
2421c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
2422c6fd2807SJeff Garzik 		return;
2423c6fd2807SJeff Garzik 
2424c6fd2807SJeff Garzik 	frozen = "";
2425c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
2426c6fd2807SJeff Garzik 		frozen = " frozen";
2427c6fd2807SJeff Garzik 
2428a1e10f7eSTejun Heo 	memset(tries_buf, 0, sizeof(tries_buf));
2429a1e10f7eSTejun Heo 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2430a1e10f7eSTejun Heo 		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2431a1e10f7eSTejun Heo 			 ap->eh_tries);
2432a1e10f7eSTejun Heo 
2433c6fd2807SJeff Garzik 	if (ehc->i.dev) {
2434a9a79dfeSJoe Perches 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2435a1e10f7eSTejun Heo 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2436a1e10f7eSTejun Heo 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2437a1e10f7eSTejun Heo 			    ehc->i.action, frozen, tries_buf);
2438c6fd2807SJeff Garzik 		if (desc)
2439a9a79dfeSJoe Perches 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2440c6fd2807SJeff Garzik 	} else {
2441a9a79dfeSJoe Perches 		ata_link_err(link, "exception Emask 0x%x "
2442a1e10f7eSTejun Heo 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2443a1e10f7eSTejun Heo 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2444a1e10f7eSTejun Heo 			     ehc->i.action, frozen, tries_buf);
2445c6fd2807SJeff Garzik 		if (desc)
2446a9a79dfeSJoe Perches 			ata_link_err(link, "%s\n", desc);
2447c6fd2807SJeff Garzik 	}
2448c6fd2807SJeff Garzik 
24496521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
24501333e194SRobert Hancock 	if (ehc->i.serror)
2451a9a79dfeSJoe Perches 		ata_link_err(link,
24521333e194SRobert Hancock 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
24531333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
24541333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
24551333e194SRobert Hancock 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
24561333e194SRobert Hancock 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
24571333e194SRobert Hancock 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
24581333e194SRobert Hancock 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
24591333e194SRobert Hancock 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
24601333e194SRobert Hancock 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
24611333e194SRobert Hancock 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
24621333e194SRobert Hancock 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
24631333e194SRobert Hancock 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
24641333e194SRobert Hancock 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
24651333e194SRobert Hancock 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
24661333e194SRobert Hancock 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
24671333e194SRobert Hancock 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
24681333e194SRobert Hancock 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
24691333e194SRobert Hancock 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
24706521148cSRobert Hancock #endif
24711333e194SRobert Hancock 
2472c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2473c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
24748a937581STejun Heo 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2475abb6a889STejun Heo 		const u8 *cdb = qc->cdb;
2476abb6a889STejun Heo 		char data_buf[20] = "";
2477abb6a889STejun Heo 		char cdb_buf[70] = "";
2478c6fd2807SJeff Garzik 
24790260731fSTejun Heo 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2480b1c72916STejun Heo 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2481c6fd2807SJeff Garzik 			continue;
2482c6fd2807SJeff Garzik 
2483abb6a889STejun Heo 		if (qc->dma_dir != DMA_NONE) {
2484abb6a889STejun Heo 			static const char *dma_str[] = {
2485abb6a889STejun Heo 				[DMA_BIDIRECTIONAL]	= "bidi",
2486abb6a889STejun Heo 				[DMA_TO_DEVICE]		= "out",
2487abb6a889STejun Heo 				[DMA_FROM_DEVICE]	= "in",
2488abb6a889STejun Heo 			};
2489abb6a889STejun Heo 			static const char *prot_str[] = {
2490abb6a889STejun Heo 				[ATA_PROT_PIO]		= "pio",
2491abb6a889STejun Heo 				[ATA_PROT_DMA]		= "dma",
2492abb6a889STejun Heo 				[ATA_PROT_NCQ]		= "ncq",
24930dc36888STejun Heo 				[ATAPI_PROT_PIO]	= "pio",
24940dc36888STejun Heo 				[ATAPI_PROT_DMA]	= "dma",
2495abb6a889STejun Heo 			};
2496abb6a889STejun Heo 
2497abb6a889STejun Heo 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2498abb6a889STejun Heo 				 prot_str[qc->tf.protocol], qc->nbytes,
2499abb6a889STejun Heo 				 dma_str[qc->dma_dir]);
2500abb6a889STejun Heo 		}
2501abb6a889STejun Heo 
25026521148cSRobert Hancock 		if (ata_is_atapi(qc->tf.protocol)) {
25036521148cSRobert Hancock 			if (qc->scsicmd)
25046521148cSRobert Hancock 				scsi_print_command(qc->scsicmd);
25056521148cSRobert Hancock 			else
2506abb6a889STejun Heo 				snprintf(cdb_buf, sizeof(cdb_buf),
2507abb6a889STejun Heo 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2508abb6a889STejun Heo 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2509abb6a889STejun Heo 				 cdb[0], cdb[1], cdb[2], cdb[3],
2510abb6a889STejun Heo 				 cdb[4], cdb[5], cdb[6], cdb[7],
2511abb6a889STejun Heo 				 cdb[8], cdb[9], cdb[10], cdb[11],
2512abb6a889STejun Heo 				 cdb[12], cdb[13], cdb[14], cdb[15]);
25136521148cSRobert Hancock 		} else {
25146521148cSRobert Hancock 			const char *descr = ata_get_cmd_descript(cmd->command);
25156521148cSRobert Hancock 			if (descr)
2516a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "failed command: %s\n",
2517a9a79dfeSJoe Perches 					    descr);
25186521148cSRobert Hancock 		}
2519abb6a889STejun Heo 
2520a9a79dfeSJoe Perches 		ata_dev_err(qc->dev,
25218a937581STejun Heo 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2522abb6a889STejun Heo 			"tag %d%s\n         %s"
25238a937581STejun Heo 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
25245335b729STejun Heo 			"Emask 0x%x (%s)%s\n",
25258a937581STejun Heo 			cmd->command, cmd->feature, cmd->nsect,
25268a937581STejun Heo 			cmd->lbal, cmd->lbam, cmd->lbah,
25278a937581STejun Heo 			cmd->hob_feature, cmd->hob_nsect,
25288a937581STejun Heo 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2529abb6a889STejun Heo 			cmd->device, qc->tag, data_buf, cdb_buf,
25308a937581STejun Heo 			res->command, res->feature, res->nsect,
25318a937581STejun Heo 			res->lbal, res->lbam, res->lbah,
25328a937581STejun Heo 			res->hob_feature, res->hob_nsect,
25338a937581STejun Heo 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
25345335b729STejun Heo 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
25355335b729STejun Heo 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
25361333e194SRobert Hancock 
25376521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
25381333e194SRobert Hancock 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
25391333e194SRobert Hancock 				    ATA_ERR)) {
25401333e194SRobert Hancock 			if (res->command & ATA_BUSY)
2541a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "status: { Busy }\n");
25421333e194SRobert Hancock 			else
2543a9a79dfeSJoe Perches 				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
25441333e194SRobert Hancock 				  res->command & ATA_DRDY ? "DRDY " : "",
25451333e194SRobert Hancock 				  res->command & ATA_DF ? "DF " : "",
25461333e194SRobert Hancock 				  res->command & ATA_DRQ ? "DRQ " : "",
25471333e194SRobert Hancock 				  res->command & ATA_ERR ? "ERR " : "");
25481333e194SRobert Hancock 		}
25491333e194SRobert Hancock 
25501333e194SRobert Hancock 		if (cmd->command != ATA_CMD_PACKET &&
25511333e194SRobert Hancock 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
25521333e194SRobert Hancock 				     ATA_ABORTED)))
2553a9a79dfeSJoe Perches 			ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
25541333e194SRobert Hancock 			  res->feature & ATA_ICRC ? "ICRC " : "",
25551333e194SRobert Hancock 			  res->feature & ATA_UNC ? "UNC " : "",
25561333e194SRobert Hancock 			  res->feature & ATA_IDNF ? "IDNF " : "",
25571333e194SRobert Hancock 			  res->feature & ATA_ABORTED ? "ABRT " : "");
25586521148cSRobert Hancock #endif
2559c6fd2807SJeff Garzik 	}
2560c6fd2807SJeff Garzik }
2561c6fd2807SJeff Garzik 
25629b1e2658STejun Heo /**
25639b1e2658STejun Heo  *	ata_eh_report - report error handling to user
25649b1e2658STejun Heo  *	@ap: ATA port to report EH about
25659b1e2658STejun Heo  *
25669b1e2658STejun Heo  *	Report EH to user.
25679b1e2658STejun Heo  *
25689b1e2658STejun Heo  *	LOCKING:
25699b1e2658STejun Heo  *	None.
25709b1e2658STejun Heo  */
2571fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
25729b1e2658STejun Heo {
25739b1e2658STejun Heo 	struct ata_link *link;
25749b1e2658STejun Heo 
25751eca4365STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
25769b1e2658STejun Heo 		ata_eh_link_report(link);
25779b1e2658STejun Heo }
25789b1e2658STejun Heo 
2579cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2580b1c72916STejun Heo 			unsigned int *classes, unsigned long deadline,
2581b1c72916STejun Heo 			bool clear_classes)
2582c6fd2807SJeff Garzik {
2583f58229f8STejun Heo 	struct ata_device *dev;
2584c6fd2807SJeff Garzik 
2585b1c72916STejun Heo 	if (clear_classes)
25861eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
2587f58229f8STejun Heo 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2588c6fd2807SJeff Garzik 
2589f046519fSTejun Heo 	return reset(link, classes, deadline);
2590c6fd2807SJeff Garzik }
2591c6fd2807SJeff Garzik 
2592e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2593c6fd2807SJeff Garzik {
259445db2f6cSTejun Heo 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2595ae791c05STejun Heo 		return 0;
25965dbfc9cbSTejun Heo 	if (rc == -EAGAIN)
2597c6fd2807SJeff Garzik 		return 1;
2598071f44b1STejun Heo 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
25993495de73STejun Heo 		return 1;
2600c6fd2807SJeff Garzik 	return 0;
2601c6fd2807SJeff Garzik }
2602c6fd2807SJeff Garzik 
2603fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2604c6fd2807SJeff Garzik 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2605c6fd2807SJeff Garzik 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2606c6fd2807SJeff Garzik {
2607afaa5c37STejun Heo 	struct ata_port *ap = link->ap;
2608b1c72916STejun Heo 	struct ata_link *slave = ap->slave_link;
2609936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
2610705d2014SBartlomiej Zolnierkiewicz 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2611c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
2612416dc9edSTejun Heo 	unsigned int lflags = link->flags;
2613c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2614d8af0eb6STejun Heo 	int max_tries = 0, try = 0;
2615b1c72916STejun Heo 	struct ata_link *failed_link;
2616f58229f8STejun Heo 	struct ata_device *dev;
2617416dc9edSTejun Heo 	unsigned long deadline, now;
2618c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
2619afaa5c37STejun Heo 	unsigned long flags;
2620416dc9edSTejun Heo 	u32 sstatus;
2621b1c72916STejun Heo 	int nr_unknown, rc;
2622c6fd2807SJeff Garzik 
2623932648b0STejun Heo 	/*
2624932648b0STejun Heo 	 * Prepare to reset
2625932648b0STejun Heo 	 */
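	/* ata_eh_reset_timeouts[] is terminated with ULONG_MAX; count its
	 * entries to learn how many reset attempts are allowed.
	 */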
2626d8af0eb6STejun Heo 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2627d8af0eb6STejun Heo 		max_tries++;
2628ca6d43b0SDan Williams 	if (link->flags & ATA_LFLAG_RST_ONCE)
2629ca6d43b0SDan Williams 		max_tries = 1;
263005944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_HRST)
263105944bdfSTejun Heo 		hardreset = NULL;
263205944bdfSTejun Heo 	if (link->flags & ATA_LFLAG_NO_SRST)
263305944bdfSTejun Heo 		softreset = NULL;
2634d8af0eb6STejun Heo 
263525985edcSLucas De Marchi 	/* make sure each reset attempt is at least COOL_DOWN apart */
263619b72321STejun Heo 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
26370a2c0f56STejun Heo 		now = jiffies;
263819b72321STejun Heo 		WARN_ON(time_after(ehc->last_reset, now));
263919b72321STejun Heo 		deadline = ata_deadline(ehc->last_reset,
264019b72321STejun Heo 					ATA_EH_RESET_COOL_DOWN);
26410a2c0f56STejun Heo 		if (time_before(now, deadline))
26420a2c0f56STejun Heo 			schedule_timeout_uninterruptible(deadline - now);
264319b72321STejun Heo 	}
26440a2c0f56STejun Heo 
2645afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2646afaa5c37STejun Heo 	ap->pflags |= ATA_PFLAG_RESETTING;
2647afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2648afaa5c37STejun Heo 
2649cf480626STejun Heo 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2650c6fd2807SJeff Garzik 
26511eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2652cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2653cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2654cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2655cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2656cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2657cdeab114STejun Heo 		 * bus as we may be talking too fast.
2658cdeab114STejun Heo 		 */
2659cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
26605416912aSAaron Lu 		dev->dma_mode = 0xff;
2661cdeab114STejun Heo 
2662cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2663cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2664cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2665cdeab114STejun Heo 		 * configuring devices.
2666cdeab114STejun Heo 		 */
2667cdeab114STejun Heo 		if (ap->ops->set_piomode)
2668cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2669cdeab114STejun Heo 	}
2670cdeab114STejun Heo 
2671cf480626STejun Heo 	/* prefer hardreset */
2672932648b0STejun Heo 	reset = NULL;
2673cf480626STejun Heo 	ehc->i.action &= ~ATA_EH_RESET;
2674cf480626STejun Heo 	if (hardreset) {
2675cf480626STejun Heo 		reset = hardreset;
2676a674050eSTejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
26774f7faa3fSTejun Heo 	} else if (softreset) {
2678cf480626STejun Heo 		reset = softreset;
2679a674050eSTejun Heo 		ehc->i.action |= ATA_EH_SOFTRESET;
2680cf480626STejun Heo 	}
2681c6fd2807SJeff Garzik 
2682c6fd2807SJeff Garzik 	if (prereset) {
2683b1c72916STejun Heo 		unsigned long deadline = ata_deadline(jiffies,
2684b1c72916STejun Heo 						      ATA_EH_PRERESET_TIMEOUT);
2685b1c72916STejun Heo 
2686b1c72916STejun Heo 		if (slave) {
2687b1c72916STejun Heo 			sehc->i.action &= ~ATA_EH_RESET;
2688b1c72916STejun Heo 			sehc->i.action |= ehc->i.action;
2689b1c72916STejun Heo 		}
2690b1c72916STejun Heo 
2691b1c72916STejun Heo 		rc = prereset(link, deadline);
2692b1c72916STejun Heo 
2693b1c72916STejun Heo 		/* If present, do prereset on slave link too.  Reset
2694b1c72916STejun Heo 		 * is skipped iff both master and slave links report
2695b1c72916STejun Heo 		 * -ENOENT or clear ATA_EH_RESET.
2696b1c72916STejun Heo 		 */
2697b1c72916STejun Heo 		if (slave && (rc == 0 || rc == -ENOENT)) {
2698b1c72916STejun Heo 			int tmp;
2699b1c72916STejun Heo 
2700b1c72916STejun Heo 			tmp = prereset(slave, deadline);
2701b1c72916STejun Heo 			if (tmp != -ENOENT)
2702b1c72916STejun Heo 				rc = tmp;
2703b1c72916STejun Heo 
2704b1c72916STejun Heo 			ehc->i.action |= sehc->i.action;
2705b1c72916STejun Heo 		}
2706b1c72916STejun Heo 
2707c6fd2807SJeff Garzik 		if (rc) {
2708c961922bSAlan Cox 			if (rc == -ENOENT) {
2709a9a79dfeSJoe Perches 				ata_link_dbg(link, "port disabled--ignoring\n");
2710cf480626STejun Heo 				ehc->i.action &= ~ATA_EH_RESET;
27114aa9ab67STejun Heo 
27121eca4365STejun Heo 				ata_for_each_dev(dev, link, ALL)
2713f58229f8STejun Heo 					classes[dev->devno] = ATA_DEV_NONE;
27144aa9ab67STejun Heo 
27154aa9ab67STejun Heo 				rc = 0;
2716c961922bSAlan Cox 			} else
2717a9a79dfeSJoe Perches 				ata_link_err(link,
2718a9a79dfeSJoe Perches 					     "prereset failed (errno=%d)\n",
2719a9a79dfeSJoe Perches 					     rc);
2720fccb6ea5STejun Heo 			goto out;
2721c6fd2807SJeff Garzik 		}
2722c6fd2807SJeff Garzik 
2723932648b0STejun Heo 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2724d6515e6fSTejun Heo 		 * bang classes, thaw and return.
2725932648b0STejun Heo 		 */
2726932648b0STejun Heo 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
27271eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL)
2728f58229f8STejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2729d6515e6fSTejun Heo 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2730d6515e6fSTejun Heo 			    ata_is_host_link(link))
2731d6515e6fSTejun Heo 				ata_eh_thaw_port(ap);
2732fccb6ea5STejun Heo 			rc = 0;
2733fccb6ea5STejun Heo 			goto out;
2734c6fd2807SJeff Garzik 		}
2735932648b0STejun Heo 	}
2736c6fd2807SJeff Garzik 
2737c6fd2807SJeff Garzik  retry:
2738932648b0STejun Heo 	/*
2739932648b0STejun Heo 	 * Perform reset
2740932648b0STejun Heo 	 */
2741dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2742dc98c32cSTejun Heo 		ata_eh_freeze_port(ap);
2743dc98c32cSTejun Heo 
2744341c2c95STejun Heo 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
274531daabdaSTejun Heo 
2746932648b0STejun Heo 	if (reset) {
2747c6fd2807SJeff Garzik 		if (verbose)
2748a9a79dfeSJoe Perches 			ata_link_info(link, "%s resetting link\n",
2749c6fd2807SJeff Garzik 				      reset == softreset ? "soft" : "hard");
2750c6fd2807SJeff Garzik 
2751c6fd2807SJeff Garzik 		/* mark that this EH session started with reset */
275219b72321STejun Heo 		ehc->last_reset = jiffies;
27530d64a233STejun Heo 		if (reset == hardreset)
27540d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
27550d64a233STejun Heo 		else
27560d64a233STejun Heo 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2757c6fd2807SJeff Garzik 
2758b1c72916STejun Heo 		rc = ata_do_reset(link, reset, classes, deadline, true);
2759b1c72916STejun Heo 		if (rc && rc != -EAGAIN) {
2760b1c72916STejun Heo 			failed_link = link;
27615dbfc9cbSTejun Heo 			goto fail;
2762b1c72916STejun Heo 		}
2763c6fd2807SJeff Garzik 
2764b1c72916STejun Heo 		/* hardreset slave link if existent */
2765b1c72916STejun Heo 		if (slave && reset == hardreset) {
2766b1c72916STejun Heo 			int tmp;
2767b1c72916STejun Heo 
2768b1c72916STejun Heo 			if (verbose)
2769a9a79dfeSJoe Perches 				ata_link_info(slave, "hard resetting link\n");
2770b1c72916STejun Heo 
2771b1c72916STejun Heo 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2772b1c72916STejun Heo 			tmp = ata_do_reset(slave, reset, classes, deadline,
2773b1c72916STejun Heo 					   false);
2774b1c72916STejun Heo 			switch (tmp) {
2775b1c72916STejun Heo 			case -EAGAIN:
2776b1c72916STejun Heo 				rc = -EAGAIN;
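				/* fall through - record -EAGAIN from the
				 * slave without failing the whole reset
				 */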
2777b1c72916STejun Heo 			case 0:
2778b1c72916STejun Heo 				break;
2779b1c72916STejun Heo 			default:
2780b1c72916STejun Heo 				failed_link = slave;
2781b1c72916STejun Heo 				rc = tmp;
2782b1c72916STejun Heo 				goto fail;
2783b1c72916STejun Heo 			}
2784b1c72916STejun Heo 		}
2785b1c72916STejun Heo 
2786b1c72916STejun Heo 		/* perform follow-up SRST if necessary */
2787c6fd2807SJeff Garzik 		if (reset == hardreset &&
2788e8411fbaSSergei Shtylyov 		    ata_eh_followup_srst_needed(link, rc)) {
2789c6fd2807SJeff Garzik 			reset = softreset;
2790c6fd2807SJeff Garzik 
2791c6fd2807SJeff Garzik 			if (!reset) {
2792a9a79dfeSJoe Perches 				ata_link_err(link,
2793a9a79dfeSJoe Perches 	     "follow-up softreset required but no softreset available\n");
2794b1c72916STejun Heo 				failed_link = link;
2795fccb6ea5STejun Heo 				rc = -EINVAL;
279608cf69d0STejun Heo 				goto fail;
2797c6fd2807SJeff Garzik 			}
2798c6fd2807SJeff Garzik 
2799cf480626STejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2800b1c72916STejun Heo 			rc = ata_do_reset(link, reset, classes, deadline, true);
2801fe2c4d01STejun Heo 			if (rc) {
2802fe2c4d01STejun Heo 				failed_link = link;
2803fe2c4d01STejun Heo 				goto fail;
2804fe2c4d01STejun Heo 			}
2805c6fd2807SJeff Garzik 		}
2806932648b0STejun Heo 	} else {
2807932648b0STejun Heo 		if (verbose)
2808a9a79dfeSJoe Perches 			ata_link_info(link,
2809a9a79dfeSJoe Perches 	"no reset method available, skipping reset\n");
2810932648b0STejun Heo 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2811932648b0STejun Heo 			lflags |= ATA_LFLAG_ASSUME_ATA;
2812932648b0STejun Heo 	}
2813008a7896STejun Heo 
2814932648b0STejun Heo 	/*
2815932648b0STejun Heo 	 * Post-reset processing
2816932648b0STejun Heo 	 */
28171eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
2818416dc9edSTejun Heo 		/* After the reset, the device state is PIO 0 and the
2819416dc9edSTejun Heo 		 * controller state is undefined.  Reset also wakes up
2820416dc9edSTejun Heo 		 * drives from sleeping mode.
2821c6fd2807SJeff Garzik 		 */
2822f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2823054a5fbaSTejun Heo 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2824c6fd2807SJeff Garzik 
28253b761d3dSTejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
28263b761d3dSTejun Heo 			continue;
28273b761d3dSTejun Heo 
28284ccd3329STejun Heo 		/* apply class override */
2829416dc9edSTejun Heo 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2830ae791c05STejun Heo 			classes[dev->devno] = ATA_DEV_ATA;
2831416dc9edSTejun Heo 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2832816ab897STejun Heo 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2833ae791c05STejun Heo 	}
2834ae791c05STejun Heo 
2835008a7896STejun Heo 	/* record current link speed */
2836936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2837936fd732STejun Heo 		link->sata_spd = (sstatus >> 4) & 0xf;
2838b1c72916STejun Heo 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2839b1c72916STejun Heo 		slave->sata_spd = (sstatus >> 4) & 0xf;
2840008a7896STejun Heo 
2841dc98c32cSTejun Heo 	/* thaw the port */
2842dc98c32cSTejun Heo 	if (ata_is_host_link(link))
2843dc98c32cSTejun Heo 		ata_eh_thaw_port(ap);
2844dc98c32cSTejun Heo 
2845f046519fSTejun Heo 	/* postreset() should clear hardware SError.  Although SError
2846f046519fSTejun Heo 	 * is cleared during link resume, clearing SError here is
2847f046519fSTejun Heo 	 * necessary as some PHYs raise hotplug events after SRST.
2848f046519fSTejun Heo 	 * This introduces a race condition where hotplug occurs between
2849f046519fSTejun Heo 	 * reset and here.  This race is handled by cross-checking
2850f046519fSTejun Heo 	 * link onlineness and the classification result later.
2851f046519fSTejun Heo 	 */
2852b1c72916STejun Heo 	if (postreset) {
2853cc0680a5STejun Heo 		postreset(link, classes);
2854b1c72916STejun Heo 		if (slave)
2855b1c72916STejun Heo 			postreset(slave, classes);
2856b1c72916STejun Heo 	}
2857c6fd2807SJeff Garzik 
28581e641060STejun Heo 	/*
28598c56caccSTejun Heo 	 * Some controllers can't be frozen very well and may set spurious
28608c56caccSTejun Heo 	 * error conditions during reset.  Clear accumulated error
28618c56caccSTejun Heo 	 * information and re-thaw the port if frozen.  As reset is the
28628c56caccSTejun Heo 	 * final recovery action and we cross check link onlineness against
28638c56caccSTejun Heo 	 * device classification later, no hotplug event is lost by this.
28641e641060STejun Heo 	 */
2865f046519fSTejun Heo 	spin_lock_irqsave(link->ap->lock, flags);
28661e641060STejun Heo 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2867b1c72916STejun Heo 	if (slave)
28681e641060STejun Heo 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
28691e641060STejun Heo 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2870f046519fSTejun Heo 	spin_unlock_irqrestore(link->ap->lock, flags);
2871f046519fSTejun Heo 
28728c56caccSTejun Heo 	if (ap->pflags & ATA_PFLAG_FROZEN)
28738c56caccSTejun Heo 		ata_eh_thaw_port(ap);
28748c56caccSTejun Heo 
28753b761d3dSTejun Heo 	/*
28763b761d3dSTejun Heo 	 * Make sure onlineness and classification result correspond.
2877f046519fSTejun Heo 	 * Hotplug could have happened during reset and some
2878f046519fSTejun Heo 	 * controllers fail to wait while a drive is spinning up after
2879f046519fSTejun Heo 	 * being hotplugged, causing misdetection.  By cross-checking
28803b761d3dSTejun Heo 	 * link on/offlineness and classification result, those
28813b761d3dSTejun Heo 	 * conditions can be reliably detected and retried.
2882f046519fSTejun Heo 	 */
2883b1c72916STejun Heo 	nr_unknown = 0;
28841eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
28853b761d3dSTejun Heo 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2886b1c72916STejun Heo 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2887a9a79dfeSJoe Perches 				ata_dev_dbg(dev, "link online but device misclassified\n");
2888f046519fSTejun Heo 				classes[dev->devno] = ATA_DEV_NONE;
2889b1c72916STejun Heo 				nr_unknown++;
2890b1c72916STejun Heo 			}
28913b761d3dSTejun Heo 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
28923b761d3dSTejun Heo 			if (ata_class_enabled(classes[dev->devno]))
2893a9a79dfeSJoe Perches 				ata_dev_dbg(dev,
2894a9a79dfeSJoe Perches 					    "link offline, clearing class %d to NONE\n",
28953b761d3dSTejun Heo 					    classes[dev->devno]);
28963b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
28973b761d3dSTejun Heo 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2898a9a79dfeSJoe Perches 			ata_dev_dbg(dev,
2899a9a79dfeSJoe Perches 				    "link status unknown, clearing UNKNOWN to NONE\n");
29003b761d3dSTejun Heo 			classes[dev->devno] = ATA_DEV_NONE;
29013b761d3dSTejun Heo 		}
2902f046519fSTejun Heo 	}
2903f046519fSTejun Heo 
2904b1c72916STejun Heo 	if (classify && nr_unknown) {
2905f046519fSTejun Heo 		if (try < max_tries) {
2906a9a79dfeSJoe Perches 			ata_link_warn(link,
2907a9a79dfeSJoe Perches 				      "link online but %d devices misclassified, retrying\n",
29083b761d3dSTejun Heo 				      nr_unknown);
2909b1c72916STejun Heo 			failed_link = link;
2910f046519fSTejun Heo 			rc = -EAGAIN;
2911f046519fSTejun Heo 			goto fail;
2912f046519fSTejun Heo 		}
2913a9a79dfeSJoe Perches 		ata_link_warn(link,
29143b761d3dSTejun Heo 			      "link online but %d devices misclassified, "
29153b761d3dSTejun Heo 			      "device detection might fail\n", nr_unknown);
2916f046519fSTejun Heo 	}
2917f046519fSTejun Heo 
2918c6fd2807SJeff Garzik 	/* reset successful, schedule revalidation */
2919cf480626STejun Heo 	ata_eh_done(link, NULL, ATA_EH_RESET);
2920b1c72916STejun Heo 	if (slave)
2921b1c72916STejun Heo 		ata_eh_done(slave, NULL, ATA_EH_RESET);
292219b72321STejun Heo 	ehc->last_reset = jiffies;		/* update to completion time */
2923c6fd2807SJeff Garzik 	ehc->i.action |= ATA_EH_REVALIDATE;
29246b7ae954STejun Heo 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
2925416dc9edSTejun Heo 
2926416dc9edSTejun Heo 	rc = 0;
2927fccb6ea5STejun Heo  out:
2928fccb6ea5STejun Heo 	/* clear hotplug flag */
2929fccb6ea5STejun Heo 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2930b1c72916STejun Heo 	if (slave)
2931b1c72916STejun Heo 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2932afaa5c37STejun Heo 
2933afaa5c37STejun Heo 	spin_lock_irqsave(ap->lock, flags);
2934afaa5c37STejun Heo 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2935afaa5c37STejun Heo 	spin_unlock_irqrestore(ap->lock, flags);
2936afaa5c37STejun Heo 
2937c6fd2807SJeff Garzik 	return rc;
2938416dc9edSTejun Heo 
2939416dc9edSTejun Heo  fail:
29405958e302STejun Heo 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
29415958e302STejun Heo 	if (!ata_is_host_link(link) &&
29425958e302STejun Heo 	    sata_scr_read(link, SCR_STATUS, &sstatus))
29435958e302STejun Heo 		rc = -ERESTART;
29445958e302STejun Heo 
29457a46c078SGwendal Grignou 	if (try >= max_tries) {
29468ea7645cSTejun Heo 		/*
29478ea7645cSTejun Heo 		 * Thaw host port even if reset failed, so that the port
29488ea7645cSTejun Heo 		 * can be retried on the next phy event.  This risks
29498ea7645cSTejun Heo 		 * repeated EH runs but seems to be a better tradeoff than
29508ea7645cSTejun Heo 		 * shutting down a port after a botched hotplug attempt.
29518ea7645cSTejun Heo 		 */
29528ea7645cSTejun Heo 		if (ata_is_host_link(link))
29538ea7645cSTejun Heo 			ata_eh_thaw_port(ap);
2954416dc9edSTejun Heo 		goto out;
29558ea7645cSTejun Heo 	}
2956416dc9edSTejun Heo 
2957416dc9edSTejun Heo 	now = jiffies;
2958416dc9edSTejun Heo 	if (time_before(now, deadline)) {
2959416dc9edSTejun Heo 		unsigned long delta = deadline - now;
2960416dc9edSTejun Heo 
2961a9a79dfeSJoe Perches 		ata_link_warn(failed_link,
29620a2c0f56STejun Heo 			"reset failed (errno=%d), retrying in %u secs\n",
29630a2c0f56STejun Heo 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2964416dc9edSTejun Heo 
2965c0c362b6STejun Heo 		ata_eh_release(ap);
2966416dc9edSTejun Heo 		while (delta)
2967416dc9edSTejun Heo 			delta = schedule_timeout_uninterruptible(delta);
2968c0c362b6STejun Heo 		ata_eh_acquire(ap);
2969416dc9edSTejun Heo 	}
2970416dc9edSTejun Heo 
29717a46c078SGwendal Grignou 	/*
29727a46c078SGwendal Grignou 	 * While disks spin up behind a PMP, some controllers fail to send SRST.
29737a46c078SGwendal Grignou 	 * They need to be reset - as well as the PMP - before retrying.
29747a46c078SGwendal Grignou 	 */
29757a46c078SGwendal Grignou 	if (rc == -ERESTART) {
29767a46c078SGwendal Grignou 		if (ata_is_host_link(link))
29777a46c078SGwendal Grignou 			ata_eh_thaw_port(ap);
29787a46c078SGwendal Grignou 		goto out;
29797a46c078SGwendal Grignou 	}
29807a46c078SGwendal Grignou 
2981b1c72916STejun Heo 	if (try == max_tries - 1) {
2982a07d499bSTejun Heo 		sata_down_spd_limit(link, 0);
2983b1c72916STejun Heo 		if (slave)
2984a07d499bSTejun Heo 			sata_down_spd_limit(slave, 0);
2985b1c72916STejun Heo 	} else if (rc == -EPIPE)
2986a07d499bSTejun Heo 		sata_down_spd_limit(failed_link, 0);
2987b1c72916STejun Heo 
2988416dc9edSTejun Heo 	if (hardreset)
2989416dc9edSTejun Heo 		reset = hardreset;
2990416dc9edSTejun Heo 	goto retry;
2991c6fd2807SJeff Garzik }
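
/*
 * Illustrative usage sketch (an assumption, not part of this file): the EH
 * recovery path is expected to invoke ata_eh_reset() with the reset methods
 * chosen by the caller, along the lines of:
 *
 *	rc = ata_eh_reset(link, classify, prereset, softreset,
 *			  hardreset, postreset);
 *
 * where a non-zero @classify asks for the post-reset device classes to be
 * cross-checked against link onlineness and retried if they remain unknown.
 */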
2992c6fd2807SJeff Garzik 
299345fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
299445fabbb7SElias Oltmanns {
299545fabbb7SElias Oltmanns 	struct ata_link *link;
299645fabbb7SElias Oltmanns 	struct ata_device *dev;
299745fabbb7SElias Oltmanns 	unsigned long flags;
299845fabbb7SElias Oltmanns 
299945fabbb7SElias Oltmanns 	/*
300045fabbb7SElias Oltmanns 	 * This function can be thought of as an extended version of
300145fabbb7SElias Oltmanns 	 * ata_eh_about_to_do() specially crafted to accommodate the
300245fabbb7SElias Oltmanns 	 * requirements of ATA_EH_PARK handling. Since the EH thread
300345fabbb7SElias Oltmanns 	 * does not leave the do {} while () loop in ata_eh_recover as
300445fabbb7SElias Oltmanns 	 * long as the timeout for a park request to *one* device on
300545fabbb7SElias Oltmanns 	 * the port has not expired, and since we still want to pick
300645fabbb7SElias Oltmanns 	 * up park requests to other devices on the same port or
300745fabbb7SElias Oltmanns 	 * timeout updates for the same device, we have to pull
300845fabbb7SElias Oltmanns 	 * ATA_EH_PARK actions from eh_info into eh_context.i
300945fabbb7SElias Oltmanns 	 * ourselves at the beginning of each pass over the loop.
301045fabbb7SElias Oltmanns 	 *
301145fabbb7SElias Oltmanns 	 * Additionally, all write accesses to &ap->park_req_pending
301245fabbb7SElias Oltmanns 	 * through INIT_COMPLETION() (see below) or complete_all()
301345fabbb7SElias Oltmanns 	 * (see ata_scsi_park_store()) are protected by the host lock.
301445fabbb7SElias Oltmanns 	 * As a result we have that park_req_pending.done is zero on
301545fabbb7SElias Oltmanns 	 * exit from this function, i.e. when ATA_EH_PARK actions for
301645fabbb7SElias Oltmanns 	 * *all* devices on port ap have been pulled into the
301745fabbb7SElias Oltmanns 	 * respective eh_context structs. If, and only if,
301845fabbb7SElias Oltmanns 	 * park_req_pending.done is non-zero by the time we reach
301945fabbb7SElias Oltmanns 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
302045fabbb7SElias Oltmanns 	 * has been scheduled for at least one of the devices on port
302145fabbb7SElias Oltmanns 	 * ap and we have to cycle over the do {} while () loop in
302245fabbb7SElias Oltmanns 	 * ata_eh_recover() again.
302345fabbb7SElias Oltmanns 	 */
302445fabbb7SElias Oltmanns 
302545fabbb7SElias Oltmanns 	spin_lock_irqsave(ap->lock, flags);
302645fabbb7SElias Oltmanns 	INIT_COMPLETION(ap->park_req_pending);
30271eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
30281eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
302945fabbb7SElias Oltmanns 			struct ata_eh_info *ehi = &link->eh_info;
303045fabbb7SElias Oltmanns 
303145fabbb7SElias Oltmanns 			link->eh_context.i.dev_action[dev->devno] |=
303245fabbb7SElias Oltmanns 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
303345fabbb7SElias Oltmanns 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
303445fabbb7SElias Oltmanns 		}
303545fabbb7SElias Oltmanns 	}
303645fabbb7SElias Oltmanns 	spin_unlock_irqrestore(ap->lock, flags);
303745fabbb7SElias Oltmanns }
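
/*
 * Illustrative sketch (an assumption, not taken from this file): the EH
 * core is expected to pair ata_eh_pull_park_action() with a wait on
 * park_req_pending, roughly:
 *
 *	ata_eh_pull_park_action(ap);
 *	... issue ATA_EH_PARK commands, compute the earliest unpark deadline ...
 *	if (time_before(jiffies, deadline))
 *		wait_for_completion_timeout(&ap->park_req_pending,
 *					    deadline - jiffies);
 *
 * Waking up with park_req_pending completed means another park request or
 * timeout update arrived and the recovery loop must iterate once more.
 */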
303845fabbb7SElias Oltmanns 
303945fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
304045fabbb7SElias Oltmanns {
304145fabbb7SElias Oltmanns 	struct ata_eh_context *ehc = &dev->link->eh_context;
304245fabbb7SElias Oltmanns 	struct ata_taskfile tf;
304345fabbb7SElias Oltmanns 	unsigned int err_mask;
304445fabbb7SElias Oltmanns 
304545fabbb7SElias Oltmanns 	ata_tf_init(dev, &tf);
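	/*
	 * The taskfile programmed below follows the ATA-8 IDLE IMMEDIATE
	 * with UNLOAD FEATURE definition: FEATURES = 0x44 with
	 * LBA = 0x554E4C requests a head unload, and the device signals
	 * acceptance by returning 0xC4 in the LBA low register (checked
	 * after command completion below).  On unpark, CHECK POWER MODE is
	 * issued as an innocuous command that makes the device leave the
	 * unloaded state.
	 */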
304645fabbb7SElias Oltmanns 	if (park) {
304745fabbb7SElias Oltmanns 		ehc->unloaded_mask |= 1 << dev->devno;
304845fabbb7SElias Oltmanns 		tf.command = ATA_CMD_IDLEIMMEDIATE;
304945fabbb7SElias Oltmanns 		tf.feature = 0x44;
305045fabbb7SElias Oltmanns 		tf.lbal = 0x4c;
305145fabbb7SElias Oltmanns 		tf.lbam = 0x4e;
305245fabbb7SElias Oltmanns 		tf.lbah = 0x55;
305345fabbb7SElias Oltmanns 	} else {
305445fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
305545fabbb7SElias Oltmanns 		tf.command = ATA_CMD_CHK_POWER;
305645fabbb7SElias Oltmanns 	}
305745fabbb7SElias Oltmanns 
305845fabbb7SElias Oltmanns 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
305945fabbb7SElias Oltmanns 	tf.protocol |= ATA_PROT_NODATA;
306045fabbb7SElias Oltmanns 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
306145fabbb7SElias Oltmanns 	if (park && (err_mask || tf.lbal != 0xc4)) {
3062a9a79dfeSJoe Perches 		ata_dev_err(dev, "head unload failed!\n");
306345fabbb7SElias Oltmanns 		ehc->unloaded_mask &= ~(1 << dev->devno);
306445fabbb7SElias Oltmanns 	}
306545fabbb7SElias Oltmanns }
306645fabbb7SElias Oltmanns 
30670260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
3068c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
3069c6fd2807SJeff Garzik {
30700260731fSTejun Heo 	struct ata_port *ap = link->ap;
30710260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3072c6fd2807SJeff Garzik 	struct ata_device *dev;
30738c3c52a8STejun Heo 	unsigned int new_mask = 0;
3074c6fd2807SJeff Garzik 	unsigned long flags;
3075f58229f8STejun Heo 	int rc = 0;
3076c6fd2807SJeff Garzik 
3077c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3078c6fd2807SJeff Garzik 
30798c3c52a8STejun Heo 	/* For PATA drive side cable detection to work, IDENTIFY must
30808c3c52a8STejun Heo 	 * be done backwards such that PDIAG- is released by the slave
30818c3c52a8STejun Heo 	 * device before the master device is identified.
30828c3c52a8STejun Heo 	 */
30831eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3084f58229f8STejun Heo 		unsigned int action = ata_eh_dev_action(dev);
3085f58229f8STejun Heo 		unsigned int readid_flags = 0;
3086c6fd2807SJeff Garzik 
3087bff04647STejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3088bff04647STejun Heo 			readid_flags |= ATA_READID_POSTRESET;
3089bff04647STejun Heo 
30909666f400STejun Heo 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3091633273a3STejun Heo 			WARN_ON(dev->class == ATA_DEV_PMP);
3092633273a3STejun Heo 
3093b1c72916STejun Heo 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3094c6fd2807SJeff Garzik 				rc = -EIO;
30958c3c52a8STejun Heo 				goto err;
3096c6fd2807SJeff Garzik 			}
3097c6fd2807SJeff Garzik 
30980260731fSTejun Heo 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3099422c9daaSTejun Heo 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3100422c9daaSTejun Heo 						readid_flags);
3101c6fd2807SJeff Garzik 			if (rc)
31028c3c52a8STejun Heo 				goto err;
3103c6fd2807SJeff Garzik 
31040260731fSTejun Heo 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3105c6fd2807SJeff Garzik 
3106baa1e78aSTejun Heo 			/* Configuration may have changed, reconfigure
3107baa1e78aSTejun Heo 			 * transfer mode.
3108baa1e78aSTejun Heo 			 */
3109baa1e78aSTejun Heo 			ehc->i.flags |= ATA_EHI_SETMODE;
3110baa1e78aSTejun Heo 
3111c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
3112ad72cf98STejun Heo 			schedule_work(&(ap->scsi_rescan_task));
3113c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3114c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
3115c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
3116842faa6cSTejun Heo 			/* Temporarily set dev->class, it will be
3117842faa6cSTejun Heo 			 * permanently set once all configurations are
3118842faa6cSTejun Heo 			 * complete.  This is necessary because new
3119842faa6cSTejun Heo 			 * device configuration is done in two
3120842faa6cSTejun Heo 			 * separate loops.
3121842faa6cSTejun Heo 			 */
3122c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
3123c6fd2807SJeff Garzik 
3124633273a3STejun Heo 			if (dev->class == ATA_DEV_PMP)
3125633273a3STejun Heo 				rc = sata_pmp_attach(dev);
3126633273a3STejun Heo 			else
3127633273a3STejun Heo 				rc = ata_dev_read_id(dev, &dev->class,
3128633273a3STejun Heo 						     readid_flags, dev->id);
3129842faa6cSTejun Heo 
3130842faa6cSTejun Heo 			/* read_id might have changed class, store and reset */
3131842faa6cSTejun Heo 			ehc->classes[dev->devno] = dev->class;
3132842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
3133842faa6cSTejun Heo 
31348c3c52a8STejun Heo 			switch (rc) {
31358c3c52a8STejun Heo 			case 0:
313699cf610aSTejun Heo 				/* clear error info accumulated during probe */
313799cf610aSTejun Heo 				ata_ering_clear(&dev->ering);
3138f58229f8STejun Heo 				new_mask |= 1 << dev->devno;
31398c3c52a8STejun Heo 				break;
31408c3c52a8STejun Heo 			case -ENOENT:
314155a8e2c8STejun Heo 				/* IDENTIFY was issued to non-existent
314255a8e2c8STejun Heo 				 * device.  No need to reset.  Just
3143842faa6cSTejun Heo 				 * thaw and ignore the device.
314455a8e2c8STejun Heo 				 */
314555a8e2c8STejun Heo 				ata_eh_thaw_port(ap);
3146c6fd2807SJeff Garzik 				break;
31478c3c52a8STejun Heo 			default:
31488c3c52a8STejun Heo 				goto err;
31498c3c52a8STejun Heo 			}
31508c3c52a8STejun Heo 		}
3151c6fd2807SJeff Garzik 	}
3152c6fd2807SJeff Garzik 
3153c1c4e8d5STejun Heo 	/* PDIAG- should have been released, ask cable type if post-reset */
315433267325STejun Heo 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
315533267325STejun Heo 		if (ap->ops->cable_detect)
3156c1c4e8d5STejun Heo 			ap->cbl = ap->ops->cable_detect(ap);
315733267325STejun Heo 		ata_force_cbl(ap);
315833267325STejun Heo 	}
3159c1c4e8d5STejun Heo 
31608c3c52a8STejun Heo 	/* Configure new devices forward such that the user doesn't see
31618c3c52a8STejun Heo 	 * device detection messages backwards.
31628c3c52a8STejun Heo 	 */
31631eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
31644f7c2874STejun Heo 		if (!(new_mask & (1 << dev->devno)))
31658c3c52a8STejun Heo 			continue;
31668c3c52a8STejun Heo 
3167842faa6cSTejun Heo 		dev->class = ehc->classes[dev->devno];
3168842faa6cSTejun Heo 
31694f7c2874STejun Heo 		if (dev->class == ATA_DEV_PMP)
31704f7c2874STejun Heo 			continue;
31714f7c2874STejun Heo 
31728c3c52a8STejun Heo 		ehc->i.flags |= ATA_EHI_PRINTINFO;
31738c3c52a8STejun Heo 		rc = ata_dev_configure(dev);
31748c3c52a8STejun Heo 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3175842faa6cSTejun Heo 		if (rc) {
3176842faa6cSTejun Heo 			dev->class = ATA_DEV_UNKNOWN;
31778c3c52a8STejun Heo 			goto err;
3178842faa6cSTejun Heo 		}
31798c3c52a8STejun Heo 
3180c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
3181c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3182c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3183baa1e78aSTejun Heo 
318455a8e2c8STejun Heo 		/* new device discovered, configure xfermode */
3185baa1e78aSTejun Heo 		ehc->i.flags |= ATA_EHI_SETMODE;
3186c6fd2807SJeff Garzik 	}
3187c6fd2807SJeff Garzik 
31888c3c52a8STejun Heo 	return 0;
31898c3c52a8STejun Heo 
31908c3c52a8STejun Heo  err:
3191c6fd2807SJeff Garzik 	*r_failed_dev = dev;
31928c3c52a8STejun Heo 	DPRINTK("EXIT rc=%d\n", rc);
3193c6fd2807SJeff Garzik 	return rc;
3194c6fd2807SJeff Garzik }
3195c6fd2807SJeff Garzik 
31966f1d1e3aSTejun Heo /**
31976f1d1e3aSTejun Heo  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
31986f1d1e3aSTejun Heo  *	@link: link on which timings will be programmed
319998a1708dSMartin Olsson  *	@r_failed_dev: out parameter for failed device
32006f1d1e3aSTejun Heo  *
32016f1d1e3aSTejun Heo  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
32026f1d1e3aSTejun Heo  *	ata_set_mode() fails, pointer to the failing device is
32036f1d1e3aSTejun Heo  *	returned in @r_failed_dev.
32046f1d1e3aSTejun Heo  *
32056f1d1e3aSTejun Heo  *	LOCKING:
32066f1d1e3aSTejun Heo  *	PCI/etc. bus probe sem.
32076f1d1e3aSTejun Heo  *
32086f1d1e3aSTejun Heo  *	RETURNS:
32096f1d1e3aSTejun Heo  *	0 on success, negative errno otherwise
32106f1d1e3aSTejun Heo  */
32116f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
32126f1d1e3aSTejun Heo {
32136f1d1e3aSTejun Heo 	struct ata_port *ap = link->ap;
321400115e0fSTejun Heo 	struct ata_device *dev;
321500115e0fSTejun Heo 	int rc;
32166f1d1e3aSTejun Heo 
321776326ac1STejun Heo 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
32181eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
321976326ac1STejun Heo 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
322076326ac1STejun Heo 			struct ata_ering_entry *ent;
322176326ac1STejun Heo 
322276326ac1STejun Heo 			ent = ata_ering_top(&dev->ering);
322376326ac1STejun Heo 			if (ent)
322476326ac1STejun Heo 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
322576326ac1STejun Heo 		}
322676326ac1STejun Heo 	}
322776326ac1STejun Heo 
32286f1d1e3aSTejun Heo 	/* has private set_mode? */
32296f1d1e3aSTejun Heo 	if (ap->ops->set_mode)
323000115e0fSTejun Heo 		rc = ap->ops->set_mode(link, r_failed_dev);
323100115e0fSTejun Heo 	else
323200115e0fSTejun Heo 		rc = ata_do_set_mode(link, r_failed_dev);
323300115e0fSTejun Heo 
323400115e0fSTejun Heo 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
32351eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
323600115e0fSTejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
323700115e0fSTejun Heo 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
323800115e0fSTejun Heo 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
323900115e0fSTejun Heo 
324000115e0fSTejun Heo 		if (dev->xfer_mode != saved_xfer_mode ||
324100115e0fSTejun Heo 		    ata_ncq_enabled(dev) != saved_ncq)
324200115e0fSTejun Heo 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
324300115e0fSTejun Heo 	}
324400115e0fSTejun Heo 
324500115e0fSTejun Heo 	return rc;
32466f1d1e3aSTejun Heo }
32476f1d1e3aSTejun Heo 
324811fc33daSTejun Heo /**
324911fc33daSTejun Heo  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
325011fc33daSTejun Heo  *	@dev: ATAPI device to clear UA for
325111fc33daSTejun Heo  *
325211fc33daSTejun Heo  *	Resets and other operations can make an ATAPI device raise
325311fc33daSTejun Heo  *	UNIT ATTENTION which causes the next operation to fail.  This
325411fc33daSTejun Heo  *	function clears UA.
325511fc33daSTejun Heo  *
325611fc33daSTejun Heo  *	LOCKING:
325711fc33daSTejun Heo  *	EH context (may sleep).
325811fc33daSTejun Heo  *
325911fc33daSTejun Heo  *	RETURNS:
326011fc33daSTejun Heo  *	0 on success, -errno on failure.
326111fc33daSTejun Heo  */
326211fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
326311fc33daSTejun Heo {
326411fc33daSTejun Heo 	int i;
326511fc33daSTejun Heo 
326611fc33daSTejun Heo 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3267b5357081STejun Heo 		u8 *sense_buffer = dev->link->ap->sector_buf;
326811fc33daSTejun Heo 		u8 sense_key = 0;
326911fc33daSTejun Heo 		unsigned int err_mask;
327011fc33daSTejun Heo 
327111fc33daSTejun Heo 		err_mask = atapi_eh_tur(dev, &sense_key);
327211fc33daSTejun Heo 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3273a9a79dfeSJoe Perches 			ata_dev_warn(dev,
3274a9a79dfeSJoe Perches 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3275a9a79dfeSJoe Perches 				     err_mask);
327611fc33daSTejun Heo 			return -EIO;
327711fc33daSTejun Heo 		}
327811fc33daSTejun Heo 
327911fc33daSTejun Heo 		if (!err_mask || sense_key != UNIT_ATTENTION)
328011fc33daSTejun Heo 			return 0;
328111fc33daSTejun Heo 
328211fc33daSTejun Heo 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
328311fc33daSTejun Heo 		if (err_mask) {
3284a9a79dfeSJoe Perches 			ata_dev_warn(dev, "failed to clear "
328511fc33daSTejun Heo 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
328611fc33daSTejun Heo 			return -EIO;
328711fc33daSTejun Heo 		}
328811fc33daSTejun Heo 	}
328911fc33daSTejun Heo 
3290a9a79dfeSJoe Perches 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3291a9a79dfeSJoe Perches 		     ATA_EH_UA_TRIES);
329211fc33daSTejun Heo 
329311fc33daSTejun Heo 	return 0;
329411fc33daSTejun Heo }
329511fc33daSTejun Heo 
32966013efd8STejun Heo /**
32976013efd8STejun Heo  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
32986013efd8STejun Heo  *	@dev: ATA device which may need FLUSH retry
32996013efd8STejun Heo  *
33016013efd8STejun Heo  *	If @dev failed FLUSH, it needs to be reported to the upper layer
33026013efd8STejun Heo  *	immediately, as it means that @dev failed to remap and has already
33036013efd8STejun Heo  *	lost at least a sector; further FLUSH retries won't make any
33046013efd8STejun Heo  *	difference to the lost sector.  However, if FLUSH failed for
33056013efd8STejun Heo  *	another reason, for example a transmission error, FLUSH needs
33066013efd8STejun Heo  *	to be retried.
33066013efd8STejun Heo  *
33076013efd8STejun Heo  *	This function determines whether FLUSH failure retry is
33086013efd8STejun Heo  *	necessary and performs it if so.
33096013efd8STejun Heo  *
33106013efd8STejun Heo  *	RETURNS:
33116013efd8STejun Heo  *	0 if EH can continue, -errno if EH needs to be repeated.
33126013efd8STejun Heo  */
33136013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev)
33146013efd8STejun Heo {
33156013efd8STejun Heo 	struct ata_link *link = dev->link;
33166013efd8STejun Heo 	struct ata_port *ap = link->ap;
33176013efd8STejun Heo 	struct ata_queued_cmd *qc;
33186013efd8STejun Heo 	struct ata_taskfile tf;
33196013efd8STejun Heo 	unsigned int err_mask;
33206013efd8STejun Heo 	int rc = 0;
33216013efd8STejun Heo 
33226013efd8STejun Heo 	/* did flush fail for this device? */
33236013efd8STejun Heo 	if (!ata_tag_valid(link->active_tag))
33246013efd8STejun Heo 		return 0;
33256013efd8STejun Heo 
33266013efd8STejun Heo 	qc = __ata_qc_from_tag(ap, link->active_tag);
33276013efd8STejun Heo 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
33286013efd8STejun Heo 			       qc->tf.command != ATA_CMD_FLUSH))
33296013efd8STejun Heo 		return 0;
33306013efd8STejun Heo 
33316013efd8STejun Heo 	/* if the device failed it, it should be reported to upper layers */
33326013efd8STejun Heo 	if (qc->err_mask & AC_ERR_DEV)
33336013efd8STejun Heo 		return 0;
33346013efd8STejun Heo 
33356013efd8STejun Heo 	/* flush failed for some other reason, give it another shot */
33366013efd8STejun Heo 	ata_tf_init(dev, &tf);
33376013efd8STejun Heo 
33386013efd8STejun Heo 	tf.command = qc->tf.command;
33396013efd8STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE;
33406013efd8STejun Heo 	tf.protocol = ATA_PROT_NODATA;
33416013efd8STejun Heo 
3342a9a79dfeSJoe Perches 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
33436013efd8STejun Heo 		       tf.command, qc->err_mask);
33446013efd8STejun Heo 
33456013efd8STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
33466013efd8STejun Heo 	if (!err_mask) {
33476013efd8STejun Heo 		/*
33486013efd8STejun Heo 		 * FLUSH is complete but there's no way to
33496013efd8STejun Heo 		 * successfully complete a failed command from EH.
33506013efd8STejun Heo 		 * Making sure retry is allowed at least once and
33516013efd8STejun Heo 		 * retrying it should do the trick - whatever was in
33526013efd8STejun Heo 		 * the cache is already on the platter and this won't
33536013efd8STejun Heo 		 * cause an infinite loop.
33546013efd8STejun Heo 		 */
33556013efd8STejun Heo 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
33566013efd8STejun Heo 	} else {
3357a9a79dfeSJoe Perches 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
33586013efd8STejun Heo 			       err_mask);
33596013efd8STejun Heo 		rc = -EIO;
33606013efd8STejun Heo 
33616013efd8STejun Heo 		/* if device failed it, report it to upper layers */
33626013efd8STejun Heo 		if (err_mask & AC_ERR_DEV) {
33636013efd8STejun Heo 			qc->err_mask |= AC_ERR_DEV;
33646013efd8STejun Heo 			qc->result_tf = tf;
33656013efd8STejun Heo 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
33666013efd8STejun Heo 				rc = 0;
33676013efd8STejun Heo 		}
33686013efd8STejun Heo 	}
33696013efd8STejun Heo 	return rc;
33706013efd8STejun Heo }
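
/*
 * Illustrative sketch, excluded from the build: the retry decision above
 * boils down to "re-issue FLUSH only when the failure did not come from
 * the device itself".  A hypothetical predicate expressing that, using
 * only fields already referenced in ata_eh_maybe_retry_flush(), might
 * look like this.
 */
#if 0
static bool eh_flush_worth_retrying(const struct ata_queued_cmd *qc)
{
	/* only FLUSH / FLUSH EXT failures are candidates */
	if (qc->tf.command != ATA_CMD_FLUSH &&
	    qc->tf.command != ATA_CMD_FLUSH_EXT)
		return false;

	/* a device-reported error means data is already lost; don't retry */
	return !(qc->err_mask & AC_ERR_DEV);
}
#endif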
33716013efd8STejun Heo 
33726b7ae954STejun Heo /**
33736b7ae954STejun Heo  *	ata_eh_set_lpm - configure SATA interface power management
33746b7ae954STejun Heo  *	@link: link to configure power management
33756b7ae954STejun Heo  *	@policy: the link power management policy
33766b7ae954STejun Heo  *	@r_failed_dev: out parameter for failed device
33776b7ae954STejun Heo  *
33786b7ae954STejun Heo  *	Enable SATA Interface power management.  This will enable
33796b7ae954STejun Heo  *	Device Initiated Power Management (DIPM) for the min_power
33806b7ae954STejun Heo  *	policy, and then call driver-specific callbacks for
33816b7ae954STejun Heo  *	enabling Host Initiated Power Management (HIPM).
33826b7ae954STejun Heo  *
33836b7ae954STejun Heo  *	LOCKING:
33846b7ae954STejun Heo  *	EH context.
33856b7ae954STejun Heo  *
33866b7ae954STejun Heo  *	RETURNS:
33876b7ae954STejun Heo  *	0 on success, -errno on failure.
33886b7ae954STejun Heo  */
33896b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
33906b7ae954STejun Heo 			  struct ata_device **r_failed_dev)
33916b7ae954STejun Heo {
33926c8ea89cSTejun Heo 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
33936b7ae954STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
33946b7ae954STejun Heo 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3395e5005b15STejun Heo 	enum ata_lpm_policy old_policy = link->lpm_policy;
33965f6f12ccSTejun Heo 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
33976b7ae954STejun Heo 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
33986b7ae954STejun Heo 	unsigned int err_mask;
33996b7ae954STejun Heo 	int rc;
34006b7ae954STejun Heo 
34016b7ae954STejun Heo 	/* if the link or host doesn't do LPM, noop */
34026b7ae954STejun Heo 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
34036b7ae954STejun Heo 		return 0;
34046b7ae954STejun Heo 
34056b7ae954STejun Heo 	/*
34066b7ae954STejun Heo 	 * DIPM is enabled only for MIN_POWER as some devices
34076b7ae954STejun Heo 	 * misbehave when the host NACKs the transition to SLUMBER.  Order
34086b7ae954STejun Heo 	 * device and link configurations such that the host always
34096b7ae954STejun Heo 	 * allows DIPM requests.
34106b7ae954STejun Heo 	 */
34116b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
34126b7ae954STejun Heo 		bool hipm = ata_id_has_hipm(dev->id);
3413ae01b249STejun Heo 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
34146b7ae954STejun Heo 
34156b7ae954STejun Heo 		/* find the first enabled device and the first LPM-enabled device */
34166b7ae954STejun Heo 		if (!link_dev)
34176b7ae954STejun Heo 			link_dev = dev;
34186b7ae954STejun Heo 
34196b7ae954STejun Heo 		if (!lpm_dev && (hipm || dipm))
34206b7ae954STejun Heo 			lpm_dev = dev;
34216b7ae954STejun Heo 
34226b7ae954STejun Heo 		hints &= ~ATA_LPM_EMPTY;
34236b7ae954STejun Heo 		if (!hipm)
34246b7ae954STejun Heo 			hints &= ~ATA_LPM_HIPM;
34256b7ae954STejun Heo 
34266b7ae954STejun Heo 		/* disable DIPM before changing link config */
34276b7ae954STejun Heo 		if (policy != ATA_LPM_MIN_POWER && dipm) {
34286b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
34296b7ae954STejun Heo 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
34306b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
3431a9a79dfeSJoe Perches 				ata_dev_warn(dev,
34326b7ae954STejun Heo 					     "failed to disable DIPM, Emask 0x%x\n",
34336b7ae954STejun Heo 					     err_mask);
34346b7ae954STejun Heo 				rc = -EIO;
34356b7ae954STejun Heo 				goto fail;
34366b7ae954STejun Heo 			}
34376b7ae954STejun Heo 		}
34386b7ae954STejun Heo 	}
34396b7ae954STejun Heo 
34406c8ea89cSTejun Heo 	if (ap) {
34416b7ae954STejun Heo 		rc = ap->ops->set_lpm(link, policy, hints);
34426b7ae954STejun Heo 		if (!rc && ap->slave_link)
34436b7ae954STejun Heo 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
34446c8ea89cSTejun Heo 	} else
34456c8ea89cSTejun Heo 		rc = sata_pmp_set_lpm(link, policy, hints);
34466b7ae954STejun Heo 
34476b7ae954STejun Heo 	/*
34486b7ae954STejun Heo 	 * Attribute link config failure to the first (LPM) enabled
34496b7ae954STejun Heo 	 * device on the link.
34506b7ae954STejun Heo 	 */
34516b7ae954STejun Heo 	if (rc) {
34526b7ae954STejun Heo 		if (rc == -EOPNOTSUPP) {
34536b7ae954STejun Heo 			link->flags |= ATA_LFLAG_NO_LPM;
34546b7ae954STejun Heo 			return 0;
34556b7ae954STejun Heo 		}
34566b7ae954STejun Heo 		dev = lpm_dev ? lpm_dev : link_dev;
34576b7ae954STejun Heo 		goto fail;
34586b7ae954STejun Heo 	}
34596b7ae954STejun Heo 
3460e5005b15STejun Heo 	/*
3461e5005b15STejun Heo 	 * Low level driver acked the transition.  Issue DIPM command
3462e5005b15STejun Heo 	 * with the new policy set.
3463e5005b15STejun Heo 	 */
3464e5005b15STejun Heo 	link->lpm_policy = policy;
3465e5005b15STejun Heo 	if (ap && ap->slave_link)
3466e5005b15STejun Heo 		ap->slave_link->lpm_policy = policy;
3467e5005b15STejun Heo 
34686b7ae954STejun Heo 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
34696b7ae954STejun Heo 	ata_for_each_dev(dev, link, ENABLED) {
3470ae01b249STejun Heo 		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3471ae01b249STejun Heo 		    ata_id_has_dipm(dev->id)) {
34726b7ae954STejun Heo 			err_mask = ata_dev_set_feature(dev,
34736b7ae954STejun Heo 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
34746b7ae954STejun Heo 			if (err_mask && err_mask != AC_ERR_DEV) {
3475a9a79dfeSJoe Perches 				ata_dev_warn(dev,
34766b7ae954STejun Heo 					"failed to enable DIPM, Emask 0x%x\n",
34776b7ae954STejun Heo 					err_mask);
34786b7ae954STejun Heo 				rc = -EIO;
34796b7ae954STejun Heo 				goto fail;
34806b7ae954STejun Heo 			}
34816b7ae954STejun Heo 		}
34826b7ae954STejun Heo 	}
34836b7ae954STejun Heo 
34846b7ae954STejun Heo 	return 0;
34856b7ae954STejun Heo 
34866b7ae954STejun Heo fail:
3487e5005b15STejun Heo 	/* restore the old policy */
3488e5005b15STejun Heo 	link->lpm_policy = old_policy;
3489e5005b15STejun Heo 	if (ap && ap->slave_link)
3490e5005b15STejun Heo 		ap->slave_link->lpm_policy = old_policy;
3491e5005b15STejun Heo 
34926b7ae954STejun Heo 	/* if no device or only one more chance is left, disable LPM */
34936b7ae954STejun Heo 	if (!dev || ehc->tries[dev->devno] <= 2) {
3494a9a79dfeSJoe Perches 		ata_link_warn(link, "disabling LPM on the link\n");
34956b7ae954STejun Heo 		link->flags |= ATA_LFLAG_NO_LPM;
34966b7ae954STejun Heo 	}
34976b7ae954STejun Heo 	if (r_failed_dev)
34986b7ae954STejun Heo 		*r_failed_dev = dev;
34996b7ae954STejun Heo 	return rc;
35006b7ae954STejun Heo }
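
/*
 * Illustrative sketch, excluded from the build: the ->set_lpm() contract
 * that ata_eh_set_lpm() relies on.  A hypothetical host driver would map
 * @policy onto its link power configuration while honouring the
 * ATA_LPM_HIPM and ATA_LPM_EMPTY hints computed above.  The driver name
 * and the per-policy handling below are assumptions, not taken from any
 * real driver.
 */
#if 0
static int hypothetical_set_lpm(struct ata_link *link,
				enum ata_lpm_policy policy, unsigned hints)
{
	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* forbid PARTIAL and SLUMBER on the host side */
		break;
	case ATA_LPM_MED_POWER:
		/* allow PARTIAL only */
		break;
	case ATA_LPM_MIN_POWER:
		/* allow PARTIAL and SLUMBER; EH enables DIPM afterwards */
		break;
	default:
		/* ata_eh_set_lpm() maps -EOPNOTSUPP to ATA_LFLAG_NO_LPM */
		return -EOPNOTSUPP;
	}
	return 0;
}
#endif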
35016b7ae954STejun Heo 
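/**
 *	ata_link_nr_enabled - Return # of enabled devices on the link
 *	@link: ATA link to examine
 *
 *	Return the number of enabled devices on @link.
 */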
35028a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link)
3503c6fd2807SJeff Garzik {
3504f58229f8STejun Heo 	struct ata_device *dev;
3505f58229f8STejun Heo 	int cnt = 0;
3506c6fd2807SJeff Garzik 
35071eca4365STejun Heo 	ata_for_each_dev(dev, link, ENABLED)
3508c6fd2807SJeff Garzik 		cnt++;
3509c6fd2807SJeff Garzik 	return cnt;
3510c6fd2807SJeff Garzik }
3511c6fd2807SJeff Garzik 
35120260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
3513c6fd2807SJeff Garzik {
3514f58229f8STejun Heo 	struct ata_device *dev;
3515f58229f8STejun Heo 	int cnt = 0;
3516c6fd2807SJeff Garzik 
35171eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL)
3518f58229f8STejun Heo 		if (dev->class == ATA_DEV_UNKNOWN)
3519c6fd2807SJeff Garzik 			cnt++;
3520c6fd2807SJeff Garzik 	return cnt;
3521c6fd2807SJeff Garzik }
3522c6fd2807SJeff Garzik 
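/*
 * Decide whether error recovery can be skipped for @link: it can be
 * skipped when the link is disabled, when skipping was explicitly
 * requested, or when there is nothing left to thaw, reset or probe.
 */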
35230260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
3524c6fd2807SJeff Garzik {
3525672b2d65STejun Heo 	struct ata_port *ap = link->ap;
35260260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3527f58229f8STejun Heo 	struct ata_device *dev;
3528c6fd2807SJeff Garzik 
3529f9df58cbSTejun Heo 	/* skip disabled links */
3530f9df58cbSTejun Heo 	if (link->flags & ATA_LFLAG_DISABLED)
3531f9df58cbSTejun Heo 		return 1;
3532f9df58cbSTejun Heo 
3533e2f3d75fSTejun Heo 	/* skip if explicitly requested */
3534e2f3d75fSTejun Heo 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3535e2f3d75fSTejun Heo 		return 1;
3536e2f3d75fSTejun Heo 
3537672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
3538672b2d65STejun Heo 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3539672b2d65STejun Heo 		return 0;
3540672b2d65STejun Heo 
3541672b2d65STejun Heo 	/* reset at least once if reset is requested */
3542672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3543672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3544c6fd2807SJeff Garzik 		return 0;
3545c6fd2807SJeff Garzik 
3546c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
35471eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3548c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3549c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3550c6fd2807SJeff Garzik 			return 0;
3551c6fd2807SJeff Garzik 	}
3552c6fd2807SJeff Garzik 
3553c6fd2807SJeff Garzik 	return 1;
3554c6fd2807SJeff Garzik }
3555c6fd2807SJeff Garzik 
3556c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3557c2c7a89cSTejun Heo {
3558c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3559c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3560c2c7a89cSTejun Heo 	int *trials = void_arg;
3561c2c7a89cSTejun Heo 
35626868225eSLin Ming 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
35636868225eSLin Ming 	    (ent->timestamp < now - min(now, interval)))
3564c2c7a89cSTejun Heo 		return -1;
3565c2c7a89cSTejun Heo 
3566c2c7a89cSTejun Heo 	(*trials)++;
3567c2c7a89cSTejun Heo 	return 0;
3568c2c7a89cSTejun Heo }
3569c2c7a89cSTejun Heo 
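/*
 * Schedule (re)probing of @dev if probing was requested and hasn't been
 * attempted yet during this EH run.  Returns 1 if a probe was scheduled,
 * 0 otherwise.
 */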
357002c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
357102c05a27STejun Heo {
357202c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3573c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3574c2c7a89cSTejun Heo 	int trials = 0;
357502c05a27STejun Heo 
357602c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
357702c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
357802c05a27STejun Heo 		return 0;
357902c05a27STejun Heo 
358002c05a27STejun Heo 	ata_eh_detach_dev(dev);
358102c05a27STejun Heo 	ata_dev_init(dev);
358202c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3583cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
358400115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
358500115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
358602c05a27STejun Heo 
35876b7ae954STejun Heo 	/* the link may be in a deep sleep, wake it up */
35886c8ea89cSTejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
35896c8ea89cSTejun Heo 		if (ata_is_host_link(link))
35906c8ea89cSTejun Heo 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
35916c8ea89cSTejun Heo 					       ATA_LPM_EMPTY);
35926c8ea89cSTejun Heo 		else
35936c8ea89cSTejun Heo 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
35946c8ea89cSTejun Heo 					 ATA_LPM_EMPTY);
35956c8ea89cSTejun Heo 	}
35966b7ae954STejun Heo 
3597c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering.  The specific
3598c2c7a89cSTejun Heo 	 * error mask used is irrelevant.  Because a successful device
3599c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3600c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3601c2c7a89cSTejun Heo 	 *
3602c2c7a89cSTejun Heo 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3603c2c7a89cSTejun Heo 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3604c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
3605c2c7a89cSTejun Heo 	 *
3606c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3607c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3608c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3609c2c7a89cSTejun Heo 	 */
3610c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3611c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3612c2c7a89cSTejun Heo 
3613c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3614c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3615c2c7a89cSTejun Heo 
361602c05a27STejun Heo 	return 1;
361702c05a27STejun Heo }
361802c05a27STejun Heo 
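/*
 * Handle failure of recovery for @dev: consume a try (except on -EAGAIN),
 * schedule probing or speed-downs as appropriate, and disable the device
 * once its tries are exhausted.  Returns 1 if the device was disabled,
 * 0 if recovery should be retried.
 */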
36199b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3620fee7ca72STejun Heo {
36219af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3622fee7ca72STejun Heo 
3623cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3624cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3625cf9a590aSTejun Heo 	 */
3626cf9a590aSTejun Heo 	if (err != -EAGAIN)
3627fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3628fee7ca72STejun Heo 
3629fee7ca72STejun Heo 	switch (err) {
3630fee7ca72STejun Heo 	case -ENODEV:
3631fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3632fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
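		/* fall through */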
3633fee7ca72STejun Heo 	case -EINVAL:
3634fee7ca72STejun Heo 		/* give it just one more chance */
3635fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
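		/* fall through */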
3636fee7ca72STejun Heo 	case -EIO:
3637d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3638fee7ca72STejun Heo 			/* This is the last chance, better to slow
3639fee7ca72STejun Heo 			 * down than lose it.
3640fee7ca72STejun Heo 			 */
3641a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3642d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3643fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3644fee7ca72STejun Heo 		}
3645fee7ca72STejun Heo 	}
3646fee7ca72STejun Heo 
3647fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3648fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3649fee7ca72STejun Heo 		ata_dev_disable(dev);
3650fee7ca72STejun Heo 
3651fee7ca72STejun Heo 		/* detach if offline */
3652b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3653fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3654fee7ca72STejun Heo 
365502c05a27STejun Heo 		/* schedule probe if necessary */
365687fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3657fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
365887fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
365987fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
366087fbc5a0STejun Heo 		}
36619b1e2658STejun Heo 
36629b1e2658STejun Heo 		return 1;
3663fee7ca72STejun Heo 	} else {
3664cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
36659b1e2658STejun Heo 		return 0;
3666fee7ca72STejun Heo 	}
3667fee7ca72STejun Heo }
3668fee7ca72STejun Heo 
3669c6fd2807SJeff Garzik /**
3670c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
3671c6fd2807SJeff Garzik  *	@ap: host port to recover
3672c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3673c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3674c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3675c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
36769b1e2658STejun Heo  *	@r_failed_link: out parameter for failed link
3677c6fd2807SJeff Garzik  *
3678c6fd2807SJeff Garzik  *	This is the alpha and omega, yin and yang, heart and soul of
3679c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
36809b1e2658STejun Heo  *	recover each link and hotplug requests are recorded in the
36819b1e2658STejun Heo  *	link's eh_context.  This function executes all the operations
36829b1e2658STejun Heo  *	with appropriate retries and fallbacks to resurrect failed
3683c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
3684c6fd2807SJeff Garzik  *
3685c6fd2807SJeff Garzik  *	LOCKING:
3686c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3687c6fd2807SJeff Garzik  *
3688c6fd2807SJeff Garzik  *	RETURNS:
3689c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3690c6fd2807SJeff Garzik  */
3691fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3692c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
36939b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
36949b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3695c6fd2807SJeff Garzik {
36969b1e2658STejun Heo 	struct ata_link *link;
3697c6fd2807SJeff Garzik 	struct ata_device *dev;
36986b7ae954STejun Heo 	int rc, nr_fails;
369945fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3700c6fd2807SJeff Garzik 
3701c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3702c6fd2807SJeff Garzik 
3703c6fd2807SJeff Garzik 	/* prep for recovery */
37041eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37059b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37069b1e2658STejun Heo 
3707f9df58cbSTejun Heo 		/* re-enable link? */
3708f9df58cbSTejun Heo 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3709f9df58cbSTejun Heo 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3710f9df58cbSTejun Heo 			spin_lock_irqsave(ap->lock, flags);
3711f9df58cbSTejun Heo 			link->flags &= ~ATA_LFLAG_DISABLED;
3712f9df58cbSTejun Heo 			spin_unlock_irqrestore(ap->lock, flags);
3713f9df58cbSTejun Heo 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3714f9df58cbSTejun Heo 		}
3715f9df58cbSTejun Heo 
37161eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
3717fd995f70STejun Heo 			if (link->flags & ATA_LFLAG_NO_RETRY)
3718fd995f70STejun Heo 				ehc->tries[dev->devno] = 1;
3719fd995f70STejun Heo 			else
3720c6fd2807SJeff Garzik 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3721c6fd2807SJeff Garzik 
372279a55b72STejun Heo 			/* collect port action mask recorded in dev actions */
37239b1e2658STejun Heo 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
37249b1e2658STejun Heo 					 ~ATA_EH_PERDEV_MASK;
3725f58229f8STejun Heo 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
372679a55b72STejun Heo 
3727c6fd2807SJeff Garzik 			/* process hotplug request */
3728c6fd2807SJeff Garzik 			if (dev->flags & ATA_DFLAG_DETACH)
3729c6fd2807SJeff Garzik 				ata_eh_detach_dev(dev);
3730c6fd2807SJeff Garzik 
373102c05a27STejun Heo 			/* schedule probe if necessary */
373202c05a27STejun Heo 			if (!ata_dev_enabled(dev))
373302c05a27STejun Heo 				ata_eh_schedule_probe(dev);
3734c6fd2807SJeff Garzik 		}
37359b1e2658STejun Heo 	}
3736c6fd2807SJeff Garzik 
3737c6fd2807SJeff Garzik  retry:
3738c6fd2807SJeff Garzik 	rc = 0;
3739c6fd2807SJeff Garzik 
3740c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
3741c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3742c6fd2807SJeff Garzik 		goto out;
3743c6fd2807SJeff Garzik 
37449b1e2658STejun Heo 	/* prep for EH */
37451eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37469b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37479b1e2658STejun Heo 
3748c6fd2807SJeff Garzik 		/* skip EH if possible. */
37490260731fSTejun Heo 		if (ata_eh_skip_recovery(link))
3750c6fd2807SJeff Garzik 			ehc->i.action = 0;
3751c6fd2807SJeff Garzik 
37521eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL)
3753f58229f8STejun Heo 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
37549b1e2658STejun Heo 	}
3755c6fd2807SJeff Garzik 
3756c6fd2807SJeff Garzik 	/* reset */
37571eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37589b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37599b1e2658STejun Heo 
3760cf480626STejun Heo 		if (!(ehc->i.action & ATA_EH_RESET))
37619b1e2658STejun Heo 			continue;
37629b1e2658STejun Heo 
37639b1e2658STejun Heo 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3764dc98c32cSTejun Heo 				  prereset, softreset, hardreset, postreset);
3765c6fd2807SJeff Garzik 		if (rc) {
3766a9a79dfeSJoe Perches 			ata_link_err(link, "reset failed, giving up\n");
3767c6fd2807SJeff Garzik 			goto out;
3768c6fd2807SJeff Garzik 		}
37699b1e2658STejun Heo 	}
3770c6fd2807SJeff Garzik 
377145fabbb7SElias Oltmanns 	do {
377245fabbb7SElias Oltmanns 		unsigned long now;
377345fabbb7SElias Oltmanns 
377445fabbb7SElias Oltmanns 		/*
377545fabbb7SElias Oltmanns 		 * clears ATA_EH_PARK in eh_info and resets
377645fabbb7SElias Oltmanns 		 * ap->park_req_pending
377745fabbb7SElias Oltmanns 		 */
377845fabbb7SElias Oltmanns 		ata_eh_pull_park_action(ap);
377945fabbb7SElias Oltmanns 
378045fabbb7SElias Oltmanns 		deadline = jiffies;
37811eca4365STejun Heo 		ata_for_each_link(link, ap, EDGE) {
37821eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
378345fabbb7SElias Oltmanns 				struct ata_eh_context *ehc = &link->eh_context;
378445fabbb7SElias Oltmanns 				unsigned long tmp;
378545fabbb7SElias Oltmanns 
378645fabbb7SElias Oltmanns 				if (dev->class != ATA_DEV_ATA)
378745fabbb7SElias Oltmanns 					continue;
378845fabbb7SElias Oltmanns 				if (!(ehc->i.dev_action[dev->devno] &
378945fabbb7SElias Oltmanns 				      ATA_EH_PARK))
379045fabbb7SElias Oltmanns 					continue;
379145fabbb7SElias Oltmanns 				tmp = dev->unpark_deadline;
379245fabbb7SElias Oltmanns 				if (time_before(deadline, tmp))
379345fabbb7SElias Oltmanns 					deadline = tmp;
379445fabbb7SElias Oltmanns 				else if (time_before_eq(tmp, jiffies))
379545fabbb7SElias Oltmanns 					continue;
379645fabbb7SElias Oltmanns 				if (ehc->unloaded_mask & (1 << dev->devno))
379745fabbb7SElias Oltmanns 					continue;
379845fabbb7SElias Oltmanns 
379945fabbb7SElias Oltmanns 				ata_eh_park_issue_cmd(dev, 1);
380045fabbb7SElias Oltmanns 			}
380145fabbb7SElias Oltmanns 		}
380245fabbb7SElias Oltmanns 
380345fabbb7SElias Oltmanns 		now = jiffies;
380445fabbb7SElias Oltmanns 		if (time_before_eq(deadline, now))
380545fabbb7SElias Oltmanns 			break;
380645fabbb7SElias Oltmanns 
3807c0c362b6STejun Heo 		ata_eh_release(ap);
380845fabbb7SElias Oltmanns 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
380945fabbb7SElias Oltmanns 						       deadline - now);
3810c0c362b6STejun Heo 		ata_eh_acquire(ap);
381145fabbb7SElias Oltmanns 	} while (deadline);
38121eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
38131eca4365STejun Heo 		ata_for_each_dev(dev, link, ALL) {
381445fabbb7SElias Oltmanns 			if (!(link->eh_context.unloaded_mask &
381545fabbb7SElias Oltmanns 			      (1 << dev->devno)))
381645fabbb7SElias Oltmanns 				continue;
381745fabbb7SElias Oltmanns 
381845fabbb7SElias Oltmanns 			ata_eh_park_issue_cmd(dev, 0);
381945fabbb7SElias Oltmanns 			ata_eh_done(link, dev, ATA_EH_PARK);
382045fabbb7SElias Oltmanns 		}
382145fabbb7SElias Oltmanns 	}
382245fabbb7SElias Oltmanns 
38239b1e2658STejun Heo 	/* the rest */
38246b7ae954STejun Heo 	nr_fails = 0;
38256b7ae954STejun Heo 	ata_for_each_link(link, ap, PMP_FIRST) {
38269b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
38279b1e2658STejun Heo 
38286b7ae954STejun Heo 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
38296b7ae954STejun Heo 			goto config_lpm;
38306b7ae954STejun Heo 
3831c6fd2807SJeff Garzik 		/* revalidate existing devices and attach new ones */
38320260731fSTejun Heo 		rc = ata_eh_revalidate_and_attach(link, &dev);
3833c6fd2807SJeff Garzik 		if (rc)
38346b7ae954STejun Heo 			goto rest_fail;
3835c6fd2807SJeff Garzik 
3836633273a3STejun Heo 		/* if PMP got attached, return, pmp EH will take care of it */
3837633273a3STejun Heo 		/* if a PMP got attached, return; PMP EH will take care of it */
3838633273a3STejun Heo 			ehc->i.action = 0;
3839633273a3STejun Heo 			return 0;
3840633273a3STejun Heo 		}
3841633273a3STejun Heo 
3842baa1e78aSTejun Heo 		/* configure transfer mode if necessary */
3843baa1e78aSTejun Heo 		if (ehc->i.flags & ATA_EHI_SETMODE) {
38440260731fSTejun Heo 			rc = ata_set_mode(link, &dev);
38454ae72a1eSTejun Heo 			if (rc)
38466b7ae954STejun Heo 				goto rest_fail;
3847baa1e78aSTejun Heo 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3848c6fd2807SJeff Garzik 		}
3849c6fd2807SJeff Garzik 
385011fc33daSTejun Heo 		/* If reset has been issued, clear UA to avoid
385111fc33daSTejun Heo 		 * disrupting the current users of the device.
385211fc33daSTejun Heo 		 */
385311fc33daSTejun Heo 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
38541eca4365STejun Heo 			ata_for_each_dev(dev, link, ALL) {
385511fc33daSTejun Heo 				if (dev->class != ATA_DEV_ATAPI)
385611fc33daSTejun Heo 					continue;
385711fc33daSTejun Heo 				rc = atapi_eh_clear_ua(dev);
385811fc33daSTejun Heo 				if (rc)
38596b7ae954STejun Heo 					goto rest_fail;
386021334205SAaron Lu 				if (zpodd_dev_enabled(dev))
386121334205SAaron Lu 					zpodd_post_poweron(dev);
386211fc33daSTejun Heo 			}
386311fc33daSTejun Heo 		}
386411fc33daSTejun Heo 
38656013efd8STejun Heo 		/* retry flush if necessary */
38666013efd8STejun Heo 		ata_for_each_dev(dev, link, ALL) {
38676013efd8STejun Heo 			if (dev->class != ATA_DEV_ATA)
38686013efd8STejun Heo 				continue;
38696013efd8STejun Heo 			rc = ata_eh_maybe_retry_flush(dev);
38706013efd8STejun Heo 			if (rc)
38716b7ae954STejun Heo 				goto rest_fail;
38726013efd8STejun Heo 		}
38736013efd8STejun Heo 
38746b7ae954STejun Heo 	config_lpm:
387511fc33daSTejun Heo 		/* configure link power saving */
38766b7ae954STejun Heo 		if (link->lpm_policy != ap->target_lpm_policy) {
38776b7ae954STejun Heo 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
38786b7ae954STejun Heo 			if (rc)
38796b7ae954STejun Heo 				goto rest_fail;
38806b7ae954STejun Heo 		}
3881ca77329fSKristen Carlson Accardi 
38829b1e2658STejun Heo 		/* this link is okay now */
38839b1e2658STejun Heo 		ehc->i.flags = 0;
38849b1e2658STejun Heo 		continue;
3885c6fd2807SJeff Garzik 
38866b7ae954STejun Heo 	rest_fail:
38876b7ae954STejun Heo 		nr_fails++;
38886b7ae954STejun Heo 		if (dev)
38890a2c0f56STejun Heo 			ata_eh_handle_dev_fail(dev, rc);
3890c6fd2807SJeff Garzik 
3891b06ce3e5STejun Heo 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3892b06ce3e5STejun Heo 			/* PMP reset requires a working host port.
3893b06ce3e5STejun Heo 			 * Can't retry if it's frozen.
3894b06ce3e5STejun Heo 			 */
3895071f44b1STejun Heo 			if (sata_pmp_attached(ap))
3896b06ce3e5STejun Heo 				goto out;
38979b1e2658STejun Heo 			break;
38989b1e2658STejun Heo 		}
3899b06ce3e5STejun Heo 	}
39009b1e2658STejun Heo 
39016b7ae954STejun Heo 	if (nr_fails)
3902c6fd2807SJeff Garzik 		goto retry;
3903c6fd2807SJeff Garzik 
3904c6fd2807SJeff Garzik  out:
39059b1e2658STejun Heo 	if (rc && r_failed_link)
39069b1e2658STejun Heo 		*r_failed_link = link;
3907c6fd2807SJeff Garzik 
3908c6fd2807SJeff Garzik 	DPRINTK("EXIT, rc=%d\n", rc);
3909c6fd2807SJeff Garzik 	return rc;
3910c6fd2807SJeff Garzik }
3911c6fd2807SJeff Garzik 
3912c6fd2807SJeff Garzik /**
3913c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
3914c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
3915c6fd2807SJeff Garzik  *
3916c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
3917c6fd2807SJeff Garzik  *	failed qcs.
3918c6fd2807SJeff Garzik  *
3919c6fd2807SJeff Garzik  *	LOCKING:
3920c6fd2807SJeff Garzik  *	None.
3921c6fd2807SJeff Garzik  */
3922fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3923c6fd2807SJeff Garzik {
3924c6fd2807SJeff Garzik 	int tag;
3925c6fd2807SJeff Garzik 
3926c6fd2807SJeff Garzik 	/* retry or finish qcs */
3927c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3928c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3929c6fd2807SJeff Garzik 
3930c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3931c6fd2807SJeff Garzik 			continue;
3932c6fd2807SJeff Garzik 
3933c6fd2807SJeff Garzik 		if (qc->err_mask) {
3934c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
3935c6fd2807SJeff Garzik 			 * generate sense data in this function,
3936c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
3937c6fd2807SJeff Garzik 			 */
393803faab78STejun Heo 			if (qc->flags & ATA_QCFLAG_RETRY)
3939c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
394003faab78STejun Heo 			else
394103faab78STejun Heo 				ata_eh_qc_complete(qc);
3942c6fd2807SJeff Garzik 		} else {
3943c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3944c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
3945c6fd2807SJeff Garzik 			} else {
3946c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
3947c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3948c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
3949c6fd2807SJeff Garzik 			}
3950c6fd2807SJeff Garzik 		}
3951c6fd2807SJeff Garzik 	}
3952da917d69STejun Heo 
3953da917d69STejun Heo 	/* make sure nr_active_links is zero after EH */
3954da917d69STejun Heo 	WARN_ON(ap->nr_active_links);
3955da917d69STejun Heo 	ap->nr_active_links = 0;
3956c6fd2807SJeff Garzik }
3957c6fd2807SJeff Garzik 
3958c6fd2807SJeff Garzik /**
3959c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
3960c6fd2807SJeff Garzik  *	@ap: host port to handle error for
3961a1efdabaSTejun Heo  *
3962c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
3963c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
3964c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
3965c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
3966c6fd2807SJeff Garzik  *
3967c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
3968c6fd2807SJeff Garzik  *
3969c6fd2807SJeff Garzik  *	LOCKING:
3970c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
3971c6fd2807SJeff Garzik  */
3972c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3973c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3974c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
3975c6fd2807SJeff Garzik {
39769b1e2658STejun Heo 	struct ata_device *dev;
39779b1e2658STejun Heo 	int rc;
39789b1e2658STejun Heo 
39799b1e2658STejun Heo 	ata_eh_autopsy(ap);
39809b1e2658STejun Heo 	ata_eh_report(ap);
39819b1e2658STejun Heo 
39829b1e2658STejun Heo 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
39839b1e2658STejun Heo 			    NULL);
39849b1e2658STejun Heo 	if (rc) {
39851eca4365STejun Heo 		ata_for_each_dev(dev, &ap->link, ALL)
39869b1e2658STejun Heo 			ata_dev_disable(dev);
39879b1e2658STejun Heo 	}
39889b1e2658STejun Heo 
3989c6fd2807SJeff Garzik 	ata_eh_finish(ap);
3990c6fd2807SJeff Garzik }
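
/*
 * Illustrative sketch, excluded from the build: how a low-level driver's
 * ->error_handler() typically feeds custom reset methods into ata_do_eh().
 * The hypothetical_* names are made up; most drivers simply use
 * ata_std_error_handler() below and override the individual reset ops.
 */
#if 0
static int hypothetical_hardreset(struct ata_link *link, unsigned int *class,
				  unsigned long deadline)
{
	/* controller-specific PHY handling would go here */
	return sata_std_hardreset(link, class, deadline);
}

static void hypothetical_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, NULL, hypothetical_hardreset,
		  ata_std_postreset);
}
#endif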
3991c6fd2807SJeff Garzik 
3992a1efdabaSTejun Heo /**
3993a1efdabaSTejun Heo  *	ata_std_error_handler - standard error handler
3994a1efdabaSTejun Heo  *	@ap: host port to handle error for
3995a1efdabaSTejun Heo  *
3996a1efdabaSTejun Heo  *	Standard error handler
3997a1efdabaSTejun Heo  *
3998a1efdabaSTejun Heo  *	LOCKING:
3999a1efdabaSTejun Heo  *	Kernel thread context (may sleep).
4000a1efdabaSTejun Heo  */
4001a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
4002a1efdabaSTejun Heo {
4003a1efdabaSTejun Heo 	struct ata_port_operations *ops = ap->ops;
4004a1efdabaSTejun Heo 	ata_reset_fn_t hardreset = ops->hardreset;
4005a1efdabaSTejun Heo 
400657c9efdfSTejun Heo 	/* ignore built-in hardreset if SCR access is not available */
4007fe06e5f9STejun Heo 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4008a1efdabaSTejun Heo 		hardreset = NULL;
4009a1efdabaSTejun Heo 
4010a1efdabaSTejun Heo 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4011a1efdabaSTejun Heo }
4012a1efdabaSTejun Heo 
40136ffa01d8STejun Heo #ifdef CONFIG_PM
4014c6fd2807SJeff Garzik /**
4015c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
4016c6fd2807SJeff Garzik  *	@ap: port to suspend
4017c6fd2807SJeff Garzik  *
4018c6fd2807SJeff Garzik  *	Suspend @ap.
4019c6fd2807SJeff Garzik  *
4020c6fd2807SJeff Garzik  *	LOCKING:
4021c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4022c6fd2807SJeff Garzik  */
4023c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
4024c6fd2807SJeff Garzik {
4025c6fd2807SJeff Garzik 	unsigned long flags;
4026c6fd2807SJeff Garzik 	int rc = 0;
40273dc67440SAaron Lu 	struct ata_device *dev;
4028c6fd2807SJeff Garzik 
4029c6fd2807SJeff Garzik 	/* are we suspending? */
4030c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4031c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4032*a7ff60dbSAaron Lu 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4033c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4034c6fd2807SJeff Garzik 		return;
4035c6fd2807SJeff Garzik 	}
4036c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4037c6fd2807SJeff Garzik 
4038c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4039c6fd2807SJeff Garzik 
40403dc67440SAaron Lu 	/*
40413dc67440SAaron Lu 	 * If we have a ZPODD attached, check its zero
40423dc67440SAaron Lu 	 * power ready status before the port is frozen.
4043*a7ff60dbSAaron Lu 	 * Only needed for runtime suspend.
40443dc67440SAaron Lu 	 */
4045*a7ff60dbSAaron Lu 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
40463dc67440SAaron Lu 		ata_for_each_dev(dev, &ap->link, ENABLED) {
40473dc67440SAaron Lu 			if (zpodd_dev_enabled(dev))
40483dc67440SAaron Lu 				zpodd_on_suspend(dev);
40493dc67440SAaron Lu 		}
4050*a7ff60dbSAaron Lu 	}
40513dc67440SAaron Lu 
405264578a3dSTejun Heo 	/* tell ACPI we're suspending */
405364578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
405464578a3dSTejun Heo 	if (rc)
405564578a3dSTejun Heo 		goto out;
405664578a3dSTejun Heo 
4057c6fd2807SJeff Garzik 	/* suspend */
4058c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
4059c6fd2807SJeff Garzik 
4060c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
4061c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4062c6fd2807SJeff Garzik 
4063*a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
406464578a3dSTejun Heo  out:
4065c6fd2807SJeff Garzik 	/* report result */
4066c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4067c6fd2807SJeff Garzik 
4068c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4069c6fd2807SJeff Garzik 	if (rc == 0)
4070c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
407164578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4072c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
4073c6fd2807SJeff Garzik 
4074c6fd2807SJeff Garzik 	if (ap->pm_result) {
4075c6fd2807SJeff Garzik 		*ap->pm_result = rc;
4076c6fd2807SJeff Garzik 		ap->pm_result = NULL;
4077c6fd2807SJeff Garzik 	}
4078c6fd2807SJeff Garzik 
4079c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4080c6fd2807SJeff Garzik 
4081c6fd2807SJeff Garzik 	return;
4082c6fd2807SJeff Garzik }
4083c6fd2807SJeff Garzik 
4084c6fd2807SJeff Garzik /**
4085c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
4086c6fd2807SJeff Garzik  *	@ap: port to resume
4087c6fd2807SJeff Garzik  *
4088c6fd2807SJeff Garzik  *	Resume @ap.
4089c6fd2807SJeff Garzik  *
4090c6fd2807SJeff Garzik  *	LOCKING:
4091c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
4092c6fd2807SJeff Garzik  */
4093c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
4094c6fd2807SJeff Garzik {
40956f9c1ea2STejun Heo 	struct ata_link *link;
40966f9c1ea2STejun Heo 	struct ata_device *dev;
4097c6fd2807SJeff Garzik 	unsigned long flags;
40989666f400STejun Heo 	int rc = 0;
4099c6fd2807SJeff Garzik 
4100c6fd2807SJeff Garzik 	/* are we resuming? */
4101c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4102c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4103*a7ff60dbSAaron Lu 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4104c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4105c6fd2807SJeff Garzik 		return;
4106c6fd2807SJeff Garzik 	}
4107c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4108c6fd2807SJeff Garzik 
41099666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4110c6fd2807SJeff Garzik 
41116f9c1ea2STejun Heo 	/*
41126f9c1ea2STejun Heo 	 * Error timestamps are in jiffies which doesn't run while
41136f9c1ea2STejun Heo 	 * suspended and PHY events during resume aren't too uncommon.
41146f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
41156f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
41166f9c1ea2STejun Heo 	 * Clear error history.
41176f9c1ea2STejun Heo 	 */
41186f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
41196f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
41206f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
41216f9c1ea2STejun Heo 
4122*a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
4123bd3adca5SShaohua Li 
4124c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
4125c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
4126c6fd2807SJeff Garzik 
41276746544cSTejun Heo 	/* tell ACPI that we're resuming */
41286746544cSTejun Heo 	ata_acpi_on_resume(ap);
41296746544cSTejun Heo 
41309666f400STejun Heo 	/* report result */
4131c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4132c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4133c6fd2807SJeff Garzik 	if (ap->pm_result) {
4134c6fd2807SJeff Garzik 		*ap->pm_result = rc;
4135c6fd2807SJeff Garzik 		ap->pm_result = NULL;
4136c6fd2807SJeff Garzik 	}
4137c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4138c6fd2807SJeff Garzik }
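
/*
 * Illustrative sketch, excluded from the build: the ->port_suspend() and
 * ->port_resume() hooks invoked by the two handlers above.  A hypothetical
 * controller driver would quiesce and re-initialise its port hardware in
 * these; names and behaviour are assumptions for illustration only.
 */
#if 0
static int hypothetical_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	/* stop engines and save controller state for @ap */
	return 0;
}

static int hypothetical_port_resume(struct ata_port *ap)
{
	/* restore controller state and restart engines for @ap */
	return 0;
}
#endif
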
41396ffa01d8STejun Heo #endif /* CONFIG_PM */
4140