// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-eh.c - libata error handling
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>
#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned int ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	5000,	/* and sweet one last chance */
	UINT_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned int ata_eh_identify_timeouts[] = {
	5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	UINT_MAX,
};

static const unsigned int ata_eh_revalidate_timeouts[] = {
	15000,	/* Some drives are slow to read log pages when waking-up */
	15000,	/* combined time till here is enough even for media access */
	UINT_MAX,
};

static const unsigned int ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	UINT_MAX,
};

static const unsigned int ata_eh_other_timeouts[] = {
	5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	UINT_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned int	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
	  .timeouts = ata_eh_revalidate_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
	{ .commands = CMDS(ATA_CMD_VERIFY),
	  .timeouts = ata_eh_reset_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
						const char *fmt, va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);

/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);

/**
 * ata_ehi_clear_desc - clean error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);

/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_port_desc);

#ifdef CONFIG_PCI
/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only the name and the offset address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
			      start + (unsigned long long)offset);
}
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */

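/*
 * Map @cmd to the index of its command class in ata_eh_cmd_timeout_table,
 * or return -1 if the command has no class-specific timeout table.
 */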
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

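/* Return the most recently recorded error ring entry, or NULL if the ring
 * does not contain a recorded entry at the cursor yet.
 */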
static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

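/* "Clearing" the error ring does not erase the entries; it only marks every
 * recorded entry with ATA_EFLAG_OLD_ER instead.
 */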
static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

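/* Combined EH actions for @dev: link-wide actions plus per-device actions. */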
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 * ata_eh_acquire - acquire EH ownership
 * @ap: ATA port to acquire EH ownership for
 *
 * Acquire EH ownership for @ap.  This is the basic exclusion
 * mechanism for ports sharing a host.  Only one port hanging off
 * the same host can claim the ownership of EH.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 * ata_eh_release - release EH ownership
 * @ap: ATA port to release EH ownership for
 *
 * Release EH ownership for @ap.  The caller must have acquired
 * EH ownership using ata_eh_acquire() previously.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * Unless we are restarting, transition all enabled devices to
	 * standby power mode.
	 */
	if (system_state != SYSTEM_RESTART) {
		ata_for_each_link(link, ap, PMP_FIRST) {
			ata_for_each_dev(dev, link, ENABLED)
				ata_dev_power_set_standby(dev);
		}
	}

	/*
	 * Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we only raced with normal completion and there is nothing to
	   recover (nr_timedout == 0), why exactly are we doing error recovery ? */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));

}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host: scsi host containing the port
 * @ap: ATA port within the host
 * @eh_work_q: list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;
	struct scsi_cmnd *scmd, *tmp;
	int nr_timedout = 0;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/*
	 * For EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_EH set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	spin_lock_irqsave(ap->lock, flags);

	/*
	 * This must occur under the ap->lock as we don't want
	 * a polled recovery to race the real interrupt handler
	 *
	 * The lost_interrupt handler checks for any completed but
	 * non-notified command and completes much like an IRQ handler.
	 *
	 * We then fall into the error recovery code which will treat
	 * this as if normal completion won the race
	 */
	if (ap->ops->lost_interrupt)
		ap->ops->lost_interrupt(ap);

	list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
		struct ata_queued_cmd *qc;

		/*
		 * If the scmd was added to EH, via ata_qc_schedule_eh() ->
		 * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
		 * have set DID_TIME_OUT (since libata does not have an abort
		 * handler).  Thus, to clear DID_TIME_OUT, clear the host byte.
		 */
		set_host_byte(scmd, DID_OK);

		ata_qc_for_each_raw(ap, qc, i) {
			if (qc->flags & ATA_QCFLAG_ACTIVE &&
			    qc->scsicmd == scmd)
				break;
		}

		if (i < ATA_MAX_QUEUE) {
			/* the scmd has an associated qc */
			if (!(qc->flags & ATA_QCFLAG_EH)) {
				/* which hasn't failed yet, timeout */
				set_host_byte(scmd, DID_TIME_OUT);
				qc->err_mask |= AC_ERR_TIMEOUT;
				qc->flags |= ATA_QCFLAG_EH;
				nr_timedout++;
			}
		} else {
			/* Normal completion occurred after
			 * SCSI timeout but before this point.
			 * Successfully complete it.
			 */
			scmd->retries = scmd->allowed;
			scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
		}
	}

	/*
	 * If we have timed out qcs, they belong to EH from
	 * this point but the state of the controller is
	 * unknown.  Freeze the port to make sure the IRQ
	 * handler doesn't diddle with those qcs.  This must
	 * be done atomically w.r.t. setting ATA_QCFLAG_EH.
	 */
	if (nr_timedout)
		__ata_port_freeze(ap);

	/* initialize eh_tries */
	ap->eh_tries = ATA_EH_MAX_TRIES;

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host: SCSI host containing the port
 * @ap: the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;

	/* acquire EH ownership */
	ata_eh_acquire(ap);
repeat:
	/* kill fast drain timer */
	del_timer_sync(&ap->fastdrain_timer);

	/* process port resume request */
	ata_eh_handle_port_resume(ap);

	/* fetch & clear EH info */
	spin_lock_irqsave(ap->lock, flags);

	ata_for_each_link(link, ap, HOST_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;
		struct ata_device *dev;

		memset(&link->eh_context, 0, sizeof(link->eh_context));
		link->eh_context.i = link->eh_info;
		memset(&link->eh_info, 0, sizeof(link->eh_info));

		ata_for_each_dev(dev, link, ENABLED) {
			int devno = dev->devno;

			ehc->saved_xfer_mode[devno] = dev->xfer_mode;
			if (ata_ncq_enabled(dev))
				ehc->saved_ncq_enabled |= 1 << devno;

			/* If we are resuming, wake up the device */
			if (ap->pflags & ATA_PFLAG_RESUMING) {
				dev->flags |= ATA_DFLAG_RESUMING;
				ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
			}
		}
	}

	ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	ap->excl_link = NULL;	/* don't maintain exclusion over EH */

	spin_unlock_irqrestore(ap->lock, flags);

	/* invoke EH, skip if unloading or suspended */
	if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
		ap->ops->error_handler(ap);
	else {
		/* if unloading, commence suicide */
		if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
		    !(ap->pflags & ATA_PFLAG_UNLOADED))
			ata_eh_unload(ap);
		ata_eh_finish(ap);
	}

	/* process port suspend request */
	ata_eh_handle_port_suspend(ap);

	/*
	 * Exception might have happened after ->error_handler recovered the
	 * port but before this point.  Repeat EH in such case.
	 */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_EH_PENDING) {
		if (--ap->eh_tries) {
			spin_unlock_irqrestore(ap->lock, flags);
			goto repeat;
		}
		ata_port_err(ap,
			     "EH pending after %d tries, giving up\n",
			     ATA_EH_MAX_TRIES);
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	}

	/* this run is complete, make sure EH info is clear */
	ata_for_each_link(link, ap, HOST_FIRST)
		memset(&link->eh_info, 0, sizeof(link->eh_info));

	/*
	 * end eh (clear host_eh_scheduled) while holding ap->lock such that if
	 * exception occurs after this point but before EH completion, SCSI
	 * midlayer will re-initiate EH.
	 */
	ap->ops->end_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
	ata_eh_release(ap);

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_RESUMING;

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		 !(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag;
	unsigned int nr = 0;

	/* count only non-internal commands */
	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			nr++;
	}

	return nr;
}

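/*
 * Fast drain timer callback: if no command completed since the timer was
 * armed, mark all in-flight qcs as timed out and freeze the port; otherwise
 * remember the new in-flight count and re-arm the timer.
 */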
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
	unsigned long flags;
	unsigned int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		struct ata_queued_cmd *qc;
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		ata_qc_for_each(ap, qc, tag) {
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	unsigned int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->flags |= ATA_QCFLAG_EH;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_EH is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 * LOCKING: inherited from ata_port_schedule_eh
 * spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);


/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);

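/*
 * Abort all active qcs on @ap (or only those belonging to @link when @link is
 * not NULL) and schedule EH if nothing was aborted.
 */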
ata_do_link_abort(struct ata_port * ap,struct ata_link * link)1004dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1005c6fd2807SJeff Garzik {
1006258c4e5cSJens Axboe struct ata_queued_cmd *qc;
1007c6fd2807SJeff Garzik int tag, nr_aborted = 0;
1008c6fd2807SJeff Garzik
10095ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */
10105ddf24c5STejun Heo ata_eh_set_pending(ap, 0);
10115ddf24c5STejun Heo
101228361c40SJens Axboe /* include internal tag in iteration */
1013258c4e5cSJens Axboe ata_qc_for_each_with_internal(ap, qc, tag) {
1014dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) {
101587629312SNiklas Cassel qc->flags |= ATA_QCFLAG_EH;
1016c6fd2807SJeff Garzik ata_qc_complete(qc);
1017c6fd2807SJeff Garzik nr_aborted++;
1018c6fd2807SJeff Garzik }
1019c6fd2807SJeff Garzik }
1020c6fd2807SJeff Garzik
1021c6fd2807SJeff Garzik if (!nr_aborted)
1022c6fd2807SJeff Garzik ata_port_schedule_eh(ap);
1023c6fd2807SJeff Garzik
1024c6fd2807SJeff Garzik return nr_aborted;
1025c6fd2807SJeff Garzik }
1026c6fd2807SJeff Garzik
1027c6fd2807SJeff Garzik /**
1028dbd82616STejun Heo * ata_link_abort - abort all qc's on the link
1029dbd82616STejun Heo * @link: ATA link to abort qc's for
1030dbd82616STejun Heo *
1031dbd82616STejun Heo * Abort all active qc's active on @link and schedule EH.
1032dbd82616STejun Heo *
1033dbd82616STejun Heo * LOCKING:
1034dbd82616STejun Heo * spin_lock_irqsave(host lock)
1035dbd82616STejun Heo *
1036dbd82616STejun Heo * RETURNS:
1037dbd82616STejun Heo * Number of aborted qc's.
1038dbd82616STejun Heo */
ata_link_abort(struct ata_link * link)1039dbd82616STejun Heo int ata_link_abort(struct ata_link *link)
1040dbd82616STejun Heo {
1041dbd82616STejun Heo return ata_do_link_abort(link->ap, link);
1042dbd82616STejun Heo }
1043a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_link_abort);
1044dbd82616STejun Heo
1045dbd82616STejun Heo /**
1046dbd82616STejun Heo * ata_port_abort - abort all qc's on the port
1047dbd82616STejun Heo * @ap: ATA port to abort qc's for
1048dbd82616STejun Heo *
1049dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH.
1050dbd82616STejun Heo *
1051dbd82616STejun Heo * LOCKING:
1052dbd82616STejun Heo * spin_lock_irqsave(host_set lock)
1053dbd82616STejun Heo *
1054dbd82616STejun Heo * RETURNS:
1055dbd82616STejun Heo * Number of aborted qc's.
1056dbd82616STejun Heo */
ata_port_abort(struct ata_port * ap)1057dbd82616STejun Heo int ata_port_abort(struct ata_port *ap)
1058dbd82616STejun Heo {
1059dbd82616STejun Heo return ata_do_link_abort(ap, NULL);
1060dbd82616STejun Heo }
1061a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_abort);
1062dbd82616STejun Heo
1063dbd82616STejun Heo /**
1064c6fd2807SJeff Garzik * __ata_port_freeze - freeze port
1065c6fd2807SJeff Garzik * @ap: ATA port to freeze
1066c6fd2807SJeff Garzik *
1067c6fd2807SJeff Garzik * This function is called when HSM violation or some other
1068c6fd2807SJeff Garzik * condition disrupts normal operation of the port. Frozen port
1069c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is
1070c6fd2807SJeff Garzik * thawed, which usually follows a successful reset.
1071c6fd2807SJeff Garzik *
1072c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port
1073c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1074c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler
1075c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port
1076c6fd2807SJeff Garzik * is frozen.
1077c6fd2807SJeff Garzik *
1078c6fd2807SJeff Garzik * LOCKING:
1079cca3974eSJeff Garzik * spin_lock_irqsave(host lock)
1080c6fd2807SJeff Garzik */
__ata_port_freeze(struct ata_port * ap)1081c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap)
1082c6fd2807SJeff Garzik {
1083c6fd2807SJeff Garzik if (ap->ops->freeze)
1084c6fd2807SJeff Garzik ap->ops->freeze(ap);
1085c6fd2807SJeff Garzik
1086c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN;
1087c6fd2807SJeff Garzik
1088c318458cSHannes Reinecke trace_ata_port_freeze(ap);
1089c6fd2807SJeff Garzik }
1090c6fd2807SJeff Garzik
1091c6fd2807SJeff Garzik /**
1092c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port
1093c6fd2807SJeff Garzik * @ap: ATA port to freeze
1094c6fd2807SJeff Garzik *
109554c38444SJeff Garzik * Abort and freeze @ap. The freeze operation must be called
109654c38444SJeff Garzik * first, because some hardware requires special operations
109754c38444SJeff Garzik * before the taskfile registers are accessible.
1098c6fd2807SJeff Garzik *
1099c6fd2807SJeff Garzik * LOCKING:
1100cca3974eSJeff Garzik * spin_lock_irqsave(host lock)
1101c6fd2807SJeff Garzik *
1102c6fd2807SJeff Garzik * RETURNS:
1103c6fd2807SJeff Garzik * Number of aborted commands.
1104c6fd2807SJeff Garzik */
ata_port_freeze(struct ata_port * ap)1105c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
1106c6fd2807SJeff Garzik {
1107c6fd2807SJeff Garzik __ata_port_freeze(ap);
1108c6fd2807SJeff Garzik
1109cb6e73aaSye xingchen return ata_port_abort(ap);
1110c6fd2807SJeff Garzik }
1111a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_freeze);
1112c6fd2807SJeff Garzik
1113c6fd2807SJeff Garzik /**
1114c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port
1115c6fd2807SJeff Garzik * @ap: ATA port to freeze
1116c6fd2807SJeff Garzik *
1117c6fd2807SJeff Garzik * Freeze @ap.
1118c6fd2807SJeff Garzik *
1119c6fd2807SJeff Garzik * LOCKING:
1120c6fd2807SJeff Garzik * None.
1121c6fd2807SJeff Garzik */
ata_eh_freeze_port(struct ata_port * ap)1122c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1123c6fd2807SJeff Garzik {
1124c6fd2807SJeff Garzik unsigned long flags;
1125c6fd2807SJeff Garzik
1126c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1127c6fd2807SJeff Garzik __ata_port_freeze(ap);
1128c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1129c6fd2807SJeff Garzik }
1130a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
1131c6fd2807SJeff Garzik
1132c6fd2807SJeff Garzik /**
113394bd5719SMauro Carvalho Chehab * ata_eh_thaw_port - EH helper to thaw port
1134c6fd2807SJeff Garzik * @ap: ATA port to thaw
1135c6fd2807SJeff Garzik *
1136c6fd2807SJeff Garzik * Thaw frozen port @ap.
1137c6fd2807SJeff Garzik *
1138c6fd2807SJeff Garzik * LOCKING:
1139c6fd2807SJeff Garzik * None.
1140c6fd2807SJeff Garzik */
1141c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1142c6fd2807SJeff Garzik {
1143c6fd2807SJeff Garzik unsigned long flags;
1144c6fd2807SJeff Garzik
1145c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1146c6fd2807SJeff Garzik
1147c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN;
1148c6fd2807SJeff Garzik
1149c6fd2807SJeff Garzik if (ap->ops->thaw)
1150c6fd2807SJeff Garzik ap->ops->thaw(ap);
1151c6fd2807SJeff Garzik
1152c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1153c6fd2807SJeff Garzik
1154c318458cSHannes Reinecke trace_ata_port_thaw(ap);
1155c6fd2807SJeff Garzik }
1156c6fd2807SJeff Garzik
1157c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1158c6fd2807SJeff Garzik {
1159c6fd2807SJeff Garzik /* nada */
1160c6fd2807SJeff Garzik }
1161c6fd2807SJeff Garzik
1162c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1163c6fd2807SJeff Garzik {
1164c6fd2807SJeff Garzik struct ata_port *ap = qc->ap;
1165c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd;
1166c6fd2807SJeff Garzik unsigned long flags;
1167c6fd2807SJeff Garzik
1168c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1169c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone;
1170c6fd2807SJeff Garzik __ata_qc_complete(qc);
1171c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag));
1172c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1173c6fd2807SJeff Garzik
1174c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1175c6fd2807SJeff Garzik }
1176c6fd2807SJeff Garzik
1177c6fd2807SJeff Garzik /**
1178c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH
1179c6fd2807SJeff Garzik * @qc: Command to complete
1180c6fd2807SJeff Garzik *
1181c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has
1182c6fd2807SJeff Garzik * completed. To be used from EH.
1183c6fd2807SJeff Garzik */
1184c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1185c6fd2807SJeff Garzik {
1186c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd;
1187c6fd2807SJeff Garzik scmd->retries = scmd->allowed;
1188c6fd2807SJeff Garzik __ata_eh_qc_complete(qc);
1189c6fd2807SJeff Garzik }
1190c6fd2807SJeff Garzik
1191c6fd2807SJeff Garzik /**
1192c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1193c6fd2807SJeff Garzik * @qc: Command to retry
1194c6fd2807SJeff Garzik *
1195c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command
1196c6fd2807SJeff Garzik * should be retried. To be used from EH.
1197c6fd2807SJeff Garzik *
1198c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed.
1199f13e2201SGwendal Grignou * scmd->allowed is incremented for commands which get retried
1200c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero).
1201c6fd2807SJeff Garzik */
1202c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1203c6fd2807SJeff Garzik {
1204c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd;
1205f13e2201SGwendal Grignou if (!qc->err_mask)
1206f13e2201SGwendal Grignou scmd->allowed++;
1207c6fd2807SJeff Garzik __ata_eh_qc_complete(qc);
1208c6fd2807SJeff Garzik }
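
/*
 * Illustrative sketch, not code from this driver: once recovery has
 * finished, an EH path typically walks the failed commands and either
 * retries or completes them. The "recovered" flag is hypothetical;
 * ata_qc_for_each_raw(), ATA_QCFLAG_EH and ATA_QCFLAG_RETRY are real
 * libata symbols (ata_eh_finish(), elsewhere in this file, contains the
 * driver's actual loop).
 *
 *	ata_qc_for_each_raw(ap, qc, tag) {
 *		if (!(qc->flags & ATA_QCFLAG_EH))
 *			continue;
 *		if (recovered && (qc->flags & ATA_QCFLAG_RETRY))
 *			ata_eh_qc_retry(qc);
 *		else
 *			ata_eh_qc_complete(qc);
 *	}
 */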
1209c6fd2807SJeff Garzik
1210c6fd2807SJeff Garzik /**
1211678afac6STejun Heo * ata_dev_disable - disable ATA device
1212678afac6STejun Heo * @dev: ATA device to disable
1213678afac6STejun Heo *
1214678afac6STejun Heo * Disable @dev.
1215678afac6STejun Heo *
1216678afac6STejun Heo * Locking:
1217678afac6STejun Heo * EH context.
1218678afac6STejun Heo */
1219678afac6STejun Heo void ata_dev_disable(struct ata_device *dev)
1220678afac6STejun Heo {
1221678afac6STejun Heo if (!ata_dev_enabled(dev))
1222678afac6STejun Heo return;
1223678afac6STejun Heo
12241c95a27cSHannes Reinecke ata_dev_warn(dev, "disable device\n");
1225678afac6STejun Heo ata_acpi_on_disable(dev);
1226678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
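	/*
	 * Note (added for clarity): incrementing the class relies on each
	 * ATA_DEV_*_UNSUP value in <linux/libata.h> being defined as the
	 * corresponding ATA_DEV_* value plus one, so the bump marks the
	 * device as present but unsupported.
	 */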
1227678afac6STejun Heo dev->class++;
122899cf610aSTejun Heo
122999cf610aSTejun Heo /* From now till the next successful probe, ering is used to
123099cf610aSTejun Heo * track probe failures. Clear accumulated device error info.
123199cf610aSTejun Heo */
123299cf610aSTejun Heo ata_ering_clear(&dev->ering);
1233678afac6STejun Heo }
1234a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_dev_disable);
1235678afac6STejun Heo
1236678afac6STejun Heo /**
1237c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device
1238c6fd2807SJeff Garzik * @dev: ATA device to detach
1239c6fd2807SJeff Garzik *
1240c6fd2807SJeff Garzik * Detach @dev.
1241c6fd2807SJeff Garzik *
1242c6fd2807SJeff Garzik * LOCKING:
1243c6fd2807SJeff Garzik * None.
1244c6fd2807SJeff Garzik */
1245fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1246c6fd2807SJeff Garzik {
1247f58229f8STejun Heo struct ata_link *link = dev->link;
1248f58229f8STejun Heo struct ata_port *ap = link->ap;
124990484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
1250c6fd2807SJeff Garzik unsigned long flags;
1251c6fd2807SJeff Garzik
1252aa3998dbSDamien Le Moal /*
1253aa3998dbSDamien Le Moal * If the device is still enabled, transition it to standby power mode
1254aa3998dbSDamien Le Moal * (i.e. spin down HDDs).
1255aa3998dbSDamien Le Moal */
1256aa3998dbSDamien Le Moal if (ata_dev_enabled(dev))
1257aa3998dbSDamien Le Moal ata_dev_power_set_standby(dev);
1258aa3998dbSDamien Le Moal
1259c6fd2807SJeff Garzik ata_dev_disable(dev);
1260c6fd2807SJeff Garzik
1261c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1262c6fd2807SJeff Garzik
1263c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH;
1264c6fd2807SJeff Garzik
1265c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) {
1266c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED;
1267c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1268c6fd2807SJeff Garzik }
1269c6fd2807SJeff Garzik
127090484ebfSTejun Heo /* clear per-dev EH info */
1271f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1272f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
127390484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0;
127490484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1275c6fd2807SJeff Garzik
1276c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1277c6fd2807SJeff Garzik }
1278c6fd2807SJeff Garzik
1279c6fd2807SJeff Garzik /**
1280c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action
1281955e57dfSTejun Heo * @link: target ATA link
1282c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL)
1283c6fd2807SJeff Garzik * @action: action about to be performed
1284c6fd2807SJeff Garzik *
1285c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits
1286955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily
1287955e57dfSTejun Heo * repeated.
1288c6fd2807SJeff Garzik *
1289c6fd2807SJeff Garzik * LOCKING:
1290c6fd2807SJeff Garzik * None.
1291c6fd2807SJeff Garzik */
1292fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1293c6fd2807SJeff Garzik unsigned int action)
1294c6fd2807SJeff Garzik {
1295955e57dfSTejun Heo struct ata_port *ap = link->ap;
1296955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info;
1297955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
1298c6fd2807SJeff Garzik unsigned long flags;
1299c6fd2807SJeff Garzik
1300c318458cSHannes Reinecke trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);
1301c318458cSHannes Reinecke
1302c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1303c6fd2807SJeff Garzik
1304955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action);
1305c6fd2807SJeff Garzik
1306a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on
1307a568d1d2STejun Heo * slave links as master will do them again.
1308a568d1d2STejun Heo */
1309a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1310c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED;
1311c6fd2807SJeff Garzik
1312c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1313c6fd2807SJeff Garzik }
1314c6fd2807SJeff Garzik
1315c6fd2807SJeff Garzik /**
1316c6fd2807SJeff Garzik * ata_eh_done - EH action complete
13172f60e1abSJonathan Corbet * @link: ATA link for which EH actions are complete
1318c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL)
1319c6fd2807SJeff Garzik * @action: action just completed
1320c6fd2807SJeff Garzik *
1321c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits
1322955e57dfSTejun Heo * in @link->eh_context.
1323c6fd2807SJeff Garzik *
1324c6fd2807SJeff Garzik * LOCKING:
1325c6fd2807SJeff Garzik * None.
1326c6fd2807SJeff Garzik */
1327fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1328c6fd2807SJeff Garzik unsigned int action)
1329c6fd2807SJeff Garzik {
1330955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
13319af5c9c9STejun Heo
1332c318458cSHannes Reinecke trace_ata_eh_done(link, dev ? dev->devno : 0, action);
1333c318458cSHannes Reinecke
1334955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action);
1335c6fd2807SJeff Garzik }
1336c6fd2807SJeff Garzik
1337c6fd2807SJeff Garzik /**
1338c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string
1339c6fd2807SJeff Garzik * @err_mask: error mask to convert to string
1340c6fd2807SJeff Garzik *
1341c6fd2807SJeff Garzik * Convert @err_mask to a descriptive string. Errors are
1342c6fd2807SJeff Garzik * prioritized according to severity and only the most severe
1343c6fd2807SJeff Garzik * error is reported.
1344c6fd2807SJeff Garzik *
1345c6fd2807SJeff Garzik * LOCKING:
1346c6fd2807SJeff Garzik * None.
1347c6fd2807SJeff Garzik *
1348c6fd2807SJeff Garzik * RETURNS:
1349c6fd2807SJeff Garzik * Descriptive string for @err_mask
1350c6fd2807SJeff Garzik */
1351c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask)
1352c6fd2807SJeff Garzik {
1353c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS)
1354c6fd2807SJeff Garzik return "host bus error";
1355c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS)
1356c6fd2807SJeff Garzik return "ATA bus error";
1357c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT)
1358c6fd2807SJeff Garzik return "timeout";
1359c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM)
1360c6fd2807SJeff Garzik return "HSM violation";
1361c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM)
1362c6fd2807SJeff Garzik return "internal error";
1363c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA)
1364c6fd2807SJeff Garzik return "media error";
1365c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID)
1366c6fd2807SJeff Garzik return "invalid argument";
1367c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV)
1368c6fd2807SJeff Garzik return "device error";
136954fb131bSDamien Le Moal if (err_mask & AC_ERR_NCQ)
137054fb131bSDamien Le Moal return "NCQ error";
137154fb131bSDamien Le Moal if (err_mask & AC_ERR_NODEV_HINT)
137254fb131bSDamien Le Moal return "Polling detection error";
1373c6fd2807SJeff Garzik return "unknown error";
1374c6fd2807SJeff Garzik }
1375c6fd2807SJeff Garzik
1376c6fd2807SJeff Garzik /**
137711fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
137811fc33daSTejun Heo * @dev: target ATAPI device
137911fc33daSTejun Heo * @r_sense_key: out parameter for sense_key
138011fc33daSTejun Heo *
138111fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY.
138211fc33daSTejun Heo *
138311fc33daSTejun Heo * LOCKING:
138411fc33daSTejun Heo * EH context (may sleep).
138511fc33daSTejun Heo *
138611fc33daSTejun Heo * RETURNS:
138711fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure.
138811fc33daSTejun Heo */
13893dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
139011fc33daSTejun Heo {
139111fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
139211fc33daSTejun Heo struct ata_taskfile tf;
139311fc33daSTejun Heo unsigned int err_mask;
139411fc33daSTejun Heo
139511fc33daSTejun Heo ata_tf_init(dev, &tf);
139611fc33daSTejun Heo
139711fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
139811fc33daSTejun Heo tf.command = ATA_CMD_PACKET;
139911fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA;
140011fc33daSTejun Heo
140111fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
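	/*
	 * For ATAPI devices the sense key is reported in bits 7:4 of the
	 * Error register, which ata_exec_internal() returns in tf.error;
	 * hence the shift below.
	 */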
140211fc33daSTejun Heo if (err_mask == AC_ERR_DEV)
1403efcef265SSergey Shtylyov *r_sense_key = tf.error >> 4;
140411fc33daSTejun Heo return err_mask;
140511fc33daSTejun Heo }
140611fc33daSTejun Heo
140711fc33daSTejun Heo /**
1408e87fd28cSHannes Reinecke * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
14092f60e1abSJonathan Corbet * @qc: qc to perform REQUEST_SENSE_DATA_EXT to
1410e87fd28cSHannes Reinecke *
1411e87fd28cSHannes Reinecke * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1412e87fd28cSHannes Reinecke * SENSE. This function is an EH helper.
1413e87fd28cSHannes Reinecke *
1414e87fd28cSHannes Reinecke * LOCKING:
1415e87fd28cSHannes Reinecke * Kernel thread context (may sleep).
141624aeebbfSNiklas Cassel *
141724aeebbfSNiklas Cassel * RETURNS:
141824aeebbfSNiklas Cassel * true if sense data could be fetched, false otherwise.
1419e87fd28cSHannes Reinecke */
142024aeebbfSNiklas Cassel static bool ata_eh_request_sense(struct ata_queued_cmd *qc)
1421e87fd28cSHannes Reinecke {
1422b46c760eSNiklas Cassel struct scsi_cmnd *cmd = qc->scsicmd;
1423e87fd28cSHannes Reinecke struct ata_device *dev = qc->dev;
1424e87fd28cSHannes Reinecke struct ata_taskfile tf;
1425e87fd28cSHannes Reinecke unsigned int err_mask;
1426e87fd28cSHannes Reinecke
14274cb7c6f1SNiklas Cassel if (ata_port_is_frozen(qc->ap)) {
1428e87fd28cSHannes Reinecke ata_dev_warn(dev, "sense data available but port frozen\n");
142924aeebbfSNiklas Cassel return false;
1430e87fd28cSHannes Reinecke }
1431e87fd28cSHannes Reinecke
1432e87fd28cSHannes Reinecke if (!ata_id_sense_reporting_enabled(dev->id)) {
1433e87fd28cSHannes Reinecke ata_dev_warn(qc->dev, "sense data reporting disabled\n");
143424aeebbfSNiklas Cassel return false;
1435e87fd28cSHannes Reinecke }
1436e87fd28cSHannes Reinecke
1437e87fd28cSHannes Reinecke ata_tf_init(dev, &tf);
1438e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1439e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1440e87fd28cSHannes Reinecke tf.command = ATA_CMD_REQ_SENSE_DATA;
1441e87fd28cSHannes Reinecke tf.protocol = ATA_PROT_NODATA;
1442e87fd28cSHannes Reinecke
1443e87fd28cSHannes Reinecke err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1444e87fd28cSHannes Reinecke /* Ignore err_mask; ATA_ERR might be set */
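	/*
	 * Added note (assumption based on the REQUEST SENSE DATA EXT
	 * definition): on success the device returns the sense key, ASC and
	 * ASCQ in the LBA high, LBA mid and LBA low fields of the result
	 * taskfile; they are translated into a SCSI sense buffer below.
	 */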
1445efcef265SSergey Shtylyov if (tf.status & ATA_SENSE) {
14464b89ad8eSNiklas Cassel if (ata_scsi_sense_is_valid(tf.lbah, tf.lbam, tf.lbal)) {
144724aeebbfSNiklas Cassel /* Set sense without also setting scsicmd->result */
144824aeebbfSNiklas Cassel scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
144924aeebbfSNiklas Cassel cmd->sense_buffer, tf.lbah,
145024aeebbfSNiklas Cassel tf.lbam, tf.lbal);
1451e87fd28cSHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID;
145224aeebbfSNiklas Cassel return true;
14534b89ad8eSNiklas Cassel }
1454e87fd28cSHannes Reinecke } else {
1455e87fd28cSHannes Reinecke ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1456efcef265SSergey Shtylyov tf.status, err_mask);
1457e87fd28cSHannes Reinecke }
145824aeebbfSNiklas Cassel
145924aeebbfSNiklas Cassel return false;
1460e87fd28cSHannes Reinecke }
1461e87fd28cSHannes Reinecke
1462e87fd28cSHannes Reinecke /**
1463c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1464c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to
1465c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
14663eabddb8STejun Heo * @dfl_sense_key: default sense key to use
1467c6fd2807SJeff Garzik *
1468c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK
1469c6fd2807SJeff Garzik * SENSE. This function is an EH helper.
1470c6fd2807SJeff Garzik *
1471c6fd2807SJeff Garzik * LOCKING:
1472c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1473c6fd2807SJeff Garzik *
1474c6fd2807SJeff Garzik * RETURNS:
1475c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure
1476c6fd2807SJeff Garzik */
14773dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev,
14783eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key)
1479c6fd2807SJeff Garzik {
14803eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] =
14813eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
14829af5c9c9STejun Heo struct ata_port *ap = dev->link->ap;
1483c6fd2807SJeff Garzik struct ata_taskfile tf;
1484c6fd2807SJeff Garzik
1485c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1486c6fd2807SJeff Garzik
148756287768SAlbert Lee /* Initialize sense_buf with the error register,
148856287768SAlbert Lee * for the case where it is not overwritten by the device.
148956287768SAlbert Lee */
1490c6fd2807SJeff Garzik sense_buf[0] = 0x70;
14913eabddb8STejun Heo sense_buf[2] = dfl_sense_key;
149256287768SAlbert Lee
149356287768SAlbert Lee /* some devices time out if garbage left in tf */
149456287768SAlbert Lee ata_tf_init(dev, &tf);
1495c6fd2807SJeff Garzik
1496c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1497c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET;
1498c6fd2807SJeff Garzik
1499c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */
1500c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) {
15010dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA;
1502c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA;
1503c6fd2807SJeff Garzik } else {
15040dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO;
1505f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE;
1506f2dfc1a1STejun Heo tf.lbah = 0;
1507c6fd2807SJeff Garzik }
1508c6fd2807SJeff Garzik
1509c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
15102b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1511c6fd2807SJeff Garzik }
1512c6fd2807SJeff Garzik
1513c6fd2807SJeff Garzik /**
1514c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port
15150260731fSTejun Heo * @link: ATA link to analyze SError for
1516c6fd2807SJeff Garzik *
1517c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of
1518c6fd2807SJeff Garzik * failure.
1519c6fd2807SJeff Garzik *
1520c6fd2807SJeff Garzik * LOCKING:
1521c6fd2807SJeff Garzik * None.
1522c6fd2807SJeff Garzik */
15230260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link)
1524c6fd2807SJeff Garzik {
15250260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
1526c6fd2807SJeff Garzik u32 serror = ehc->i.serror;
1527c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0;
1528f9df58cbSTejun Heo u32 hotplug_mask;
1529c6fd2807SJeff Garzik
1530e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1531c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS;
1532cf480626STejun Heo action |= ATA_EH_RESET;
1533c6fd2807SJeff Garzik }
1534c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) {
1535c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM;
1536cf480626STejun Heo action |= ATA_EH_RESET;
1537c6fd2807SJeff Garzik }
1538c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) {
1539c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM;
1540cf480626STejun Heo action |= ATA_EH_RESET;
1541c6fd2807SJeff Garzik }
1542f9df58cbSTejun Heo
1543f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both
1544f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or
1545f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is
1546f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging.
1547f9df58cbSTejun Heo */
1548eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER)
15496b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
15506b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1551f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1552f9df58cbSTejun Heo else
1553f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG;
1554f9df58cbSTejun Heo
1555f9df58cbSTejun Heo if (serror & hotplug_mask)
1556c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i);
1557c6fd2807SJeff Garzik
1558c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask;
1559c6fd2807SJeff Garzik ehc->i.action |= action;
1560c6fd2807SJeff Garzik }
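
/*
 * Worked example (illustrative, added for clarity): an SError value with
 * SERR_PROTOCOL and SERR_PHYRDY_CHG set on an enabled host link adds
 * AC_ERR_HSM to ehc->i.err_mask, sets ATA_EH_RESET in ehc->i.action and,
 * because SERR_PHYRDY_CHG is part of hotplug_mask for such links, also
 * marks the link as hotplugged via ata_ehi_hotplugged().
 */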
1561c6fd2807SJeff Garzik
1562c6fd2807SJeff Garzik /**
1563c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc
1564c6fd2807SJeff Garzik * @qc: qc to analyze
1565c6fd2807SJeff Garzik *
1566c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of
1567c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if
156825985edcSLucas De Marchi * available.
1569c6fd2807SJeff Garzik *
1570c6fd2807SJeff Garzik * LOCKING:
1571c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1572c6fd2807SJeff Garzik *
1573c6fd2807SJeff Garzik * RETURNS:
1574c6fd2807SJeff Garzik * Determined recovery action
1575c6fd2807SJeff Garzik */
1576e3b1fff6SNiklas Cassel static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
1577c6fd2807SJeff Garzik {
1578e3b1fff6SNiklas Cassel const struct ata_taskfile *tf = &qc->result_tf;
1579c6fd2807SJeff Garzik unsigned int tmp, action = 0;
1580efcef265SSergey Shtylyov u8 stat = tf->status, err = tf->error;
1581c6fd2807SJeff Garzik
1582c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1583c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM;
1584cf480626STejun Heo return ATA_EH_RESET;
1585c6fd2807SJeff Garzik }
1586c6fd2807SJeff Garzik
1587e87fd28cSHannes Reinecke if (stat & (ATA_ERR | ATA_DF)) {
1588a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV;
1589e87fd28cSHannes Reinecke /*
1590e87fd28cSHannes Reinecke * Sense data reporting does not work if the
1591e87fd28cSHannes Reinecke * device fault bit is set.
1592e87fd28cSHannes Reinecke */
1593e87fd28cSHannes Reinecke if (stat & ATA_DF)
1594e87fd28cSHannes Reinecke stat &= ~ATA_SENSE;
1595e87fd28cSHannes Reinecke } else {
1596c6fd2807SJeff Garzik return 0;
1597e87fd28cSHannes Reinecke }
1598c6fd2807SJeff Garzik
1599c6fd2807SJeff Garzik switch (qc->dev->class) {
1600013115d9SNiklas Cassel case ATA_DEV_ATA:
16019162c657SHannes Reinecke case ATA_DEV_ZAC:
1602461ec040SNiklas Cassel /*
1603461ec040SNiklas Cassel * Fetch the sense data explicitly if:
1604461ec040SNiklas Cassel * - It was a non-NCQ command that failed, or
1605461ec040SNiklas Cassel * - It was an NCQ command that failed, but the sense data
1606461ec040SNiklas Cassel * was not included in the NCQ command error log
1607461ec040SNiklas Cassel * (i.e. NCQ autosense is not supported by the device).
1608461ec040SNiklas Cassel */
160924aeebbfSNiklas Cassel if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) &&
161024aeebbfSNiklas Cassel (stat & ATA_SENSE) && ata_eh_request_sense(qc))
161124aeebbfSNiklas Cassel set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
1612c6fd2807SJeff Garzik if (err & ATA_ICRC)
1613c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS;
1614eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF))
1615c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA;
1616c6fd2807SJeff Garzik if (err & ATA_IDNF)
1617c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID;
1618c6fd2807SJeff Garzik break;
1619c6fd2807SJeff Garzik
1620c6fd2807SJeff Garzik case ATA_DEV_ATAPI:
16214cb7c6f1SNiklas Cassel if (!ata_port_is_frozen(qc->ap)) {
16223eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev,
16233eabddb8STejun Heo qc->scsicmd->sense_buffer,
1624efcef265SSergey Shtylyov qc->result_tf.error >> 4);
16253852e373SHannes Reinecke if (!tmp)
1626c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID;
16273852e373SHannes Reinecke else
1628c6fd2807SJeff Garzik qc->err_mask |= tmp;
1629c6fd2807SJeff Garzik }
1630a569a30dSTejun Heo }
1631c6fd2807SJeff Garzik
16323852e373SHannes Reinecke if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1633b8e162f9SBart Van Assche enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
16343852e373SHannes Reinecke /*
163579487259SDamien Le Moal * SUCCESS here means that the sense code could be
16363852e373SHannes Reinecke * evaluated and should be passed to the upper layers
16373852e373SHannes Reinecke * for correct evaluation.
163879487259SDamien Le Moal * FAILED means the sense code could not be interpreted
16393852e373SHannes Reinecke * and the device would need to be reset.
16403852e373SHannes Reinecke * NEEDS_RETRY and ADD_TO_MLQUEUE mean that the
16413852e373SHannes Reinecke * command needs to be retried.
16423852e373SHannes Reinecke */
16433852e373SHannes Reinecke if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
16443852e373SHannes Reinecke qc->flags |= ATA_QCFLAG_RETRY;
16453852e373SHannes Reinecke qc->err_mask |= AC_ERR_OTHER;
16463852e373SHannes Reinecke } else if (ret != SUCCESS) {
16473852e373SHannes Reinecke qc->err_mask |= AC_ERR_HSM;
16483852e373SHannes Reinecke }
16493852e373SHannes Reinecke }
1650c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1651cf480626STejun Heo action |= ATA_EH_RESET;
1652c6fd2807SJeff Garzik
1653c6fd2807SJeff Garzik return action;
1654c6fd2807SJeff Garzik }
1655c6fd2807SJeff Garzik
165676326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
165776326ac1STejun Heo int *xfer_ok)
1658c6fd2807SJeff Garzik {
165976326ac1STejun Heo int base = 0;
166076326ac1STejun Heo
166176326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
166276326ac1STejun Heo *xfer_ok = 1;
166376326ac1STejun Heo
166476326ac1STejun Heo if (!*xfer_ok)
166575f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE;
166676326ac1STejun Heo
16677d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS)
166876326ac1STejun Heo return base + ATA_ECAT_ATA_BUS;
1669c6fd2807SJeff Garzik
16707d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT)
167176326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM;
16727d47e8d4STejun Heo
16733884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) {
16747d47e8d4STejun Heo if (err_mask & AC_ERR_HSM)
167576326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM;
16767d47e8d4STejun Heo if ((err_mask &
16777d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
167876326ac1STejun Heo return base + ATA_ECAT_UNK_DEV;
1679c6fd2807SJeff Garzik }
1680c6fd2807SJeff Garzik
1681c6fd2807SJeff Garzik return 0;
1682c6fd2807SJeff Garzik }
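
/*
 * Worked example (illustrative, added for clarity): an AC_ERR_DEV error on
 * an IO command maps to ATA_ECAT_UNK_DEV once a transfer has been verified
 * (xfer_ok set), but to ATA_ECAT_DUBIOUS_UNK_DEV, i.e. base
 * ATA_ECAT_DUBIOUS_NONE plus the same offset, while every entry seen so far
 * still carries ATA_EFLAG_DUBIOUS_XFER and no transfer has been verified.
 */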
1683c6fd2807SJeff Garzik
16847d47e8d4STejun Heo struct speed_down_verdict_arg {
1685c6fd2807SJeff Garzik u64 since;
168676326ac1STejun Heo int xfer_ok;
16873884f7b0STejun Heo int nr_errors[ATA_ECAT_NR];
1688c6fd2807SJeff Garzik };
1689c6fd2807SJeff Garzik
16907d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1691c6fd2807SJeff Garzik {
16927d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg;
169376326ac1STejun Heo int cat;
1694c6fd2807SJeff Garzik
1695d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1696c6fd2807SJeff Garzik return -1;
1697c6fd2807SJeff Garzik
169876326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
169976326ac1STejun Heo &arg->xfer_ok);
17007d47e8d4STejun Heo arg->nr_errors[cat]++;
170176326ac1STejun Heo
1702c6fd2807SJeff Garzik return 0;
1703c6fd2807SJeff Garzik }
1704c6fd2807SJeff Garzik
1705c6fd2807SJeff Garzik /**
17067d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict
1707c6fd2807SJeff Garzik * @dev: Device of interest
1708c6fd2807SJeff Garzik *
1709c6fd2807SJeff Garzik * This function examines error ring of @dev and determines
17107d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be
17117d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary.
1712c6fd2807SJeff Garzik *
17133884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command
1714c6fd2807SJeff Garzik *
17153884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
17163884f7b0STejun Heo * IO commands
17177d47e8d4STejun Heo *
17183884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands
1719c6fd2807SJeff Garzik *
172076326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to the above three but occurred before
172176326ac1STejun Heo * the data transfer had been verified.
172276326ac1STejun Heo *
17233884f7b0STejun Heo * Verdicts are
17247d47e8d4STejun Heo *
17253884f7b0STejun Heo * NCQ_OFF : Turn off NCQ.
17267d47e8d4STejun Heo *
17273884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back
17283884f7b0STejun Heo * to PIO.
17293884f7b0STejun Heo *
17303884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO.
17313884f7b0STejun Heo *
17323884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is
173376326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors
173476326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't.
173576326ac1STejun Heo * This is to expedite speed-down decisions right after the device is
173676326ac1STejun Heo * initially configured.
17373884f7b0STejun Heo *
17384091fb95SMasahiro Yamada * The following are speed down rules. #1 and #2 deal with
173976326ac1STejun Heo * DUBIOUS errors.
174076326ac1STejun Heo *
174176326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
174276326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
174376326ac1STejun Heo *
174476326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
174576326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF.
174676326ac1STejun Heo *
174776326ac1STejun Heo * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
174825985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO.
17493884f7b0STejun Heo *
175076326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
17513884f7b0STejun Heo * during last 10 mins, NCQ_OFF.
17523884f7b0STejun Heo *
175376326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
17543884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
17557d47e8d4STejun Heo *
1756c6fd2807SJeff Garzik * LOCKING:
1757c6fd2807SJeff Garzik * Inherited from caller.
1758c6fd2807SJeff Garzik *
1759c6fd2807SJeff Garzik * RETURNS:
17607d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
1761c6fd2807SJeff Garzik */
17627d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1763c6fd2807SJeff Garzik {
17647d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
17657d47e8d4STejun Heo u64 j64 = get_jiffies_64();
17667d47e8d4STejun Heo struct speed_down_verdict_arg arg;
17677d47e8d4STejun Heo unsigned int verdict = 0;
1768c6fd2807SJeff Garzik
17693884f7b0STejun Heo /* scan past 5 mins of error history */
17703884f7b0STejun Heo memset(&arg, 0, sizeof(arg));
17713884f7b0STejun Heo arg.since = j64 - min(j64, j5mins);
17723884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
17733884f7b0STejun Heo
177476326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
177576326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
177676326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN |
177776326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
177876326ac1STejun Heo
177976326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
178076326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
178176326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
178276326ac1STejun Heo
17833884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
17843884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1785663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
17863884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
17873884f7b0STejun Heo
17887d47e8d4STejun Heo /* scan past 10 mins of error history */
1789c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg));
17907d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins);
17917d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1792c6fd2807SJeff Garzik
17933884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
17943884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
17957d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF;
17963884f7b0STejun Heo
17973884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
17983884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1799663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
18007d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN;
1801c6fd2807SJeff Garzik
18027d47e8d4STejun Heo return verdict;
1803c6fd2807SJeff Garzik }
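
/*
 * Worked example (illustrative, added for clarity): a device that
 * accumulates four ATA_ECAT_UNK_DEV errors within the last ten minutes
 * crosses the "> 3" threshold above and gets ATA_EH_SPDN_NCQ_OFF; even if
 * all four also fall within the last five minutes they stay below the
 * "> 6" FALLBACK_TO_PIO threshold, so only NCQ is turned off.
 */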
1804c6fd2807SJeff Garzik
1805c6fd2807SJeff Garzik /**
1806c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary
1807c6fd2807SJeff Garzik * @dev: Failed device
18083884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags
1809c6fd2807SJeff Garzik * @err_mask: err_mask of the error
1810c6fd2807SJeff Garzik *
1811c6fd2807SJeff Garzik * Record error and examine error history to determine whether
1812c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets
1813c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is
1814c6fd2807SJeff Garzik * necessary.
1815c6fd2807SJeff Garzik *
1816c6fd2807SJeff Garzik * LOCKING:
1817c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1818c6fd2807SJeff Garzik *
1819c6fd2807SJeff Garzik * RETURNS:
18207d47e8d4STejun Heo * Determined recovery action.
1821c6fd2807SJeff Garzik */
18223884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev,
18233884f7b0STejun Heo unsigned int eflags, unsigned int err_mask)
1824c6fd2807SJeff Garzik {
1825b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev);
182676326ac1STejun Heo int xfer_ok = 0;
18277d47e8d4STejun Heo unsigned int verdict;
18287d47e8d4STejun Heo unsigned int action = 0;
18297d47e8d4STejun Heo
18307d47e8d4STejun Heo /* don't bother if Cat-0 error */
183176326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1832c6fd2807SJeff Garzik return 0;
1833c6fd2807SJeff Garzik
1834c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */
18353884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask);
18367d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev);
1837c6fd2807SJeff Garzik
18387d47e8d4STejun Heo /* turn off NCQ? */
183912980c1fSDamien Le Moal if ((verdict & ATA_EH_SPDN_NCQ_OFF) && ata_ncq_enabled(dev)) {
18407d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF;
1841a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
18427d47e8d4STejun Heo goto done;
18437d47e8d4STejun Heo }
1844c6fd2807SJeff Garzik
18457d47e8d4STejun Heo /* speed down? */
18467d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1847c6fd2807SJeff Garzik /* speed down SATA link speed if possible */
1848a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) {
1849cf480626STejun Heo action |= ATA_EH_RESET;
18507d47e8d4STejun Heo goto done;
18517d47e8d4STejun Heo }
1852c6fd2807SJeff Garzik
1853c6fd2807SJeff Garzik /* lower transfer mode */
18547d47e8d4STejun Heo if (dev->spdn_cnt < 2) {
18557d47e8d4STejun Heo static const int dma_dnxfer_sel[] =
18567d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C };
18577d47e8d4STejun Heo static const int pio_dnxfer_sel[] =
18587d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
18597d47e8d4STejun Heo int sel;
1860c6fd2807SJeff Garzik
18617d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO)
18627d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt];
18637d47e8d4STejun Heo else
18647d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt];
18657d47e8d4STejun Heo
18667d47e8d4STejun Heo dev->spdn_cnt++;
18677d47e8d4STejun Heo
18687d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) {
1869cf480626STejun Heo action |= ATA_EH_RESET;
18707d47e8d4STejun Heo goto done;
18717d47e8d4STejun Heo }
18727d47e8d4STejun Heo }
18737d47e8d4STejun Heo }
18747d47e8d4STejun Heo
18757d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for
1876663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI.
18777d47e8d4STejun Heo */
18787d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1879663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
18807d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) {
18817d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
18827d47e8d4STejun Heo dev->spdn_cnt = 0;
1883cf480626STejun Heo action |= ATA_EH_RESET;
18847d47e8d4STejun Heo goto done;
18857d47e8d4STejun Heo }
18867d47e8d4STejun Heo }
18877d47e8d4STejun Heo
1888c6fd2807SJeff Garzik return 0;
18897d47e8d4STejun Heo done:
18907d47e8d4STejun Heo /* device has been slowed down, blow error history */
189176326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
18927d47e8d4STejun Heo ata_ering_clear(&dev->ering);
18937d47e8d4STejun Heo return action;
1894c6fd2807SJeff Garzik }
1895c6fd2807SJeff Garzik
1896c6fd2807SJeff Garzik /**
18978d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry
18988d899e70SMark Lord * @qc: qc to possibly retry
18998d899e70SMark Lord *
19008d899e70SMark Lord * Look at the cause of the error and decide if a retry
19018d899e70SMark Lord * might be useful or not. We don't want to retry media errors
19028d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds
19038d899e70SMark Lord * doing its own internal retries before reporting the failure.
19048d899e70SMark Lord */
19058d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
19068d899e70SMark Lord {
19071eaca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA)
19088d899e70SMark Lord return 0; /* don't retry media errors */
19098d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO)
19108d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */
19118d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID)
19128d899e70SMark Lord return 0; /* don't retry these */
19138d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
19148d899e70SMark Lord }
19158d899e70SMark Lord
19168d899e70SMark Lord /**
19177eb49509SDamien Le Moal * ata_eh_quiet - check if we need to be quiet about a command error
19187eb49509SDamien Le Moal * @qc: qc to check
19197eb49509SDamien Le Moal *
19207eb49509SDamien Le Moal * Look at the qc flags and its SCSI command request flags to determine
19217eb49509SDamien Le Moal * if we need to be quiet about the command failure.
19227eb49509SDamien Le Moal */
19237eb49509SDamien Le Moal static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
19247eb49509SDamien Le Moal {
1925c8329cd5SBart Van Assche if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
19267eb49509SDamien Le Moal qc->flags |= ATA_QCFLAG_QUIET;
19277eb49509SDamien Le Moal return qc->flags & ATA_QCFLAG_QUIET;
19287eb49509SDamien Le Moal }
19297eb49509SDamien Le Moal
193018bd7718SNiklas Cassel static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
193118bd7718SNiklas Cassel {
193218bd7718SNiklas Cassel struct ata_port *ap = link->ap;
193318bd7718SNiklas Cassel struct ata_queued_cmd *qc;
193418bd7718SNiklas Cassel
193518bd7718SNiklas Cassel qc = __ata_qc_from_tag(ap, link->active_tag);
193618bd7718SNiklas Cassel if (!qc)
193718bd7718SNiklas Cassel return -EIO;
193818bd7718SNiklas Cassel
193918bd7718SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) ||
194018bd7718SNiklas Cassel !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
194118bd7718SNiklas Cassel qc->err_mask)
194218bd7718SNiklas Cassel return -EIO;
194318bd7718SNiklas Cassel
194418bd7718SNiklas Cassel if (!ata_eh_request_sense(qc))
194518bd7718SNiklas Cassel return -EIO;
194618bd7718SNiklas Cassel
194718bd7718SNiklas Cassel /*
194818bd7718SNiklas Cassel * If we have sense data, call scsi_check_sense() in order to set the
194918bd7718SNiklas Cassel * correct SCSI ML byte (if any). No point in checking the return value,
195018bd7718SNiklas Cassel * since the command has already completed successfully.
195118bd7718SNiklas Cassel */
195218bd7718SNiklas Cassel scsi_check_sense(qc->scsicmd);
195318bd7718SNiklas Cassel
195418bd7718SNiklas Cassel return 0;
195518bd7718SNiklas Cassel }
195618bd7718SNiklas Cassel
195718bd7718SNiklas Cassel static void ata_eh_get_success_sense(struct ata_link *link)
195818bd7718SNiklas Cassel {
195918bd7718SNiklas Cassel struct ata_eh_context *ehc = &link->eh_context;
196018bd7718SNiklas Cassel struct ata_device *dev = link->device;
196118bd7718SNiklas Cassel struct ata_port *ap = link->ap;
196218bd7718SNiklas Cassel struct ata_queued_cmd *qc;
196318bd7718SNiklas Cassel int tag, ret = 0;
196418bd7718SNiklas Cassel
196518bd7718SNiklas Cassel if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE))
196618bd7718SNiklas Cassel return;
196718bd7718SNiklas Cassel
196818bd7718SNiklas Cassel /* if frozen, we can't do much */
196918bd7718SNiklas Cassel if (ata_port_is_frozen(ap)) {
197018bd7718SNiklas Cassel ata_dev_warn(dev,
197118bd7718SNiklas Cassel "successful sense data available but port frozen\n");
197218bd7718SNiklas Cassel goto out;
197318bd7718SNiklas Cassel }
197418bd7718SNiklas Cassel
197518bd7718SNiklas Cassel /*
197618bd7718SNiklas Cassel * If the link has sactive set, then we have outstanding NCQ commands
197718bd7718SNiklas Cassel * and have to read the Successful NCQ Commands log to get the sense
197818bd7718SNiklas Cassel * data. Otherwise, we are dealing with a non-NCQ command and use
197918bd7718SNiklas Cassel * request sense ext command to retrieve the sense data.
198018bd7718SNiklas Cassel */
198118bd7718SNiklas Cassel if (link->sactive)
198218bd7718SNiklas Cassel ret = ata_eh_read_sense_success_ncq_log(link);
198318bd7718SNiklas Cassel else
198418bd7718SNiklas Cassel ret = ata_eh_read_sense_success_non_ncq(link);
198518bd7718SNiklas Cassel if (ret)
198618bd7718SNiklas Cassel goto out;
198718bd7718SNiklas Cassel
198818bd7718SNiklas Cassel ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
198918bd7718SNiklas Cassel return;
199018bd7718SNiklas Cassel
199118bd7718SNiklas Cassel out:
199218bd7718SNiklas Cassel /*
199318bd7718SNiklas Cassel * If we failed to get sense data for a successful command that ought to
199418bd7718SNiklas Cassel * have sense data, we cannot simply return BLK_STS_OK to user space.
199518bd7718SNiklas Cassel * This is because we can't know if the sense data that we couldn't get
199618bd7718SNiklas Cassel * was actually "DATA CURRENTLY UNAVAILABLE". Reporting such a command
199718bd7718SNiklas Cassel * as success to user space would result in a silent data corruption.
199818bd7718SNiklas Cassel * Thus, add a bogus ABORTED_COMMAND sense data to such commands, such
199918bd7718SNiklas Cassel * that SCSI will report these commands as BLK_STS_IOERR to user space.
200018bd7718SNiklas Cassel */
200118bd7718SNiklas Cassel ata_qc_for_each_raw(ap, qc, tag) {
200218bd7718SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) ||
200318bd7718SNiklas Cassel !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
200418bd7718SNiklas Cassel qc->err_mask ||
200518bd7718SNiklas Cassel ata_dev_phys_link(qc->dev) != link)
200618bd7718SNiklas Cassel continue;
200718bd7718SNiklas Cassel
200818bd7718SNiklas Cassel /* We managed to get sense for this successful command, skip. */
200918bd7718SNiklas Cassel if (qc->flags & ATA_QCFLAG_SENSE_VALID)
201018bd7718SNiklas Cassel continue;
201118bd7718SNiklas Cassel
201218bd7718SNiklas Cassel /* This successful command did not have any sense data, skip. */
201318bd7718SNiklas Cassel if (!(qc->result_tf.status & ATA_SENSE))
201418bd7718SNiklas Cassel continue;
201518bd7718SNiklas Cassel
201618bd7718SNiklas Cassel /* This successful command had sense data, but we failed to get it. */
201718bd7718SNiklas Cassel ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0);
201818bd7718SNiklas Cassel qc->flags |= ATA_QCFLAG_SENSE_VALID;
201918bd7718SNiklas Cassel }
202018bd7718SNiklas Cassel ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
202118bd7718SNiklas Cassel }
202218bd7718SNiklas Cassel
20237eb49509SDamien Le Moal /**
20249b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action
20259b1e2658STejun Heo * @link: host link to perform autopsy on
2026c6fd2807SJeff Garzik *
20270260731fSTejun Heo * Analyze why @link failed and determine which recovery actions
20280260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_*
20290260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE.
2030c6fd2807SJeff Garzik *
2031c6fd2807SJeff Garzik * LOCKING:
2032c6fd2807SJeff Garzik * Kernel thread context (may sleep).
2033c6fd2807SJeff Garzik */
20349b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link)
2035c6fd2807SJeff Garzik {
20360260731fSTejun Heo struct ata_port *ap = link->ap;
2037936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context;
2038258c4e5cSJens Axboe struct ata_queued_cmd *qc;
2039dfcc173dSTejun Heo struct ata_device *dev;
20403884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0;
20417eb49509SDamien Le Moal int tag, nr_failed = 0, nr_quiet = 0;
2042c6fd2807SJeff Garzik u32 serror;
2043c6fd2807SJeff Garzik int rc;
2044c6fd2807SJeff Garzik
2045c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2046c6fd2807SJeff Garzik return;
2047c6fd2807SJeff Garzik
2048c6fd2807SJeff Garzik /* obtain and analyze SError */
2049936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror);
2050c6fd2807SJeff Garzik if (rc == 0) {
2051c6fd2807SJeff Garzik ehc->i.serror |= serror;
20520260731fSTejun Heo ata_eh_analyze_serror(link);
20534e57c517STejun Heo } else if (rc != -EOPNOTSUPP) {
2054cf480626STejun Heo /* SError read failed, force reset and probing */
2055b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES;
2056cf480626STejun Heo ehc->i.action |= ATA_EH_RESET;
20574e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER;
20584e57c517STejun Heo }
2059c6fd2807SJeff Garzik
2060c6fd2807SJeff Garzik /* analyze NCQ failure */
20610260731fSTejun Heo ata_eh_analyze_ncq_error(link);
2062c6fd2807SJeff Garzik
206318bd7718SNiklas Cassel /*
206418bd7718SNiklas Cassel * Check if this was a successful command that simply needs sense data.
206518bd7718SNiklas Cassel * Since the sense data is not part of the completion, we need to fetch
206618bd7718SNiklas Cassel * it using an additional command. Since this can't be done from irq
206718bd7718SNiklas Cassel * context, the sense data for successful commands are fetched by EH.
206818bd7718SNiklas Cassel */
206918bd7718SNiklas Cassel ata_eh_get_success_sense(link);
207018bd7718SNiklas Cassel
2071c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */
2072c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER)
2073c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER;
2074c6fd2807SJeff Garzik
2075c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask;
2076c6fd2807SJeff Garzik
2077258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) {
207887629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) ||
20793d8a3ae3SNiklas Cassel qc->flags & ATA_QCFLAG_RETRY ||
208018bd7718SNiklas Cassel qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
2081b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link)
2082c6fd2807SJeff Garzik continue;
2083c6fd2807SJeff Garzik
2084c6fd2807SJeff Garzik /* inherit upper level err_mask */
2085c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask;
2086c6fd2807SJeff Garzik
2087c6fd2807SJeff Garzik /* analyze TF */
2088e3b1fff6SNiklas Cassel ehc->i.action |= ata_eh_analyze_tf(qc);
2089c6fd2807SJeff Garzik
2090c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */
2091c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS)
2092c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2093c6fd2807SJeff Garzik AC_ERR_INVALID);
2094c6fd2807SJeff Garzik
2095c6fd2807SJeff Garzik /* any real error trumps unknown error */
2096c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER)
2097c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER;
2098c6fd2807SJeff Garzik
2099804689adSDamien Le Moal /*
2100804689adSDamien Le Moal * SENSE_VALID trumps dev/unknown error and revalidation. Upper
2101804689adSDamien Le Moal * layers will determine whether the command is worth retrying
2102804689adSDamien Le Moal * based on the sense data and device class/type. Otherwise,
2103804689adSDamien Le Moal * determine directly if the command is worth retrying using its
2104804689adSDamien Le Moal * error mask and flags.
2105804689adSDamien Le Moal */
2106f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2107c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2108804689adSDamien Le Moal else if (ata_eh_worth_retry(qc))
210903faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY;
211003faab78STejun Heo
2111c6fd2807SJeff Garzik /* accumulate error info */
2112c6fd2807SJeff Garzik ehc->i.dev = qc->dev;
2113c6fd2807SJeff Garzik all_err_mask |= qc->err_mask;
2114c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO)
21153884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO;
2116255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc);
21177eb49509SDamien Le Moal
21187eb49509SDamien Le Moal /* Count quiet errors */
21197eb49509SDamien Le Moal if (ata_eh_quiet(qc))
21207eb49509SDamien Le Moal nr_quiet++;
21217eb49509SDamien Le Moal nr_failed++;
2122c6fd2807SJeff Garzik }
2123c6fd2807SJeff Garzik
21247eb49509SDamien Le Moal /* If all failed commands requested silence, then be quiet */
21257eb49509SDamien Le Moal if (nr_quiet == nr_failed)
21267eb49509SDamien Le Moal ehc->i.flags |= ATA_EHI_QUIET;
21277eb49509SDamien Le Moal
2128c6fd2807SJeff Garzik /* enforce default EH actions */
21294cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap) ||
2130c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2131cf480626STejun Heo ehc->i.action |= ATA_EH_RESET;
21323884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
21333884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2134c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE;
2135c6fd2807SJeff Garzik
2136dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device,
2137dfcc173dSTejun Heo * perform per-dev EH action only on the offending device.
2138dfcc173dSTejun Heo */
2139c6fd2807SJeff Garzik if (ehc->i.dev) {
2140c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |=
2141c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK;
2142c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2143c6fd2807SJeff Garzik }
2144c6fd2807SJeff Garzik
21452695e366STejun Heo /* propagate timeout to host link */
21462695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
21472695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
21482695e366STejun Heo
21492695e366STejun Heo /* record error and consider speeding down */
2150dfcc173dSTejun Heo dev = ehc->i.dev;
21512695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 &&
21522695e366STejun Heo ata_dev_enabled(link->device))))
2153dfcc173dSTejun Heo dev = link->device;
2154dfcc173dSTejun Heo
215576326ac1STejun Heo if (dev) {
215676326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
215776326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER;
21583884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2159255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2160f1601113SRameshwar Prasad Sahu }
2161c6fd2807SJeff Garzik }
2162c6fd2807SJeff Garzik
2163c6fd2807SJeff Garzik /**
21649b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action
21659b1e2658STejun Heo * @ap: host port to perform autopsy on
21669b1e2658STejun Heo *
21679b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and
21689b1e2658STejun Heo * which recovery actions are needed.
21699b1e2658STejun Heo *
21709b1e2658STejun Heo * LOCKING:
21719b1e2658STejun Heo * Kernel thread context (may sleep).
21729b1e2658STejun Heo */
2173fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap)
21749b1e2658STejun Heo {
21759b1e2658STejun Heo struct ata_link *link;
21769b1e2658STejun Heo
21771eca4365STejun Heo ata_for_each_link(link, ap, EDGE)
21789b1e2658STejun Heo ata_eh_link_autopsy(link);
21792695e366STejun Heo
2180b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly
2181b1c72916STejun Heo * but actions and flags are transferred over to the master
2182b1c72916STejun Heo * link and handled from there.
2183b1c72916STejun Heo */
2184b1c72916STejun Heo if (ap->slave_link) {
2185b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context;
2186b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2187b1c72916STejun Heo
2188848e4c68STejun Heo /* transfer control flags from master to slave */
2189848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2190848e4c68STejun Heo
2191848e4c68STejun Heo /* perform autopsy on the slave link */
2192b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link);
2193b1c72916STejun Heo
2194848e4c68STejun Heo /* transfer actions from slave to master and clear slave */
2195b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2196b1c72916STejun Heo mehc->i.action |= sehc->i.action;
2197b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2198b1c72916STejun Heo mehc->i.flags |= sehc->i.flags;
2199b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2200b1c72916STejun Heo }
2201b1c72916STejun Heo
22022695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy.
22032695e366STejun Heo * Perform host link autopsy last.
22042695e366STejun Heo */
2205071f44b1STejun Heo if (sata_pmp_attached(ap))
22062695e366STejun Heo ata_eh_link_autopsy(&ap->link);
22079b1e2658STejun Heo }
22089b1e2658STejun Heo
22099b1e2658STejun Heo /**
2210d4520903SHannes Reinecke * ata_get_cmd_name - get name for ATA command
2211d4520903SHannes Reinecke * @command: ATA command code to get name for
22126521148cSRobert Hancock *
2213d4520903SHannes Reinecke * Return a textual name of the given command or "unknown"
22146521148cSRobert Hancock *
22156521148cSRobert Hancock * LOCKING:
22166521148cSRobert Hancock * None
22176521148cSRobert Hancock */
2218d4520903SHannes Reinecke const char *ata_get_cmd_name(u8 command)
22196521148cSRobert Hancock {
22206521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
22216521148cSRobert Hancock static const struct
22226521148cSRobert Hancock {
22236521148cSRobert Hancock u8 command;
22246521148cSRobert Hancock const char *text;
22256521148cSRobert Hancock } cmd_descr[] = {
22266521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" },
22276521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
22286521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" },
22296521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" },
22306521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
22316521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
22323915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" },
22336521148cSRobert Hancock { ATA_CMD_NOP, "NOP" },
22346521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" },
22356521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
22366521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
22376521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
22386521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" },
22396521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" },
22406521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" },
22416521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
22426521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
22436521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
22446521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" },
22456521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
22466521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
22476521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
22486521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
22496521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
22506521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
22516521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
22526521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2253d3122bf9SDamien Le Moal { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
22543915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
22553915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
22566521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
22576521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
22586521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
22596521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
22606521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
22616521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
22626521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
22636521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
22646521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
22656521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" },
22666521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
22676521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
22686521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
22696521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
22706521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
22716521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
22726521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" },
22736521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
22746521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
22756521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
22766521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
22776521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
22786521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
22796521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
22806521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
22816521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
22823915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" },
22836521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
22846521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
22856521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
22866521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
22876521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" },
22883915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" },
22896521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
22903915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" },
22916521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
22926521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
22936521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
22946521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
22956521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
22966521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
22976521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
22986521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
22996521148cSRobert Hancock { ATA_CMD_SMART, "SMART" },
23006521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
23016521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2302acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
23036521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
23046521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
23056521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
23066521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
23076521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
23086521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
23093915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
23103915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
231128a3fc22SHannes Reinecke { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" },
231227708a95SHannes Reinecke { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" },
23136521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
23146521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
23156521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
23166521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
23176521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" },
23186521148cSRobert Hancock { 0, NULL } /* terminate list */
23196521148cSRobert Hancock };
23206521148cSRobert Hancock
23216521148cSRobert Hancock unsigned int i;
23226521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++)
23236521148cSRobert Hancock if (cmd_descr[i].command == command)
23246521148cSRobert Hancock return cmd_descr[i].text;
23256521148cSRobert Hancock #endif
23266521148cSRobert Hancock
2327d4520903SHannes Reinecke return "unknown";
23286521148cSRobert Hancock }
2329d4520903SHannes Reinecke EXPORT_SYMBOL_GPL(ata_get_cmd_name);
23306521148cSRobert Hancock
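/*
 * Usage sketch (illustrative only, not part of this file): callers
 * typically feed the opcode of a failed taskfile straight into a log
 * message, e.g.
 *
 *	ata_dev_warn(dev, "timed out on %s (0x%02x)\n",
 *		     ata_get_cmd_name(qc->tf.command), qc->tf.command);
 *
 * Without CONFIG_ATA_VERBOSE_ERROR the lookup table above is compiled
 * out and this always returns "unknown".
 */
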
23316521148cSRobert Hancock /**
23329b1e2658STejun Heo * ata_eh_link_report - report error handling to user
23330260731fSTejun Heo * @link: ATA link EH is going on
2334c6fd2807SJeff Garzik *
2335c6fd2807SJeff Garzik * Report EH to user.
2336c6fd2807SJeff Garzik *
2337c6fd2807SJeff Garzik * LOCKING:
2338c6fd2807SJeff Garzik * None.
2339c6fd2807SJeff Garzik */
23409b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link)
2341c6fd2807SJeff Garzik {
23420260731fSTejun Heo struct ata_port *ap = link->ap;
23430260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
2344258c4e5cSJens Axboe struct ata_queued_cmd *qc;
2345c6fd2807SJeff Garzik const char *frozen, *desc;
234649728bdcSDamien Le Moal char tries_buf[16] = "";
2347c6fd2807SJeff Garzik int tag, nr_failed = 0;
2348c6fd2807SJeff Garzik
234994ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET)
235094ff3d54STejun Heo return;
235194ff3d54STejun Heo
2352c6fd2807SJeff Garzik desc = NULL;
2353c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0')
2354c6fd2807SJeff Garzik desc = ehc->i.desc;
2355c6fd2807SJeff Garzik
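/* count the failed commands that should be reported for this link */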
2356258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) {
235787629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) ||
2358b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link ||
2359e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) &&
2360e027bd36STejun Heo qc->err_mask == AC_ERR_DEV))
2361c6fd2807SJeff Garzik continue;
2362c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2363c6fd2807SJeff Garzik continue;
2364c6fd2807SJeff Garzik
2365c6fd2807SJeff Garzik nr_failed++;
2366c6fd2807SJeff Garzik }
2367c6fd2807SJeff Garzik
2368c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask)
2369c6fd2807SJeff Garzik return;
2370c6fd2807SJeff Garzik
2371c6fd2807SJeff Garzik frozen = "";
23724cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap))
2373c6fd2807SJeff Garzik frozen = " frozen";
2374c6fd2807SJeff Garzik
2375a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES)
2376462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d",
2377a1e10f7eSTejun Heo ap->eh_tries);
2378a1e10f7eSTejun Heo
2379c6fd2807SJeff Garzik if (ehc->i.dev) {
2380a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2381a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2382a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror,
2383a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf);
2384c6fd2807SJeff Garzik if (desc)
2385a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc);
2386c6fd2807SJeff Garzik } else {
2387a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x "
2388a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2389a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror,
2390a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf);
2391c6fd2807SJeff Garzik if (desc)
2392a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc);
2393c6fd2807SJeff Garzik }
2394c6fd2807SJeff Garzik
23956521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
23961333e194SRobert Hancock if (ehc->i.serror)
2397a9a79dfeSJoe Perches ata_link_err(link,
23981333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
23991333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
24001333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
24011333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
24021333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
24031333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
24041333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
24051333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
24061333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
24071333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
24081333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
24091333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
24101333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "",
24111333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
24121333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
24131333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
24141333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
24151333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
24166521148cSRobert Hancock #endif
24171333e194SRobert Hancock
2418258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) {
24198a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2420abb6a889STejun Heo char data_buf[20] = "";
2421abb6a889STejun Heo char cdb_buf[70] = "";
2422c6fd2807SJeff Garzik
242387629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) ||
2424b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2425c6fd2807SJeff Garzik continue;
2426c6fd2807SJeff Garzik
2427abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) {
2428abb6a889STejun Heo static const char *dma_str[] = {
2429abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi",
2430abb6a889STejun Heo [DMA_TO_DEVICE] = "out",
2431abb6a889STejun Heo [DMA_FROM_DEVICE] = "in",
2432abb6a889STejun Heo };
2433fb1b8b11SGeert Uytterhoeven const char *prot_str = NULL;
2434abb6a889STejun Heo
2435fb1b8b11SGeert Uytterhoeven switch (qc->tf.protocol) {
2436fb1b8b11SGeert Uytterhoeven case ATA_PROT_UNKNOWN:
2437fb1b8b11SGeert Uytterhoeven prot_str = "unknown";
2438fb1b8b11SGeert Uytterhoeven break;
2439fb1b8b11SGeert Uytterhoeven case ATA_PROT_NODATA:
2440fb1b8b11SGeert Uytterhoeven prot_str = "nodata";
2441fb1b8b11SGeert Uytterhoeven break;
2442fb1b8b11SGeert Uytterhoeven case ATA_PROT_PIO:
2443fb1b8b11SGeert Uytterhoeven prot_str = "pio";
2444fb1b8b11SGeert Uytterhoeven break;
2445fb1b8b11SGeert Uytterhoeven case ATA_PROT_DMA:
2446fb1b8b11SGeert Uytterhoeven prot_str = "dma";
2447fb1b8b11SGeert Uytterhoeven break;
2448fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ:
2449fb1b8b11SGeert Uytterhoeven prot_str = "ncq dma";
2450fb1b8b11SGeert Uytterhoeven break;
2451fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ_NODATA:
2452fb1b8b11SGeert Uytterhoeven prot_str = "ncq nodata";
2453fb1b8b11SGeert Uytterhoeven break;
2454fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_NODATA:
2455fb1b8b11SGeert Uytterhoeven prot_str = "nodata";
2456fb1b8b11SGeert Uytterhoeven break;
2457fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_PIO:
2458fb1b8b11SGeert Uytterhoeven prot_str = "pio";
2459fb1b8b11SGeert Uytterhoeven break;
2460fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_DMA:
2461fb1b8b11SGeert Uytterhoeven prot_str = "dma";
2462fb1b8b11SGeert Uytterhoeven break;
2463fb1b8b11SGeert Uytterhoeven }
2464abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2465fb1b8b11SGeert Uytterhoeven prot_str, qc->nbytes, dma_str[qc->dma_dir]);
2466abb6a889STejun Heo }
2467abb6a889STejun Heo
24686521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) {
2469a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb;
2470a13b0c9dSHannes Reinecke size_t cdb_len = qc->dev->cdb_len;
2471a13b0c9dSHannes Reinecke
2472cbba5b0eSHannes Reinecke if (qc->scsicmd) {
2473cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd;
2474cbba5b0eSHannes Reinecke cdb_len = qc->scsicmd->cmd_len;
2475cbba5b0eSHannes Reinecke }
2476cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf),
2477cbba5b0eSHannes Reinecke cdb, cdb_len);
2478d4520903SHannes Reinecke } else
2479a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n",
2480d4520903SHannes Reinecke ata_get_cmd_name(cmd->command));
2481abb6a889STejun Heo
2482a9a79dfeSJoe Perches ata_dev_err(qc->dev,
24838a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2484abb6a889STejun Heo "tag %d%s\n %s"
24858a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
24865335b729STejun Heo "Emask 0x%x (%s)%s\n",
24878a937581STejun Heo cmd->command, cmd->feature, cmd->nsect,
24888a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah,
24898a937581STejun Heo cmd->hob_feature, cmd->hob_nsect,
24908a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2491abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf,
2492efcef265SSergey Shtylyov res->status, res->error, res->nsect,
24938a937581STejun Heo res->lbal, res->lbam, res->lbah,
24948a937581STejun Heo res->hob_feature, res->hob_nsect,
24958a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah,
24965335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask),
24975335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
24981333e194SRobert Hancock
24996521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR
2500efcef265SSergey Shtylyov if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2501e87fd28cSHannes Reinecke ATA_SENSE | ATA_ERR)) {
2502efcef265SSergey Shtylyov if (res->status & ATA_BUSY)
2503a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n");
25041333e194SRobert Hancock else
2505e87fd28cSHannes Reinecke ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2506efcef265SSergey Shtylyov res->status & ATA_DRDY ? "DRDY " : "",
2507efcef265SSergey Shtylyov res->status & ATA_DF ? "DF " : "",
2508efcef265SSergey Shtylyov res->status & ATA_DRQ ? "DRQ " : "",
2509efcef265SSergey Shtylyov res->status & ATA_SENSE ? "SENSE " : "",
2510efcef265SSergey Shtylyov res->status & ATA_ERR ? "ERR " : "");
25111333e194SRobert Hancock }
25121333e194SRobert Hancock
25131333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET &&
2514efcef265SSergey Shtylyov (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
2515efcef265SSergey Shtylyov ATA_ABORTED)))
2516eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2517efcef265SSergey Shtylyov res->error & ATA_ICRC ? "ICRC " : "",
2518efcef265SSergey Shtylyov res->error & ATA_UNC ? "UNC " : "",
2519efcef265SSergey Shtylyov res->error & ATA_AMNF ? "AMNF " : "",
2520efcef265SSergey Shtylyov res->error & ATA_IDNF ? "IDNF " : "",
2521efcef265SSergey Shtylyov res->error & ATA_ABORTED ? "ABRT " : "");
25226521148cSRobert Hancock #endif
2523c6fd2807SJeff Garzik }
2524c6fd2807SJeff Garzik }
2525c6fd2807SJeff Garzik
25269b1e2658STejun Heo /**
25279b1e2658STejun Heo * ata_eh_report - report error handling to user
25289b1e2658STejun Heo * @ap: ATA port to report EH about
25299b1e2658STejun Heo *
25309b1e2658STejun Heo * Report EH to user.
25319b1e2658STejun Heo *
25329b1e2658STejun Heo * LOCKING:
25339b1e2658STejun Heo * None.
25349b1e2658STejun Heo */
2535fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap)
25369b1e2658STejun Heo {
25379b1e2658STejun Heo struct ata_link *link;
25389b1e2658STejun Heo
25391eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST)
25409b1e2658STejun Heo ata_eh_link_report(link);
25419b1e2658STejun Heo }
25429b1e2658STejun Heo
2543cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2544b1c72916STejun Heo unsigned int *classes, unsigned long deadline,
2545b1c72916STejun Heo bool clear_classes)
2546c6fd2807SJeff Garzik {
2547f58229f8STejun Heo struct ata_device *dev;
2548c6fd2807SJeff Garzik
2549b1c72916STejun Heo if (clear_classes)
25501eca4365STejun Heo ata_for_each_dev(dev, link, ALL)
2551f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN;
2552c6fd2807SJeff Garzik
2553f046519fSTejun Heo return reset(link, classes, deadline);
2554c6fd2807SJeff Garzik }
2555c6fd2807SJeff Garzik
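/*
 * A hardreset may need to be followed by a softreset: never on links
 * that forbid SRST or are offline, but yes when the hardreset could not
 * classify the device (-EAGAIN) and on host links of PMP-capable ports,
 * where the follow-up SRST is what detects an attached port multiplier.
 */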
2556e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2557c6fd2807SJeff Garzik {
255845db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2559ae791c05STejun Heo return 0;
25605dbfc9cbSTejun Heo if (rc == -EAGAIN)
2561c6fd2807SJeff Garzik return 1;
2562071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
25633495de73STejun Heo return 1;
2564c6fd2807SJeff Garzik return 0;
2565c6fd2807SJeff Garzik }
2566c6fd2807SJeff Garzik
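/*
 * Reset @link and classify attached devices: run @prereset, prefer
 * @hardreset over @softreset, issue a follow-up softreset when
 * classification requires it, then run @postreset and cross-check link
 * state against the detected classes.  Retries are paced by
 * ata_eh_reset_timeouts[].  Returns 0 on success, -errno otherwise.
 */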
2567fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify,
2568c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2569c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2570c6fd2807SJeff Garzik {
2571afaa5c37STejun Heo struct ata_port *ap = link->ap;
2572b1c72916STejun Heo struct ata_link *slave = ap->slave_link;
2573936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context;
2574705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2575c6fd2807SJeff Garzik unsigned int *classes = ehc->classes;
2576416dc9edSTejun Heo unsigned int lflags = link->flags;
2577c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2578d8af0eb6STejun Heo int max_tries = 0, try = 0;
2579b1c72916STejun Heo struct ata_link *failed_link;
2580f58229f8STejun Heo struct ata_device *dev;
2581416dc9edSTejun Heo unsigned long deadline, now;
2582c6fd2807SJeff Garzik ata_reset_fn_t reset;
2583afaa5c37STejun Heo unsigned long flags;
2584416dc9edSTejun Heo u32 sstatus;
2585b1c72916STejun Heo int nr_unknown, rc;
2586c6fd2807SJeff Garzik
2587932648b0STejun Heo /*
2588932648b0STejun Heo * Prepare to reset
2589932648b0STejun Heo */
2590ca02f225SSergey Shtylyov while (ata_eh_reset_timeouts[max_tries] != UINT_MAX)
2591d8af0eb6STejun Heo max_tries++;
2592ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE)
2593ca6d43b0SDan Williams max_tries = 1;
259405944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST)
259505944bdfSTejun Heo hardreset = NULL;
259605944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST)
259705944bdfSTejun Heo softreset = NULL;
2598d8af0eb6STejun Heo
259925985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */
260019b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) {
26010a2c0f56STejun Heo now = jiffies;
260219b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now));
260319b72321STejun Heo deadline = ata_deadline(ehc->last_reset,
260419b72321STejun Heo ATA_EH_RESET_COOL_DOWN);
26050a2c0f56STejun Heo if (time_before(now, deadline))
26060a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now);
260719b72321STejun Heo }
26080a2c0f56STejun Heo
2609afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags);
2610afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING;
2611afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags);
2612afaa5c37STejun Heo
2613cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2614c6fd2807SJeff Garzik
26151eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
2616cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI)
2617cdeab114STejun Heo * may change configuration and be in PIO0 timing. If
2618cdeab114STejun Heo * we do a hard reset (or are coming from power on)
2619cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a
2620cdeab114STejun Heo * suitable controller mode we should not touch the
2621cdeab114STejun Heo * bus as we may be talking too fast.
2622cdeab114STejun Heo */
2623cdeab114STejun Heo dev->pio_mode = XFER_PIO_0;
26245416912aSAaron Lu dev->dma_mode = 0xff;
2625cdeab114STejun Heo
2626cdeab114STejun Heo /* If the controller has a pio mode setup function
2627cdeab114STejun Heo * then use it to set the chipset to rights. Don't
2628cdeab114STejun Heo * touch the DMA setup as that will be dealt with when
2629cdeab114STejun Heo * configuring devices.
2630cdeab114STejun Heo */
2631cdeab114STejun Heo if (ap->ops->set_piomode)
2632cdeab114STejun Heo ap->ops->set_piomode(ap, dev);
2633cdeab114STejun Heo }
2634cdeab114STejun Heo
2635cf480626STejun Heo /* prefer hardreset */
2636932648b0STejun Heo reset = NULL;
2637cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET;
2638cf480626STejun Heo if (hardreset) {
2639cf480626STejun Heo reset = hardreset;
2640a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET;
26414f7faa3fSTejun Heo } else if (softreset) {
2642cf480626STejun Heo reset = softreset;
2643a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET;
2644cf480626STejun Heo }
2645c6fd2807SJeff Garzik
2646c6fd2807SJeff Garzik if (prereset) {
2647b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies,
2648b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT);
2649b1c72916STejun Heo
2650b1c72916STejun Heo if (slave) {
2651b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET;
2652b1c72916STejun Heo sehc->i.action |= ehc->i.action;
2653b1c72916STejun Heo }
2654b1c72916STejun Heo
2655b1c72916STejun Heo rc = prereset(link, deadline);
2656b1c72916STejun Heo
2657b1c72916STejun Heo /* If present, do prereset on slave link too. Reset
2658b1c72916STejun Heo * is skipped iff both master and slave links report
2659b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET.
2660b1c72916STejun Heo */
2661b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) {
2662b1c72916STejun Heo int tmp;
2663b1c72916STejun Heo
2664b1c72916STejun Heo tmp = prereset(slave, deadline);
2665b1c72916STejun Heo if (tmp != -ENOENT)
2666b1c72916STejun Heo rc = tmp;
2667b1c72916STejun Heo
2668b1c72916STejun Heo ehc->i.action |= sehc->i.action;
2669b1c72916STejun Heo }
2670b1c72916STejun Heo
2671c6fd2807SJeff Garzik if (rc) {
2672c961922bSAlan Cox if (rc == -ENOENT) {
2673a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n");
2674cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET;
26754aa9ab67STejun Heo
26761eca4365STejun Heo ata_for_each_dev(dev, link, ALL)
2677f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE;
26784aa9ab67STejun Heo
26794aa9ab67STejun Heo rc = 0;
2680c961922bSAlan Cox } else
2681a9a79dfeSJoe Perches ata_link_err(link,
2682a9a79dfeSJoe Perches "prereset failed (errno=%d)\n",
2683a9a79dfeSJoe Perches rc);
2684fccb6ea5STejun Heo goto out;
2685c6fd2807SJeff Garzik }
2686c6fd2807SJeff Garzik
2687932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so,
2688d6515e6fSTejun Heo * bang classes, thaw and return.
2689932648b0STejun Heo */
2690932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) {
26911eca4365STejun Heo ata_for_each_dev(dev, link, ALL)
2692f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE;
26934cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap) && ata_is_host_link(link))
2694d6515e6fSTejun Heo ata_eh_thaw_port(ap);
2695fccb6ea5STejun Heo rc = 0;
2696fccb6ea5STejun Heo goto out;
2697c6fd2807SJeff Garzik }
2698932648b0STejun Heo }
2699c6fd2807SJeff Garzik
2700c6fd2807SJeff Garzik retry:
2701932648b0STejun Heo /*
2702932648b0STejun Heo * Perform reset
2703932648b0STejun Heo */
2704dc98c32cSTejun Heo if (ata_is_host_link(link))
2705dc98c32cSTejun Heo ata_eh_freeze_port(ap);
2706dc98c32cSTejun Heo
2707341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
270831daabdaSTejun Heo
2709932648b0STejun Heo if (reset) {
2710c6fd2807SJeff Garzik if (verbose)
2711a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n",
2712c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard");
2713c6fd2807SJeff Garzik
2714c6fd2807SJeff Garzik /* mark that this EH session started with reset */
271519b72321STejun Heo ehc->last_reset = jiffies;
2716f8ec26d0SHannes Reinecke if (reset == hardreset) {
27170d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2718f8ec26d0SHannes Reinecke trace_ata_link_hardreset_begin(link, classes, deadline);
2719f8ec26d0SHannes Reinecke } else {
27200d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2721f8ec26d0SHannes Reinecke trace_ata_link_softreset_begin(link, classes, deadline);
2722f8ec26d0SHannes Reinecke }
2723c6fd2807SJeff Garzik
2724b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true);
2725f8ec26d0SHannes Reinecke if (reset == hardreset)
2726f8ec26d0SHannes Reinecke trace_ata_link_hardreset_end(link, classes, rc);
2727f8ec26d0SHannes Reinecke else
2728f8ec26d0SHannes Reinecke trace_ata_link_softreset_end(link, classes, rc);
2729b1c72916STejun Heo if (rc && rc != -EAGAIN) {
2730b1c72916STejun Heo failed_link = link;
27315dbfc9cbSTejun Heo goto fail;
2732b1c72916STejun Heo }
2733c6fd2807SJeff Garzik
2734b1c72916STejun Heo /* hardreset slave link if existent */
2735b1c72916STejun Heo if (slave && reset == hardreset) {
2736b1c72916STejun Heo int tmp;
2737b1c72916STejun Heo
2738b1c72916STejun Heo if (verbose)
2739a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n");
2740b1c72916STejun Heo
2741b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2742f8ec26d0SHannes Reinecke trace_ata_slave_hardreset_begin(slave, classes,
2743f8ec26d0SHannes Reinecke deadline);
2744b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline,
2745b1c72916STejun Heo false);
2746f8ec26d0SHannes Reinecke trace_ata_slave_hardreset_end(slave, classes, tmp);
2747b1c72916STejun Heo switch (tmp) {
2748b1c72916STejun Heo case -EAGAIN:
2749b1c72916STejun Heo rc = -EAGAIN;
2750e06abcc6SGustavo A. R. Silva break;
2751b1c72916STejun Heo case 0:
2752b1c72916STejun Heo break;
2753b1c72916STejun Heo default:
2754b1c72916STejun Heo failed_link = slave;
2755b1c72916STejun Heo rc = tmp;
2756b1c72916STejun Heo goto fail;
2757b1c72916STejun Heo }
2758b1c72916STejun Heo }
2759b1c72916STejun Heo
2760b1c72916STejun Heo /* perform follow-up SRST if necessary */
2761c6fd2807SJeff Garzik if (reset == hardreset &&
2762e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) {
2763c6fd2807SJeff Garzik reset = softreset;
2764c6fd2807SJeff Garzik
2765c6fd2807SJeff Garzik if (!reset) {
2766a9a79dfeSJoe Perches ata_link_err(link,
2767a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n");
2768b1c72916STejun Heo failed_link = link;
2769fccb6ea5STejun Heo rc = -EINVAL;
277008cf69d0STejun Heo goto fail;
2771c6fd2807SJeff Garzik }
2772c6fd2807SJeff Garzik
2773cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2774f8ec26d0SHannes Reinecke trace_ata_link_softreset_begin(link, classes, deadline);
2775b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true);
2776f8ec26d0SHannes Reinecke trace_ata_link_softreset_end(link, classes, rc);
2777fe2c4d01STejun Heo if (rc) {
2778fe2c4d01STejun Heo failed_link = link;
2779fe2c4d01STejun Heo goto fail;
2780fe2c4d01STejun Heo }
2781c6fd2807SJeff Garzik }
2782932648b0STejun Heo } else {
2783932648b0STejun Heo if (verbose)
2784a9a79dfeSJoe Perches ata_link_info(link,
2785a9a79dfeSJoe Perches "no reset method available, skipping reset\n");
2786932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2787932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA;
2788932648b0STejun Heo }
2789008a7896STejun Heo
2790932648b0STejun Heo /*
2791932648b0STejun Heo * Post-reset processing
2792932648b0STejun Heo */
27931eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
2794416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the
2795416dc9edSTejun Heo * controller state is undefined. Reset also wakes up
2796416dc9edSTejun Heo * drives from sleeping mode.
2797c6fd2807SJeff Garzik */
2798f58229f8STejun Heo dev->pio_mode = XFER_PIO_0;
2799054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING;
2800c6fd2807SJeff Garzik
28013b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev)))
28023b761d3dSTejun Heo continue;
28033b761d3dSTejun Heo
28044ccd3329STejun Heo /* apply class override */
2805416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA)
2806ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA;
2807416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2808816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2809ae791c05STejun Heo }
2810ae791c05STejun Heo
2811008a7896STejun Heo /* record current link speed */
2812936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2813936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf;
2814b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2815b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf;
2816008a7896STejun Heo
2817dc98c32cSTejun Heo /* thaw the port */
2818dc98c32cSTejun Heo if (ata_is_host_link(link))
2819dc98c32cSTejun Heo ata_eh_thaw_port(ap);
2820dc98c32cSTejun Heo
2821f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError
2822f046519fSTejun Heo * is cleared during link resume, clearing SError here is
2823f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST.
2824f046519fSTejun Heo * This introduces a race condition where hotplug occurs between
2825f046519fSTejun Heo * reset and here. This race is mitigated by cross checking
2826f046519fSTejun Heo * link onlineness and classification result later.
2827f046519fSTejun Heo */
2828b1c72916STejun Heo if (postreset) {
2829cc0680a5STejun Heo postreset(link, classes);
2830f8ec26d0SHannes Reinecke trace_ata_link_postreset(link, classes, rc);
2831f8ec26d0SHannes Reinecke if (slave) {
2832b1c72916STejun Heo postreset(slave, classes);
2833f8ec26d0SHannes Reinecke trace_ata_slave_postreset(slave, classes, rc);
2834f8ec26d0SHannes Reinecke }
2835b1c72916STejun Heo }
2836c6fd2807SJeff Garzik
283780cc944eSNiklas Cassel /* clear cached SError */
2838f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags);
283980cc944eSNiklas Cassel link->eh_info.serror = 0;
2840b1c72916STejun Heo if (slave)
284180cc944eSNiklas Cassel slave->eh_info.serror = 0;
2842f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags);
2843f046519fSTejun Heo
28443b761d3dSTejun Heo /*
28453b761d3dSTejun Heo * Make sure onlineness and classification result correspond.
2846f046519fSTejun Heo * Hotplug could have happened during reset and some
2847f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after
2848f046519fSTejun Heo * being hotplugged, causing misdetection. By cross checking
28493b761d3dSTejun Heo * link on/offlineness and classification result, those
28503b761d3dSTejun Heo * conditions can be reliably detected and retried.
2851f046519fSTejun Heo */
2852b1c72916STejun Heo nr_unknown = 0;
28531eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
28543b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2855b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2856a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n");
2857f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE;
2858b1c72916STejun Heo nr_unknown++;
2859b1c72916STejun Heo }
28603b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
28613b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno]))
2862a9a79dfeSJoe Perches ata_dev_dbg(dev,
2863a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n",
28643b761d3dSTejun Heo classes[dev->devno]);
28653b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE;
28663b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2867a9a79dfeSJoe Perches ata_dev_dbg(dev,
2868a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n");
28693b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE;
28703b761d3dSTejun Heo }
2871f046519fSTejun Heo }
2872f046519fSTejun Heo
2873b1c72916STejun Heo if (classify && nr_unknown) {
2874f046519fSTejun Heo if (try < max_tries) {
2875a9a79dfeSJoe Perches ata_link_warn(link,
2876a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n",
28773b761d3dSTejun Heo nr_unknown);
2878b1c72916STejun Heo failed_link = link;
2879f046519fSTejun Heo rc = -EAGAIN;
2880f046519fSTejun Heo goto fail;
2881f046519fSTejun Heo }
2882a9a79dfeSJoe Perches ata_link_warn(link,
28833b761d3dSTejun Heo "link online but %d devices misclassified, "
28843b761d3dSTejun Heo "device detection might fail\n", nr_unknown);
2885f046519fSTejun Heo }
2886f046519fSTejun Heo
2887c6fd2807SJeff Garzik /* reset successful, schedule revalidation */
2888cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET);
2889b1c72916STejun Heo if (slave)
2890b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET);
289119b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */
2892c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE;
28936b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
2894416dc9edSTejun Heo
2895416dc9edSTejun Heo rc = 0;
2896fccb6ea5STejun Heo out:
2897fccb6ea5STejun Heo /* clear hotplug flag */
2898fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2899b1c72916STejun Heo if (slave)
2900b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2901afaa5c37STejun Heo
2902afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags);
2903afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING;
2904afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags);
2905afaa5c37STejun Heo
2906c6fd2807SJeff Garzik return rc;
2907416dc9edSTejun Heo
2908416dc9edSTejun Heo fail:
29095958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
29105958e302STejun Heo if (!ata_is_host_link(link) &&
29115958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus))
29125958e302STejun Heo rc = -ERESTART;
29135958e302STejun Heo
29147a46c078SGwendal Grignou if (try >= max_tries) {
29158ea7645cSTejun Heo /*
29168ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port
29178ea7645cSTejun Heo * can be retried on the next phy event. This risks
29188ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than
29198ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt.
29208ea7645cSTejun Heo */
29218ea7645cSTejun Heo if (ata_is_host_link(link))
29228ea7645cSTejun Heo ata_eh_thaw_port(ap);
2923416dc9edSTejun Heo goto out;
29248ea7645cSTejun Heo }
2925416dc9edSTejun Heo
2926416dc9edSTejun Heo now = jiffies;
2927416dc9edSTejun Heo if (time_before(now, deadline)) {
2928416dc9edSTejun Heo unsigned long delta = deadline - now;
2929416dc9edSTejun Heo
2930a9a79dfeSJoe Perches ata_link_warn(failed_link,
29310a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n",
29320a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2933416dc9edSTejun Heo
2934c0c362b6STejun Heo ata_eh_release(ap);
2935416dc9edSTejun Heo while (delta)
2936416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta);
2937c0c362b6STejun Heo ata_eh_acquire(ap);
2938416dc9edSTejun Heo }
2939416dc9edSTejun Heo
29407a46c078SGwendal Grignou /*
29417a46c078SGwendal Grignou * While disks spin up behind a PMP, some controllers fail to send SRST.
29427a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying.
29437a46c078SGwendal Grignou */
29447a46c078SGwendal Grignou if (rc == -ERESTART) {
29457a46c078SGwendal Grignou if (ata_is_host_link(link))
29467a46c078SGwendal Grignou ata_eh_thaw_port(ap);
29477a46c078SGwendal Grignou goto out;
29487a46c078SGwendal Grignou }
29497a46c078SGwendal Grignou
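/* when tries run low (or on -EPIPE), lower the allowed SATA link speed */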
2950b1c72916STejun Heo if (try == max_tries - 1) {
2951a07d499bSTejun Heo sata_down_spd_limit(link, 0);
2952b1c72916STejun Heo if (slave)
2953a07d499bSTejun Heo sata_down_spd_limit(slave, 0);
2954b1c72916STejun Heo } else if (rc == -EPIPE)
2955a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0);
2956b1c72916STejun Heo
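/* go back to hardreset, if one is available, for the next attempt */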
2957416dc9edSTejun Heo if (hardreset)
2958416dc9edSTejun Heo reset = hardreset;
2959416dc9edSTejun Heo goto retry;
2960c6fd2807SJeff Garzik }
2961c6fd2807SJeff Garzik
296245fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap)
296345fabbb7SElias Oltmanns {
296445fabbb7SElias Oltmanns struct ata_link *link;
296545fabbb7SElias Oltmanns struct ata_device *dev;
296645fabbb7SElias Oltmanns unsigned long flags;
296745fabbb7SElias Oltmanns
296845fabbb7SElias Oltmanns /*
296945fabbb7SElias Oltmanns * This function can be thought of as an extended version of
297045fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the
297145fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread
297245fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as
297345fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on
297445fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick
297545fabbb7SElias Oltmanns * up park requests to other devices on the same port or
297645fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull
297745fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i
297845fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop.
297945fabbb7SElias Oltmanns *
298045fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending
298116735d02SWolfram Sang * through reinit_completion() (see below) or complete_all()
298245fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock.
298345fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on
298445fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for
298545fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the
298645fabbb7SElias Oltmanns * respective eh_context structs. If, and only if,
298745fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach
298845fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action
298945fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port
299045fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in
299145fabbb7SElias Oltmanns * ata_eh_recover() again.
299245fabbb7SElias Oltmanns */
299345fabbb7SElias Oltmanns
299445fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags);
299516735d02SWolfram Sang reinit_completion(&ap->park_req_pending);
29961eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
29971eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
299845fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info;
299945fabbb7SElias Oltmanns
300045fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |=
300145fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK;
300245fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
300345fabbb7SElias Oltmanns }
300445fabbb7SElias Oltmanns }
300545fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags);
300645fabbb7SElias Oltmanns }
300745fabbb7SElias Oltmanns
300845fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
300945fabbb7SElias Oltmanns {
301045fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context;
301145fabbb7SElias Oltmanns struct ata_taskfile tf;
301245fabbb7SElias Oltmanns unsigned int err_mask;
301345fabbb7SElias Oltmanns
301445fabbb7SElias Oltmanns ata_tf_init(dev, &tf);
301545fabbb7SElias Oltmanns if (park) {
301645fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno;
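/*
 * IDLE IMMEDIATE with UNLOAD FEATURE (assumed per the ACS spec):
 * FEATURES is 0x44 and LBA carries the 0x554E4C ("UNL") signature; on
 * success the device is expected to return 0xC4 in the LBA low
 * register, which is checked after command completion below.
 */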
301745fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE;
301845fabbb7SElias Oltmanns tf.feature = 0x44;
301945fabbb7SElias Oltmanns tf.lbal = 0x4c;
302045fabbb7SElias Oltmanns tf.lbam = 0x4e;
302145fabbb7SElias Oltmanns tf.lbah = 0x55;
302245fabbb7SElias Oltmanns } else {
302345fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno);
302445fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER;
302545fabbb7SElias Oltmanns }
302645fabbb7SElias Oltmanns
302745fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3028bd18bc04SHannes Reinecke tf.protocol = ATA_PROT_NODATA;
302945fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
303045fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) {
3031a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n");
303245fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno);
303345fabbb7SElias Oltmanns }
303445fabbb7SElias Oltmanns }
303545fabbb7SElias Oltmanns
30360260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link,
3037c6fd2807SJeff Garzik struct ata_device **r_failed_dev)
3038c6fd2807SJeff Garzik {
30390260731fSTejun Heo struct ata_port *ap = link->ap;
30400260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
3041c6fd2807SJeff Garzik struct ata_device *dev;
30428c3c52a8STejun Heo unsigned int new_mask = 0;
3043c6fd2807SJeff Garzik unsigned long flags;
3044f58229f8STejun Heo int rc = 0;
3045c6fd2807SJeff Garzik
30468c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must
30478c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave
30488c3c52a8STejun Heo * device before the master device is identified.
30498c3c52a8STejun Heo */
30501eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) {
3051f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev);
3052f58229f8STejun Heo unsigned int readid_flags = 0;
3053c6fd2807SJeff Garzik
3054bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET)
3055bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET;
3056bff04647STejun Heo
3057aa3998dbSDamien Le Moal /*
3058aa3998dbSDamien Le Moal * When resuming, before executing any command, make sure to
3059aa3998dbSDamien Le Moal * transition the device to the active power mode.
3060aa3998dbSDamien Le Moal */
3061aa3998dbSDamien Le Moal if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
3062aa3998dbSDamien Le Moal ata_dev_power_set_active(dev);
3063aa3998dbSDamien Le Moal ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
3064aa3998dbSDamien Le Moal }
3065aa3998dbSDamien Le Moal
30669666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3067633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP);
3068633273a3STejun Heo
306971d7b6e5SNiklas Cassel /*
307071d7b6e5SNiklas Cassel * The link may be in a deep sleep, wake it up.
307171d7b6e5SNiklas Cassel *
307271d7b6e5SNiklas Cassel * If the link is in deep sleep, ata_phys_link_offline()
307371d7b6e5SNiklas Cassel * will return true, causing the revalidation to fail,
307471d7b6e5SNiklas Cassel * which leads to a (potentially) needless hard reset.
307571d7b6e5SNiklas Cassel *
307671d7b6e5SNiklas Cassel * ata_eh_recover() will later restore the link policy
307771d7b6e5SNiklas Cassel * to ap->target_lpm_policy after revalidation is done.
307871d7b6e5SNiklas Cassel */
307971d7b6e5SNiklas Cassel if (link->lpm_policy > ATA_LPM_MAX_POWER) {
308071d7b6e5SNiklas Cassel rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
308171d7b6e5SNiklas Cassel r_failed_dev);
308271d7b6e5SNiklas Cassel if (rc)
308371d7b6e5SNiklas Cassel goto err;
308471d7b6e5SNiklas Cassel }
308571d7b6e5SNiklas Cassel
3086b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3087c6fd2807SJeff Garzik rc = -EIO;
30888c3c52a8STejun Heo goto err;
3089c6fd2807SJeff Garzik }
3090c6fd2807SJeff Garzik
30910260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3092422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3093422c9daaSTejun Heo readid_flags);
3094c6fd2807SJeff Garzik if (rc)
30958c3c52a8STejun Heo goto err;
3096c6fd2807SJeff Garzik
30970260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3098c6fd2807SJeff Garzik
3099baa1e78aSTejun Heo /* Configuration may have changed, reconfigure
3100baa1e78aSTejun Heo * transfer mode.
3101baa1e78aSTejun Heo */
3102baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE;
3103baa1e78aSTejun Heo
3104c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */
31056aa0365aSDamien Le Moal schedule_delayed_work(&ap->scsi_rescan_task, 0);
3106c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN &&
3107c6fd2807SJeff Garzik ehc->tries[dev->devno] &&
3108c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) {
3109842faa6cSTejun Heo /* Temporarily set dev->class; it will be
3110842faa6cSTejun Heo * permanently set once all configurations are
3111842faa6cSTejun Heo * complete. This is necessary because new
3112842faa6cSTejun Heo * device configuration is done in two
3113842faa6cSTejun Heo * separate loops.
3114842faa6cSTejun Heo */
3115c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno];
3116c6fd2807SJeff Garzik
3117633273a3STejun Heo if (dev->class == ATA_DEV_PMP)
3118633273a3STejun Heo rc = sata_pmp_attach(dev);
3119633273a3STejun Heo else
3120633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class,
3121633273a3STejun Heo readid_flags, dev->id);
3122842faa6cSTejun Heo
3123842faa6cSTejun Heo /* read_id might have changed class, store and reset */
3124842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class;
3125842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN;
3126842faa6cSTejun Heo
31278c3c52a8STejun Heo switch (rc) {
31288c3c52a8STejun Heo case 0:
312999cf610aSTejun Heo /* clear error info accumulated during probe */
313099cf610aSTejun Heo ata_ering_clear(&dev->ering);
3131f58229f8STejun Heo new_mask |= 1 << dev->devno;
31328c3c52a8STejun Heo break;
31338c3c52a8STejun Heo case -ENOENT:
313455a8e2c8STejun Heo /* IDENTIFY was issued to non-existent
313555a8e2c8STejun Heo * device. No need to reset. Just
3136842faa6cSTejun Heo * thaw and ignore the device.
313755a8e2c8STejun Heo */
313855a8e2c8STejun Heo ata_eh_thaw_port(ap);
3139c6fd2807SJeff Garzik break;
31408c3c52a8STejun Heo default:
31418c3c52a8STejun Heo goto err;
31428c3c52a8STejun Heo }
31438c3c52a8STejun Heo }
3144c6fd2807SJeff Garzik }
3145c6fd2807SJeff Garzik
3146c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */
314733267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
314833267325STejun Heo if (ap->ops->cable_detect)
3149c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap);
315033267325STejun Heo ata_force_cbl(ap);
315133267325STejun Heo }
3152c1c4e8d5STejun Heo
31538c3c52a8STejun Heo /* Configure new devices forward such that the user doesn't see
31548c3c52a8STejun Heo * device detection messages backwards.
31558c3c52a8STejun Heo */
31561eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
31574f7c2874STejun Heo if (!(new_mask & (1 << dev->devno)))
31588c3c52a8STejun Heo continue;
31598c3c52a8STejun Heo
3160842faa6cSTejun Heo dev->class = ehc->classes[dev->devno];
3161842faa6cSTejun Heo
31624f7c2874STejun Heo if (dev->class == ATA_DEV_PMP)
31634f7c2874STejun Heo continue;
31644f7c2874STejun Heo
31658c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO;
31668c3c52a8STejun Heo rc = ata_dev_configure(dev);
31678c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3168842faa6cSTejun Heo if (rc) {
3169842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN;
31708c3c52a8STejun Heo goto err;
3171842faa6cSTejun Heo }
31728c3c52a8STejun Heo
3173c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
3174c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3175c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
3176baa1e78aSTejun Heo
317755a8e2c8STejun Heo /* new device discovered, configure xfermode */
3178baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE;
3179c6fd2807SJeff Garzik }
3180c6fd2807SJeff Garzik
31818c3c52a8STejun Heo return 0;
31828c3c52a8STejun Heo
31838c3c52a8STejun Heo err:
3184a1f506afSDamien Le Moal dev->flags &= ~ATA_DFLAG_RESUMING;
3185c6fd2807SJeff Garzik *r_failed_dev = dev;
3186c6fd2807SJeff Garzik return rc;
3187c6fd2807SJeff Garzik }
3188c6fd2807SJeff Garzik
31896f1d1e3aSTejun Heo /**
31906f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER
31916f1d1e3aSTejun Heo * @link: link on which timings will be programmed
319298a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device
31936f1d1e3aSTejun Heo *
31946f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
31956f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is
31966f1d1e3aSTejun Heo * returned in @r_failed_dev.
31976f1d1e3aSTejun Heo *
31986f1d1e3aSTejun Heo * LOCKING:
31996f1d1e3aSTejun Heo * PCI/etc. bus probe sem.
32006f1d1e3aSTejun Heo *
32016f1d1e3aSTejun Heo * RETURNS:
32026f1d1e3aSTejun Heo * 0 on success, negative errno otherwise
32036f1d1e3aSTejun Heo */
32046f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
32056f1d1e3aSTejun Heo {
32066f1d1e3aSTejun Heo struct ata_port *ap = link->ap;
320700115e0fSTejun Heo struct ata_device *dev;
320800115e0fSTejun Heo int rc;
32096f1d1e3aSTejun Heo
321076326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
32111eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) {
321276326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
321376326ac1STejun Heo struct ata_ering_entry *ent;
321476326ac1STejun Heo
321576326ac1STejun Heo ent = ata_ering_top(&dev->ering);
321676326ac1STejun Heo if (ent)
321776326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
321876326ac1STejun Heo }
321976326ac1STejun Heo }
322076326ac1STejun Heo
32216f1d1e3aSTejun Heo /* has private set_mode? */
32226f1d1e3aSTejun Heo if (ap->ops->set_mode)
322300115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev);
322400115e0fSTejun Heo else
322500115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev);
322600115e0fSTejun Heo
322700115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */
32281eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) {
322900115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
323000115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
323100115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
323200115e0fSTejun Heo
323300115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode ||
323400115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq)
323500115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
323600115e0fSTejun Heo }
323700115e0fSTejun Heo
323800115e0fSTejun Heo return rc;
32396f1d1e3aSTejun Heo }
32406f1d1e3aSTejun Heo
324111fc33daSTejun Heo /**
324211fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
324311fc33daSTejun Heo * @dev: ATAPI device to clear UA for
324411fc33daSTejun Heo *
324511fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise
324611fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This
324711fc33daSTejun Heo * function clears UA.
324811fc33daSTejun Heo *
324911fc33daSTejun Heo * LOCKING:
325011fc33daSTejun Heo * EH context (may sleep).
325111fc33daSTejun Heo *
325211fc33daSTejun Heo * RETURNS:
325311fc33daSTejun Heo * 0 on success, -errno on failure.
325411fc33daSTejun Heo */
325511fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev)
325611fc33daSTejun Heo {
325711fc33daSTejun Heo int i;
325811fc33daSTejun Heo
325911fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3260b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf;
326111fc33daSTejun Heo u8 sense_key = 0;
326211fc33daSTejun Heo unsigned int err_mask;
326311fc33daSTejun Heo
326411fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key);
326511fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3266a9a79dfeSJoe Perches ata_dev_warn(dev,
3267a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3268a9a79dfeSJoe Perches err_mask);
326911fc33daSTejun Heo return -EIO;
327011fc33daSTejun Heo }
327111fc33daSTejun Heo
327211fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION)
327311fc33daSTejun Heo return 0;
327411fc33daSTejun Heo
327511fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
327611fc33daSTejun Heo if (err_mask) {
3277a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear "
327811fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
327911fc33daSTejun Heo return -EIO;
328011fc33daSTejun Heo }
328111fc33daSTejun Heo }
328211fc33daSTejun Heo
3283a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3284a9a79dfeSJoe Perches ATA_EH_UA_TRIES);
328511fc33daSTejun Heo
328611fc33daSTejun Heo return 0;
328711fc33daSTejun Heo }
328811fc33daSTejun Heo
32896013efd8STejun Heo /**
32906013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
32916013efd8STejun Heo * @dev: ATA device which may need FLUSH retry
32926013efd8STejun Heo *
32936013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported to the upper layer
32946013efd8STejun Heo * immediately as it means that @dev failed to remap and already
32956013efd8STejun Heo * lost at least a sector and further FLUSH retries won't make
32966013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed
32976013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs
32986013efd8STejun Heo * to be retried.
32996013efd8STejun Heo *
33006013efd8STejun Heo * This function determines whether FLUSH failure retry is
33016013efd8STejun Heo * necessary and performs it if so.
33026013efd8STejun Heo *
33036013efd8STejun Heo * RETURNS:
33046013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated.
33056013efd8STejun Heo */
33066013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev)
33076013efd8STejun Heo {
33086013efd8STejun Heo struct ata_link *link = dev->link;
33096013efd8STejun Heo struct ata_port *ap = link->ap;
33106013efd8STejun Heo struct ata_queued_cmd *qc;
33116013efd8STejun Heo struct ata_taskfile tf;
33126013efd8STejun Heo unsigned int err_mask;
33136013efd8STejun Heo int rc = 0;
33146013efd8STejun Heo
33156013efd8STejun Heo /* did flush fail for this device? */
33166013efd8STejun Heo if (!ata_tag_valid(link->active_tag))
33176013efd8STejun Heo return 0;
33186013efd8STejun Heo
33196013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag);
33206013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
33216013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH))
33226013efd8STejun Heo return 0;
33236013efd8STejun Heo
33246013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */
33256013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV)
33266013efd8STejun Heo return 0;
33276013efd8STejun Heo
33286013efd8STejun Heo /* flush failed for some other reason, give it another shot */
33296013efd8STejun Heo ata_tf_init(dev, &tf);
33306013efd8STejun Heo
33316013efd8STejun Heo tf.command = qc->tf.command;
33326013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE;
33336013efd8STejun Heo tf.protocol = ATA_PROT_NODATA;
33346013efd8STejun Heo
3335a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
33366013efd8STejun Heo tf.command, qc->err_mask);
33376013efd8STejun Heo
33386013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
33396013efd8STejun Heo if (!err_mask) {
33406013efd8STejun Heo /*
33416013efd8STejun Heo * FLUSH is complete but there's no way to
33426013efd8STejun Heo * successfully complete a failed command from EH.
33436013efd8STejun Heo * Making sure retry is allowed at least once and
33446013efd8STejun Heo * retrying it should do the trick - whatever was in
33456013efd8STejun Heo * the cache is already on the platter and this won't
33466013efd8STejun Heo * cause an infinite loop.
33476013efd8STejun Heo */
33486013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
33496013efd8STejun Heo } else {
3350a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
33516013efd8STejun Heo err_mask);
33526013efd8STejun Heo rc = -EIO;
33536013efd8STejun Heo
33546013efd8STejun Heo /* if device failed it, report it to upper layers */
33556013efd8STejun Heo if (err_mask & AC_ERR_DEV) {
33566013efd8STejun Heo qc->err_mask |= AC_ERR_DEV;
33576013efd8STejun Heo qc->result_tf = tf;
33584cb7c6f1SNiklas Cassel if (!ata_port_is_frozen(ap))
33596013efd8STejun Heo rc = 0;
33606013efd8STejun Heo }
33616013efd8STejun Heo }
33626013efd8STejun Heo return rc;
33636013efd8STejun Heo }
33646013efd8STejun Heo
33656b7ae954STejun Heo /**
33666b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management
33676b7ae954STejun Heo * @link: link to configure power management
33686b7ae954STejun Heo * @policy: the link power management policy
33696b7ae954STejun Heo * @r_failed_dev: out parameter for failed device
33706b7ae954STejun Heo *
33716b7ae954STejun Heo * Enable SATA Interface power management. This will enable
3372f4ac6476SHans de Goede * Device Interface Power Management (DIPM) for min_power and
3373f4ac6476SHans de Goede * medium_power_with_dipm policies, and then call driver-specific
3374f4ac6476SHans de Goede * callbacks for enabling Host Initiated Power Management (HIPM).
33756b7ae954STejun Heo *
33766b7ae954STejun Heo * LOCKING:
33776b7ae954STejun Heo * EH context.
33786b7ae954STejun Heo *
33796b7ae954STejun Heo * RETURNS:
33806b7ae954STejun Heo * 0 on success, -errno on failure.
33816b7ae954STejun Heo */
33826b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
33836b7ae954STejun Heo struct ata_device **r_failed_dev)
33846b7ae954STejun Heo {
33856c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
33866b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context;
33876b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3388e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy;
33895f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
33906b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
33916b7ae954STejun Heo unsigned int err_mask;
33926b7ae954STejun Heo int rc;
33936b7ae954STejun Heo
33946b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */
33954c9029e7SBartlomiej Zolnierkiewicz if (!IS_ENABLED(CONFIG_SATA_HOST) ||
33964c9029e7SBartlomiej Zolnierkiewicz (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
33976b7ae954STejun Heo return 0;
33986b7ae954STejun Heo
33996b7ae954STejun Heo /*
34006b7ae954STejun Heo * DIPM is enabled only for ATA_LPM_MED_POWER_WITH_DIPM and more
34016b7ae954STejun Heo * aggressive policies as some devices misbehave when the host
34026b7ae954STejun Heo * NACKs transition to SLUMBER.  Order device and link
34036b7ae954STejun Heo * configurations such that the host always allows DIPM requests.
34046b7ae954STejun Heo */
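/*
 * (enum ata_lpm_policy is ordered from ATA_LPM_MAX_POWER, the least
 * aggressive, to ATA_LPM_MIN_POWER, the most aggressive, which is what
 * makes the "policy < ATA_LPM_MED_POWER_WITH_DIPM" comparison below a
 * valid test for "DIPM must stay off".)
 */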
34056b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) {
34066b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id);
3407ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
34086b7ae954STejun Heo
34096b7ae954STejun Heo /* find the first enabled and LPM enabled devices */
34106b7ae954STejun Heo if (!link_dev)
34116b7ae954STejun Heo link_dev = dev;
34126b7ae954STejun Heo
34136b7ae954STejun Heo if (!lpm_dev && (hipm || dipm))
34146b7ae954STejun Heo lpm_dev = dev;
34156b7ae954STejun Heo
34166b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY;
34176b7ae954STejun Heo if (!hipm)
34186b7ae954STejun Heo hints &= ~ATA_LPM_HIPM;
34196b7ae954STejun Heo
34206b7ae954STejun Heo /* disable DIPM before changing link config */
3421f4ac6476SHans de Goede if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
34226b7ae954STejun Heo err_mask = ata_dev_set_feature(dev,
34236b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM);
34246b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) {
3425a9a79dfeSJoe Perches ata_dev_warn(dev,
34266b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n",
34276b7ae954STejun Heo err_mask);
34286b7ae954STejun Heo rc = -EIO;
34296b7ae954STejun Heo goto fail;
34306b7ae954STejun Heo }
34316b7ae954STejun Heo }
34326b7ae954STejun Heo }
34336b7ae954STejun Heo
34346c8ea89cSTejun Heo if (ap) {
34356b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints);
34366b7ae954STejun Heo if (!rc && ap->slave_link)
34376b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
34386c8ea89cSTejun Heo } else
34396c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints);
34406b7ae954STejun Heo
34416b7ae954STejun Heo /*
34426b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled
34436b7ae954STejun Heo * device on the link.
34446b7ae954STejun Heo */
34456b7ae954STejun Heo if (rc) {
34466b7ae954STejun Heo if (rc == -EOPNOTSUPP) {
34476b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM;
34486b7ae954STejun Heo return 0;
34496b7ae954STejun Heo }
34506b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev;
34516b7ae954STejun Heo goto fail;
34526b7ae954STejun Heo }
34536b7ae954STejun Heo
3454e5005b15STejun Heo /*
3455e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command
3456e5005b15STejun Heo * with the new policy set.
3457e5005b15STejun Heo */
3458e5005b15STejun Heo link->lpm_policy = policy;
3459e5005b15STejun Heo if (ap && ap->slave_link)
3460e5005b15STejun Heo ap->slave_link->lpm_policy = policy;
3461e5005b15STejun Heo
34626b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MED_POWER_WITH_DIPM or deeper */
34636b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) {
3464f4ac6476SHans de Goede if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
3465ae01b249STejun Heo ata_id_has_dipm(dev->id)) {
34666b7ae954STejun Heo err_mask = ata_dev_set_feature(dev,
34676b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM);
34686b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) {
3469a9a79dfeSJoe Perches ata_dev_warn(dev,
34706b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n",
34716b7ae954STejun Heo err_mask);
34726b7ae954STejun Heo rc = -EIO;
34736b7ae954STejun Heo goto fail;
34746b7ae954STejun Heo }
34756b7ae954STejun Heo }
34766b7ae954STejun Heo }
34776b7ae954STejun Heo
347809c5b480SGabriele Mazzotta link->last_lpm_change = jiffies;
347909c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED;
348009c5b480SGabriele Mazzotta
34816b7ae954STejun Heo return 0;
34826b7ae954STejun Heo
34836b7ae954STejun Heo fail:
3484e5005b15STejun Heo /* restore the old policy */
3485e5005b15STejun Heo link->lpm_policy = old_policy;
3486e5005b15STejun Heo if (ap && ap->slave_link)
3487e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy;
3488e5005b15STejun Heo
34896b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */
34906b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) {
3491a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n");
34926b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM;
34936b7ae954STejun Heo }
34946b7ae954STejun Heo if (r_failed_dev)
34956b7ae954STejun Heo *r_failed_dev = dev;
34966b7ae954STejun Heo return rc;
34976b7ae954STejun Heo }
34986b7ae954STejun Heo
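/**
 * ata_link_nr_enabled - Return number of enabled devices on link
 * @link: ATA link to examine
 *
 * Return the number of enabled devices on @link.
 *
 * LOCKING:
 * None.
 */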
34998a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link)
3500c6fd2807SJeff Garzik {
3501f58229f8STejun Heo struct ata_device *dev;
3502f58229f8STejun Heo int cnt = 0;
3503c6fd2807SJeff Garzik
35041eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED)
3505c6fd2807SJeff Garzik cnt++;
3506c6fd2807SJeff Garzik return cnt;
3507c6fd2807SJeff Garzik }
3508c6fd2807SJeff Garzik
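/**
 * ata_link_nr_vacant - Return number of vacant device slots on link
 * @link: ATA link to examine
 *
 * Return the number of device slots on @link whose class is still
 * ATA_DEV_UNKNOWN, i.e. slots nothing has been attached to yet.
 *
 * LOCKING:
 * None.
 */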
35090260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link)
3510c6fd2807SJeff Garzik {
3511f58229f8STejun Heo struct ata_device *dev;
3512f58229f8STejun Heo int cnt = 0;
3513c6fd2807SJeff Garzik
35141eca4365STejun Heo ata_for_each_dev(dev, link, ALL)
3515f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN)
3516c6fd2807SJeff Garzik cnt++;
3517c6fd2807SJeff Garzik return cnt;
3518c6fd2807SJeff Garzik }
3519c6fd2807SJeff Garzik
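/**
 * ata_eh_skip_recovery - determine whether EH recovery can be skipped
 * @link: link to check
 *
 * Recovery is skipped if @link is disabled or if skipping was
 * explicitly requested with ATA_EHI_NO_RECOVERY.  Otherwise it is
 * skipped only when there is nothing left to do: the port isn't
 * frozen, no device on the link is enabled, no requested reset is
 * still pending, and every vacant slot has been classified as
 * ATA_DEV_NONE.
 *
 * RETURNS:
 * 1 if recovery can be skipped, 0 otherwise.
 */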
35200260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link)
3521c6fd2807SJeff Garzik {
3522672b2d65STejun Heo struct ata_port *ap = link->ap;
35230260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
3524f58229f8STejun Heo struct ata_device *dev;
3525c6fd2807SJeff Garzik
3526f9df58cbSTejun Heo /* skip disabled links */
3527f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED)
3528f9df58cbSTejun Heo return 1;
3529f9df58cbSTejun Heo
3530e2f3d75fSTejun Heo /* skip if explicitly requested */
3531e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3532e2f3d75fSTejun Heo return 1;
3533e2f3d75fSTejun Heo
3534672b2d65STejun Heo /* thaw frozen port and recover failed devices */
35354cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap) || ata_link_nr_enabled(link))
3536672b2d65STejun Heo return 0;
3537672b2d65STejun Heo
3538672b2d65STejun Heo /* reset at least once if reset is requested */
3539672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) &&
3540672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET))
3541c6fd2807SJeff Garzik return 0;
3542c6fd2807SJeff Garzik
3543c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */
35441eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
3545c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN &&
3546c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE)
3547c6fd2807SJeff Garzik return 0;
3548c6fd2807SJeff Garzik }
3549c6fd2807SJeff Garzik
3550c6fd2807SJeff Garzik return 1;
3551c6fd2807SJeff Garzik }
3552c6fd2807SJeff Garzik
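/**
 * ata_count_probe_trials_cb - count recent probe trials
 * @ent: error ring entry to examine
 * @void_arg: pointer to the trial counter
 *
 * ata_ering_map() callback used by ata_eh_schedule_probe() to count
 * error ring entries recorded within the last
 * ATA_EH_PROBE_TRIAL_INTERVAL.  Iteration stops at the first entry
 * which is older than that or already marked ATA_EFLAG_OLD_ER.
 *
 * RETURNS:
 * 0 to continue mapping, -1 to stop.
 */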
3553c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3554c2c7a89cSTejun Heo {
3555c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3556c2c7a89cSTejun Heo u64 now = get_jiffies_64();
3557c2c7a89cSTejun Heo int *trials = void_arg;
3558c2c7a89cSTejun Heo
35596868225eSLin Ming if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
35606868225eSLin Ming (ent->timestamp < now - min(now, interval)))
3561c2c7a89cSTejun Heo return -1;
3562c2c7a89cSTejun Heo
3563c2c7a89cSTejun Heo (*trials)++;
3564c2c7a89cSTejun Heo return 0;
3565c2c7a89cSTejun Heo }
3566c2c7a89cSTejun Heo
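/**
 * ata_eh_schedule_probe - schedule probing for a device if necessary
 * @dev: ATA device to consider for probing
 *
 * Schedule (re)probing of @dev if it is in the probe mask and hasn't
 * been probed during this EH round yet.  The device is detached and
 * reinitialized, a reset is requested and the link is woken up from
 * link power saving states.  If too many probe trials have been
 * recorded within ATA_EH_PROBE_TRIAL_INTERVAL, the link speed is
 * limited to 1.5Gbps to work around misdetection caused by failed
 * speed negotiation.
 *
 * RETURNS:
 * 1 if probing was scheduled, 0 otherwise.
 */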
356702c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
356802c05a27STejun Heo {
356902c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context;
3570c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev);
3571c2c7a89cSTejun Heo int trials = 0;
357202c05a27STejun Heo
357302c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
357402c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno)))
357502c05a27STejun Heo return 0;
357602c05a27STejun Heo
357702c05a27STejun Heo ata_eh_detach_dev(dev);
357802c05a27STejun Heo ata_dev_init(dev);
357902c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno);
3580cf480626STejun Heo ehc->i.action |= ATA_EH_RESET;
358100115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0;
358200115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno);
358302c05a27STejun Heo
35846b7ae954STejun Heo /* the link may be in a deep sleep, wake it up */
35856c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) {
35866c8ea89cSTejun Heo if (ata_is_host_link(link))
35876c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
35886c8ea89cSTejun Heo ATA_LPM_EMPTY);
35896c8ea89cSTejun Heo else
35906c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
35916c8ea89cSTejun Heo ATA_LPM_EMPTY);
35926c8ea89cSTejun Heo }
35936b7ae954STejun Heo
3594c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific
3595c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device
3596c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if
3597c2c7a89cSTejun Heo * there are consecutive failed probes.
3598c2c7a89cSTejun Heo *
3599c2c7a89cSTejun Heo * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3600c2c7a89cSTejun Heo * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced down to
3601c2c7a89cSTejun Heo * 1.5Gbps.
3602c2c7a89cSTejun Heo *
3603c2c7a89cSTejun Heo * This is to work around cases where failed link speed
3604c2c7a89cSTejun Heo * negotiation results in device misdetection leading to
3605c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events.
3606c2c7a89cSTejun Heo */
3607c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3608c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3609c2c7a89cSTejun Heo
3610c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS)
3611c2c7a89cSTejun Heo sata_down_spd_limit(link, 1);
3612c2c7a89cSTejun Heo
361302c05a27STejun Heo return 1;
361402c05a27STejun Heo }
361502c05a27STejun Heo
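/**
 * ata_eh_handle_dev_fail - handle recovery failure of a device
 * @dev: ATA device which failed recovery
 * @err: -errno describing the failure
 *
 * Consume one recovery try (unless @err is -EAGAIN) and apply the
 * speed or transfer mode limits appropriate for @err.  If @dev has
 * used up all of its tries, it is disabled, detached if offline and,
 * where applicable, rescheduled for probing from scratch; otherwise
 * another reset is requested.
 *
 * RETURNS:
 * 1 if @dev has been disabled, 0 if recovery should be retried.
 */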
36169b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3617fee7ca72STejun Heo {
36189af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context;
3619fee7ca72STejun Heo
3620cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice.
3621cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress.
3622cf9a590aSTejun Heo */
3623cf9a590aSTejun Heo if (err != -EAGAIN)
3624fee7ca72STejun Heo ehc->tries[dev->devno]--;
3625fee7ca72STejun Heo
3626fee7ca72STejun Heo switch (err) {
3627fee7ca72STejun Heo case -ENODEV:
3628fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */
3629fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno);
3630df561f66SGustavo A. R. Silva fallthrough;
3631fee7ca72STejun Heo case -EINVAL:
3632fee7ca72STejun Heo /* give it just one more chance */
3633fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3634df561f66SGustavo A. R. Silva fallthrough;
3635fee7ca72STejun Heo case -EIO:
3636d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) {
3637fee7ca72STejun Heo /* This is the last chance, better to slow
3638fee7ca72STejun Heo * down than lose it.
3639fee7ca72STejun Heo */
3640a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3641d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0)
3642fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3643fee7ca72STejun Heo }
3644fee7ca72STejun Heo }
3645fee7ca72STejun Heo
3646fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3647fee7ca72STejun Heo /* disable device if it has used up all its chances */
3648fee7ca72STejun Heo ata_dev_disable(dev);
3649fee7ca72STejun Heo
3650fee7ca72STejun Heo /* detach if offline */
3651b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3652fee7ca72STejun Heo ata_eh_detach_dev(dev);
3653fee7ca72STejun Heo
365402c05a27STejun Heo /* schedule probe if necessary */
365587fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) {
3656fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
365787fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0,
365887fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno]));
365987fbc5a0STejun Heo }
36609b1e2658STejun Heo
36619b1e2658STejun Heo return 1;
3662fee7ca72STejun Heo } else {
3663cf480626STejun Heo ehc->i.action |= ATA_EH_RESET;
36649b1e2658STejun Heo return 0;
3665fee7ca72STejun Heo }
3666fee7ca72STejun Heo }
3667fee7ca72STejun Heo
3668c6fd2807SJeff Garzik /**
3669c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error
3670c6fd2807SJeff Garzik * @ap: host port to recover
3671c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL)
3672c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL)
3673c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL)
3674c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL)
36759b1e2658STejun Heo * @r_failed_link: out parameter for failed link
3676c6fd2807SJeff Garzik *
3677c6fd2807SJeff Garzik * This is the alpha and omega, yin and yang, heart and soul of
3678c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to
36799b1e2658STejun Heo * recover each link and hotplug requests are recorded in the
36809b1e2658STejun Heo * link's eh_context. This function executes all the operations
36819b1e2658STejun Heo * with appropriate retries and fallbacks to resurrect failed
3682c6fd2807SJeff Garzik * devices, detach goners and greet newcomers.
3683c6fd2807SJeff Garzik *
3684c6fd2807SJeff Garzik * LOCKING:
3685c6fd2807SJeff Garzik * Kernel thread context (may sleep).
3686c6fd2807SJeff Garzik *
3687c6fd2807SJeff Garzik * RETURNS:
3688c6fd2807SJeff Garzik * 0 on success, -errno on failure.
3689c6fd2807SJeff Garzik */
3690fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3691c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
36929b1e2658STejun Heo ata_postreset_fn_t postreset,
36939b1e2658STejun Heo struct ata_link **r_failed_link)
3694c6fd2807SJeff Garzik {
36959b1e2658STejun Heo struct ata_link *link;
3696c6fd2807SJeff Garzik struct ata_device *dev;
36976b7ae954STejun Heo int rc, nr_fails;
369845fabbb7SElias Oltmanns unsigned long flags, deadline;
3699c6fd2807SJeff Garzik
3700c6fd2807SJeff Garzik /* prep for recovery */
37011eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
37029b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context;
37039b1e2658STejun Heo
3704f9df58cbSTejun Heo /* re-enable link? */
3705f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3706f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3707f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags);
3708f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED;
3709f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags);
3710f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3711f9df58cbSTejun Heo }
3712f9df58cbSTejun Heo
37131eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
3714fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY)
3715fd995f70STejun Heo ehc->tries[dev->devno] = 1;
3716fd995f70STejun Heo else
3717c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3718c6fd2807SJeff Garzik
371979a55b72STejun Heo /* collect port action mask recorded in dev actions */
37209b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] &
37219b1e2658STejun Heo ~ATA_EH_PERDEV_MASK;
3722f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
372379a55b72STejun Heo
3724c6fd2807SJeff Garzik /* process hotplug request */
3725c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH)
3726c6fd2807SJeff Garzik ata_eh_detach_dev(dev);
3727c6fd2807SJeff Garzik
372802c05a27STejun Heo /* schedule probe if necessary */
372902c05a27STejun Heo if (!ata_dev_enabled(dev))
373002c05a27STejun Heo ata_eh_schedule_probe(dev);
3731c6fd2807SJeff Garzik }
37329b1e2658STejun Heo }
3733c6fd2807SJeff Garzik
3734c6fd2807SJeff Garzik retry:
3735c6fd2807SJeff Garzik rc = 0;
3736c6fd2807SJeff Garzik
3737c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */
3738c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING)
3739c6fd2807SJeff Garzik goto out;
3740c6fd2807SJeff Garzik
37419b1e2658STejun Heo /* prep for EH */
37421eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
37439b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context;
37449b1e2658STejun Heo
3745c6fd2807SJeff Garzik /* skip EH if possible. */
37460260731fSTejun Heo if (ata_eh_skip_recovery(link))
3747c6fd2807SJeff Garzik ehc->i.action = 0;
3748c6fd2807SJeff Garzik
37491eca4365STejun Heo ata_for_each_dev(dev, link, ALL)
3750f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
37519b1e2658STejun Heo }
3752c6fd2807SJeff Garzik
3753c6fd2807SJeff Garzik /* reset */
37541eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
37559b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context;
37569b1e2658STejun Heo
3757cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET))
37589b1e2658STejun Heo continue;
37599b1e2658STejun Heo
37609b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3761dc98c32cSTejun Heo prereset, softreset, hardreset, postreset);
3762c6fd2807SJeff Garzik if (rc) {
3763a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n");
3764c6fd2807SJeff Garzik goto out;
3765c6fd2807SJeff Garzik }
37669b1e2658STejun Heo }
3767c6fd2807SJeff Garzik
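/*
 * Park (unload) heads for devices which requested it via ATA_EH_PARK
 * (the unload_heads sysfs attribute) and keep them parked until every
 * unpark deadline has passed; the loop after this one then issues the
 * unpark commands.
 */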
376845fabbb7SElias Oltmanns do {
376945fabbb7SElias Oltmanns unsigned long now;
377045fabbb7SElias Oltmanns
377145fabbb7SElias Oltmanns /*
377245fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets
377345fabbb7SElias Oltmanns * ap->park_req_pending
377445fabbb7SElias Oltmanns */
377545fabbb7SElias Oltmanns ata_eh_pull_park_action(ap);
377645fabbb7SElias Oltmanns
377745fabbb7SElias Oltmanns deadline = jiffies;
37781eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
37791eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
378045fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context;
378145fabbb7SElias Oltmanns unsigned long tmp;
378245fabbb7SElias Oltmanns
37839162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA &&
37849162c657SHannes Reinecke dev->class != ATA_DEV_ZAC)
378545fabbb7SElias Oltmanns continue;
378645fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] &
378745fabbb7SElias Oltmanns ATA_EH_PARK))
378845fabbb7SElias Oltmanns continue;
378945fabbb7SElias Oltmanns tmp = dev->unpark_deadline;
379045fabbb7SElias Oltmanns if (time_before(deadline, tmp))
379145fabbb7SElias Oltmanns deadline = tmp;
379245fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies))
379345fabbb7SElias Oltmanns continue;
379445fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno))
379545fabbb7SElias Oltmanns continue;
379645fabbb7SElias Oltmanns
379745fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1);
379845fabbb7SElias Oltmanns }
379945fabbb7SElias Oltmanns }
380045fabbb7SElias Oltmanns
380145fabbb7SElias Oltmanns now = jiffies;
380245fabbb7SElias Oltmanns if (time_before_eq(deadline, now))
380345fabbb7SElias Oltmanns break;
380445fabbb7SElias Oltmanns
3805c0c362b6STejun Heo ata_eh_release(ap);
380645fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending,
380745fabbb7SElias Oltmanns deadline - now);
3808c0c362b6STejun Heo ata_eh_acquire(ap);
380945fabbb7SElias Oltmanns } while (deadline);
38101eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
38111eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
381245fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask &
381345fabbb7SElias Oltmanns (1 << dev->devno)))
381445fabbb7SElias Oltmanns continue;
381545fabbb7SElias Oltmanns
381645fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0);
381745fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK);
381845fabbb7SElias Oltmanns }
381945fabbb7SElias Oltmanns }
382045fabbb7SElias Oltmanns
38219b1e2658STejun Heo /* the rest */
38226b7ae954STejun Heo nr_fails = 0;
38236b7ae954STejun Heo ata_for_each_link(link, ap, PMP_FIRST) {
38249b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context;
38259b1e2658STejun Heo
38266b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link))
38276b7ae954STejun Heo goto config_lpm;
38286b7ae954STejun Heo
3829c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */
38300260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev);
3831c6fd2807SJeff Garzik if (rc)
38326b7ae954STejun Heo goto rest_fail;
3833c6fd2807SJeff Garzik
3834633273a3STejun Heo /* if a PMP got attached, return; PMP EH will take care of it */
3835633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) {
3836633273a3STejun Heo ehc->i.action = 0;
3837633273a3STejun Heo return 0;
3838633273a3STejun Heo }
3839633273a3STejun Heo
3840baa1e78aSTejun Heo /* configure transfer mode if necessary */
3841baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) {
38420260731fSTejun Heo rc = ata_set_mode(link, &dev);
38434ae72a1eSTejun Heo if (rc)
38446b7ae954STejun Heo goto rest_fail;
3845baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE;
3846c6fd2807SJeff Garzik }
3847c6fd2807SJeff Garzik
384811fc33daSTejun Heo /* If reset has been issued, clear UA to avoid
384911fc33daSTejun Heo * disrupting the current users of the device.
385011fc33daSTejun Heo */
385111fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) {
38521eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
385311fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI)
385411fc33daSTejun Heo continue;
385511fc33daSTejun Heo rc = atapi_eh_clear_ua(dev);
385611fc33daSTejun Heo if (rc)
38576b7ae954STejun Heo goto rest_fail;
385821334205SAaron Lu if (zpodd_dev_enabled(dev))
385921334205SAaron Lu zpodd_post_poweron(dev);
386011fc33daSTejun Heo }
386111fc33daSTejun Heo }
386211fc33daSTejun Heo
38636013efd8STejun Heo /* retry flush if necessary */
38646013efd8STejun Heo ata_for_each_dev(dev, link, ALL) {
38659162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA &&
38669162c657SHannes Reinecke dev->class != ATA_DEV_ZAC)
38676013efd8STejun Heo continue;
38686013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev);
38696013efd8STejun Heo if (rc)
38706b7ae954STejun Heo goto rest_fail;
38716013efd8STejun Heo }
38726013efd8STejun Heo
38736b7ae954STejun Heo config_lpm:
387411fc33daSTejun Heo /* configure link power saving */
38756b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) {
38766b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
38776b7ae954STejun Heo if (rc)
38786b7ae954STejun Heo goto rest_fail;
38796b7ae954STejun Heo }
3880ca77329fSKristen Carlson Accardi
38819b1e2658STejun Heo /* this link is okay now */
38829b1e2658STejun Heo ehc->i.flags = 0;
38839b1e2658STejun Heo continue;
3884c6fd2807SJeff Garzik
38856b7ae954STejun Heo rest_fail:
38866b7ae954STejun Heo nr_fails++;
38876b7ae954STejun Heo if (dev)
38880a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc);
3889c6fd2807SJeff Garzik
38904cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap)) {
3891b06ce3e5STejun Heo /* PMP reset requires working host port.
3892b06ce3e5STejun Heo * Can't retry if it's frozen.
3893b06ce3e5STejun Heo */
3894071f44b1STejun Heo if (sata_pmp_attached(ap))
3895b06ce3e5STejun Heo goto out;
38969b1e2658STejun Heo break;
38979b1e2658STejun Heo }
3898b06ce3e5STejun Heo }
38999b1e2658STejun Heo
39006b7ae954STejun Heo if (nr_fails)
3901c6fd2807SJeff Garzik goto retry;
3902c6fd2807SJeff Garzik
3903c6fd2807SJeff Garzik out:
39049b1e2658STejun Heo if (rc && r_failed_link)
39059b1e2658STejun Heo *r_failed_link = link;
3906c6fd2807SJeff Garzik
3907c6fd2807SJeff Garzik return rc;
3908c6fd2807SJeff Garzik }
3909c6fd2807SJeff Garzik
3910c6fd2807SJeff Garzik /**
3911c6fd2807SJeff Garzik * ata_eh_finish - finish up EH
3912c6fd2807SJeff Garzik * @ap: host port to finish EH for
3913c6fd2807SJeff Garzik *
3914c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish
3915c6fd2807SJeff Garzik * failed qcs.
3916c6fd2807SJeff Garzik *
3917c6fd2807SJeff Garzik * LOCKING:
3918c6fd2807SJeff Garzik * None.
3919c6fd2807SJeff Garzik */
3920fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap)
3921c6fd2807SJeff Garzik {
3922258c4e5cSJens Axboe struct ata_queued_cmd *qc;
3923c6fd2807SJeff Garzik int tag;
3924c6fd2807SJeff Garzik
3925c6fd2807SJeff Garzik /* retry or finish qcs */
3926258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) {
392787629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH))
3928c6fd2807SJeff Garzik continue;
3929c6fd2807SJeff Garzik
3930c6fd2807SJeff Garzik if (qc->err_mask) {
3931c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete,
3932c6fd2807SJeff Garzik * generate sense data in this function,
3933c6fd2807SJeff Garzik * considering both err_mask and tf.
3934c6fd2807SJeff Garzik */
3935e4c26a1bSNiklas Cassel if (qc->flags & ATA_QCFLAG_RETRY) {
3936e4c26a1bSNiklas Cassel /*
3937e4c26a1bSNiklas Cassel * Since qc->err_mask is set, ata_eh_qc_retry()
3938e4c26a1bSNiklas Cassel * will not increment scmd->allowed, so upper
3939e4c26a1bSNiklas Cassel * layer will only retry the command if it has
3940e4c26a1bSNiklas Cassel * not already been retried too many times.
3941e4c26a1bSNiklas Cassel */
3942c6fd2807SJeff Garzik ata_eh_qc_retry(qc);
3943e4c26a1bSNiklas Cassel } else {
394403faab78STejun Heo ata_eh_qc_complete(qc);
3945e4c26a1bSNiklas Cassel }
3946c6fd2807SJeff Garzik } else {
394718bd7718SNiklas Cassel if (qc->flags & ATA_QCFLAG_SENSE_VALID ||
394818bd7718SNiklas Cassel qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) {
3949c6fd2807SJeff Garzik ata_eh_qc_complete(qc);
3950c6fd2807SJeff Garzik } else {
3951c6fd2807SJeff Garzik /* feed zero TF to sense generation */
3952c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3953e4c26a1bSNiklas Cassel /*
3954e4c26a1bSNiklas Cassel * Since qc->err_mask is not set,
3955e4c26a1bSNiklas Cassel * ata_eh_qc_retry() will increment
3956e4c26a1bSNiklas Cassel * scmd->allowed, so upper layer is guaranteed
3957e4c26a1bSNiklas Cassel * to retry the command.
3958e4c26a1bSNiklas Cassel */
3959c6fd2807SJeff Garzik ata_eh_qc_retry(qc);
3960c6fd2807SJeff Garzik }
3961c6fd2807SJeff Garzik }
3962c6fd2807SJeff Garzik }
3963da917d69STejun Heo
3964da917d69STejun Heo /* make sure nr_active_links is zero after EH */
3965da917d69STejun Heo WARN_ON(ap->nr_active_links);
3966da917d69STejun Heo ap->nr_active_links = 0;
3967c6fd2807SJeff Garzik }
3968c6fd2807SJeff Garzik
3969c6fd2807SJeff Garzik /**
3970c6fd2807SJeff Garzik * ata_do_eh - do standard error handling
3971c6fd2807SJeff Garzik * @ap: host port to handle error for
3973c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL)
3974c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL)
3975c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL)
3976c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL)
3977c6fd2807SJeff Garzik *
3978c6fd2807SJeff Garzik * Perform standard error handling sequence.
3979c6fd2807SJeff Garzik *
3980c6fd2807SJeff Garzik * LOCKING:
3981c6fd2807SJeff Garzik * Kernel thread context (may sleep).
3982c6fd2807SJeff Garzik */
3983c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3984c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3985c6fd2807SJeff Garzik ata_postreset_fn_t postreset)
3986c6fd2807SJeff Garzik {
39879b1e2658STejun Heo struct ata_device *dev;
39889b1e2658STejun Heo int rc;
39899b1e2658STejun Heo
39909b1e2658STejun Heo ata_eh_autopsy(ap);
39919b1e2658STejun Heo ata_eh_report(ap);
39929b1e2658STejun Heo
39939b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
39949b1e2658STejun Heo NULL);
39959b1e2658STejun Heo if (rc) {
39961eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL)
39979b1e2658STejun Heo ata_dev_disable(dev);
39989b1e2658STejun Heo }
39999b1e2658STejun Heo
4000c6fd2807SJeff Garzik ata_eh_finish(ap);
4001c6fd2807SJeff Garzik }
4002c6fd2807SJeff Garzik
4003a1efdabaSTejun Heo /**
4004a1efdabaSTejun Heo * ata_std_error_handler - standard error handler
4005a1efdabaSTejun Heo * @ap: host port to handle error for
4006a1efdabaSTejun Heo *
4007a1efdabaSTejun Heo * Standard error handler
4008a1efdabaSTejun Heo *
4009a1efdabaSTejun Heo * LOCKING:
4010a1efdabaSTejun Heo * Kernel thread context (may sleep).
4011a1efdabaSTejun Heo */
4012a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
4013a1efdabaSTejun Heo {
4014a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops;
4015a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset;
4016a1efdabaSTejun Heo
401757c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */
4018fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4019a1efdabaSTejun Heo hardreset = NULL;
4020a1efdabaSTejun Heo
4021a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4022a1efdabaSTejun Heo }
4023a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_std_error_handler);
4024a1efdabaSTejun Heo
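/*
 * Usage sketch (not part of this file): a low-level driver with no
 * special reset requirements typically picks up ata_std_error_handler
 * by inheriting from ata_base_port_ops, which already sets it as the
 * ->error_handler; the "foo_" name below is hypothetical.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *	};
 */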
40256ffa01d8STejun Heo #ifdef CONFIG_PM
4026c6fd2807SJeff Garzik /**
4027c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation
4028c6fd2807SJeff Garzik * @ap: port to suspend
4029c6fd2807SJeff Garzik *
4030c6fd2807SJeff Garzik * Suspend @ap.
4031c6fd2807SJeff Garzik *
4032c6fd2807SJeff Garzik * LOCKING:
4033c6fd2807SJeff Garzik * Kernel thread context (may sleep).
4034c6fd2807SJeff Garzik */
4035c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
4036c6fd2807SJeff Garzik {
4037c6fd2807SJeff Garzik unsigned long flags;
4038c6fd2807SJeff Garzik int rc = 0;
40393dc67440SAaron Lu struct ata_device *dev;
4040aa3998dbSDamien Le Moal struct ata_link *link;
4041c6fd2807SJeff Garzik
4042c6fd2807SJeff Garzik /* are we suspending? */
4043c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
4044c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4045a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) {
4046c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
4047c6fd2807SJeff Garzik return;
4048c6fd2807SJeff Garzik }
4049c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
4050c6fd2807SJeff Garzik
4051c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4052c6fd2807SJeff Garzik
405331c62224SNiklas Cassel /*
405431c62224SNiklas Cassel * We will reach this point for all of the PM events:
405531c62224SNiklas Cassel * PM_EVENT_SUSPEND (if runtime PM, PM_EVENT_AUTO will also be set),
405631c62224SNiklas Cassel * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
405731c62224SNiklas Cassel *
405831c62224SNiklas Cassel * We do not want to perform disk spin down for PM_EVENT_FREEZE.
405931c62224SNiklas Cassel * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
406031c62224SNiklas Cassel */
406131c62224SNiklas Cassel if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
4062aa3998dbSDamien Le Moal /* Put all devices attached to the port into standby mode */
4063aa3998dbSDamien Le Moal ata_for_each_link(link, ap, HOST_FIRST) {
4064aa3998dbSDamien Le Moal ata_for_each_dev(dev, link, ENABLED)
4065aa3998dbSDamien Le Moal ata_dev_power_set_standby(dev);
4066aa3998dbSDamien Le Moal }
406731c62224SNiklas Cassel }
4068aa3998dbSDamien Le Moal
40693dc67440SAaron Lu /*
40703dc67440SAaron Lu * If we have a ZPODD attached, check its zero
40713dc67440SAaron Lu * power ready status before the port is frozen.
4072a7ff60dbSAaron Lu * Only needed for runtime suspend.
40733dc67440SAaron Lu */
4074a7ff60dbSAaron Lu if (PMSG_IS_AUTO(ap->pm_mesg)) {
40753dc67440SAaron Lu ata_for_each_dev(dev, &ap->link, ENABLED) {
40763dc67440SAaron Lu if (zpodd_dev_enabled(dev))
40773dc67440SAaron Lu zpodd_on_suspend(dev);
40783dc67440SAaron Lu }
4079a7ff60dbSAaron Lu }
40803dc67440SAaron Lu
4081c6fd2807SJeff Garzik /* suspend */
4082c6fd2807SJeff Garzik ata_eh_freeze_port(ap);
4083c6fd2807SJeff Garzik
4084c6fd2807SJeff Garzik if (ap->ops->port_suspend)
4085c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4086c6fd2807SJeff Garzik
4087a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg);
40882a7b02eaSSergey Shtylyov
4089bc6e7c4bSDan Williams /* update the flags */
4090c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
4091c6fd2807SJeff Garzik
4092c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4093c6fd2807SJeff Garzik if (rc == 0)
4094c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED;
40954cb7c6f1SNiklas Cassel else if (ata_port_is_frozen(ap))
4096c6fd2807SJeff Garzik ata_port_schedule_eh(ap);
4097c6fd2807SJeff Garzik
4098c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
4099c6fd2807SJeff Garzik
4100c6fd2807SJeff Garzik return;
4101c6fd2807SJeff Garzik }
4102c6fd2807SJeff Garzik
4103c6fd2807SJeff Garzik /**
4104c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation
4105c6fd2807SJeff Garzik * @ap: port to resume
4106c6fd2807SJeff Garzik *
4107c6fd2807SJeff Garzik * Resume @ap.
4108c6fd2807SJeff Garzik *
4109c6fd2807SJeff Garzik * LOCKING:
4110c6fd2807SJeff Garzik * Kernel thread context (may sleep).
4111c6fd2807SJeff Garzik */
4112c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
4113c6fd2807SJeff Garzik {
41146f9c1ea2STejun Heo struct ata_link *link;
41156f9c1ea2STejun Heo struct ata_device *dev;
4116c6fd2807SJeff Garzik unsigned long flags;
4117c6fd2807SJeff Garzik
4118c6fd2807SJeff Garzik /* are we resuming? */
4119c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
4120c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4121a7ff60dbSAaron Lu !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4122c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
4123c6fd2807SJeff Garzik return;
4124c6fd2807SJeff Garzik }
4125c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
4126c6fd2807SJeff Garzik
41279666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4128c6fd2807SJeff Garzik
41296f9c1ea2STejun Heo /*
41306f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while
41316f9c1ea2STejun Heo * suspended, and PHY events during resume aren't too uncommon.
41326f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed
41336f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly.
41346f9c1ea2STejun Heo * Clear error history.
41356f9c1ea2STejun Heo */
41366f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST)
41376f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL)
41386f9c1ea2STejun Heo ata_ering_clear(&dev->ering);
41396f9c1ea2STejun Heo
4140a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg);
4141bd3adca5SShaohua Li
4142c6fd2807SJeff Garzik if (ap->ops->port_resume)
4143ae867937SKefeng Wang ap->ops->port_resume(ap);
4144c6fd2807SJeff Garzik
41456746544cSTejun Heo /* tell ACPI that we're resuming */
41466746544cSTejun Heo ata_acpi_on_resume(ap);
41476746544cSTejun Heo
4148bc6e7c4bSDan Williams /* update the flags */
4149c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
4150c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4151aa3998dbSDamien Le Moal ap->pflags |= ATA_PFLAG_RESUMING;
4152c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
4153c6fd2807SJeff Garzik }
41546ffa01d8STejun Heo #endif /* CONFIG_PM */