/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

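/*
 * Worked example of the escalation above: if an ATA_CMD_ID_ATA issued by
 * EH times out, ata_internal_cmd_timed_out() advances cmd_timeout_idx for
 * the identify class on that device, so the retry picks the next entry of
 * ata_eh_identify_timeouts - 5s on the first try, then 10s, then 30s, and
 * finally ULONG_MAX.
 */
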
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}

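/*
 * As used by ata_scsi_port_error_handler() below: EH ownership is taken
 * with ata_eh_acquire() before the recovery loop and dropped with
 * ata_eh_release() once recovery is finished, so at most one port per
 * host runs EH at any given time.
 */
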
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we raced normal completion and there is nothing to
	   recover (nr_timedout == 0), why exactly are we doing error
	   recovery here? */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

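/*
 * Fast drain at a glance: ata_eh_set_pending() above snapshots the number
 * of in-flight qcs in ap->fastdrain_cnt and arms fastdrain_timer.  When
 * the timer fires, ata_eh_fastdrain_timerfn() compares the current count
 * against that snapshot; if nothing completed during the interval it tags
 * the remaining qcs with AC_ERR_TIMEOUT and freezes the port, otherwise
 * it re-arms the timer with the new count for another
 * ATA_EH_FASTDRAIN_INTERVAL.
 */
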
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
To be used from EH. 1272c6fd2807SJeff Garzik */ 1273c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1274c6fd2807SJeff Garzik { 1275c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1276c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1277c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1278c6fd2807SJeff Garzik } 1279c6fd2807SJeff Garzik 1280c6fd2807SJeff Garzik /** 1281c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1282c6fd2807SJeff Garzik * @qc: Command to retry 1283c6fd2807SJeff Garzik * 1284c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1285c6fd2807SJeff Garzik * should be retried. To be used from EH. 1286c6fd2807SJeff Garzik * 1287c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1288c6fd2807SJeff Garzik * scmd->retries is decremented for commands which get retried 1289c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1290c6fd2807SJeff Garzik */ 1291c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1292c6fd2807SJeff Garzik { 1293c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1294c6fd2807SJeff Garzik if (!qc->err_mask && scmd->retries) 1295c6fd2807SJeff Garzik scmd->retries--; 1296c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1297c6fd2807SJeff Garzik } 1298c6fd2807SJeff Garzik 1299c6fd2807SJeff Garzik /** 1300678afac6STejun Heo * ata_dev_disable - disable ATA device 1301678afac6STejun Heo * @dev: ATA device to disable 1302678afac6STejun Heo * 1303678afac6STejun Heo * Disable @dev. 1304678afac6STejun Heo * 1305678afac6STejun Heo * Locking: 1306678afac6STejun Heo * EH context. 1307678afac6STejun Heo */ 1308678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1309678afac6STejun Heo { 1310678afac6STejun Heo if (!ata_dev_enabled(dev)) 1311678afac6STejun Heo return; 1312678afac6STejun Heo 1313678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1314a9a79dfeSJoe Perches ata_dev_warn(dev, "disabled\n"); 1315678afac6STejun Heo ata_acpi_on_disable(dev); 1316678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1317678afac6STejun Heo dev->class++; 131899cf610aSTejun Heo 131999cf610aSTejun Heo /* From now till the next successful probe, ering is used to 132099cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 132199cf610aSTejun Heo */ 132299cf610aSTejun Heo ata_ering_clear(&dev->ering); 1323678afac6STejun Heo } 1324678afac6STejun Heo 1325678afac6STejun Heo /** 1326c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1327c6fd2807SJeff Garzik * @dev: ATA device to detach 1328c6fd2807SJeff Garzik * 1329c6fd2807SJeff Garzik * Detach @dev. 1330c6fd2807SJeff Garzik * 1331c6fd2807SJeff Garzik * LOCKING: 1332c6fd2807SJeff Garzik * None. 
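 *
 * The body below first disables @dev; if the corresponding SCSI device
 * can be taken offline, the device is marked ATA_DFLAG_DETACHED and
 * ATA_PFLAG_SCSI_HOTPLUG is set so that the SCSI hotplug task unplugs
 * the scsi_device outside of EH.  Per-dev EH actions and the saved
 * xfer mode / NCQ state are cleared as well.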
1333c6fd2807SJeff Garzik */ 1334fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1335c6fd2807SJeff Garzik { 1336f58229f8STejun Heo struct ata_link *link = dev->link; 1337f58229f8STejun Heo struct ata_port *ap = link->ap; 133890484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1339c6fd2807SJeff Garzik unsigned long flags; 1340c6fd2807SJeff Garzik 1341c6fd2807SJeff Garzik ata_dev_disable(dev); 1342c6fd2807SJeff Garzik 1343c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1344c6fd2807SJeff Garzik 1345c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1346c6fd2807SJeff Garzik 1347c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1348c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1349c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1350c6fd2807SJeff Garzik } 1351c6fd2807SJeff Garzik 135290484ebfSTejun Heo /* clear per-dev EH info */ 1353f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1354f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 135590484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 135690484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1357c6fd2807SJeff Garzik 1358c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1359c6fd2807SJeff Garzik } 1360c6fd2807SJeff Garzik 1361c6fd2807SJeff Garzik /** 1362c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1363955e57dfSTejun Heo * @link: target ATA link 1364c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1365c6fd2807SJeff Garzik * @action: action about to be performed 1366c6fd2807SJeff Garzik * 1367c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1368955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1369955e57dfSTejun Heo * repeated. 1370c6fd2807SJeff Garzik * 1371c6fd2807SJeff Garzik * LOCKING: 1372c6fd2807SJeff Garzik * None. 1373c6fd2807SJeff Garzik */ 1374fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1375c6fd2807SJeff Garzik unsigned int action) 1376c6fd2807SJeff Garzik { 1377955e57dfSTejun Heo struct ata_port *ap = link->ap; 1378955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1379955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1380c6fd2807SJeff Garzik unsigned long flags; 1381c6fd2807SJeff Garzik 1382c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1383c6fd2807SJeff Garzik 1384955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1385c6fd2807SJeff Garzik 1386a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1387a568d1d2STejun Heo * slave links as master will do them again. 1388a568d1d2STejun Heo */ 1389a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1390c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1391c6fd2807SJeff Garzik 1392c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1393c6fd2807SJeff Garzik } 1394c6fd2807SJeff Garzik 1395c6fd2807SJeff Garzik /** 1396c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1397c6fd2807SJeff Garzik * @link: target ATA link 1398c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1399c6fd2807SJeff Garzik * @action: action just completed 1400c6fd2807SJeff Garzik * 1401c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1402955e57dfSTejun Heo * in @link->eh_context.
1403c6fd2807SJeff Garzik * 1404c6fd2807SJeff Garzik * LOCKING: 1405c6fd2807SJeff Garzik * None. 1406c6fd2807SJeff Garzik */ 1407fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1408c6fd2807SJeff Garzik unsigned int action) 1409c6fd2807SJeff Garzik { 1410955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 14119af5c9c9STejun Heo 1412955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1413c6fd2807SJeff Garzik } 1414c6fd2807SJeff Garzik 1415c6fd2807SJeff Garzik /** 1416c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1417c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1418c6fd2807SJeff Garzik * 1419c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1420c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1421c6fd2807SJeff Garzik * error is reported. 1422c6fd2807SJeff Garzik * 1423c6fd2807SJeff Garzik * LOCKING: 1424c6fd2807SJeff Garzik * None. 1425c6fd2807SJeff Garzik * 1426c6fd2807SJeff Garzik * RETURNS: 1427c6fd2807SJeff Garzik * Descriptive string for @err_mask 1428c6fd2807SJeff Garzik */ 1429c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1430c6fd2807SJeff Garzik { 1431c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1432c6fd2807SJeff Garzik return "host bus error"; 1433c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1434c6fd2807SJeff Garzik return "ATA bus error"; 1435c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1436c6fd2807SJeff Garzik return "timeout"; 1437c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1438c6fd2807SJeff Garzik return "HSM violation"; 1439c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1440c6fd2807SJeff Garzik return "internal error"; 1441c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1442c6fd2807SJeff Garzik return "media error"; 1443c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1444c6fd2807SJeff Garzik return "invalid argument"; 1445c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1446c6fd2807SJeff Garzik return "device error"; 1447c6fd2807SJeff Garzik return "unknown error"; 1448c6fd2807SJeff Garzik } 1449c6fd2807SJeff Garzik 1450c6fd2807SJeff Garzik /** 1451c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1452c6fd2807SJeff Garzik * @dev: target device 1453c6fd2807SJeff Garzik * @page: page to read 1454c6fd2807SJeff Garzik * @buf: buffer to store read page 1455c6fd2807SJeff Garzik * @sectors: number of sectors to read 1456c6fd2807SJeff Garzik * 1457c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1458c6fd2807SJeff Garzik * 1459c6fd2807SJeff Garzik * LOCKING: 1460c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1461c6fd2807SJeff Garzik * 1462c6fd2807SJeff Garzik * RETURNS: 1463c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
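 *
 * A minimal usage sketch (mirroring ata_eh_read_log_10h() below, which
 * reads the NCQ error log into the port's sector buffer):
 *
 *	u8 *buf = dev->link->ap->sector_buf;
 *
 *	if (ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1))
 *		return -EIO;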
1464c6fd2807SJeff Garzik */ 1465c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1466c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1467c6fd2807SJeff Garzik { 1468c6fd2807SJeff Garzik struct ata_taskfile tf; 1469c6fd2807SJeff Garzik unsigned int err_mask; 1470c6fd2807SJeff Garzik 1471c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1472c6fd2807SJeff Garzik 1473c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1474c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1475c6fd2807SJeff Garzik tf.lbal = page; 1476c6fd2807SJeff Garzik tf.nsect = sectors; 1477c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1478c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1479c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1480c6fd2807SJeff Garzik 1481c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 14822b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1483c6fd2807SJeff Garzik 1484c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1485c6fd2807SJeff Garzik return err_mask; 1486c6fd2807SJeff Garzik } 1487c6fd2807SJeff Garzik 1488c6fd2807SJeff Garzik /** 1489c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1490c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1491c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1492c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1493c6fd2807SJeff Garzik * 1494c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1495c6fd2807SJeff Garzik * condition. 1496c6fd2807SJeff Garzik * 1497c6fd2807SJeff Garzik * LOCKING: 1498c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1499c6fd2807SJeff Garzik * 1500c6fd2807SJeff Garzik * RETURNS: 1501c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
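 *
 * The body below decodes the first sector of the log page: if bit 7 of
 * byte 0 is set the error is not an NCQ error and -ENOENT is returned;
 * otherwise bits 4:0 of byte 0 give the failed tag and bytes 2-13 the
 * shadow taskfile registers of the failed command.  A bad byte-wise
 * checksum only triggers a warning, the data is still used.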
1502c6fd2807SJeff Garzik */ 1503c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1504c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1505c6fd2807SJeff Garzik { 15069af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1507c6fd2807SJeff Garzik unsigned int err_mask; 1508c6fd2807SJeff Garzik u8 csum; 1509c6fd2807SJeff Garzik int i; 1510c6fd2807SJeff Garzik 1511c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1512c6fd2807SJeff Garzik if (err_mask) 1513c6fd2807SJeff Garzik return -EIO; 1514c6fd2807SJeff Garzik 1515c6fd2807SJeff Garzik csum = 0; 1516c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1517c6fd2807SJeff Garzik csum += buf[i]; 1518c6fd2807SJeff Garzik if (csum) 1519a9a79dfeSJoe Perches ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", 1520a9a79dfeSJoe Perches csum); 1521c6fd2807SJeff Garzik 1522c6fd2807SJeff Garzik if (buf[0] & 0x80) 1523c6fd2807SJeff Garzik return -ENOENT; 1524c6fd2807SJeff Garzik 1525c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1526c6fd2807SJeff Garzik 1527c6fd2807SJeff Garzik tf->command = buf[2]; 1528c6fd2807SJeff Garzik tf->feature = buf[3]; 1529c6fd2807SJeff Garzik tf->lbal = buf[4]; 1530c6fd2807SJeff Garzik tf->lbam = buf[5]; 1531c6fd2807SJeff Garzik tf->lbah = buf[6]; 1532c6fd2807SJeff Garzik tf->device = buf[7]; 1533c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1534c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1535c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1536c6fd2807SJeff Garzik tf->nsect = buf[12]; 1537c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1538c6fd2807SJeff Garzik 1539c6fd2807SJeff Garzik return 0; 1540c6fd2807SJeff Garzik } 1541c6fd2807SJeff Garzik 1542c6fd2807SJeff Garzik /** 154311fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 154411fc33daSTejun Heo * @dev: target ATAPI device 154511fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 154611fc33daSTejun Heo * 154711fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 154811fc33daSTejun Heo * 154911fc33daSTejun Heo * LOCKING: 155011fc33daSTejun Heo * EH context (may sleep). 155111fc33daSTejun Heo * 155211fc33daSTejun Heo * RETURNS: 155311fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
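 *
 * A hedged usage sketch (the caller is illustrative, not part of this
 * file): EH code can poll TEST_UNIT_READY a bounded number of times to
 * let UNIT ATTENTION conditions drain after a reset, e.g.
 *
 *	unsigned int err_mask;
 *	u8 sense_key = 0;
 *	int i;
 *
 *	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
 *		err_mask = atapi_eh_tur(dev, &sense_key);
 *		if (err_mask != AC_ERR_DEV || sense_key != UNIT_ATTENTION)
 *			break;
 *	}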
155411fc33daSTejun Heo */ 155511fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 155611fc33daSTejun Heo { 155711fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 155811fc33daSTejun Heo struct ata_taskfile tf; 155911fc33daSTejun Heo unsigned int err_mask; 156011fc33daSTejun Heo 156111fc33daSTejun Heo ata_tf_init(dev, &tf); 156211fc33daSTejun Heo 156311fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 156411fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 156511fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 156611fc33daSTejun Heo 156711fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 156811fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 156911fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 157011fc33daSTejun Heo return err_mask; 157111fc33daSTejun Heo } 157211fc33daSTejun Heo 157311fc33daSTejun Heo /** 1574c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1575c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1576c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 15773eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1578c6fd2807SJeff Garzik * 1579c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1580c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1581c6fd2807SJeff Garzik * 1582c6fd2807SJeff Garzik * LOCKING: 1583c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1584c6fd2807SJeff Garzik * 1585c6fd2807SJeff Garzik * RETURNS: 1586c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1587c6fd2807SJeff Garzik */ 15883eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 15893eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1590c6fd2807SJeff Garzik { 15913eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 15923eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 15939af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1594c6fd2807SJeff Garzik struct ata_taskfile tf; 1595c6fd2807SJeff Garzik 1596c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1597c6fd2807SJeff Garzik 1598c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1599c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1600c6fd2807SJeff Garzik 160156287768SAlbert Lee /* initialize sense_buf with the error register, 160256287768SAlbert Lee * for the case where they are -not- overwritten 160356287768SAlbert Lee */ 1604c6fd2807SJeff Garzik sense_buf[0] = 0x70; 16053eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 160656287768SAlbert Lee 160756287768SAlbert Lee /* some devices time out if garbage left in tf */ 160856287768SAlbert Lee ata_tf_init(dev, &tf); 1609c6fd2807SJeff Garzik 1610c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1611c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1612c6fd2807SJeff Garzik 1613c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1614c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 16150dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1616c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1617c6fd2807SJeff Garzik } else { 16180dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1619f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1620f2dfc1a1STejun Heo tf.lbah = 0; 1621c6fd2807SJeff Garzik } 1622c6fd2807SJeff Garzik 1623c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 16242b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1625c6fd2807SJeff Garzik } 1626c6fd2807SJeff Garzik 1627c6fd2807SJeff Garzik /** 1628c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 16290260731fSTejun Heo * @link: ATA link to analyze SError for 1630c6fd2807SJeff Garzik * 1631c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1632c6fd2807SJeff Garzik * failure. 1633c6fd2807SJeff Garzik * 1634c6fd2807SJeff Garzik * LOCKING: 1635c6fd2807SJeff Garzik * None. 1636c6fd2807SJeff Garzik */ 16370260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1638c6fd2807SJeff Garzik { 16390260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1640c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1641c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1642f9df58cbSTejun Heo u32 hotplug_mask; 1643c6fd2807SJeff Garzik 1644e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1645c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1646cf480626STejun Heo action |= ATA_EH_RESET; 1647c6fd2807SJeff Garzik } 1648c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1649c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1650cf480626STejun Heo action |= ATA_EH_RESET; 1651c6fd2807SJeff Garzik } 1652c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1653c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1654cf480626STejun Heo action |= ATA_EH_RESET; 1655c6fd2807SJeff Garzik } 1656f9df58cbSTejun Heo 1657f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1658f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1659f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1660f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1661f9df58cbSTejun Heo */ 1662eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 16636b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 16646b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1665f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1666f9df58cbSTejun Heo else 1667f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1668f9df58cbSTejun Heo 1669f9df58cbSTejun Heo if (serror & hotplug_mask) 1670c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1671c6fd2807SJeff Garzik 1672c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1673c6fd2807SJeff Garzik ehc->i.action |= action; 1674c6fd2807SJeff Garzik } 1675c6fd2807SJeff Garzik 1676c6fd2807SJeff Garzik /** 1677c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 16780260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1679c6fd2807SJeff Garzik * 1680c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1681c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1682c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1683c6fd2807SJeff Garzik * care of the rest. 
1684c6fd2807SJeff Garzik * 1685c6fd2807SJeff Garzik * LOCKING: 1686c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1687c6fd2807SJeff Garzik */ 168810acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1689c6fd2807SJeff Garzik { 16900260731fSTejun Heo struct ata_port *ap = link->ap; 16910260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 16920260731fSTejun Heo struct ata_device *dev = link->device; 1693c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1694c6fd2807SJeff Garzik struct ata_taskfile tf; 1695c6fd2807SJeff Garzik int tag, rc; 1696c6fd2807SJeff Garzik 1697c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1698c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1699c6fd2807SJeff Garzik return; 1700c6fd2807SJeff Garzik 1701c6fd2807SJeff Garzik /* is it NCQ device error? */ 17020260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1703c6fd2807SJeff Garzik return; 1704c6fd2807SJeff Garzik 1705c6fd2807SJeff Garzik /* has LLDD analyzed already? */ 1706c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1707c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1708c6fd2807SJeff Garzik 1709c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1710c6fd2807SJeff Garzik continue; 1711c6fd2807SJeff Garzik 1712c6fd2807SJeff Garzik if (qc->err_mask) 1713c6fd2807SJeff Garzik return; 1714c6fd2807SJeff Garzik } 1715c6fd2807SJeff Garzik 1716c6fd2807SJeff Garzik /* okay, this error is ours */ 1717a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf)); 1718c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1719c6fd2807SJeff Garzik if (rc) { 1720a9a79dfeSJoe Perches ata_link_err(link, "failed to read log page 10h (errno=%d)\n", 1721a9a79dfeSJoe Perches rc); 1722c6fd2807SJeff Garzik return; 1723c6fd2807SJeff Garzik } 1724c6fd2807SJeff Garzik 17250260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 1726a9a79dfeSJoe Perches ata_link_err(link, "log page 10h reported inactive tag %d\n", 1727a9a79dfeSJoe Perches tag); 1728c6fd2807SJeff Garzik return; 1729c6fd2807SJeff Garzik } 1730c6fd2807SJeff Garzik 1731c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1732c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1733c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1734a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 17355335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1736c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1737c6fd2807SJeff Garzik } 1738c6fd2807SJeff Garzik 1739c6fd2807SJeff Garzik /** 1740c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1741c6fd2807SJeff Garzik * @qc: qc to analyze 1742c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1743c6fd2807SJeff Garzik * 1744c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1745c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 174625985edcSLucas De Marchi * available. 1747c6fd2807SJeff Garzik * 1748c6fd2807SJeff Garzik * LOCKING: 1749c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1750c6fd2807SJeff Garzik * 1751c6fd2807SJeff Garzik * RETURNS: 1752c6fd2807SJeff Garzik * Determined recovery action 1753c6fd2807SJeff Garzik */ 1754c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1755c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1756c6fd2807SJeff Garzik { 1757c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1758c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1759c6fd2807SJeff Garzik 1760c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1761c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1762cf480626STejun Heo return ATA_EH_RESET; 1763c6fd2807SJeff Garzik } 1764c6fd2807SJeff Garzik 1765a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1766a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1767a51d644aSTejun Heo else 1768c6fd2807SJeff Garzik return 0; 1769c6fd2807SJeff Garzik 1770c6fd2807SJeff Garzik switch (qc->dev->class) { 1771c6fd2807SJeff Garzik case ATA_DEV_ATA: 1772c6fd2807SJeff Garzik if (err & ATA_ICRC) 1773c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1774c6fd2807SJeff Garzik if (err & ATA_UNC) 1775c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1776c6fd2807SJeff Garzik if (err & ATA_IDNF) 1777c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1778c6fd2807SJeff Garzik break; 1779c6fd2807SJeff Garzik 1780c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1781a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 17823eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 17833eabddb8STejun Heo qc->scsicmd->sense_buffer, 17843eabddb8STejun Heo qc->result_tf.feature >> 4); 1785c6fd2807SJeff Garzik if (!tmp) { 1786a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1787a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1788a569a30dSTejun Heo * data is already valid. 1789c6fd2807SJeff Garzik * 1790c6fd2807SJeff Garzik * TODO: interpret sense data and set 1791c6fd2807SJeff Garzik * appropriate err_mask. 
1792c6fd2807SJeff Garzik */ 1793c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1794c6fd2807SJeff Garzik } else 1795c6fd2807SJeff Garzik qc->err_mask |= tmp; 1796c6fd2807SJeff Garzik } 1797a569a30dSTejun Heo } 1798c6fd2807SJeff Garzik 1799c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1800cf480626STejun Heo action |= ATA_EH_RESET; 1801c6fd2807SJeff Garzik 1802c6fd2807SJeff Garzik return action; 1803c6fd2807SJeff Garzik } 1804c6fd2807SJeff Garzik 180576326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 180676326ac1STejun Heo int *xfer_ok) 1807c6fd2807SJeff Garzik { 180876326ac1STejun Heo int base = 0; 180976326ac1STejun Heo 181076326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 181176326ac1STejun Heo *xfer_ok = 1; 181276326ac1STejun Heo 181376326ac1STejun Heo if (!*xfer_ok) 181475f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 181576326ac1STejun Heo 18167d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 181776326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1818c6fd2807SJeff Garzik 18197d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 182076326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 18217d47e8d4STejun Heo 18223884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 18237d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 182476326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 18257d47e8d4STejun Heo if ((err_mask & 18267d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 182776326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1828c6fd2807SJeff Garzik } 1829c6fd2807SJeff Garzik 1830c6fd2807SJeff Garzik return 0; 1831c6fd2807SJeff Garzik } 1832c6fd2807SJeff Garzik 18337d47e8d4STejun Heo struct speed_down_verdict_arg { 1834c6fd2807SJeff Garzik u64 since; 183576326ac1STejun Heo int xfer_ok; 18363884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1837c6fd2807SJeff Garzik }; 1838c6fd2807SJeff Garzik 18397d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1840c6fd2807SJeff Garzik { 18417d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 184276326ac1STejun Heo int cat; 1843c6fd2807SJeff Garzik 1844d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1845c6fd2807SJeff Garzik return -1; 1846c6fd2807SJeff Garzik 184776326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 184876326ac1STejun Heo &arg->xfer_ok); 18497d47e8d4STejun Heo arg->nr_errors[cat]++; 185076326ac1STejun Heo 1851c6fd2807SJeff Garzik return 0; 1852c6fd2807SJeff Garzik } 1853c6fd2807SJeff Garzik 1854c6fd2807SJeff Garzik /** 18557d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1856c6fd2807SJeff Garzik * @dev: Device of interest 1857c6fd2807SJeff Garzik * 1858c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 18597d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 18607d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
1861c6fd2807SJeff Garzik * 18623884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1863c6fd2807SJeff Garzik * 18643884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 18653884f7b0STejun Heo * IO commands 18667d47e8d4STejun Heo * 18673884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1868c6fd2807SJeff Garzik * 186976326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 187076326ac1STejun Heo * data transfer hasn't been verified. 187176326ac1STejun Heo * 18723884f7b0STejun Heo * Verdicts are 18737d47e8d4STejun Heo * 18743884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 18757d47e8d4STejun Heo * 18763884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 18773884f7b0STejun Heo * to PIO. 18783884f7b0STejun Heo * 18793884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 18803884f7b0STejun Heo * 18813884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 188276326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 188376326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 188476326ac1STejun Heo * This is to expedite speed down decisions right after device is 188576326ac1STejun Heo * initially configured. 18863884f7b0STejun Heo * 188776326ac1STejun Heo * The following are the speed down rules. #1 and #2 deal with 188876326ac1STejun Heo * DUBIOUS errors. 188976326ac1STejun Heo * 189076326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 189176326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 189276326ac1STejun Heo * 189376326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 189476326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 189576326ac1STejun Heo * 189676326ac1STejun Heo * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors 189725985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO. 18983884f7b0STejun Heo * 189976326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 19003884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 19013884f7b0STejun Heo * 190276326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 19033884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 19047d47e8d4STejun Heo * 1905c6fd2807SJeff Garzik * LOCKING: 1906c6fd2807SJeff Garzik * Inherited from caller. 1907c6fd2807SJeff Garzik * 1908c6fd2807SJeff Garzik * RETURNS: 19097d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
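 *
 * For example (illustrative numbers): two DUBIOUS_ATA_BUS errors
 * within the last 5 mins trip rule #1 and yield SPEED_DOWN and
 * FALLBACK_TO_PIO with the error ring preserved, while four UNK_DEV
 * errors spread over the last 10 mins trip only rule #4 and yield
 * NCQ_OFF.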
1910c6fd2807SJeff Garzik */ 19117d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1912c6fd2807SJeff Garzik { 19137d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 19147d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 19157d47e8d4STejun Heo struct speed_down_verdict_arg arg; 19167d47e8d4STejun Heo unsigned int verdict = 0; 1917c6fd2807SJeff Garzik 19183884f7b0STejun Heo /* scan past 5 mins of error history */ 19193884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 19203884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 19213884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 19223884f7b0STejun Heo 192376326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 192476326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 192576326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 192676326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 192776326ac1STejun Heo 192876326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 192976326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 193076326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 193176326ac1STejun Heo 19323884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 19333884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1934663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 19353884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 19363884f7b0STejun Heo 19377d47e8d4STejun Heo /* scan past 10 mins of error history */ 1938c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 19397d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 19407d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1941c6fd2807SJeff Garzik 19423884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 19433884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 19447d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 19453884f7b0STejun Heo 19463884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 19473884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1948663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 19497d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1950c6fd2807SJeff Garzik 19517d47e8d4STejun Heo return verdict; 1952c6fd2807SJeff Garzik } 1953c6fd2807SJeff Garzik 1954c6fd2807SJeff Garzik /** 1955c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1956c6fd2807SJeff Garzik * @dev: Failed device 19573884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1958c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1959c6fd2807SJeff Garzik * 1960c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1961c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1962c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1963c6fd2807SJeff Garzik * necessary. 1964c6fd2807SJeff Garzik * 1965c6fd2807SJeff Garzik * LOCKING: 1966c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1967c6fd2807SJeff Garzik * 1968c6fd2807SJeff Garzik * RETURNS: 19697d47e8d4STejun Heo * Determined recovery action. 
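 *
 * The result is meant to be ORed into the pending EH actions by the
 * caller, as ata_eh_link_autopsy() does further down:
 *
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);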
1970c6fd2807SJeff Garzik */ 19713884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 19723884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1973c6fd2807SJeff Garzik { 1974b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 197576326ac1STejun Heo int xfer_ok = 0; 19767d47e8d4STejun Heo unsigned int verdict; 19777d47e8d4STejun Heo unsigned int action = 0; 19787d47e8d4STejun Heo 19797d47e8d4STejun Heo /* don't bother if Cat-0 error */ 198076326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1981c6fd2807SJeff Garzik return 0; 1982c6fd2807SJeff Garzik 1983c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 19843884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 19857d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1986c6fd2807SJeff Garzik 19877d47e8d4STejun Heo /* turn off NCQ? */ 19887d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 19897d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 19907d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 19917d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 1992a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 19937d47e8d4STejun Heo goto done; 19947d47e8d4STejun Heo } 1995c6fd2807SJeff Garzik 19967d47e8d4STejun Heo /* speed down? */ 19977d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1998c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1999a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 2000cf480626STejun Heo action |= ATA_EH_RESET; 20017d47e8d4STejun Heo goto done; 20027d47e8d4STejun Heo } 2003c6fd2807SJeff Garzik 2004c6fd2807SJeff Garzik /* lower transfer mode */ 20057d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 20067d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 20077d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 20087d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 20097d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 20107d47e8d4STejun Heo int sel; 2011c6fd2807SJeff Garzik 20127d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 20137d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 20147d47e8d4STejun Heo else 20157d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 20167d47e8d4STejun Heo 20177d47e8d4STejun Heo dev->spdn_cnt++; 20187d47e8d4STejun Heo 20197d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 2020cf480626STejun Heo action |= ATA_EH_RESET; 20217d47e8d4STejun Heo goto done; 20227d47e8d4STejun Heo } 20237d47e8d4STejun Heo } 20247d47e8d4STejun Heo } 20257d47e8d4STejun Heo 20267d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 2027663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
20287d47e8d4STejun Heo */ 20297d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 2030663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 20317d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 20327d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 20337d47e8d4STejun Heo dev->spdn_cnt = 0; 2034cf480626STejun Heo action |= ATA_EH_RESET; 20357d47e8d4STejun Heo goto done; 20367d47e8d4STejun Heo } 20377d47e8d4STejun Heo } 20387d47e8d4STejun Heo 2039c6fd2807SJeff Garzik return 0; 20407d47e8d4STejun Heo done: 20417d47e8d4STejun Heo /* device has been slowed down, blow error history */ 204276326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 20437d47e8d4STejun Heo ata_ering_clear(&dev->ering); 20447d47e8d4STejun Heo return action; 2045c6fd2807SJeff Garzik } 2046c6fd2807SJeff Garzik 2047c6fd2807SJeff Garzik /** 20489b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 20499b1e2658STejun Heo * @link: host link to perform autopsy on 2050c6fd2807SJeff Garzik * 20510260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 20520260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 20530260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 2054c6fd2807SJeff Garzik * 2055c6fd2807SJeff Garzik * LOCKING: 2056c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2057c6fd2807SJeff Garzik */ 20589b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 2059c6fd2807SJeff Garzik { 20600260731fSTejun Heo struct ata_port *ap = link->ap; 2061936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2062dfcc173dSTejun Heo struct ata_device *dev; 20633884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 20643884f7b0STejun Heo int tag; 2065c6fd2807SJeff Garzik u32 serror; 2066c6fd2807SJeff Garzik int rc; 2067c6fd2807SJeff Garzik 2068c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2069c6fd2807SJeff Garzik 2070c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2071c6fd2807SJeff Garzik return; 2072c6fd2807SJeff Garzik 2073c6fd2807SJeff Garzik /* obtain and analyze SError */ 2074936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2075c6fd2807SJeff Garzik if (rc == 0) { 2076c6fd2807SJeff Garzik ehc->i.serror |= serror; 20770260731fSTejun Heo ata_eh_analyze_serror(link); 20784e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2079cf480626STejun Heo /* SError read failed, force reset and probing */ 2080b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2081cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20824e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 20834e57c517STejun Heo } 2084c6fd2807SJeff Garzik 2085c6fd2807SJeff Garzik /* analyze NCQ failure */ 20860260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2087c6fd2807SJeff Garzik 2088c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2089c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2090c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2091c6fd2807SJeff Garzik 2092c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2093c6fd2807SJeff Garzik 2094c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2095c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2096c6fd2807SJeff Garzik 2097b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2098b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2099c6fd2807SJeff Garzik continue; 
2100c6fd2807SJeff Garzik 2101c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2102c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2103c6fd2807SJeff Garzik 2104c6fd2807SJeff Garzik /* analyze TF */ 2105c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2106c6fd2807SJeff Garzik 2107c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2108c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2109c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2110c6fd2807SJeff Garzik AC_ERR_INVALID); 2111c6fd2807SJeff Garzik 2112c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2113c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2114c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2115c6fd2807SJeff Garzik 2116c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2117f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2118c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2119c6fd2807SJeff Garzik 212003faab78STejun Heo /* determine whether the command is worth retrying */ 2121534ead70STejun Heo if (qc->flags & ATA_QCFLAG_IO || 2122534ead70STejun Heo (!(qc->err_mask & AC_ERR_INVALID) && 2123534ead70STejun Heo qc->err_mask != AC_ERR_DEV)) 212403faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 212503faab78STejun Heo 2126c6fd2807SJeff Garzik /* accumulate error info */ 2127c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2128c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2129c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 21303884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2131c6fd2807SJeff Garzik } 2132c6fd2807SJeff Garzik 2133c6fd2807SJeff Garzik /* enforce default EH actions */ 2134c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2135c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2136cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 21373884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 21383884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2139c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2140c6fd2807SJeff Garzik 2141dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2142dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
2143dfcc173dSTejun Heo */ 2144c6fd2807SJeff Garzik if (ehc->i.dev) { 2145c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2146c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2147c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2148c6fd2807SJeff Garzik } 2149c6fd2807SJeff Garzik 21502695e366STejun Heo /* propagate timeout to host link */ 21512695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 21522695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 21532695e366STejun Heo 21542695e366STejun Heo /* record error and consider speeding down */ 2155dfcc173dSTejun Heo dev = ehc->i.dev; 21562695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 21572695e366STejun Heo ata_dev_enabled(link->device)))) 2158dfcc173dSTejun Heo dev = link->device; 2159dfcc173dSTejun Heo 216076326ac1STejun Heo if (dev) { 216176326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 216276326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 21633884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 216476326ac1STejun Heo } 2165dfcc173dSTejun Heo 2166c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2167c6fd2807SJeff Garzik } 2168c6fd2807SJeff Garzik 2169c6fd2807SJeff Garzik /** 21709b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 21719b1e2658STejun Heo * @ap: host port to perform autopsy on 21729b1e2658STejun Heo * 21739b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 21749b1e2658STejun Heo * which recovery actions are needed. 21759b1e2658STejun Heo * 21769b1e2658STejun Heo * LOCKING: 21779b1e2658STejun Heo * Kernel thread context (may sleep). 21789b1e2658STejun Heo */ 2179fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 21809b1e2658STejun Heo { 21819b1e2658STejun Heo struct ata_link *link; 21829b1e2658STejun Heo 21831eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 21849b1e2658STejun Heo ata_eh_link_autopsy(link); 21852695e366STejun Heo 2186b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2187b1c72916STejun Heo * but actions and flags are transferred over to the master 2188b1c72916STejun Heo * link and handled from there. 2189b1c72916STejun Heo */ 2190b1c72916STejun Heo if (ap->slave_link) { 2191b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2192b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2193b1c72916STejun Heo 2194848e4c68STejun Heo /* transfer control flags from master to slave */ 2195848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2196848e4c68STejun Heo 2197848e4c68STejun Heo /* perform autopsy on the slave link */ 2198b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2199b1c72916STejun Heo 2200848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2201b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2202b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2203b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2204b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2205b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2206b1c72916STejun Heo } 2207b1c72916STejun Heo 22082695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 22092695e366STejun Heo * Perform host link autopsy last. 
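 * For instance, a timeout on a fan-out link is propagated into
 * the host link's err_mask by ata_eh_link_autopsy() above, which
 * then makes the host link autopsy schedule a reset.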
22102695e366STejun Heo */ 2211071f44b1STejun Heo if (sata_pmp_attached(ap)) 22122695e366STejun Heo ata_eh_link_autopsy(&ap->link); 22139b1e2658STejun Heo } 22149b1e2658STejun Heo 22159b1e2658STejun Heo /** 22166521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 22176521148cSRobert Hancock * @command: ATA command code to get description for 22186521148cSRobert Hancock * 22196521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 22206521148cSRobert Hancock * command is not known. 22216521148cSRobert Hancock * 22226521148cSRobert Hancock * LOCKING: 22236521148cSRobert Hancock * None 22246521148cSRobert Hancock */ 22256521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 22266521148cSRobert Hancock { 22276521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 22286521148cSRobert Hancock static const struct 22296521148cSRobert Hancock { 22306521148cSRobert Hancock u8 command; 22316521148cSRobert Hancock const char *text; 22326521148cSRobert Hancock } cmd_descr[] = { 22336521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 22346521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 22356521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 22366521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 22376521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 22386521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 22396521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 22406521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 22416521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 22426521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 22436521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 22446521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 22456521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 22466521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 22476521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 22486521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 22496521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 22506521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 22516521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 22526521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 22536521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 22546521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 22556521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 22566521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 22576521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 22586521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 22596521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 22606521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 22616521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 22626521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 22636521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 22646521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 22656521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 22666521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 22676521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA 
EXT" }, 22686521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 22696521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 22706521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 22716521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 22726521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 22736521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 22746521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 22756521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 22766521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 22776521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 22786521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 22796521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 22806521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 22816521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 22826521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 22836521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 22846521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 22856521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 22866521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 22876521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 22886521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 22896521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 22906521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 22916521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 22926521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 22936521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 22946521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 22956521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 22966521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 22976521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 22986521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 22996521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 23006521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 23016521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2302acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 23036521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 23046521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 23056521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 23066521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 23076521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 23086521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 23096521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 23106521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 23116521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 23126521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 23136521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 23146521148cSRobert 
Hancock { 0, NULL } /* terminate list */ 23156521148cSRobert Hancock }; 23166521148cSRobert Hancock 23176521148cSRobert Hancock unsigned int i; 23186521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 23196521148cSRobert Hancock if (cmd_descr[i].command == command) 23206521148cSRobert Hancock return cmd_descr[i].text; 23216521148cSRobert Hancock #endif 23226521148cSRobert Hancock 23236521148cSRobert Hancock return NULL; 23246521148cSRobert Hancock } 23256521148cSRobert Hancock 23266521148cSRobert Hancock /** 23279b1e2658STejun Heo * ata_eh_link_report - report error handling to user 23280260731fSTejun Heo * @link: ATA link EH is going on 2329c6fd2807SJeff Garzik * 2330c6fd2807SJeff Garzik * Report EH to user. 2331c6fd2807SJeff Garzik * 2332c6fd2807SJeff Garzik * LOCKING: 2333c6fd2807SJeff Garzik * None. 2334c6fd2807SJeff Garzik */ 23359b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2336c6fd2807SJeff Garzik { 23370260731fSTejun Heo struct ata_port *ap = link->ap; 23380260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2339c6fd2807SJeff Garzik const char *frozen, *desc; 2340a1e10f7eSTejun Heo char tries_buf[6]; 2341c6fd2807SJeff Garzik int tag, nr_failed = 0; 2342c6fd2807SJeff Garzik 234394ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 234494ff3d54STejun Heo return; 234594ff3d54STejun Heo 2346c6fd2807SJeff Garzik desc = NULL; 2347c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2348c6fd2807SJeff Garzik desc = ehc->i.desc; 2349c6fd2807SJeff Garzik 2350c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2351c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2352c6fd2807SJeff Garzik 2353b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2354b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2355e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2356e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2357c6fd2807SJeff Garzik continue; 2358c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2359c6fd2807SJeff Garzik continue; 2360c6fd2807SJeff Garzik 2361c6fd2807SJeff Garzik nr_failed++; 2362c6fd2807SJeff Garzik } 2363c6fd2807SJeff Garzik 2364c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2365c6fd2807SJeff Garzik return; 2366c6fd2807SJeff Garzik 2367c6fd2807SJeff Garzik frozen = ""; 2368c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2369c6fd2807SJeff Garzik frozen = " frozen"; 2370c6fd2807SJeff Garzik 2371a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2372a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2373a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2374a1e10f7eSTejun Heo ap->eh_tries); 2375a1e10f7eSTejun Heo 2376c6fd2807SJeff Garzik if (ehc->i.dev) { 2377a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2378a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2379a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2380a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2381c6fd2807SJeff Garzik if (desc) 2382a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2383c6fd2807SJeff Garzik } else { 2384a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2385a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2386a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2387a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2388c6fd2807SJeff Garzik if (desc) 2389a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2390c6fd2807SJeff 
Garzik } 2391c6fd2807SJeff Garzik 23926521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23931333e194SRobert Hancock if (ehc->i.serror) 2394a9a79dfeSJoe Perches ata_link_err(link, 23951333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 23961333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 23971333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 23981333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 23991333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 24001333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 24011333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 24021333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 24031333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 24041333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 24051333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 24061333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 24071333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 24081333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 24091333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 24101333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 24111333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 24121333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 24136521148cSRobert Hancock #endif 24141333e194SRobert Hancock 2415c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2416c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 24178a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2418abb6a889STejun Heo const u8 *cdb = qc->cdb; 2419abb6a889STejun Heo char data_buf[20] = ""; 2420abb6a889STejun Heo char cdb_buf[70] = ""; 2421c6fd2807SJeff Garzik 24220260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2423b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2424c6fd2807SJeff Garzik continue; 2425c6fd2807SJeff Garzik 2426abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2427abb6a889STejun Heo static const char *dma_str[] = { 2428abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2429abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2430abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2431abb6a889STejun Heo }; 2432abb6a889STejun Heo static const char *prot_str[] = { 2433abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2434abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2435abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 24360dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 24370dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2438abb6a889STejun Heo }; 2439abb6a889STejun Heo 2440abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2441abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2442abb6a889STejun Heo dma_str[qc->dma_dir]); 2443abb6a889STejun Heo } 2444abb6a889STejun Heo 24456521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 24466521148cSRobert Hancock if (qc->scsicmd) 24476521148cSRobert Hancock scsi_print_command(qc->scsicmd); 24486521148cSRobert Hancock else 2449abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2450abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2451abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2452abb6a889STejun Heo 
cdb[0], cdb[1], cdb[2], cdb[3], 2453abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2454abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2455abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 24566521148cSRobert Hancock } else { 24576521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 24586521148cSRobert Hancock if (descr) 2459a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2460a9a79dfeSJoe Perches descr); 24616521148cSRobert Hancock } 2462abb6a889STejun Heo 2463a9a79dfeSJoe Perches ata_dev_err(qc->dev, 24648a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2465abb6a889STejun Heo "tag %d%s\n %s" 24668a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 24675335b729STejun Heo "Emask 0x%x (%s)%s\n", 24688a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 24698a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 24708a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 24718a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2472abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 24738a937581STejun Heo res->command, res->feature, res->nsect, 24748a937581STejun Heo res->lbal, res->lbam, res->lbah, 24758a937581STejun Heo res->hob_feature, res->hob_nsect, 24768a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 24775335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 24785335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 24791333e194SRobert Hancock 24806521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 24811333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 24821333e194SRobert Hancock ATA_ERR)) { 24831333e194SRobert Hancock if (res->command & ATA_BUSY) 2484a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 24851333e194SRobert Hancock else 2486a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", 24871333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 24881333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 24891333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 24901333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 24911333e194SRobert Hancock } 24921333e194SRobert Hancock 24931333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 24941333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 24951333e194SRobert Hancock ATA_ABORTED))) 2496a9a79dfeSJoe Perches ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", 24971333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 24981333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 24991333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 25001333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 25016521148cSRobert Hancock #endif 2502c6fd2807SJeff Garzik } 2503c6fd2807SJeff Garzik } 2504c6fd2807SJeff Garzik 25059b1e2658STejun Heo /** 25069b1e2658STejun Heo * ata_eh_report - report error handling to user 25079b1e2658STejun Heo * @ap: ATA port to report EH about 25089b1e2658STejun Heo * 25099b1e2658STejun Heo * Report EH to user. 25109b1e2658STejun Heo * 25119b1e2658STejun Heo * LOCKING: 25129b1e2658STejun Heo * None. 
25139b1e2658STejun Heo */ 2514fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 25159b1e2658STejun Heo { 25169b1e2658STejun Heo struct ata_link *link; 25179b1e2658STejun Heo 25181eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 25199b1e2658STejun Heo ata_eh_link_report(link); 25209b1e2658STejun Heo } 25219b1e2658STejun Heo 2522cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2523b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2524b1c72916STejun Heo bool clear_classes) 2525c6fd2807SJeff Garzik { 2526f58229f8STejun Heo struct ata_device *dev; 2527c6fd2807SJeff Garzik 2528b1c72916STejun Heo if (clear_classes) 25291eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2530f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2531c6fd2807SJeff Garzik 2532f046519fSTejun Heo return reset(link, classes, deadline); 2533c6fd2807SJeff Garzik } 2534c6fd2807SJeff Garzik 2535*e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2536c6fd2807SJeff Garzik { 253745db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2538ae791c05STejun Heo return 0; 25395dbfc9cbSTejun Heo if (rc == -EAGAIN) 2540c6fd2807SJeff Garzik return 1; 2541071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 25423495de73STejun Heo return 1; 2543c6fd2807SJeff Garzik return 0; 2544c6fd2807SJeff Garzik } 2545c6fd2807SJeff Garzik 2546fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2547c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2548c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2549c6fd2807SJeff Garzik { 2550afaa5c37STejun Heo struct ata_port *ap = link->ap; 2551b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2552936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2553705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2554c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2555416dc9edSTejun Heo unsigned int lflags = link->flags; 2556c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2557d8af0eb6STejun Heo int max_tries = 0, try = 0; 2558b1c72916STejun Heo struct ata_link *failed_link; 2559f58229f8STejun Heo struct ata_device *dev; 2560416dc9edSTejun Heo unsigned long deadline, now; 2561c6fd2807SJeff Garzik ata_reset_fn_t reset; 2562afaa5c37STejun Heo unsigned long flags; 2563416dc9edSTejun Heo u32 sstatus; 2564b1c72916STejun Heo int nr_unknown, rc; 2565c6fd2807SJeff Garzik 2566932648b0STejun Heo /* 2567932648b0STejun Heo * Prepare to reset 2568932648b0STejun Heo */ 2569d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2570d8af0eb6STejun Heo max_tries++; 257105944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 257205944bdfSTejun Heo hardreset = NULL; 257305944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 257405944bdfSTejun Heo softreset = NULL; 2575d8af0eb6STejun Heo 257625985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 257719b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 25780a2c0f56STejun Heo now = jiffies; 257919b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 258019b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 258119b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 25820a2c0f56STejun Heo if (time_before(now, deadline)) 25830a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 258419b72321STejun Heo } 25850a2c0f56STejun Heo 2586afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2587afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2588afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2589afaa5c37STejun Heo 2590cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2591c6fd2807SJeff Garzik 25921eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2593cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2594cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2595cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2596cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2597cdeab114STejun Heo * suitable controller mode we should not touch the 2598cdeab114STejun Heo * bus as we may be talking too fast. 2599cdeab114STejun Heo */ 2600cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2601cdeab114STejun Heo 2602cdeab114STejun Heo /* If the controller has a pio mode setup function 2603cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2604cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2605cdeab114STejun Heo * configuring devices. 
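 *
 * A typical PATA LLD's ->set_piomode(ap, dev) callback, for
 * example, reprograms its timing registers for the XFER_PIO_0
 * mode set above, so that the IDENTIFY issued after the reset
 * is not clocked faster than the device can handle.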
2606cdeab114STejun Heo */ 2607cdeab114STejun Heo if (ap->ops->set_piomode) 2608cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2609cdeab114STejun Heo } 2610cdeab114STejun Heo 2611cf480626STejun Heo /* prefer hardreset */ 2612932648b0STejun Heo reset = NULL; 2613cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2614cf480626STejun Heo if (hardreset) { 2615cf480626STejun Heo reset = hardreset; 2616a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 26174f7faa3fSTejun Heo } else if (softreset) { 2618cf480626STejun Heo reset = softreset; 2619a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2620cf480626STejun Heo } 2621c6fd2807SJeff Garzik 2622c6fd2807SJeff Garzik if (prereset) { 2623b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2624b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2625b1c72916STejun Heo 2626b1c72916STejun Heo if (slave) { 2627b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2628b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2629b1c72916STejun Heo } 2630b1c72916STejun Heo 2631b1c72916STejun Heo rc = prereset(link, deadline); 2632b1c72916STejun Heo 2633b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2634b1c72916STejun Heo * is skipped iff both master and slave links report 2635b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2636b1c72916STejun Heo */ 2637b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2638b1c72916STejun Heo int tmp; 2639b1c72916STejun Heo 2640b1c72916STejun Heo tmp = prereset(slave, deadline); 2641b1c72916STejun Heo if (tmp != -ENOENT) 2642b1c72916STejun Heo rc = tmp; 2643b1c72916STejun Heo 2644b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2645b1c72916STejun Heo } 2646b1c72916STejun Heo 2647c6fd2807SJeff Garzik if (rc) { 2648c961922bSAlan Cox if (rc == -ENOENT) { 2649a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2650cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 26514aa9ab67STejun Heo 26521eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2653f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 26544aa9ab67STejun Heo 26554aa9ab67STejun Heo rc = 0; 2656c961922bSAlan Cox } else 2657a9a79dfeSJoe Perches ata_link_err(link, 2658a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2659a9a79dfeSJoe Perches rc); 2660fccb6ea5STejun Heo goto out; 2661c6fd2807SJeff Garzik } 2662c6fd2807SJeff Garzik 2663932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2664d6515e6fSTejun Heo * bang classes, thaw and return. 2665932648b0STejun Heo */ 2666932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 26671eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2668f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2669d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2670d6515e6fSTejun Heo ata_is_host_link(link)) 2671d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2672fccb6ea5STejun Heo rc = 0; 2673fccb6ea5STejun Heo goto out; 2674c6fd2807SJeff Garzik } 2675932648b0STejun Heo } 2676c6fd2807SJeff Garzik 2677c6fd2807SJeff Garzik retry: 2678932648b0STejun Heo /* 2679932648b0STejun Heo * Perform reset 2680932648b0STejun Heo */ 2681dc98c32cSTejun Heo if (ata_is_host_link(link)) 2682dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2683dc98c32cSTejun Heo 2684341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 268531daabdaSTejun Heo 2686932648b0STejun Heo if (reset) { 2687c6fd2807SJeff Garzik if (verbose) 2688a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2689c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2690c6fd2807SJeff Garzik 2691c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 269219b72321STejun Heo ehc->last_reset = jiffies; 26930d64a233STejun Heo if (reset == hardreset) 26940d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 26950d64a233STejun Heo else 26960d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2697c6fd2807SJeff Garzik 2698b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2699b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2700b1c72916STejun Heo failed_link = link; 27015dbfc9cbSTejun Heo goto fail; 2702b1c72916STejun Heo } 2703c6fd2807SJeff Garzik 2704b1c72916STejun Heo /* hardreset slave link if existent */ 2705b1c72916STejun Heo if (slave && reset == hardreset) { 2706b1c72916STejun Heo int tmp; 2707b1c72916STejun Heo 2708b1c72916STejun Heo if (verbose) 2709a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2710b1c72916STejun Heo 2711b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2712b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2713b1c72916STejun Heo false); 2714b1c72916STejun Heo switch (tmp) { 2715b1c72916STejun Heo case -EAGAIN: 2716b1c72916STejun Heo rc = -EAGAIN; 2717b1c72916STejun Heo case 0: 2718b1c72916STejun Heo break; 2719b1c72916STejun Heo default: 2720b1c72916STejun Heo failed_link = slave; 2721b1c72916STejun Heo rc = tmp; 2722b1c72916STejun Heo goto fail; 2723b1c72916STejun Heo } 2724b1c72916STejun Heo } 2725b1c72916STejun Heo 2726b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2727c6fd2807SJeff Garzik if (reset == hardreset && 2728*e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2729c6fd2807SJeff Garzik reset = softreset; 2730c6fd2807SJeff Garzik 2731c6fd2807SJeff Garzik if (!reset) { 2732a9a79dfeSJoe Perches ata_link_err(link, 2733a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2734b1c72916STejun Heo failed_link = link; 2735fccb6ea5STejun Heo rc = -EINVAL; 273608cf69d0STejun Heo goto fail; 2737c6fd2807SJeff Garzik } 2738c6fd2807SJeff Garzik 2739cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2740b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2741fe2c4d01STejun Heo if (rc) { 2742fe2c4d01STejun Heo failed_link = link; 2743fe2c4d01STejun Heo goto fail; 2744fe2c4d01STejun Heo } 2745c6fd2807SJeff Garzik } 2746932648b0STejun Heo } else { 2747932648b0STejun Heo if (verbose) 2748a9a79dfeSJoe Perches ata_link_info(link, 2749a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2750932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2751932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2752932648b0STejun Heo } 2753008a7896STejun Heo 2754932648b0STejun Heo /* 2755932648b0STejun Heo * Post-reset processing 2756932648b0STejun Heo */ 27571eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2758416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2759416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2760416dc9edSTejun Heo * drives from sleeping mode. 
2761c6fd2807SJeff Garzik */ 2762f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2763054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2764c6fd2807SJeff Garzik 27653b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 27663b761d3dSTejun Heo continue; 27673b761d3dSTejun Heo 27684ccd3329STejun Heo /* apply class override */ 2769416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2770ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2771416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2772816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2773ae791c05STejun Heo } 2774ae791c05STejun Heo 2775008a7896STejun Heo /* record current link speed */ 2776936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2777936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2778b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2779b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2780008a7896STejun Heo 2781dc98c32cSTejun Heo /* thaw the port */ 2782dc98c32cSTejun Heo if (ata_is_host_link(link)) 2783dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2784dc98c32cSTejun Heo 2785f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2786f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2787f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2788f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2789f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2790f046519fSTejun Heo * link onlineness and classification result later. 2791f046519fSTejun Heo */ 2792b1c72916STejun Heo if (postreset) { 2793cc0680a5STejun Heo postreset(link, classes); 2794b1c72916STejun Heo if (slave) 2795b1c72916STejun Heo postreset(slave, classes); 2796b1c72916STejun Heo } 2797c6fd2807SJeff Garzik 27981e641060STejun Heo /* 27998c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 28008c56caccSTejun Heo * error conditions during reset. Clear accumulated error 28018c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 28028c56caccSTejun Heo * final recovery action and we cross check link onlineness against 28038c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 28041e641060STejun Heo */ 2805f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 28061e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2807b1c72916STejun Heo if (slave) 28081e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 28091e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2810f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2811f046519fSTejun Heo 28128c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 28138c56caccSTejun Heo ata_eh_thaw_port(ap); 28148c56caccSTejun Heo 28153b761d3dSTejun Heo /* 28163b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2817f046519fSTejun Heo * Hotplug could have happened during reset and some 2818f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2819f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 28203b761d3dSTejun Heo * link on/offlineness and classification result, those 28213b761d3dSTejun Heo * conditions can be reliably detected and retried. 
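 *
 * For example, a drive hotplugged while the reset was in flight
 * may still be spinning up: the link reports online but the
 * class code comes back ATA_DEV_UNKNOWN.  The loop below clears
 * such devices to ATA_DEV_NONE and, when @classify is set,
 * fails the reset with -EAGAIN so the whole sequence is retried.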
2822f046519fSTejun Heo */ 2823b1c72916STejun Heo nr_unknown = 0; 28241eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 28253b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2826b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2827a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2828f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2829b1c72916STejun Heo nr_unknown++; 2830b1c72916STejun Heo } 28313b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 28323b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2833a9a79dfeSJoe Perches ata_dev_dbg(dev, 2834a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 28353b761d3dSTejun Heo classes[dev->devno]); 28363b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28373b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2838a9a79dfeSJoe Perches ata_dev_dbg(dev, 2839a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 28403b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28413b761d3dSTejun Heo } 2842f046519fSTejun Heo } 2843f046519fSTejun Heo 2844b1c72916STejun Heo if (classify && nr_unknown) { 2845f046519fSTejun Heo if (try < max_tries) { 2846a9a79dfeSJoe Perches ata_link_warn(link, 2847a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 28483b761d3dSTejun Heo nr_unknown); 2849b1c72916STejun Heo failed_link = link; 2850f046519fSTejun Heo rc = -EAGAIN; 2851f046519fSTejun Heo goto fail; 2852f046519fSTejun Heo } 2853a9a79dfeSJoe Perches ata_link_warn(link, 28543b761d3dSTejun Heo "link online but %d devices misclassified, " 28553b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2856f046519fSTejun Heo } 2857f046519fSTejun Heo 2858c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2859cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2860b1c72916STejun Heo if (slave) 2861b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 286219b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2863c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 28646b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2865416dc9edSTejun Heo 2866416dc9edSTejun Heo rc = 0; 2867fccb6ea5STejun Heo out: 2868fccb6ea5STejun Heo /* clear hotplug flag */ 2869fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2870b1c72916STejun Heo if (slave) 2871b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2872afaa5c37STejun Heo 2873afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2874afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2875afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2876afaa5c37STejun Heo 2877c6fd2807SJeff Garzik return rc; 2878416dc9edSTejun Heo 2879416dc9edSTejun Heo fail: 28805958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 28815958e302STejun Heo if (!ata_is_host_link(link) && 28825958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 28835958e302STejun Heo rc = -ERESTART; 28845958e302STejun Heo 28858ea7645cSTejun Heo if (rc == -ERESTART || try >= max_tries) { 28868ea7645cSTejun Heo /* 28878ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 28888ea7645cSTejun Heo * can be retried on the next phy event. This risks 28898ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 28908ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
28918ea7645cSTejun Heo */ 28928ea7645cSTejun Heo if (ata_is_host_link(link)) 28938ea7645cSTejun Heo ata_eh_thaw_port(ap); 2894416dc9edSTejun Heo goto out; 28958ea7645cSTejun Heo } 2896416dc9edSTejun Heo 2897416dc9edSTejun Heo now = jiffies; 2898416dc9edSTejun Heo if (time_before(now, deadline)) { 2899416dc9edSTejun Heo unsigned long delta = deadline - now; 2900416dc9edSTejun Heo 2901a9a79dfeSJoe Perches ata_link_warn(failed_link, 29020a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 29030a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2904416dc9edSTejun Heo 2905c0c362b6STejun Heo ata_eh_release(ap); 2906416dc9edSTejun Heo while (delta) 2907416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2908c0c362b6STejun Heo ata_eh_acquire(ap); 2909416dc9edSTejun Heo } 2910416dc9edSTejun Heo 2911b1c72916STejun Heo if (try == max_tries - 1) { 2912a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2913b1c72916STejun Heo if (slave) 2914a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2915b1c72916STejun Heo } else if (rc == -EPIPE) 2916a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2917b1c72916STejun Heo 2918416dc9edSTejun Heo if (hardreset) 2919416dc9edSTejun Heo reset = hardreset; 2920416dc9edSTejun Heo goto retry; 2921c6fd2807SJeff Garzik } 2922c6fd2807SJeff Garzik 292345fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 292445fabbb7SElias Oltmanns { 292545fabbb7SElias Oltmanns struct ata_link *link; 292645fabbb7SElias Oltmanns struct ata_device *dev; 292745fabbb7SElias Oltmanns unsigned long flags; 292845fabbb7SElias Oltmanns 292945fabbb7SElias Oltmanns /* 293045fabbb7SElias Oltmanns * This function can be thought of as an extended version of 293145fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 293245fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 293345fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 293445fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 293545fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 293645fabbb7SElias Oltmanns * up park requests to other devices on the same port or 293745fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 293845fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 293945fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 294045fabbb7SElias Oltmanns * 294145fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 294245fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 294345fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 294445fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 294545fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for 294645fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 294745fabbb7SElias Oltmanns * respective eh_context structs. 
If, and only if, 294845fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 294945fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 295045fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 295145fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 295245fabbb7SElias Oltmanns * ata_eh_recover() again. 295345fabbb7SElias Oltmanns */ 295445fabbb7SElias Oltmanns 295545fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 295645fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 29571eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 29581eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 295945fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 296045fabbb7SElias Oltmanns 296145fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 296245fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 296345fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 296445fabbb7SElias Oltmanns } 296545fabbb7SElias Oltmanns } 296645fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 296745fabbb7SElias Oltmanns } 296845fabbb7SElias Oltmanns 296945fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 297045fabbb7SElias Oltmanns { 297145fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 297245fabbb7SElias Oltmanns struct ata_taskfile tf; 297345fabbb7SElias Oltmanns unsigned int err_mask; 297445fabbb7SElias Oltmanns 297545fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 297645fabbb7SElias Oltmanns if (park) { 297745fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 297845fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 297945fabbb7SElias Oltmanns tf.feature = 0x44; 298045fabbb7SElias Oltmanns tf.lbal = 0x4c; 298145fabbb7SElias Oltmanns tf.lbam = 0x4e; 298245fabbb7SElias Oltmanns tf.lbah = 0x55; 298345fabbb7SElias Oltmanns } else { 298445fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 298545fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 298645fabbb7SElias Oltmanns } 298745fabbb7SElias Oltmanns 298845fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 298945fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 299045fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 299145fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 2992a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 299345fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 299445fabbb7SElias Oltmanns } 299545fabbb7SElias Oltmanns } 299645fabbb7SElias Oltmanns 29970260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2998c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2999c6fd2807SJeff Garzik { 30000260731fSTejun Heo struct ata_port *ap = link->ap; 30010260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3002c6fd2807SJeff Garzik struct ata_device *dev; 30038c3c52a8STejun Heo unsigned int new_mask = 0; 3004c6fd2807SJeff Garzik unsigned long flags; 3005f58229f8STejun Heo int rc = 0; 3006c6fd2807SJeff Garzik 3007c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3008c6fd2807SJeff Garzik 30098c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 30108c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 30118c3c52a8STejun Heo * device before the master device is identified. 
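 *
 * This is why the loop below walks the devices with
 * ata_for_each_dev(dev, link, ALL_REVERSE): device 1 (the
 * slave) is identified first, then device 0 (the master).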
30128c3c52a8STejun Heo */ 30131eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 3014f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 3015f58229f8STejun Heo unsigned int readid_flags = 0; 3016c6fd2807SJeff Garzik 3017bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 3018bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 3019bff04647STejun Heo 30209666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 3021633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 3022633273a3STejun Heo 3023b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 3024c6fd2807SJeff Garzik rc = -EIO; 30258c3c52a8STejun Heo goto err; 3026c6fd2807SJeff Garzik } 3027c6fd2807SJeff Garzik 30280260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 3029422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 3030422c9daaSTejun Heo readid_flags); 3031c6fd2807SJeff Garzik if (rc) 30328c3c52a8STejun Heo goto err; 3033c6fd2807SJeff Garzik 30340260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 3035c6fd2807SJeff Garzik 3036baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 3037baa1e78aSTejun Heo * transfer mode. 3038baa1e78aSTejun Heo */ 3039baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3040baa1e78aSTejun Heo 3041c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 3042ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 3043c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 3044c6fd2807SJeff Garzik ehc->tries[dev->devno] && 3045c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3046842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3047842faa6cSTejun Heo * permanently set once all configurations are 3048842faa6cSTejun Heo * complete. This is necessary because new 3049842faa6cSTejun Heo * device configuration is done in two 3050842faa6cSTejun Heo * separate loops. 3051842faa6cSTejun Heo */ 3052c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3053c6fd2807SJeff Garzik 3054633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3055633273a3STejun Heo rc = sata_pmp_attach(dev); 3056633273a3STejun Heo else 3057633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3058633273a3STejun Heo readid_flags, dev->id); 3059842faa6cSTejun Heo 3060842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3061842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3062842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3063842faa6cSTejun Heo 30648c3c52a8STejun Heo switch (rc) { 30658c3c52a8STejun Heo case 0: 306699cf610aSTejun Heo /* clear error info accumulated during probe */ 306799cf610aSTejun Heo ata_ering_clear(&dev->ering); 3068f58229f8STejun Heo new_mask |= 1 << dev->devno; 30698c3c52a8STejun Heo break; 30708c3c52a8STejun Heo case -ENOENT: 307155a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 307255a8e2c8STejun Heo * device. No need to reset. Just 3073842faa6cSTejun Heo * thaw and ignore the device. 
307455a8e2c8STejun Heo */ 307555a8e2c8STejun Heo ata_eh_thaw_port(ap); 3076c6fd2807SJeff Garzik break; 30778c3c52a8STejun Heo default: 30788c3c52a8STejun Heo goto err; 30798c3c52a8STejun Heo } 30808c3c52a8STejun Heo } 3081c6fd2807SJeff Garzik } 3082c6fd2807SJeff Garzik 3083c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 308433267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 308533267325STejun Heo if (ap->ops->cable_detect) 3086c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 308733267325STejun Heo ata_force_cbl(ap); 308833267325STejun Heo } 3089c1c4e8d5STejun Heo 30908c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 30918c3c52a8STejun Heo * device detection messages backwards. 30928c3c52a8STejun Heo */ 30931eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 30944f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 30958c3c52a8STejun Heo continue; 30968c3c52a8STejun Heo 3097842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3098842faa6cSTejun Heo 30994f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 31004f7c2874STejun Heo continue; 31014f7c2874STejun Heo 31028c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 31038c3c52a8STejun Heo rc = ata_dev_configure(dev); 31048c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3105842faa6cSTejun Heo if (rc) { 3106842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 31078c3c52a8STejun Heo goto err; 3108842faa6cSTejun Heo } 31098c3c52a8STejun Heo 3110c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3111c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3112c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3113baa1e78aSTejun Heo 311455a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3115baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3116c6fd2807SJeff Garzik } 3117c6fd2807SJeff Garzik 31188c3c52a8STejun Heo return 0; 31198c3c52a8STejun Heo 31208c3c52a8STejun Heo err: 3121c6fd2807SJeff Garzik *r_failed_dev = dev; 31228c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3123c6fd2807SJeff Garzik return rc; 3124c6fd2807SJeff Garzik } 3125c6fd2807SJeff Garzik 31266f1d1e3aSTejun Heo /** 31276f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 31286f1d1e3aSTejun Heo * @link: link on which timings will be programmed 312998a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 31306f1d1e3aSTejun Heo * 31316f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 31326f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 31336f1d1e3aSTejun Heo * returned in @r_failed_dev. 31346f1d1e3aSTejun Heo * 31356f1d1e3aSTejun Heo * LOCKING: 31366f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
31376f1d1e3aSTejun Heo * 31386f1d1e3aSTejun Heo * RETURNS: 31396f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 31406f1d1e3aSTejun Heo */ 31416f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 31426f1d1e3aSTejun Heo { 31436f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 314400115e0fSTejun Heo struct ata_device *dev; 314500115e0fSTejun Heo int rc; 31466f1d1e3aSTejun Heo 314776326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 31481eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 314976326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 315076326ac1STejun Heo struct ata_ering_entry *ent; 315176326ac1STejun Heo 315276326ac1STejun Heo ent = ata_ering_top(&dev->ering); 315376326ac1STejun Heo if (ent) 315476326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 315576326ac1STejun Heo } 315676326ac1STejun Heo } 315776326ac1STejun Heo 31586f1d1e3aSTejun Heo /* has private set_mode? */ 31596f1d1e3aSTejun Heo if (ap->ops->set_mode) 316000115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 316100115e0fSTejun Heo else 316200115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 316300115e0fSTejun Heo 316400115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 31651eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 316600115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 316700115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 316800115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 316900115e0fSTejun Heo 317000115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 317100115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 317200115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 317300115e0fSTejun Heo } 317400115e0fSTejun Heo 317500115e0fSTejun Heo return rc; 31766f1d1e3aSTejun Heo } 31776f1d1e3aSTejun Heo 317811fc33daSTejun Heo /** 317911fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 318011fc33daSTejun Heo * @dev: ATAPI device to clear UA for 318111fc33daSTejun Heo * 318211fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 318311fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 318411fc33daSTejun Heo * function clears UA. 318511fc33daSTejun Heo * 318611fc33daSTejun Heo * LOCKING: 318711fc33daSTejun Heo * EH context (may sleep). 318811fc33daSTejun Heo * 318911fc33daSTejun Heo * RETURNS: 319011fc33daSTejun Heo * 0 on success, -errno on failure. 
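 *
 * Note that a UNIT ATTENTION which persists after ATA_EH_UA_TRIES
 * attempts is only warned about; the function still returns 0.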
319111fc33daSTejun Heo */ 319211fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 319311fc33daSTejun Heo { 319411fc33daSTejun Heo int i; 319511fc33daSTejun Heo 319611fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3197b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 319811fc33daSTejun Heo u8 sense_key = 0; 319911fc33daSTejun Heo unsigned int err_mask; 320011fc33daSTejun Heo 320111fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 320211fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3203a9a79dfeSJoe Perches ata_dev_warn(dev, 3204a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3205a9a79dfeSJoe Perches err_mask); 320611fc33daSTejun Heo return -EIO; 320711fc33daSTejun Heo } 320811fc33daSTejun Heo 320911fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 321011fc33daSTejun Heo return 0; 321111fc33daSTejun Heo 321211fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 321311fc33daSTejun Heo if (err_mask) { 3214a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 321511fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 321611fc33daSTejun Heo return -EIO; 321711fc33daSTejun Heo } 321811fc33daSTejun Heo } 321911fc33daSTejun Heo 3220a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3221a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 322211fc33daSTejun Heo 322311fc33daSTejun Heo return 0; 322411fc33daSTejun Heo } 322511fc33daSTejun Heo 32266013efd8STejun Heo /** 32276013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 32286013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 32296013efd8STejun Heo * 32306013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 32316013efd8STejun Heo * immediately as it means that @dev failed to remap and already 32326013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 32336013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 32346013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 32356013efd8STejun Heo * to be retried. 32366013efd8STejun Heo * 32376013efd8STejun Heo * This function determines whether FLUSH failure retry is 32386013efd8STejun Heo * necessary and performs it if so. 32396013efd8STejun Heo * 32406013efd8STejun Heo * RETURNS: 32416013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 32426013efd8STejun Heo */ 32436013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 32446013efd8STejun Heo { 32456013efd8STejun Heo struct ata_link *link = dev->link; 32466013efd8STejun Heo struct ata_port *ap = link->ap; 32476013efd8STejun Heo struct ata_queued_cmd *qc; 32486013efd8STejun Heo struct ata_taskfile tf; 32496013efd8STejun Heo unsigned int err_mask; 32506013efd8STejun Heo int rc = 0; 32516013efd8STejun Heo 32526013efd8STejun Heo /* did flush fail for this device? 
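 *
 * FLUSH / FLUSH EXT are non-NCQ commands, so if a flush was the
 * command that just failed it is still recorded in
 * link->active_tag; a link without a valid active_tag cannot
 * have had a flush outstanding.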
*/ 32536013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 32546013efd8STejun Heo return 0; 32556013efd8STejun Heo 32566013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 32576013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 32586013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 32596013efd8STejun Heo return 0; 32606013efd8STejun Heo 32616013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 32626013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 32636013efd8STejun Heo return 0; 32646013efd8STejun Heo 32656013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 32666013efd8STejun Heo ata_tf_init(dev, &tf); 32676013efd8STejun Heo 32686013efd8STejun Heo tf.command = qc->tf.command; 32696013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 32706013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 32716013efd8STejun Heo 3272a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 32736013efd8STejun Heo tf.command, qc->err_mask); 32746013efd8STejun Heo 32756013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 32766013efd8STejun Heo if (!err_mask) { 32776013efd8STejun Heo /* 32786013efd8STejun Heo * FLUSH is complete but there's no way to 32796013efd8STejun Heo * successfully complete a failed command from EH. 32806013efd8STejun Heo * Making sure retry is allowed at least once and 32816013efd8STejun Heo * retrying it should do the trick - whatever was in 32826013efd8STejun Heo * the cache is already on the platter and this won't 32836013efd8STejun Heo * cause infinite loop. 32846013efd8STejun Heo */ 32856013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 32866013efd8STejun Heo } else { 3287a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 32886013efd8STejun Heo err_mask); 32896013efd8STejun Heo rc = -EIO; 32906013efd8STejun Heo 32916013efd8STejun Heo /* if device failed it, report it to upper layers */ 32926013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 32936013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 32946013efd8STejun Heo qc->result_tf = tf; 32956013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 32966013efd8STejun Heo rc = 0; 32976013efd8STejun Heo } 32986013efd8STejun Heo } 32996013efd8STejun Heo return rc; 33006013efd8STejun Heo } 33016013efd8STejun Heo 33026b7ae954STejun Heo /** 33036b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 33046b7ae954STejun Heo * @link: link to configure power management 33056b7ae954STejun Heo * @policy: the link power management policy 33066b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 33076b7ae954STejun Heo * 33086b7ae954STejun Heo * Enable SATA Interface power management. This will enable 33096b7ae954STejun Heo * Device Interface Power Management (DIPM) for min_power 33106b7ae954STejun Heo * policy, and then call driver specific callbacks for 33116b7ae954STejun Heo * enabling Host Initiated Power management. 33126b7ae954STejun Heo * 33136b7ae954STejun Heo * LOCKING: 33146b7ae954STejun Heo * EH context. 33156b7ae954STejun Heo * 33166b7ae954STejun Heo * RETURNS: 33176b7ae954STejun Heo * 0 on success, -errno on failure. 33186b7ae954STejun Heo */ 33196b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 33206b7ae954STejun Heo struct ata_device **r_failed_dev) 33216b7ae954STejun Heo { 33226c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? 
link->ap : NULL; 33236b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 33246b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3325e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 33265f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 33276b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 33286b7ae954STejun Heo unsigned int err_mask; 33296b7ae954STejun Heo int rc; 33306b7ae954STejun Heo 33316b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 33326b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 33336b7ae954STejun Heo return 0; 33346b7ae954STejun Heo 33356b7ae954STejun Heo /* 33366b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 33376b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 33386b7ae954STejun Heo * device and link configurations such that the host always 33396b7ae954STejun Heo * allows DIPM requests. 33406b7ae954STejun Heo */ 33416b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 33426b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3343ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 33446b7ae954STejun Heo 33456b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 33466b7ae954STejun Heo if (!link_dev) 33476b7ae954STejun Heo link_dev = dev; 33486b7ae954STejun Heo 33496b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 33506b7ae954STejun Heo lpm_dev = dev; 33516b7ae954STejun Heo 33526b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 33536b7ae954STejun Heo if (!hipm) 33546b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 33556b7ae954STejun Heo 33566b7ae954STejun Heo /* disable DIPM before changing link config */ 33576b7ae954STejun Heo if (policy != ATA_LPM_MIN_POWER && dipm) { 33586b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33596b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 33606b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3361a9a79dfeSJoe Perches ata_dev_warn(dev, 33626b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 33636b7ae954STejun Heo err_mask); 33646b7ae954STejun Heo rc = -EIO; 33656b7ae954STejun Heo goto fail; 33666b7ae954STejun Heo } 33676b7ae954STejun Heo } 33686b7ae954STejun Heo } 33696b7ae954STejun Heo 33706c8ea89cSTejun Heo if (ap) { 33716b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 33726b7ae954STejun Heo if (!rc && ap->slave_link) 33736b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 33746c8ea89cSTejun Heo } else 33756c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 33766b7ae954STejun Heo 33776b7ae954STejun Heo /* 33786b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 33796b7ae954STejun Heo * device on the link. 33806b7ae954STejun Heo */ 33816b7ae954STejun Heo if (rc) { 33826b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 33836b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33846b7ae954STejun Heo return 0; 33856b7ae954STejun Heo } 33866b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 33876b7ae954STejun Heo goto fail; 33886b7ae954STejun Heo } 33896b7ae954STejun Heo 3390e5005b15STejun Heo /* 3391e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3392e5005b15STejun Heo * with the new policy set. 
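 *
 * link->lpm_policy is updated first so that, by the time the
 * loop below issues SET FEATURES to enable DIPM for
 * ATA_LPM_MIN_POWER, the host side is already configured to
 * accept the device-initiated transitions which may follow.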
3393e5005b15STejun Heo */ 3394e5005b15STejun Heo link->lpm_policy = policy; 3395e5005b15STejun Heo if (ap && ap->slave_link) 3396e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3397e5005b15STejun Heo 33986b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 33996b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3400ae01b249STejun Heo if (policy == ATA_LPM_MIN_POWER && !no_dipm && 3401ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 34026b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 34036b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 34046b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3405a9a79dfeSJoe Perches ata_dev_warn(dev, 34066b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 34076b7ae954STejun Heo err_mask); 34086b7ae954STejun Heo rc = -EIO; 34096b7ae954STejun Heo goto fail; 34106b7ae954STejun Heo } 34116b7ae954STejun Heo } 34126b7ae954STejun Heo } 34136b7ae954STejun Heo 34146b7ae954STejun Heo return 0; 34156b7ae954STejun Heo 34166b7ae954STejun Heo fail: 3417e5005b15STejun Heo /* restore the old policy */ 3418e5005b15STejun Heo link->lpm_policy = old_policy; 3419e5005b15STejun Heo if (ap && ap->slave_link) 3420e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3421e5005b15STejun Heo 34226b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 34236b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3424a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 34256b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 34266b7ae954STejun Heo } 34276b7ae954STejun Heo if (r_failed_dev) 34286b7ae954STejun Heo *r_failed_dev = dev; 34296b7ae954STejun Heo return rc; 34306b7ae954STejun Heo } 34316b7ae954STejun Heo 34328a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3433c6fd2807SJeff Garzik { 3434f58229f8STejun Heo struct ata_device *dev; 3435f58229f8STejun Heo int cnt = 0; 3436c6fd2807SJeff Garzik 34371eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3438c6fd2807SJeff Garzik cnt++; 3439c6fd2807SJeff Garzik return cnt; 3440c6fd2807SJeff Garzik } 3441c6fd2807SJeff Garzik 34420260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3443c6fd2807SJeff Garzik { 3444f58229f8STejun Heo struct ata_device *dev; 3445f58229f8STejun Heo int cnt = 0; 3446c6fd2807SJeff Garzik 34471eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3448f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3449c6fd2807SJeff Garzik cnt++; 3450c6fd2807SJeff Garzik return cnt; 3451c6fd2807SJeff Garzik } 3452c6fd2807SJeff Garzik 34530260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3454c6fd2807SJeff Garzik { 3455672b2d65STejun Heo struct ata_port *ap = link->ap; 34560260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3457f58229f8STejun Heo struct ata_device *dev; 3458c6fd2807SJeff Garzik 3459f9df58cbSTejun Heo /* skip disabled links */ 3460f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3461f9df58cbSTejun Heo return 1; 3462f9df58cbSTejun Heo 3463e2f3d75fSTejun Heo /* skip if explicitly requested */ 3464e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3465e2f3d75fSTejun Heo return 1; 3466e2f3d75fSTejun Heo 3467672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3468672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3469672b2d65STejun Heo return 0; 3470672b2d65STejun Heo 3471672b2d65STejun Heo /* reset at least once if reset is requested 
*/ 3472672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3473672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3474c6fd2807SJeff Garzik return 0; 3475c6fd2807SJeff Garzik 3476c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 34771eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3478c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3479c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3480c6fd2807SJeff Garzik return 0; 3481c6fd2807SJeff Garzik } 3482c6fd2807SJeff Garzik 3483c6fd2807SJeff Garzik return 1; 3484c6fd2807SJeff Garzik } 3485c6fd2807SJeff Garzik 3486c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3487c2c7a89cSTejun Heo { 3488c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3489c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3490c2c7a89cSTejun Heo int *trials = void_arg; 3491c2c7a89cSTejun Heo 3492c2c7a89cSTejun Heo if (ent->timestamp < now - min(now, interval)) 3493c2c7a89cSTejun Heo return -1; 3494c2c7a89cSTejun Heo 3495c2c7a89cSTejun Heo (*trials)++; 3496c2c7a89cSTejun Heo return 0; 3497c2c7a89cSTejun Heo } 3498c2c7a89cSTejun Heo 349902c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 350002c05a27STejun Heo { 350102c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3502c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3503c2c7a89cSTejun Heo int trials = 0; 350402c05a27STejun Heo 350502c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 350602c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 350702c05a27STejun Heo return 0; 350802c05a27STejun Heo 350902c05a27STejun Heo ata_eh_detach_dev(dev); 351002c05a27STejun Heo ata_dev_init(dev); 351102c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3512cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 351300115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 351400115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 351502c05a27STejun Heo 35166b7ae954STejun Heo /* the link maybe in a deep sleep, wake it up */ 35176c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 35186c8ea89cSTejun Heo if (ata_is_host_link(link)) 35196c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 35206c8ea89cSTejun Heo ATA_LPM_EMPTY); 35216c8ea89cSTejun Heo else 35226c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 35236c8ea89cSTejun Heo ATA_LPM_EMPTY); 35246c8ea89cSTejun Heo } 35256b7ae954STejun Heo 3526c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3527c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3528c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3529c2c7a89cSTejun Heo * there are consecutive failed probes. 3530c2c7a89cSTejun Heo * 3531c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3532c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3533c2c7a89cSTejun Heo * forced to 1.5Gbps. 3534c2c7a89cSTejun Heo * 3535c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3536c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3537c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 
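 *
 * Concretely: ata_ering_map() below counts the probe entries
 * recorded within the last ATA_EH_PROBE_TRIAL_INTERVAL, and once
 * that count exceeds ATA_EH_PROBE_TRIALS the link speed is
 * limited to 1.5Gbps via sata_down_spd_limit(link, 1).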
3538c2c7a89cSTejun Heo */ 3539c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3540c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3541c2c7a89cSTejun Heo 3542c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3543c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3544c2c7a89cSTejun Heo 354502c05a27STejun Heo return 1; 354602c05a27STejun Heo } 354702c05a27STejun Heo 35489b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3549fee7ca72STejun Heo { 35509af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3551fee7ca72STejun Heo 3552cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3553cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3554cf9a590aSTejun Heo */ 3555cf9a590aSTejun Heo if (err != -EAGAIN) 3556fee7ca72STejun Heo ehc->tries[dev->devno]--; 3557fee7ca72STejun Heo 3558fee7ca72STejun Heo switch (err) { 3559fee7ca72STejun Heo case -ENODEV: 3560fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3561fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3562fee7ca72STejun Heo case -EINVAL: 3563fee7ca72STejun Heo /* give it just one more chance */ 3564fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3565fee7ca72STejun Heo case -EIO: 3566d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3567fee7ca72STejun Heo /* This is the last chance, better to slow 3568fee7ca72STejun Heo * down than lose it. 3569fee7ca72STejun Heo */ 3570a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3571d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3572fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3573fee7ca72STejun Heo } 3574fee7ca72STejun Heo } 3575fee7ca72STejun Heo 3576fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3577fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3578fee7ca72STejun Heo ata_dev_disable(dev); 3579fee7ca72STejun Heo 3580fee7ca72STejun Heo /* detach if offline */ 3581b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3582fee7ca72STejun Heo ata_eh_detach_dev(dev); 3583fee7ca72STejun Heo 358402c05a27STejun Heo /* schedule probe if necessary */ 358587fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3586fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 358787fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 358887fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 358987fbc5a0STejun Heo } 35909b1e2658STejun Heo 35919b1e2658STejun Heo return 1; 3592fee7ca72STejun Heo } else { 3593cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 35949b1e2658STejun Heo return 0; 3595fee7ca72STejun Heo } 3596fee7ca72STejun Heo } 3597fee7ca72STejun Heo 3598c6fd2807SJeff Garzik /** 3599c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3600c6fd2807SJeff Garzik * @ap: host port to recover 3601c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3602c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3603c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3604c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 36059b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3606c6fd2807SJeff Garzik * 3607c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3608c6fd2807SJeff Garzik * libata exception handling. 
 *	On entry, actions required to recover each link and hotplug
 *	requests are recorded in the link's eh_context.  This function
 *	executes all the operations with appropriate retries and
 *	fallbacks to resurrect failed devices, detach goners and greet
 *	newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
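		/*
		 * Per-link recovery below: revalidate and attach devices,
		 * set the transfer mode, clear UA after a reset, retry
		 * FLUSH if needed and apply the target LPM policy.  Any
		 * failure jumps to rest_fail so the sequence can be retried.
		 */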
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
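	/* Bail out unless a PM request is pending and it actually asks
	 * for suspend; PM_EVENT_ON means a resume request, which is
	 * handled by ata_eh_handle_port_resume() below.
	 */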
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended, and PHY events during resume aren't uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
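
/*
 * Illustrative sketch (not part of libata itself): a low-level driver with
 * no special EH needs reaches the standard path above simply by pointing
 * ->error_handler at ata_std_error_handler(), either directly or by
 * inheriting one of the stock ops tables.  The driver name below is
 * hypothetical.
 *
 *	static struct ata_port_operations foo_sata_ops = {
 *		.inherits	= &sata_port_ops,
 *		.error_handler	= ata_std_error_handler,
 *		.hardreset	= sata_std_hardreset,
 *	};
 *
 * When SCSI EH runs for the port, libata invokes ap->ops->error_handler(),
 * here ata_std_error_handler(), which feeds the port's prereset, softreset,
 * hardreset and postreset methods through ata_do_eh() into ata_eh_recover().
 */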