1c6fd2807SJeff Garzik /* 2c6fd2807SJeff Garzik * libata-eh.c - libata error handling 3c6fd2807SJeff Garzik * 4c6fd2807SJeff Garzik * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5c6fd2807SJeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org 6c6fd2807SJeff Garzik * on emails. 7c6fd2807SJeff Garzik * 8c6fd2807SJeff Garzik * Copyright 2006 Tejun Heo <htejun@gmail.com> 9c6fd2807SJeff Garzik * 10c6fd2807SJeff Garzik * 11c6fd2807SJeff Garzik * This program is free software; you can redistribute it and/or 12c6fd2807SJeff Garzik * modify it under the terms of the GNU General Public License as 13c6fd2807SJeff Garzik * published by the Free Software Foundation; either version 2, or 14c6fd2807SJeff Garzik * (at your option) any later version. 15c6fd2807SJeff Garzik * 16c6fd2807SJeff Garzik * This program is distributed in the hope that it will be useful, 17c6fd2807SJeff Garzik * but WITHOUT ANY WARRANTY; without even the implied warranty of 18c6fd2807SJeff Garzik * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19c6fd2807SJeff Garzik * General Public License for more details. 20c6fd2807SJeff Garzik * 21c6fd2807SJeff Garzik * You should have received a copy of the GNU General Public License 22c6fd2807SJeff Garzik * along with this program; see the file COPYING. If not, write to 23c6fd2807SJeff Garzik * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 24c6fd2807SJeff Garzik * USA. 25c6fd2807SJeff Garzik * 26c6fd2807SJeff Garzik * 27c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 28c6fd2807SJeff Garzik * as Documentation/DocBook/libata.* 29c6fd2807SJeff Garzik * 30c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 31c6fd2807SJeff Garzik * http://www.sata-io.org/ 32c6fd2807SJeff Garzik * 33c6fd2807SJeff Garzik */ 34c6fd2807SJeff Garzik 35c6fd2807SJeff Garzik #include <linux/kernel.h> 36242f9dcbSJens Axboe #include <linux/blkdev.h> 372855568bSJeff Garzik #include <linux/pci.h> 38c6fd2807SJeff Garzik #include <scsi/scsi.h> 39c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 40c6fd2807SJeff Garzik #include <scsi/scsi_eh.h> 41c6fd2807SJeff Garzik #include <scsi/scsi_device.h> 42c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 436521148cSRobert Hancock #include <scsi/scsi_dbg.h> 44c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h" 45c6fd2807SJeff Garzik 46c6fd2807SJeff Garzik #include <linux/libata.h> 47c6fd2807SJeff Garzik 48c6fd2807SJeff Garzik #include "libata.h" 49c6fd2807SJeff Garzik 507d47e8d4STejun Heo enum { 513884f7b0STejun Heo /* speed down verdicts */ 527d47e8d4STejun Heo ATA_EH_SPDN_NCQ_OFF = (1 << 0), 537d47e8d4STejun Heo ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 547d47e8d4STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 5576326ac1STejun Heo ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 563884f7b0STejun Heo 573884f7b0STejun Heo /* error flags */ 583884f7b0STejun Heo ATA_EFLAG_IS_IO = (1 << 0), 5976326ac1STejun Heo ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 60d9027470SGwendal Grignou ATA_EFLAG_OLD_ER = (1 << 31), 613884f7b0STejun Heo 623884f7b0STejun Heo /* error categories */ 633884f7b0STejun Heo ATA_ECAT_NONE = 0, 643884f7b0STejun Heo ATA_ECAT_ATA_BUS = 1, 653884f7b0STejun Heo ATA_ECAT_TOUT_HSM = 2, 663884f7b0STejun Heo ATA_ECAT_UNK_DEV = 3, 6775f9cafcSTejun Heo ATA_ECAT_DUBIOUS_NONE = 4, 6875f9cafcSTejun Heo ATA_ECAT_DUBIOUS_ATA_BUS = 5, 6975f9cafcSTejun Heo ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 7075f9cafcSTejun Heo ATA_ECAT_DUBIOUS_UNK_DEV = 7, 7175f9cafcSTejun Heo ATA_ECAT_NR = 8, 727d47e8d4STejun Heo 
7387fbc5a0STejun Heo ATA_EH_CMD_DFL_TIMEOUT = 5000, 7487fbc5a0STejun Heo 750a2c0f56STejun Heo /* always put at least this amount of time between resets */ 760a2c0f56STejun Heo ATA_EH_RESET_COOL_DOWN = 5000, 770a2c0f56STejun Heo 78341c2c95STejun Heo /* Waiting in ->prereset can never be reliable. It's 79341c2c95STejun Heo * sometimes nice to wait there but it can't be depended upon; 80341c2c95STejun Heo * otherwise, we wouldn't be resetting. Just give it enough 81341c2c95STejun Heo * time for most drives to spin up. 8231daabdaSTejun Heo */ 83341c2c95STejun Heo ATA_EH_PRERESET_TIMEOUT = 10000, 84341c2c95STejun Heo ATA_EH_FASTDRAIN_INTERVAL = 3000, 8511fc33daSTejun Heo 8611fc33daSTejun Heo ATA_EH_UA_TRIES = 5, 87c2c7a89cSTejun Heo 88c2c7a89cSTejun Heo /* probe speed down parameters, see ata_eh_schedule_probe() */ 89c2c7a89cSTejun Heo ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 90c2c7a89cSTejun Heo ATA_EH_PROBE_TRIALS = 2, 9131daabdaSTejun Heo }; 9231daabdaSTejun Heo 9331daabdaSTejun Heo /* The following table determines how we sequence resets. Each entry 9431daabdaSTejun Heo * represents timeout for that try. The first try can be soft or 9531daabdaSTejun Heo * hardreset. All others are hardreset if available. In most cases 9631daabdaSTejun Heo * the first reset w/ 10sec timeout should succeed. Following entries 9731daabdaSTejun Heo * are mostly for error handling, hotplug and retarded devices. 9831daabdaSTejun Heo */ 9931daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = { 100341c2c95STejun Heo 10000, /* most drives spin up by 10sec */ 101341c2c95STejun Heo 10000, /* > 99% working drives spin up before 20sec */ 102341c2c95STejun Heo 35000, /* give > 30 secs of idleness for retarded devices */ 103341c2c95STejun Heo 5000, /* and sweet one last chance */ 104d8af0eb6STejun Heo ULONG_MAX, /* > 1 min has elapsed, give up */ 10531daabdaSTejun Heo }; 10631daabdaSTejun Heo 10787fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = { 10887fbc5a0STejun Heo 5000, /* covers > 99% of successes and not too boring on failures */ 10987fbc5a0STejun Heo 10000, /* combined time till here is enough even for media access */ 11087fbc5a0STejun Heo 30000, /* for true idiots */ 11187fbc5a0STejun Heo ULONG_MAX, 11287fbc5a0STejun Heo }; 11387fbc5a0STejun Heo 1146013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = { 1156013efd8STejun Heo 15000, /* be generous with flush */ 1166013efd8STejun Heo 15000, /* ditto */ 1176013efd8STejun Heo 30000, /* and even more generous */ 1186013efd8STejun Heo ULONG_MAX, 1196013efd8STejun Heo }; 1206013efd8STejun Heo 12187fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = { 12287fbc5a0STejun Heo 5000, /* same rationale as identify timeout */ 12387fbc5a0STejun Heo 10000, /* ditto */ 12487fbc5a0STejun Heo /* but no merciful 30sec for other commands, it just isn't worth it */ 12587fbc5a0STejun Heo ULONG_MAX, 12687fbc5a0STejun Heo }; 12787fbc5a0STejun Heo 12887fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent { 12987fbc5a0STejun Heo const u8 *commands; 13087fbc5a0STejun Heo const unsigned long *timeouts; 13187fbc5a0STejun Heo }; 13287fbc5a0STejun Heo 13387fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal 13487fbc5a0STejun Heo * commands. Each table entry is a command class and matches the 13587fbc5a0STejun Heo * commands the entry applies to and the timeout table to use. 
13687fbc5a0STejun Heo * 13787fbc5a0STejun Heo * On the retry after a command timed out, the next timeout value from 13887fbc5a0STejun Heo * the table is used. If the table doesn't contain further entries, 13987fbc5a0STejun Heo * the last value is used. 14087fbc5a0STejun Heo * 14187fbc5a0STejun Heo * ehc->cmd_timeout_idx keeps track of which timeout to use per 14287fbc5a0STejun Heo * command class, so if SET_FEATURES times out on the first try, the 14387fbc5a0STejun Heo * next try will use the second timeout value only for that class. 14487fbc5a0STejun Heo */ 14587fbc5a0STejun Heo #define CMDS(cmds...) (const u8 []){ cmds, 0 } 14687fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent 14787fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 14887fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 14987fbc5a0STejun Heo .timeouts = ata_eh_identify_timeouts, }, 15087fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 15187fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15287fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 15387fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15487fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_FEATURES), 15587fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15687fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 15787fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 1586013efd8STejun Heo { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 1596013efd8STejun Heo .timeouts = ata_eh_flush_timeouts }, 16087fbc5a0STejun Heo }; 16187fbc5a0STejun Heo #undef CMDS 16287fbc5a0STejun Heo 163c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap); 1646ffa01d8STejun Heo #ifdef CONFIG_PM 165c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap); 166c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap); 1676ffa01d8STejun Heo #else /* CONFIG_PM */ 1686ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap) 1696ffa01d8STejun Heo { } 1706ffa01d8STejun Heo 1716ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap) 1726ffa01d8STejun Heo { } 1736ffa01d8STejun Heo #endif /* CONFIG_PM */ 174c6fd2807SJeff Garzik 175b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 176b64bbc39STejun Heo va_list args) 177b64bbc39STejun Heo { 178b64bbc39STejun Heo ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 179b64bbc39STejun Heo ATA_EH_DESC_LEN - ehi->desc_len, 180b64bbc39STejun Heo fmt, args); 181b64bbc39STejun Heo } 182b64bbc39STejun Heo 183b64bbc39STejun Heo /** 184b64bbc39STejun Heo * __ata_ehi_push_desc - push error description without adding separator 185b64bbc39STejun Heo * @ehi: target EHI 186b64bbc39STejun Heo * @fmt: printf format string 187b64bbc39STejun Heo * 188b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 189b64bbc39STejun Heo * 190b64bbc39STejun Heo * LOCKING: 191b64bbc39STejun Heo * spin_lock_irqsave(host lock) 192b64bbc39STejun Heo */ 193b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 
194b64bbc39STejun Heo { 195b64bbc39STejun Heo va_list args; 196b64bbc39STejun Heo 197b64bbc39STejun Heo va_start(args, fmt); 198b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 199b64bbc39STejun Heo va_end(args); 200b64bbc39STejun Heo } 201b64bbc39STejun Heo 202b64bbc39STejun Heo /** 203b64bbc39STejun Heo * ata_ehi_push_desc - push error description with separator 204b64bbc39STejun Heo * @ehi: target EHI 205b64bbc39STejun Heo * @fmt: printf format string 206b64bbc39STejun Heo * 207b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 208b64bbc39STejun Heo * If @ehi->desc is not empty, ", " is added in-between. 209b64bbc39STejun Heo * 210b64bbc39STejun Heo * LOCKING: 211b64bbc39STejun Heo * spin_lock_irqsave(host lock) 212b64bbc39STejun Heo */ 213b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 214b64bbc39STejun Heo { 215b64bbc39STejun Heo va_list args; 216b64bbc39STejun Heo 217b64bbc39STejun Heo if (ehi->desc_len) 218b64bbc39STejun Heo __ata_ehi_push_desc(ehi, ", "); 219b64bbc39STejun Heo 220b64bbc39STejun Heo va_start(args, fmt); 221b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 222b64bbc39STejun Heo va_end(args); 223b64bbc39STejun Heo } 224b64bbc39STejun Heo 225b64bbc39STejun Heo /** 226b64bbc39STejun Heo * ata_ehi_clear_desc - clean error description 227b64bbc39STejun Heo * @ehi: target EHI 228b64bbc39STejun Heo * 229b64bbc39STejun Heo * Clear @ehi->desc. 230b64bbc39STejun Heo * 231b64bbc39STejun Heo * LOCKING: 232b64bbc39STejun Heo * spin_lock_irqsave(host lock) 233b64bbc39STejun Heo */ 234b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi) 235b64bbc39STejun Heo { 236b64bbc39STejun Heo ehi->desc[0] = '\0'; 237b64bbc39STejun Heo ehi->desc_len = 0; 238b64bbc39STejun Heo } 239b64bbc39STejun Heo 240cbcdd875STejun Heo /** 241cbcdd875STejun Heo * ata_port_desc - append port description 242cbcdd875STejun Heo * @ap: target ATA port 243cbcdd875STejun Heo * @fmt: printf format string 244cbcdd875STejun Heo * 245cbcdd875STejun Heo * Format string according to @fmt and append it to port 246cbcdd875STejun Heo * description. If port description is not empty, " " is added 247cbcdd875STejun Heo * in-between. This function is to be used while initializing 248cbcdd875STejun Heo * ata_host. The description is printed on host registration. 249cbcdd875STejun Heo * 250cbcdd875STejun Heo * LOCKING: 251cbcdd875STejun Heo * None. 252cbcdd875STejun Heo */ 253cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 
254cbcdd875STejun Heo { 255cbcdd875STejun Heo va_list args; 256cbcdd875STejun Heo 257cbcdd875STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 258cbcdd875STejun Heo 259cbcdd875STejun Heo if (ap->link.eh_info.desc_len) 260cbcdd875STejun Heo __ata_ehi_push_desc(&ap->link.eh_info, " "); 261cbcdd875STejun Heo 262cbcdd875STejun Heo va_start(args, fmt); 263cbcdd875STejun Heo __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 264cbcdd875STejun Heo va_end(args); 265cbcdd875STejun Heo } 266cbcdd875STejun Heo 267cbcdd875STejun Heo #ifdef CONFIG_PCI 268cbcdd875STejun Heo 269cbcdd875STejun Heo /** 270cbcdd875STejun Heo * ata_port_pbar_desc - append PCI BAR description 271cbcdd875STejun Heo * @ap: target ATA port 272cbcdd875STejun Heo * @bar: target PCI BAR 273cbcdd875STejun Heo * @offset: offset into PCI BAR 274cbcdd875STejun Heo * @name: name of the area 275cbcdd875STejun Heo * 276cbcdd875STejun Heo * If @offset is negative, this function formats a string which 277cbcdd875STejun Heo * contains the name, address, size and type of the BAR and 278cbcdd875STejun Heo * appends it to the port description. If @offset is zero or 279cbcdd875STejun Heo * positive, only name and offsetted address is appended. 280cbcdd875STejun Heo * 281cbcdd875STejun Heo * LOCKING: 282cbcdd875STejun Heo * None. 283cbcdd875STejun Heo */ 284cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 285cbcdd875STejun Heo const char *name) 286cbcdd875STejun Heo { 287cbcdd875STejun Heo struct pci_dev *pdev = to_pci_dev(ap->host->dev); 288cbcdd875STejun Heo char *type = ""; 289cbcdd875STejun Heo unsigned long long start, len; 290cbcdd875STejun Heo 291cbcdd875STejun Heo if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 292cbcdd875STejun Heo type = "m"; 293cbcdd875STejun Heo else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 294cbcdd875STejun Heo type = "i"; 295cbcdd875STejun Heo 296cbcdd875STejun Heo start = (unsigned long long)pci_resource_start(pdev, bar); 297cbcdd875STejun Heo len = (unsigned long long)pci_resource_len(pdev, bar); 298cbcdd875STejun Heo 299cbcdd875STejun Heo if (offset < 0) 300cbcdd875STejun Heo ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 301cbcdd875STejun Heo else 302e6a73ab1SAndrew Morton ata_port_desc(ap, "%s 0x%llx", name, 303e6a73ab1SAndrew Morton start + (unsigned long long)offset); 304cbcdd875STejun Heo } 305cbcdd875STejun Heo 306cbcdd875STejun Heo #endif /* CONFIG_PCI */ 307cbcdd875STejun Heo 30887fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd) 30987fbc5a0STejun Heo { 31087fbc5a0STejun Heo int i; 31187fbc5a0STejun Heo 31287fbc5a0STejun Heo for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 31387fbc5a0STejun Heo const u8 *cur; 31487fbc5a0STejun Heo 31587fbc5a0STejun Heo for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 31687fbc5a0STejun Heo if (*cur == cmd) 31787fbc5a0STejun Heo return i; 31887fbc5a0STejun Heo } 31987fbc5a0STejun Heo 32087fbc5a0STejun Heo return -1; 32187fbc5a0STejun Heo } 32287fbc5a0STejun Heo 32387fbc5a0STejun Heo /** 32487fbc5a0STejun Heo * ata_internal_cmd_timeout - determine timeout for an internal command 32587fbc5a0STejun Heo * @dev: target device 32687fbc5a0STejun Heo * @cmd: internal command to be issued 32787fbc5a0STejun Heo * 32887fbc5a0STejun Heo * Determine timeout for internal command @cmd for @dev. 32987fbc5a0STejun Heo * 33087fbc5a0STejun Heo * LOCKING: 33187fbc5a0STejun Heo * EH context. 
33287fbc5a0STejun Heo * 33387fbc5a0STejun Heo * RETURNS: 33487fbc5a0STejun Heo * Determined timeout. 33587fbc5a0STejun Heo */ 33687fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 33787fbc5a0STejun Heo { 33887fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 33987fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 34087fbc5a0STejun Heo int idx; 34187fbc5a0STejun Heo 34287fbc5a0STejun Heo if (ent < 0) 34387fbc5a0STejun Heo return ATA_EH_CMD_DFL_TIMEOUT; 34487fbc5a0STejun Heo 34587fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 34687fbc5a0STejun Heo return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 34787fbc5a0STejun Heo } 34887fbc5a0STejun Heo 34987fbc5a0STejun Heo /** 35087fbc5a0STejun Heo * ata_internal_cmd_timed_out - notification for internal command timeout 35187fbc5a0STejun Heo * @dev: target device 35287fbc5a0STejun Heo * @cmd: internal command which timed out 35387fbc5a0STejun Heo * 35487fbc5a0STejun Heo * Notify EH that internal command @cmd for @dev timed out. This 35587fbc5a0STejun Heo * function should be called only for commands whose timeouts are 35687fbc5a0STejun Heo * determined using ata_internal_cmd_timeout(). 35787fbc5a0STejun Heo * 35887fbc5a0STejun Heo * LOCKING: 35987fbc5a0STejun Heo * EH context. 36087fbc5a0STejun Heo */ 36187fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 36287fbc5a0STejun Heo { 36387fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 36487fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 36587fbc5a0STejun Heo int idx; 36687fbc5a0STejun Heo 36787fbc5a0STejun Heo if (ent < 0) 36887fbc5a0STejun Heo return; 36987fbc5a0STejun Heo 37087fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 37187fbc5a0STejun Heo if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) 37287fbc5a0STejun Heo ehc->cmd_timeout_idx[dev->devno][ent]++; 37387fbc5a0STejun Heo } 37487fbc5a0STejun Heo 3753884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 376c6fd2807SJeff Garzik unsigned int err_mask) 377c6fd2807SJeff Garzik { 378c6fd2807SJeff Garzik struct ata_ering_entry *ent; 379c6fd2807SJeff Garzik 380c6fd2807SJeff Garzik WARN_ON(!err_mask); 381c6fd2807SJeff Garzik 382c6fd2807SJeff Garzik ering->cursor++; 383c6fd2807SJeff Garzik ering->cursor %= ATA_ERING_SIZE; 384c6fd2807SJeff Garzik 385c6fd2807SJeff Garzik ent = &ering->ring[ering->cursor]; 3863884f7b0STejun Heo ent->eflags = eflags; 387c6fd2807SJeff Garzik ent->err_mask = err_mask; 388c6fd2807SJeff Garzik ent->timestamp = get_jiffies_64(); 389c6fd2807SJeff Garzik } 390c6fd2807SJeff Garzik 39176326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 39276326ac1STejun Heo { 39376326ac1STejun Heo struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 39476326ac1STejun Heo 39576326ac1STejun Heo if (ent->err_mask) 39676326ac1STejun Heo return ent; 39776326ac1STejun Heo return NULL; 39876326ac1STejun Heo } 39976326ac1STejun Heo 400d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering, 401c6fd2807SJeff Garzik int (*map_fn)(struct ata_ering_entry *, void *), 402c6fd2807SJeff Garzik void *arg) 403c6fd2807SJeff Garzik { 404c6fd2807SJeff Garzik int idx, rc = 0; 405c6fd2807SJeff Garzik struct ata_ering_entry *ent; 406c6fd2807SJeff Garzik 407c6fd2807SJeff Garzik idx = ering->cursor; 408c6fd2807SJeff Garzik do { 409c6fd2807SJeff Garzik ent = &ering->ring[idx]; 410c6fd2807SJeff Garzik if 
(!ent->err_mask) 411c6fd2807SJeff Garzik break; 412c6fd2807SJeff Garzik rc = map_fn(ent, arg); 413c6fd2807SJeff Garzik if (rc) 414c6fd2807SJeff Garzik break; 415c6fd2807SJeff Garzik idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 416c6fd2807SJeff Garzik } while (idx != ering->cursor); 417c6fd2807SJeff Garzik 418c6fd2807SJeff Garzik return rc; 419c6fd2807SJeff Garzik } 420c6fd2807SJeff Garzik 421d9027470SGwendal Grignou int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) 422d9027470SGwendal Grignou { 423d9027470SGwendal Grignou ent->eflags |= ATA_EFLAG_OLD_ER; 424d9027470SGwendal Grignou return 0; 425d9027470SGwendal Grignou } 426d9027470SGwendal Grignou 427d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering) 428d9027470SGwendal Grignou { 429d9027470SGwendal Grignou ata_ering_map(ering, ata_ering_clear_cb, NULL); 430d9027470SGwendal Grignou } 431d9027470SGwendal Grignou 432c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev) 433c6fd2807SJeff Garzik { 4349af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 435c6fd2807SJeff Garzik 436c6fd2807SJeff Garzik return ehc->i.action | ehc->i.dev_action[dev->devno]; 437c6fd2807SJeff Garzik } 438c6fd2807SJeff Garzik 439f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 440c6fd2807SJeff Garzik struct ata_eh_info *ehi, unsigned int action) 441c6fd2807SJeff Garzik { 442f58229f8STejun Heo struct ata_device *tdev; 443c6fd2807SJeff Garzik 444c6fd2807SJeff Garzik if (!dev) { 445c6fd2807SJeff Garzik ehi->action &= ~action; 4461eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 447f58229f8STejun Heo ehi->dev_action[tdev->devno] &= ~action; 448c6fd2807SJeff Garzik } else { 449c6fd2807SJeff Garzik /* doesn't make sense for port-wide EH actions */ 450c6fd2807SJeff Garzik WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 451c6fd2807SJeff Garzik 452c6fd2807SJeff Garzik /* break ehi->action into ehi->dev_action */ 453c6fd2807SJeff Garzik if (ehi->action & action) { 4541eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 455f58229f8STejun Heo ehi->dev_action[tdev->devno] |= 456f58229f8STejun Heo ehi->action & action; 457c6fd2807SJeff Garzik ehi->action &= ~action; 458c6fd2807SJeff Garzik } 459c6fd2807SJeff Garzik 460c6fd2807SJeff Garzik /* turn off the specified per-dev action */ 461c6fd2807SJeff Garzik ehi->dev_action[dev->devno] &= ~action; 462c6fd2807SJeff Garzik } 463c6fd2807SJeff Garzik } 464c6fd2807SJeff Garzik 465c6fd2807SJeff Garzik /** 466c6fd2807SJeff Garzik * ata_scsi_timed_out - SCSI layer time out callback 467c6fd2807SJeff Garzik * @cmd: timed out SCSI command 468c6fd2807SJeff Garzik * 469c6fd2807SJeff Garzik * Handles SCSI layer timeout. We race with normal completion of 470c6fd2807SJeff Garzik * the qc for @cmd. If the qc is already gone, we lose and let 471c6fd2807SJeff Garzik * the scsi command finish (EH_HANDLED). Otherwise, the qc has 472c6fd2807SJeff Garzik * timed out and EH should be invoked. Prevent ata_qc_complete() 473c6fd2807SJeff Garzik * from finishing it by setting EH_SCHEDULED and return 474c6fd2807SJeff Garzik * EH_NOT_HANDLED. 475c6fd2807SJeff Garzik * 476c6fd2807SJeff Garzik * TODO: kill this function once old EH is gone. 
477c6fd2807SJeff Garzik * 478c6fd2807SJeff Garzik * LOCKING: 479c6fd2807SJeff Garzik * Called from timer context 480c6fd2807SJeff Garzik * 481c6fd2807SJeff Garzik * RETURNS: 482c6fd2807SJeff Garzik * EH_HANDLED or EH_NOT_HANDLED 483c6fd2807SJeff Garzik */ 484242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 485c6fd2807SJeff Garzik { 486c6fd2807SJeff Garzik struct Scsi_Host *host = cmd->device->host; 487c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 488c6fd2807SJeff Garzik unsigned long flags; 489c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 490242f9dcbSJens Axboe enum blk_eh_timer_return ret; 491c6fd2807SJeff Garzik 492c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 493c6fd2807SJeff Garzik 494c6fd2807SJeff Garzik if (ap->ops->error_handler) { 495242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 496c6fd2807SJeff Garzik goto out; 497c6fd2807SJeff Garzik } 498c6fd2807SJeff Garzik 499242f9dcbSJens Axboe ret = BLK_EH_HANDLED; 500c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 5019af5c9c9STejun Heo qc = ata_qc_from_tag(ap, ap->link.active_tag); 502c6fd2807SJeff Garzik if (qc) { 503c6fd2807SJeff Garzik WARN_ON(qc->scsicmd != cmd); 504c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 505c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 506242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 507c6fd2807SJeff Garzik } 508c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 509c6fd2807SJeff Garzik 510c6fd2807SJeff Garzik out: 511c6fd2807SJeff Garzik DPRINTK("EXIT, ret=%d\n", ret); 512c6fd2807SJeff Garzik return ret; 513c6fd2807SJeff Garzik } 514c6fd2807SJeff Garzik 515ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap) 516ece180d1STejun Heo { 517ece180d1STejun Heo struct ata_link *link; 518ece180d1STejun Heo struct ata_device *dev; 519ece180d1STejun Heo unsigned long flags; 520ece180d1STejun Heo 521ece180d1STejun Heo /* Restore SControl IPM and SPD for the next driver and 522ece180d1STejun Heo * disable attached devices. 523ece180d1STejun Heo */ 524ece180d1STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 525ece180d1STejun Heo sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); 526ece180d1STejun Heo ata_for_each_dev(dev, link, ALL) 527ece180d1STejun Heo ata_dev_disable(dev); 528ece180d1STejun Heo } 529ece180d1STejun Heo 530ece180d1STejun Heo /* freeze and set UNLOADED */ 531ece180d1STejun Heo spin_lock_irqsave(ap->lock, flags); 532ece180d1STejun Heo 533ece180d1STejun Heo ata_port_freeze(ap); /* won't be thawed */ 534ece180d1STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 535ece180d1STejun Heo ap->pflags |= ATA_PFLAG_UNLOADED; 536ece180d1STejun Heo 537ece180d1STejun Heo spin_unlock_irqrestore(ap->lock, flags); 538ece180d1STejun Heo } 539ece180d1STejun Heo 540c6fd2807SJeff Garzik /** 541c6fd2807SJeff Garzik * ata_scsi_error - SCSI layer error handler callback 542c6fd2807SJeff Garzik * @host: SCSI host on which error occurred 543c6fd2807SJeff Garzik * 544c6fd2807SJeff Garzik * Handles SCSI-layer-thrown error events. 545c6fd2807SJeff Garzik * 546c6fd2807SJeff Garzik * LOCKING: 547c6fd2807SJeff Garzik * Inherited from SCSI layer (none, can sleep) 548c6fd2807SJeff Garzik * 549c6fd2807SJeff Garzik * RETURNS: 550c6fd2807SJeff Garzik * Zero. 
551c6fd2807SJeff Garzik */ 552c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host) 553c6fd2807SJeff Garzik { 554c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 555a1e10f7eSTejun Heo int i; 556c6fd2807SJeff Garzik unsigned long flags; 557c6fd2807SJeff Garzik 558c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 559c6fd2807SJeff Garzik 560c429137aSTejun Heo /* make sure sff pio task is not running */ 561c429137aSTejun Heo ata_sff_flush_pio_task(ap); 562c6fd2807SJeff Garzik 563cca3974eSJeff Garzik /* synchronize with host lock and sort out timeouts */ 564c6fd2807SJeff Garzik 565c6fd2807SJeff Garzik /* For new EH, all qcs are finished in one of three ways - 566c6fd2807SJeff Garzik * normal completion, error completion, and SCSI timeout. 567c96f1732SAlan Cox * Both completions can race against SCSI timeout. When normal 568c6fd2807SJeff Garzik * completion wins, the qc never reaches EH. When error 569c6fd2807SJeff Garzik * completion wins, the qc has ATA_QCFLAG_FAILED set. 570c6fd2807SJeff Garzik * 571c6fd2807SJeff Garzik * When SCSI timeout wins, things are a bit more complex. 572c6fd2807SJeff Garzik * Normal or error completion can occur after the timeout but 573c6fd2807SJeff Garzik * before this point. In such cases, both types of 574c6fd2807SJeff Garzik * completions are honored. A scmd is determined to have 575c6fd2807SJeff Garzik * timed out iff its associated qc is active and not failed. 576c6fd2807SJeff Garzik */ 577c6fd2807SJeff Garzik if (ap->ops->error_handler) { 578c6fd2807SJeff Garzik struct scsi_cmnd *scmd, *tmp; 579c6fd2807SJeff Garzik int nr_timedout = 0; 580c6fd2807SJeff Garzik 581c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 582c6fd2807SJeff Garzik 583c96f1732SAlan Cox /* This must occur under the ap->lock as we don't want 584c96f1732SAlan Cox a polled recovery to race the real interrupt handler 585c96f1732SAlan Cox 586c96f1732SAlan Cox The lost_interrupt handler checks for any completed but 587c96f1732SAlan Cox non-notified command and completes much like an IRQ handler. 588c96f1732SAlan Cox 589c96f1732SAlan Cox We then fall into the error recovery code which will treat 590c96f1732SAlan Cox this as if normal completion won the race */ 591c96f1732SAlan Cox 592c96f1732SAlan Cox if (ap->ops->lost_interrupt) 593c96f1732SAlan Cox ap->ops->lost_interrupt(ap); 594c96f1732SAlan Cox 595c6fd2807SJeff Garzik list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 596c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 597c6fd2807SJeff Garzik 598c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE; i++) { 599c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, i); 600c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE && 601c6fd2807SJeff Garzik qc->scsicmd == scmd) 602c6fd2807SJeff Garzik break; 603c6fd2807SJeff Garzik } 604c6fd2807SJeff Garzik 605c6fd2807SJeff Garzik if (i < ATA_MAX_QUEUE) { 606c6fd2807SJeff Garzik /* the scmd has an associated qc */ 607c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) { 608c6fd2807SJeff Garzik /* which hasn't failed yet, timeout */ 609c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 610c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 611c6fd2807SJeff Garzik nr_timedout++; 612c6fd2807SJeff Garzik } 613c6fd2807SJeff Garzik } else { 614c6fd2807SJeff Garzik /* Normal completion occurred after 615c6fd2807SJeff Garzik * SCSI timeout but before this point. 616c6fd2807SJeff Garzik * Successfully complete it. 
617c6fd2807SJeff Garzik */ 618c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 619c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 620c6fd2807SJeff Garzik } 621c6fd2807SJeff Garzik } 622c6fd2807SJeff Garzik 623c6fd2807SJeff Garzik /* If we have timed out qcs. They belong to EH from 624c6fd2807SJeff Garzik * this point but the state of the controller is 625c6fd2807SJeff Garzik * unknown. Freeze the port to make sure the IRQ 626c6fd2807SJeff Garzik * handler doesn't diddle with those qcs. This must 627c6fd2807SJeff Garzik * be done atomically w.r.t. setting QCFLAG_FAILED. 628c6fd2807SJeff Garzik */ 629c6fd2807SJeff Garzik if (nr_timedout) 630c6fd2807SJeff Garzik __ata_port_freeze(ap); 631c6fd2807SJeff Garzik 632c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 633a1e10f7eSTejun Heo 634a1e10f7eSTejun Heo /* initialize eh_tries */ 635a1e10f7eSTejun Heo ap->eh_tries = ATA_EH_MAX_TRIES; 636c6fd2807SJeff Garzik } else 637c6fd2807SJeff Garzik spin_unlock_wait(ap->lock); 638c6fd2807SJeff Garzik 639c96f1732SAlan Cox /* If we timed raced normal completion and there is nothing to 640c96f1732SAlan Cox recover nr_timedout == 0 why exactly are we doing error recovery ? */ 641c96f1732SAlan Cox 642c6fd2807SJeff Garzik repeat: 643c6fd2807SJeff Garzik /* invoke error handler */ 644c6fd2807SJeff Garzik if (ap->ops->error_handler) { 645cf1b86c8STejun Heo struct ata_link *link; 646cf1b86c8STejun Heo 6475ddf24c5STejun Heo /* kill fast drain timer */ 6485ddf24c5STejun Heo del_timer_sync(&ap->fastdrain_timer); 6495ddf24c5STejun Heo 650c6fd2807SJeff Garzik /* process port resume request */ 651c6fd2807SJeff Garzik ata_eh_handle_port_resume(ap); 652c6fd2807SJeff Garzik 653c6fd2807SJeff Garzik /* fetch & clear EH info */ 654c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 655c6fd2807SJeff Garzik 6561eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) { 65700115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 65800115e0fSTejun Heo struct ata_device *dev; 65900115e0fSTejun Heo 660cf1b86c8STejun Heo memset(&link->eh_context, 0, sizeof(link->eh_context)); 661cf1b86c8STejun Heo link->eh_context.i = link->eh_info; 662cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 66300115e0fSTejun Heo 6641eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 66500115e0fSTejun Heo int devno = dev->devno; 66600115e0fSTejun Heo 66700115e0fSTejun Heo ehc->saved_xfer_mode[devno] = dev->xfer_mode; 66800115e0fSTejun Heo if (ata_ncq_enabled(dev)) 66900115e0fSTejun Heo ehc->saved_ncq_enabled |= 1 << devno; 67000115e0fSTejun Heo } 671cf1b86c8STejun Heo } 672c6fd2807SJeff Garzik 673c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 674c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_PENDING; 675da917d69STejun Heo ap->excl_link = NULL; /* don't maintain exclusion over EH */ 676c6fd2807SJeff Garzik 677c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 678c6fd2807SJeff Garzik 679c6fd2807SJeff Garzik /* invoke EH, skip if unloading or suspended */ 680c6fd2807SJeff Garzik if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 681c6fd2807SJeff Garzik ap->ops->error_handler(ap); 682ece180d1STejun Heo else { 683ece180d1STejun Heo /* if unloading, commence suicide */ 684ece180d1STejun Heo if ((ap->pflags & ATA_PFLAG_UNLOADING) && 685ece180d1STejun Heo !(ap->pflags & ATA_PFLAG_UNLOADED)) 686ece180d1STejun Heo ata_eh_unload(ap); 687c6fd2807SJeff Garzik ata_eh_finish(ap); 688ece180d1STejun Heo } 689c6fd2807SJeff Garzik 690c6fd2807SJeff Garzik /* process 
port suspend request */ 691c6fd2807SJeff Garzik ata_eh_handle_port_suspend(ap); 692c6fd2807SJeff Garzik 693c6fd2807SJeff Garzik /* Exception might have happend after ->error_handler 694c6fd2807SJeff Garzik * recovered the port but before this point. Repeat 695c6fd2807SJeff Garzik * EH in such case. 696c6fd2807SJeff Garzik */ 697c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 698c6fd2807SJeff Garzik 699c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_EH_PENDING) { 700a1e10f7eSTejun Heo if (--ap->eh_tries) { 701c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 702c6fd2807SJeff Garzik goto repeat; 703c6fd2807SJeff Garzik } 704c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "EH pending after %d " 705a1e10f7eSTejun Heo "tries, giving up\n", ATA_EH_MAX_TRIES); 706914616a3STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 707c6fd2807SJeff Garzik } 708c6fd2807SJeff Garzik 709c6fd2807SJeff Garzik /* this run is complete, make sure EH info is clear */ 7101eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 711cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 712c6fd2807SJeff Garzik 713c6fd2807SJeff Garzik /* Clear host_eh_scheduled while holding ap->lock such 714c6fd2807SJeff Garzik * that if exception occurs after this point but 715c6fd2807SJeff Garzik * before EH completion, SCSI midlayer will 716c6fd2807SJeff Garzik * re-initiate EH. 717c6fd2807SJeff Garzik */ 718c6fd2807SJeff Garzik host->host_eh_scheduled = 0; 719c6fd2807SJeff Garzik 720c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 721c6fd2807SJeff Garzik } else { 7229af5c9c9STejun Heo WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 723c6fd2807SJeff Garzik ap->ops->eng_timeout(ap); 724c6fd2807SJeff Garzik } 725c6fd2807SJeff Garzik 726c6fd2807SJeff Garzik /* finish or retry handled scmd's and clean up */ 727c6fd2807SJeff Garzik WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); 728c6fd2807SJeff Garzik 729c6fd2807SJeff Garzik scsi_eh_flush_done_q(&ap->eh_done_q); 730c6fd2807SJeff Garzik 731c6fd2807SJeff Garzik /* clean up */ 732c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 733c6fd2807SJeff Garzik 734c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_LOADING) 735c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_LOADING; 736c6fd2807SJeff Garzik else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 737ad72cf98STejun Heo schedule_delayed_work(&ap->hotplug_task, 0); 738c6fd2807SJeff Garzik 739c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_RECOVERED) 740c6fd2807SJeff Garzik ata_port_printk(ap, KERN_INFO, "EH complete\n"); 741c6fd2807SJeff Garzik 742c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 743c6fd2807SJeff Garzik 744c6fd2807SJeff Garzik /* tell wait_eh that we're done */ 745c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 746c6fd2807SJeff Garzik wake_up_all(&ap->eh_wait_q); 747c6fd2807SJeff Garzik 748c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 749c6fd2807SJeff Garzik 750c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 751c6fd2807SJeff Garzik } 752c6fd2807SJeff Garzik 753c6fd2807SJeff Garzik /** 754c6fd2807SJeff Garzik * ata_port_wait_eh - Wait for the currently pending EH to complete 755c6fd2807SJeff Garzik * @ap: Port to wait EH for 756c6fd2807SJeff Garzik * 757c6fd2807SJeff Garzik * Wait until the currently pending EH is complete. 758c6fd2807SJeff Garzik * 759c6fd2807SJeff Garzik * LOCKING: 760c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
761c6fd2807SJeff Garzik */ 762c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap) 763c6fd2807SJeff Garzik { 764c6fd2807SJeff Garzik unsigned long flags; 765c6fd2807SJeff Garzik DEFINE_WAIT(wait); 766c6fd2807SJeff Garzik 767c6fd2807SJeff Garzik retry: 768c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 769c6fd2807SJeff Garzik 770c6fd2807SJeff Garzik while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 771c6fd2807SJeff Garzik prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 772c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 773c6fd2807SJeff Garzik schedule(); 774c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 775c6fd2807SJeff Garzik } 776c6fd2807SJeff Garzik finish_wait(&ap->eh_wait_q, &wait); 777c6fd2807SJeff Garzik 778c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 779c6fd2807SJeff Garzik 780c6fd2807SJeff Garzik /* make sure SCSI EH is complete */ 781cca3974eSJeff Garzik if (scsi_host_in_recovery(ap->scsi_host)) { 782c6fd2807SJeff Garzik msleep(10); 783c6fd2807SJeff Garzik goto retry; 784c6fd2807SJeff Garzik } 785c6fd2807SJeff Garzik } 786c6fd2807SJeff Garzik 7875ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap) 7885ddf24c5STejun Heo { 7895ddf24c5STejun Heo unsigned int tag; 7905ddf24c5STejun Heo int nr = 0; 7915ddf24c5STejun Heo 7925ddf24c5STejun Heo /* count only non-internal commands */ 7935ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 7945ddf24c5STejun Heo if (ata_qc_from_tag(ap, tag)) 7955ddf24c5STejun Heo nr++; 7965ddf24c5STejun Heo 7975ddf24c5STejun Heo return nr; 7985ddf24c5STejun Heo } 7995ddf24c5STejun Heo 8005ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg) 8015ddf24c5STejun Heo { 8025ddf24c5STejun Heo struct ata_port *ap = (void *)arg; 8035ddf24c5STejun Heo unsigned long flags; 8045ddf24c5STejun Heo int cnt; 8055ddf24c5STejun Heo 8065ddf24c5STejun Heo spin_lock_irqsave(ap->lock, flags); 8075ddf24c5STejun Heo 8085ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8095ddf24c5STejun Heo 8105ddf24c5STejun Heo /* are we done? */ 8115ddf24c5STejun Heo if (!cnt) 8125ddf24c5STejun Heo goto out_unlock; 8135ddf24c5STejun Heo 8145ddf24c5STejun Heo if (cnt == ap->fastdrain_cnt) { 8155ddf24c5STejun Heo unsigned int tag; 8165ddf24c5STejun Heo 8175ddf24c5STejun Heo /* No progress during the last interval, tag all 8185ddf24c5STejun Heo * in-flight qcs as timed out and freeze the port. 
8195ddf24c5STejun Heo */ 8205ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { 8215ddf24c5STejun Heo struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 8225ddf24c5STejun Heo if (qc) 8235ddf24c5STejun Heo qc->err_mask |= AC_ERR_TIMEOUT; 8245ddf24c5STejun Heo } 8255ddf24c5STejun Heo 8265ddf24c5STejun Heo ata_port_freeze(ap); 8275ddf24c5STejun Heo } else { 8285ddf24c5STejun Heo /* some qcs have finished, give it another chance */ 8295ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 8305ddf24c5STejun Heo ap->fastdrain_timer.expires = 831341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8325ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8335ddf24c5STejun Heo } 8345ddf24c5STejun Heo 8355ddf24c5STejun Heo out_unlock: 8365ddf24c5STejun Heo spin_unlock_irqrestore(ap->lock, flags); 8375ddf24c5STejun Heo } 8385ddf24c5STejun Heo 8395ddf24c5STejun Heo /** 8405ddf24c5STejun Heo * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 8415ddf24c5STejun Heo * @ap: target ATA port 8425ddf24c5STejun Heo * @fastdrain: activate fast drain 8435ddf24c5STejun Heo * 8445ddf24c5STejun Heo * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 8455ddf24c5STejun Heo * is non-zero and EH wasn't pending before. Fast drain ensures 8465ddf24c5STejun Heo * that EH kicks in in timely manner. 8475ddf24c5STejun Heo * 8485ddf24c5STejun Heo * LOCKING: 8495ddf24c5STejun Heo * spin_lock_irqsave(host lock) 8505ddf24c5STejun Heo */ 8515ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 8525ddf24c5STejun Heo { 8535ddf24c5STejun Heo int cnt; 8545ddf24c5STejun Heo 8555ddf24c5STejun Heo /* already scheduled? */ 8565ddf24c5STejun Heo if (ap->pflags & ATA_PFLAG_EH_PENDING) 8575ddf24c5STejun Heo return; 8585ddf24c5STejun Heo 8595ddf24c5STejun Heo ap->pflags |= ATA_PFLAG_EH_PENDING; 8605ddf24c5STejun Heo 8615ddf24c5STejun Heo if (!fastdrain) 8625ddf24c5STejun Heo return; 8635ddf24c5STejun Heo 8645ddf24c5STejun Heo /* do we have in-flight qcs? */ 8655ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8665ddf24c5STejun Heo if (!cnt) 8675ddf24c5STejun Heo return; 8685ddf24c5STejun Heo 8695ddf24c5STejun Heo /* activate fast drain */ 8705ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 871341c2c95STejun Heo ap->fastdrain_timer.expires = 872341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8735ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8745ddf24c5STejun Heo } 8755ddf24c5STejun Heo 876c6fd2807SJeff Garzik /** 877c6fd2807SJeff Garzik * ata_qc_schedule_eh - schedule qc for error handling 878c6fd2807SJeff Garzik * @qc: command to schedule error handling for 879c6fd2807SJeff Garzik * 880c6fd2807SJeff Garzik * Schedule error handling for @qc. EH will kick in as soon as 881c6fd2807SJeff Garzik * other commands are drained. 
882c6fd2807SJeff Garzik * 883c6fd2807SJeff Garzik * LOCKING: 884cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 885c6fd2807SJeff Garzik */ 886c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 887c6fd2807SJeff Garzik { 888c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 889fa41efdaSTejun Heo struct request_queue *q = qc->scsicmd->device->request_queue; 890fa41efdaSTejun Heo unsigned long flags; 891c6fd2807SJeff Garzik 892c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 893c6fd2807SJeff Garzik 894c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 8955ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 896c6fd2807SJeff Garzik 897c6fd2807SJeff Garzik /* The following will fail if timeout has already expired. 898c6fd2807SJeff Garzik * ata_scsi_error() takes care of such scmds on EH entry. 899c6fd2807SJeff Garzik * Note that ATA_QCFLAG_FAILED is unconditionally set after 900c6fd2807SJeff Garzik * this function completes. 901c6fd2807SJeff Garzik */ 902fa41efdaSTejun Heo spin_lock_irqsave(q->queue_lock, flags); 903242f9dcbSJens Axboe blk_abort_request(qc->scsicmd->request); 904fa41efdaSTejun Heo spin_unlock_irqrestore(q->queue_lock, flags); 905c6fd2807SJeff Garzik } 906c6fd2807SJeff Garzik 907c6fd2807SJeff Garzik /** 908c6fd2807SJeff Garzik * ata_port_schedule_eh - schedule error handling without a qc 909c6fd2807SJeff Garzik * @ap: ATA port to schedule EH for 910c6fd2807SJeff Garzik * 911c6fd2807SJeff Garzik * Schedule error handling for @ap. EH will kick in as soon as 912c6fd2807SJeff Garzik * all commands are drained. 913c6fd2807SJeff Garzik * 914c6fd2807SJeff Garzik * LOCKING: 915cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 916c6fd2807SJeff Garzik */ 917c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap) 918c6fd2807SJeff Garzik { 919c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 920c6fd2807SJeff Garzik 921f4d6d004STejun Heo if (ap->pflags & ATA_PFLAG_INITIALIZING) 922f4d6d004STejun Heo return; 923f4d6d004STejun Heo 9245ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 925cca3974eSJeff Garzik scsi_schedule_eh(ap->scsi_host); 926c6fd2807SJeff Garzik 927c6fd2807SJeff Garzik DPRINTK("port EH scheduled\n"); 928c6fd2807SJeff Garzik } 929c6fd2807SJeff Garzik 930dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 931c6fd2807SJeff Garzik { 932c6fd2807SJeff Garzik int tag, nr_aborted = 0; 933c6fd2807SJeff Garzik 934c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 935c6fd2807SJeff Garzik 9365ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */ 9375ddf24c5STejun Heo ata_eh_set_pending(ap, 0); 9385ddf24c5STejun Heo 939c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 940c6fd2807SJeff Garzik struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 941c6fd2807SJeff Garzik 942dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) { 943c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 944c6fd2807SJeff Garzik ata_qc_complete(qc); 945c6fd2807SJeff Garzik nr_aborted++; 946c6fd2807SJeff Garzik } 947c6fd2807SJeff Garzik } 948c6fd2807SJeff Garzik 949c6fd2807SJeff Garzik if (!nr_aborted) 950c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 951c6fd2807SJeff Garzik 952c6fd2807SJeff Garzik return nr_aborted; 953c6fd2807SJeff Garzik } 954c6fd2807SJeff Garzik 955c6fd2807SJeff Garzik /** 956dbd82616STejun Heo * ata_link_abort - abort all qc's on the link 957dbd82616STejun Heo * @link: ATA link to abort qc's for 958dbd82616STejun Heo * 959dbd82616STejun Heo * Abort all active qc's 
active on @link and schedule EH. 960dbd82616STejun Heo * 961dbd82616STejun Heo * LOCKING: 962dbd82616STejun Heo * spin_lock_irqsave(host lock) 963dbd82616STejun Heo * 964dbd82616STejun Heo * RETURNS: 965dbd82616STejun Heo * Number of aborted qc's. 966dbd82616STejun Heo */ 967dbd82616STejun Heo int ata_link_abort(struct ata_link *link) 968dbd82616STejun Heo { 969dbd82616STejun Heo return ata_do_link_abort(link->ap, link); 970dbd82616STejun Heo } 971dbd82616STejun Heo 972dbd82616STejun Heo /** 973dbd82616STejun Heo * ata_port_abort - abort all qc's on the port 974dbd82616STejun Heo * @ap: ATA port to abort qc's for 975dbd82616STejun Heo * 976dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH. 977dbd82616STejun Heo * 978dbd82616STejun Heo * LOCKING: 979dbd82616STejun Heo * spin_lock_irqsave(host_set lock) 980dbd82616STejun Heo * 981dbd82616STejun Heo * RETURNS: 982dbd82616STejun Heo * Number of aborted qc's. 983dbd82616STejun Heo */ 984dbd82616STejun Heo int ata_port_abort(struct ata_port *ap) 985dbd82616STejun Heo { 986dbd82616STejun Heo return ata_do_link_abort(ap, NULL); 987dbd82616STejun Heo } 988dbd82616STejun Heo 989dbd82616STejun Heo /** 990c6fd2807SJeff Garzik * __ata_port_freeze - freeze port 991c6fd2807SJeff Garzik * @ap: ATA port to freeze 992c6fd2807SJeff Garzik * 993c6fd2807SJeff Garzik * This function is called when HSM violation or some other 994c6fd2807SJeff Garzik * condition disrupts normal operation of the port. Frozen port 995c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is 996c6fd2807SJeff Garzik * thawed, which usually follows a successful reset. 997c6fd2807SJeff Garzik * 998c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port 999c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 1000c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler 1001c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port 1002c6fd2807SJeff Garzik * is frozen. 1003c6fd2807SJeff Garzik * 1004c6fd2807SJeff Garzik * LOCKING: 1005cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1006c6fd2807SJeff Garzik */ 1007c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap) 1008c6fd2807SJeff Garzik { 1009c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1010c6fd2807SJeff Garzik 1011c6fd2807SJeff Garzik if (ap->ops->freeze) 1012c6fd2807SJeff Garzik ap->ops->freeze(ap); 1013c6fd2807SJeff Garzik 1014c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN; 1015c6fd2807SJeff Garzik 101644877b4eSTejun Heo DPRINTK("ata%u port frozen\n", ap->print_id); 1017c6fd2807SJeff Garzik } 1018c6fd2807SJeff Garzik 1019c6fd2807SJeff Garzik /** 1020c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port 1021c6fd2807SJeff Garzik * @ap: ATA port to freeze 1022c6fd2807SJeff Garzik * 102354c38444SJeff Garzik * Abort and freeze @ap. The freeze operation must be called 102454c38444SJeff Garzik * first, because some hardware requires special operations 102554c38444SJeff Garzik * before the taskfile registers are accessible. 1026c6fd2807SJeff Garzik * 1027c6fd2807SJeff Garzik * LOCKING: 1028cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1029c6fd2807SJeff Garzik * 1030c6fd2807SJeff Garzik * RETURNS: 1031c6fd2807SJeff Garzik * Number of aborted commands. 
1032c6fd2807SJeff Garzik */ 1033c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap) 1034c6fd2807SJeff Garzik { 1035c6fd2807SJeff Garzik int nr_aborted; 1036c6fd2807SJeff Garzik 1037c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1038c6fd2807SJeff Garzik 1039c6fd2807SJeff Garzik __ata_port_freeze(ap); 104054c38444SJeff Garzik nr_aborted = ata_port_abort(ap); 1041c6fd2807SJeff Garzik 1042c6fd2807SJeff Garzik return nr_aborted; 1043c6fd2807SJeff Garzik } 1044c6fd2807SJeff Garzik 1045c6fd2807SJeff Garzik /** 10467d77b247STejun Heo * sata_async_notification - SATA async notification handler 10477d77b247STejun Heo * @ap: ATA port where async notification is received 10487d77b247STejun Heo * 10497d77b247STejun Heo * Handler to be called when async notification via SDB FIS is 10507d77b247STejun Heo * received. This function schedules EH if necessary. 10517d77b247STejun Heo * 10527d77b247STejun Heo * LOCKING: 10537d77b247STejun Heo * spin_lock_irqsave(host lock) 10547d77b247STejun Heo * 10557d77b247STejun Heo * RETURNS: 10567d77b247STejun Heo * 1 if EH is scheduled, 0 otherwise. 10577d77b247STejun Heo */ 10587d77b247STejun Heo int sata_async_notification(struct ata_port *ap) 10597d77b247STejun Heo { 10607d77b247STejun Heo u32 sntf; 10617d77b247STejun Heo int rc; 10627d77b247STejun Heo 10637d77b247STejun Heo if (!(ap->flags & ATA_FLAG_AN)) 10647d77b247STejun Heo return 0; 10657d77b247STejun Heo 10667d77b247STejun Heo rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 10677d77b247STejun Heo if (rc == 0) 10687d77b247STejun Heo sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 10697d77b247STejun Heo 1070071f44b1STejun Heo if (!sata_pmp_attached(ap) || rc) { 10717d77b247STejun Heo /* PMP is not attached or SNTF is not available */ 1072071f44b1STejun Heo if (!sata_pmp_attached(ap)) { 10737d77b247STejun Heo /* PMP is not attached. Check whether ATAPI 10747d77b247STejun Heo * AN is configured. If so, notify media 10757d77b247STejun Heo * change. 10767d77b247STejun Heo */ 10777d77b247STejun Heo struct ata_device *dev = ap->link.device; 10787d77b247STejun Heo 10797d77b247STejun Heo if ((dev->class == ATA_DEV_ATAPI) && 10807d77b247STejun Heo (dev->flags & ATA_DFLAG_AN)) 10817d77b247STejun Heo ata_scsi_media_change_notify(dev); 10827d77b247STejun Heo return 0; 10837d77b247STejun Heo } else { 10847d77b247STejun Heo /* PMP is attached but SNTF is not available. 10857d77b247STejun Heo * ATAPI async media change notification is 10867d77b247STejun Heo * not used. The PMP must be reporting PHY 10877d77b247STejun Heo * status change, schedule EH. 10887d77b247STejun Heo */ 10897d77b247STejun Heo ata_port_schedule_eh(ap); 10907d77b247STejun Heo return 1; 10917d77b247STejun Heo } 10927d77b247STejun Heo } else { 10937d77b247STejun Heo /* PMP is attached and SNTF is available */ 10947d77b247STejun Heo struct ata_link *link; 10957d77b247STejun Heo 10967d77b247STejun Heo /* check and notify ATAPI AN */ 10971eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 10987d77b247STejun Heo if (!(sntf & (1 << link->pmp))) 10997d77b247STejun Heo continue; 11007d77b247STejun Heo 11017d77b247STejun Heo if ((link->device->class == ATA_DEV_ATAPI) && 11027d77b247STejun Heo (link->device->flags & ATA_DFLAG_AN)) 11037d77b247STejun Heo ata_scsi_media_change_notify(link->device); 11047d77b247STejun Heo } 11057d77b247STejun Heo 11067d77b247STejun Heo /* If PMP is reporting that PHY status of some 11077d77b247STejun Heo * downstream ports has changed, schedule EH. 
11087d77b247STejun Heo */ 11097d77b247STejun Heo if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 11107d77b247STejun Heo ata_port_schedule_eh(ap); 11117d77b247STejun Heo return 1; 11127d77b247STejun Heo } 11137d77b247STejun Heo 11147d77b247STejun Heo return 0; 11157d77b247STejun Heo } 11167d77b247STejun Heo } 11177d77b247STejun Heo 11187d77b247STejun Heo /** 1119c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port 1120c6fd2807SJeff Garzik * @ap: ATA port to freeze 1121c6fd2807SJeff Garzik * 1122c6fd2807SJeff Garzik * Freeze @ap. 1123c6fd2807SJeff Garzik * 1124c6fd2807SJeff Garzik * LOCKING: 1125c6fd2807SJeff Garzik * None. 1126c6fd2807SJeff Garzik */ 1127c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1128c6fd2807SJeff Garzik { 1129c6fd2807SJeff Garzik unsigned long flags; 1130c6fd2807SJeff Garzik 1131c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1132c6fd2807SJeff Garzik return; 1133c6fd2807SJeff Garzik 1134c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1135c6fd2807SJeff Garzik __ata_port_freeze(ap); 1136c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1137c6fd2807SJeff Garzik } 1138c6fd2807SJeff Garzik 1139c6fd2807SJeff Garzik /** 1140c6fd2807SJeff Garzik * ata_port_thaw_port - EH helper to thaw port 1141c6fd2807SJeff Garzik * @ap: ATA port to thaw 1142c6fd2807SJeff Garzik * 1143c6fd2807SJeff Garzik * Thaw frozen port @ap. 1144c6fd2807SJeff Garzik * 1145c6fd2807SJeff Garzik * LOCKING: 1146c6fd2807SJeff Garzik * None. 1147c6fd2807SJeff Garzik */ 1148c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1149c6fd2807SJeff Garzik { 1150c6fd2807SJeff Garzik unsigned long flags; 1151c6fd2807SJeff Garzik 1152c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1153c6fd2807SJeff Garzik return; 1154c6fd2807SJeff Garzik 1155c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1156c6fd2807SJeff Garzik 1157c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1158c6fd2807SJeff Garzik 1159c6fd2807SJeff Garzik if (ap->ops->thaw) 1160c6fd2807SJeff Garzik ap->ops->thaw(ap); 1161c6fd2807SJeff Garzik 1162c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1163c6fd2807SJeff Garzik 116444877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id); 1165c6fd2807SJeff Garzik } 1166c6fd2807SJeff Garzik 1167c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1168c6fd2807SJeff Garzik { 1169c6fd2807SJeff Garzik /* nada */ 1170c6fd2807SJeff Garzik } 1171c6fd2807SJeff Garzik 1172c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1173c6fd2807SJeff Garzik { 1174c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1175c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1176c6fd2807SJeff Garzik unsigned long flags; 1177c6fd2807SJeff Garzik 1178c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1179c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1180c6fd2807SJeff Garzik __ata_qc_complete(qc); 1181c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1182c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1183c6fd2807SJeff Garzik 1184c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1185c6fd2807SJeff Garzik } 1186c6fd2807SJeff Garzik 1187c6fd2807SJeff Garzik /** 1188c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1189c6fd2807SJeff Garzik * @qc: Command to complete 1190c6fd2807SJeff Garzik * 1191c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1192c6fd2807SJeff Garzik * completed. 
To be used from EH. 1193c6fd2807SJeff Garzik */ 1194c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1195c6fd2807SJeff Garzik { 1196c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1197c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1198c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1199c6fd2807SJeff Garzik } 1200c6fd2807SJeff Garzik 1201c6fd2807SJeff Garzik /** 1202c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1203c6fd2807SJeff Garzik * @qc: Command to retry 1204c6fd2807SJeff Garzik * 1205c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1206c6fd2807SJeff Garzik * should be retried. To be used from EH. 1207c6fd2807SJeff Garzik * 1208c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1209c6fd2807SJeff Garzik * scmd->retries is decremented for commands which get retried 1210c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1211c6fd2807SJeff Garzik */ 1212c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1213c6fd2807SJeff Garzik { 1214c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1215c6fd2807SJeff Garzik if (!qc->err_mask && scmd->retries) 1216c6fd2807SJeff Garzik scmd->retries--; 1217c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1218c6fd2807SJeff Garzik } 1219c6fd2807SJeff Garzik 1220c6fd2807SJeff Garzik /** 1221678afac6STejun Heo * ata_dev_disable - disable ATA device 1222678afac6STejun Heo * @dev: ATA device to disable 1223678afac6STejun Heo * 1224678afac6STejun Heo * Disable @dev. 1225678afac6STejun Heo * 1226678afac6STejun Heo * Locking: 1227678afac6STejun Heo * EH context. 1228678afac6STejun Heo */ 1229678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1230678afac6STejun Heo { 1231678afac6STejun Heo if (!ata_dev_enabled(dev)) 1232678afac6STejun Heo return; 1233678afac6STejun Heo 1234678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1235678afac6STejun Heo ata_dev_printk(dev, KERN_WARNING, "disabled\n"); 1236678afac6STejun Heo ata_acpi_on_disable(dev); 1237678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1238678afac6STejun Heo dev->class++; 123999cf610aSTejun Heo 124099cf610aSTejun Heo /* From now till the next successful probe, ering is used to 124199cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 124299cf610aSTejun Heo */ 124399cf610aSTejun Heo ata_ering_clear(&dev->ering); 1244678afac6STejun Heo } 1245678afac6STejun Heo 1246678afac6STejun Heo /** 1247c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1248c6fd2807SJeff Garzik * @dev: ATA device to detach 1249c6fd2807SJeff Garzik * 1250c6fd2807SJeff Garzik * Detach @dev. 1251c6fd2807SJeff Garzik * 1252c6fd2807SJeff Garzik * LOCKING: 1253c6fd2807SJeff Garzik * None. 
1254c6fd2807SJeff Garzik */ 1255fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1256c6fd2807SJeff Garzik { 1257f58229f8STejun Heo struct ata_link *link = dev->link; 1258f58229f8STejun Heo struct ata_port *ap = link->ap; 125990484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1260c6fd2807SJeff Garzik unsigned long flags; 1261c6fd2807SJeff Garzik 1262c6fd2807SJeff Garzik ata_dev_disable(dev); 1263c6fd2807SJeff Garzik 1264c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1265c6fd2807SJeff Garzik 1266c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1267c6fd2807SJeff Garzik 1268c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1269c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1270c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1271c6fd2807SJeff Garzik } 1272c6fd2807SJeff Garzik 127390484ebfSTejun Heo /* clear per-dev EH info */ 1274f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1275f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 127690484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 127790484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1278c6fd2807SJeff Garzik 1279c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1280c6fd2807SJeff Garzik } 1281c6fd2807SJeff Garzik 1282c6fd2807SJeff Garzik /** 1283c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1284955e57dfSTejun Heo * @link: target ATA link 1285c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1286c6fd2807SJeff Garzik * @action: action about to be performed 1287c6fd2807SJeff Garzik * 1288c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1289955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1290955e57dfSTejun Heo * repeated. 1291c6fd2807SJeff Garzik * 1292c6fd2807SJeff Garzik * LOCKING: 1293c6fd2807SJeff Garzik * None. 1294c6fd2807SJeff Garzik */ 1295fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1296c6fd2807SJeff Garzik unsigned int action) 1297c6fd2807SJeff Garzik { 1298955e57dfSTejun Heo struct ata_port *ap = link->ap; 1299955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1300955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1301c6fd2807SJeff Garzik unsigned long flags; 1302c6fd2807SJeff Garzik 1303c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1304c6fd2807SJeff Garzik 1305955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1306c6fd2807SJeff Garzik 1307a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1308a568d1d2STejun Heo * slave links as master will do them again. 1309a568d1d2STejun Heo */ 1310a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1311c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1312c6fd2807SJeff Garzik 1313c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1314c6fd2807SJeff Garzik } 1315c6fd2807SJeff Garzik 1316c6fd2807SJeff Garzik /** 1317c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1318c6fd2807SJeff Garzik * @ap: target ATA port 1319c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1320c6fd2807SJeff Garzik * @action: action just completed 1321c6fd2807SJeff Garzik * 1322c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1323955e57dfSTejun Heo * in @link->eh_context. 
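 *
 * For illustration, a hedged sketch of how ata_eh_about_to_do() and
 * ata_eh_done() bracket an EH action so it is not repeated needlessly;
 * the hardreset/classes/deadline/rc variables are assumed to come from
 * the surrounding reset logic and are not defined here:
 *
 *	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 *	rc = hardreset(link, classes, deadline);
 *	if (rc == 0)
 *		ata_eh_done(link, NULL, ATA_EH_RESET);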
1324c6fd2807SJeff Garzik * 1325c6fd2807SJeff Garzik * LOCKING: 1326c6fd2807SJeff Garzik * None. 1327c6fd2807SJeff Garzik */ 1328fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1329c6fd2807SJeff Garzik unsigned int action) 1330c6fd2807SJeff Garzik { 1331955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13329af5c9c9STejun Heo 1333955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1334c6fd2807SJeff Garzik } 1335c6fd2807SJeff Garzik 1336c6fd2807SJeff Garzik /** 1337c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1338c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1339c6fd2807SJeff Garzik * 1340c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1341c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1342c6fd2807SJeff Garzik * error is reported. 1343c6fd2807SJeff Garzik * 1344c6fd2807SJeff Garzik * LOCKING: 1345c6fd2807SJeff Garzik * None. 1346c6fd2807SJeff Garzik * 1347c6fd2807SJeff Garzik * RETURNS: 1348c6fd2807SJeff Garzik * Descriptive string for @err_mask 1349c6fd2807SJeff Garzik */ 1350c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1351c6fd2807SJeff Garzik { 1352c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1353c6fd2807SJeff Garzik return "host bus error"; 1354c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1355c6fd2807SJeff Garzik return "ATA bus error"; 1356c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1357c6fd2807SJeff Garzik return "timeout"; 1358c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1359c6fd2807SJeff Garzik return "HSM violation"; 1360c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1361c6fd2807SJeff Garzik return "internal error"; 1362c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1363c6fd2807SJeff Garzik return "media error"; 1364c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1365c6fd2807SJeff Garzik return "invalid argument"; 1366c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1367c6fd2807SJeff Garzik return "device error"; 1368c6fd2807SJeff Garzik return "unknown error"; 1369c6fd2807SJeff Garzik } 1370c6fd2807SJeff Garzik 1371c6fd2807SJeff Garzik /** 1372c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1373c6fd2807SJeff Garzik * @dev: target device 1374c6fd2807SJeff Garzik * @page: page to read 1375c6fd2807SJeff Garzik * @buf: buffer to store read page 1376c6fd2807SJeff Garzik * @sectors: number of sectors to read 1377c6fd2807SJeff Garzik * 1378c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1379c6fd2807SJeff Garzik * 1380c6fd2807SJeff Garzik * LOCKING: 1381c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1382c6fd2807SJeff Garzik * 1383c6fd2807SJeff Garzik * RETURNS: 1384c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
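 *
 * A minimal usage sketch, mirroring the NCQ error log caller below
 * (the -EIO mapping is that caller's convention, not this function's):
 *
 *	u8 *buf = dev->link->ap->sector_buf;
 *	unsigned int err_mask;
 *
 *	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
 *	if (err_mask)
 *		return -EIO;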
1385c6fd2807SJeff Garzik */ 1386c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1387c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1388c6fd2807SJeff Garzik { 1389c6fd2807SJeff Garzik struct ata_taskfile tf; 1390c6fd2807SJeff Garzik unsigned int err_mask; 1391c6fd2807SJeff Garzik 1392c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1393c6fd2807SJeff Garzik 1394c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1395c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1396c6fd2807SJeff Garzik tf.lbal = page; 1397c6fd2807SJeff Garzik tf.nsect = sectors; 1398c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1399c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1400c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1401c6fd2807SJeff Garzik 1402c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 14032b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1404c6fd2807SJeff Garzik 1405c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1406c6fd2807SJeff Garzik return err_mask; 1407c6fd2807SJeff Garzik } 1408c6fd2807SJeff Garzik 1409c6fd2807SJeff Garzik /** 1410c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1411c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1412c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1413c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1414c6fd2807SJeff Garzik * 1415c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1416c6fd2807SJeff Garzik * condition. 1417c6fd2807SJeff Garzik * 1418c6fd2807SJeff Garzik * LOCKING: 1419c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1420c6fd2807SJeff Garzik * 1421c6fd2807SJeff Garzik * RETURNS: 1422c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
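 *
 * Condensed, for illustration, from ata_eh_analyze_ncq_error() below;
 * error handling is trimmed and the qc/ap variables are assumed to be
 * set up by the caller:
 *
 *	rc = ata_eh_read_log_10h(dev, &tag, &tf);
 *	if (rc)
 *		return;
 *	qc = __ata_qc_from_tag(ap, tag);
 *	memcpy(&qc->result_tf, &tf, sizeof(tf));
 *	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;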
1423c6fd2807SJeff Garzik */ 1424c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1425c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1426c6fd2807SJeff Garzik { 14279af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1428c6fd2807SJeff Garzik unsigned int err_mask; 1429c6fd2807SJeff Garzik u8 csum; 1430c6fd2807SJeff Garzik int i; 1431c6fd2807SJeff Garzik 1432c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1433c6fd2807SJeff Garzik if (err_mask) 1434c6fd2807SJeff Garzik return -EIO; 1435c6fd2807SJeff Garzik 1436c6fd2807SJeff Garzik csum = 0; 1437c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1438c6fd2807SJeff Garzik csum += buf[i]; 1439c6fd2807SJeff Garzik if (csum) 1440c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1441c6fd2807SJeff Garzik "invalid checksum 0x%x on log page 10h\n", csum); 1442c6fd2807SJeff Garzik 1443c6fd2807SJeff Garzik if (buf[0] & 0x80) 1444c6fd2807SJeff Garzik return -ENOENT; 1445c6fd2807SJeff Garzik 1446c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1447c6fd2807SJeff Garzik 1448c6fd2807SJeff Garzik tf->command = buf[2]; 1449c6fd2807SJeff Garzik tf->feature = buf[3]; 1450c6fd2807SJeff Garzik tf->lbal = buf[4]; 1451c6fd2807SJeff Garzik tf->lbam = buf[5]; 1452c6fd2807SJeff Garzik tf->lbah = buf[6]; 1453c6fd2807SJeff Garzik tf->device = buf[7]; 1454c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1455c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1456c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1457c6fd2807SJeff Garzik tf->nsect = buf[12]; 1458c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1459c6fd2807SJeff Garzik 1460c6fd2807SJeff Garzik return 0; 1461c6fd2807SJeff Garzik } 1462c6fd2807SJeff Garzik 1463c6fd2807SJeff Garzik /** 146411fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 146511fc33daSTejun Heo * @dev: target ATAPI device 146611fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 146711fc33daSTejun Heo * 146811fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 146911fc33daSTejun Heo * 147011fc33daSTejun Heo * LOCKING: 147111fc33daSTejun Heo * EH context (may sleep). 147211fc33daSTejun Heo * 147311fc33daSTejun Heo * RETURNS: 147411fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
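 *
 * One plausible use, sketched here as an assumption (libata clears
 * UNIT ATTENTION conditions with a loop along these lines elsewhere in
 * EH; the single-retry policy shown is made up):
 *
 *	u8 sense_key = 0;
 *	unsigned int err_mask;
 *
 *	err_mask = atapi_eh_tur(dev, &sense_key);
 *	if (err_mask == AC_ERR_DEV && sense_key == UNIT_ATTENTION)
 *		err_mask = atapi_eh_tur(dev, &sense_key);	/* retry once */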
147511fc33daSTejun Heo */ 147611fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 147711fc33daSTejun Heo { 147811fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 147911fc33daSTejun Heo struct ata_taskfile tf; 148011fc33daSTejun Heo unsigned int err_mask; 148111fc33daSTejun Heo 148211fc33daSTejun Heo ata_tf_init(dev, &tf); 148311fc33daSTejun Heo 148411fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 148511fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 148611fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 148711fc33daSTejun Heo 148811fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 148911fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 149011fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 149111fc33daSTejun Heo return err_mask; 149211fc33daSTejun Heo } 149311fc33daSTejun Heo 149411fc33daSTejun Heo /** 1495c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1496c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1497c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14983eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1499c6fd2807SJeff Garzik * 1500c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1501c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1502c6fd2807SJeff Garzik * 1503c6fd2807SJeff Garzik * LOCKING: 1504c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1505c6fd2807SJeff Garzik * 1506c6fd2807SJeff Garzik * RETURNS: 1507c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1508c6fd2807SJeff Garzik */ 15093eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 15103eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1511c6fd2807SJeff Garzik { 15123eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 15133eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 15149af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1515c6fd2807SJeff Garzik struct ata_taskfile tf; 1516c6fd2807SJeff Garzik 1517c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1518c6fd2807SJeff Garzik 1519c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1520c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1521c6fd2807SJeff Garzik 152256287768SAlbert Lee /* initialize sense_buf with the error register, 152356287768SAlbert Lee * for the case where they are -not- overwritten 152456287768SAlbert Lee */ 1525c6fd2807SJeff Garzik sense_buf[0] = 0x70; 15263eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 152756287768SAlbert Lee 152856287768SAlbert Lee /* some devices time out if garbage left in tf */ 152956287768SAlbert Lee ata_tf_init(dev, &tf); 1530c6fd2807SJeff Garzik 1531c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1532c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1533c6fd2807SJeff Garzik 1534c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1535c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 15360dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1537c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1538c6fd2807SJeff Garzik } else { 15390dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1540f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1541f2dfc1a1STejun Heo tf.lbah = 0; 1542c6fd2807SJeff Garzik } 1543c6fd2807SJeff Garzik 1544c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 15452b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1546c6fd2807SJeff Garzik } 1547c6fd2807SJeff Garzik 1548c6fd2807SJeff Garzik /** 1549c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 15500260731fSTejun Heo * @link: ATA link to analyze SError for 1551c6fd2807SJeff Garzik * 1552c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1553c6fd2807SJeff Garzik * failure. 1554c6fd2807SJeff Garzik * 1555c6fd2807SJeff Garzik * LOCKING: 1556c6fd2807SJeff Garzik * None. 1557c6fd2807SJeff Garzik */ 15580260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1559c6fd2807SJeff Garzik { 15600260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1561c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1562c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1563f9df58cbSTejun Heo u32 hotplug_mask; 1564c6fd2807SJeff Garzik 1565e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1566c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1567cf480626STejun Heo action |= ATA_EH_RESET; 1568c6fd2807SJeff Garzik } 1569c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1570c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1571cf480626STejun Heo action |= ATA_EH_RESET; 1572c6fd2807SJeff Garzik } 1573c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1574c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1575cf480626STejun Heo action |= ATA_EH_RESET; 1576c6fd2807SJeff Garzik } 1577f9df58cbSTejun Heo 1578f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1579f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1580f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1581f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1582f9df58cbSTejun Heo */ 15836b7ae954STejun Heo if (link->lpm_policy != ATA_LPM_MAX_POWER) 15846b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 15856b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1586f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1587f9df58cbSTejun Heo else 1588f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1589f9df58cbSTejun Heo 1590f9df58cbSTejun Heo if (serror & hotplug_mask) 1591c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1592c6fd2807SJeff Garzik 1593c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1594c6fd2807SJeff Garzik ehc->i.action |= action; 1595c6fd2807SJeff Garzik } 1596c6fd2807SJeff Garzik 1597c6fd2807SJeff Garzik /** 1598c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 15990260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1600c6fd2807SJeff Garzik * 1601c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1602c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1603c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1604c6fd2807SJeff Garzik * care of the rest. 
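 *
 * For LLDD authors, an illustrative fragment (the interrupt-handler
 * context and the decision to abort the link are assumptions, not this
 * file's code) showing the only step a driver must take for an NCQ
 * device error:
 *
 *	struct ata_eh_info *ehi = &link->eh_info;
 *
 *	ehi->err_mask |= AC_ERR_DEV;
 *	ata_link_abort(link);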
1605c6fd2807SJeff Garzik *
1606c6fd2807SJeff Garzik * LOCKING:
1607c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1608c6fd2807SJeff Garzik */
160910acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1610c6fd2807SJeff Garzik {
16110260731fSTejun Heo struct ata_port *ap = link->ap;
16120260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
16130260731fSTejun Heo struct ata_device *dev = link->device;
1614c6fd2807SJeff Garzik struct ata_queued_cmd *qc;
1615c6fd2807SJeff Garzik struct ata_taskfile tf;
1616c6fd2807SJeff Garzik int tag, rc;
1617c6fd2807SJeff Garzik
1618c6fd2807SJeff Garzik /* if frozen, we can't do much */
1619c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN)
1620c6fd2807SJeff Garzik return;
1621c6fd2807SJeff Garzik
1622c6fd2807SJeff Garzik /* is it NCQ device error? */
16230260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1624c6fd2807SJeff Garzik return;
1625c6fd2807SJeff Garzik
1626c6fd2807SJeff Garzik /* has LLDD analyzed already? */
1627c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1628c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag);
1629c6fd2807SJeff Garzik
1630c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED))
1631c6fd2807SJeff Garzik continue;
1632c6fd2807SJeff Garzik
1633c6fd2807SJeff Garzik if (qc->err_mask)
1634c6fd2807SJeff Garzik return;
1635c6fd2807SJeff Garzik }
1636c6fd2807SJeff Garzik
1637c6fd2807SJeff Garzik /* okay, this error is ours */
1638a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf));
1639c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf);
1640c6fd2807SJeff Garzik if (rc) {
16410260731fSTejun Heo ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1642c6fd2807SJeff Garzik "(errno=%d)\n", rc);
1643c6fd2807SJeff Garzik return;
1644c6fd2807SJeff Garzik }
1645c6fd2807SJeff Garzik
16460260731fSTejun Heo if (!(link->sactive & (1 << tag))) {
16470260731fSTejun Heo ata_link_printk(link, KERN_ERR, "log page 10h reported "
1648c6fd2807SJeff Garzik "inactive tag %d\n", tag);
1649c6fd2807SJeff Garzik return;
1650c6fd2807SJeff Garzik }
1651c6fd2807SJeff Garzik
1652c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */
1653c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag);
1654c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf));
1655a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
16565335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1657c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV;
1658c6fd2807SJeff Garzik }
1659c6fd2807SJeff Garzik
1660c6fd2807SJeff Garzik /**
1661c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc
1662c6fd2807SJeff Garzik * @qc: qc to analyze
1663c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze
1664c6fd2807SJeff Garzik *
1665c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of
1666c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if
1667c6fd2807SJeff Garzik * available.
1668c6fd2807SJeff Garzik *
1669c6fd2807SJeff Garzik * LOCKING:
1670c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1671c6fd2807SJeff Garzik * 1672c6fd2807SJeff Garzik * RETURNS: 1673c6fd2807SJeff Garzik * Determined recovery action 1674c6fd2807SJeff Garzik */ 1675c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1676c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1677c6fd2807SJeff Garzik { 1678c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1679c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1680c6fd2807SJeff Garzik 1681c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1682c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1683cf480626STejun Heo return ATA_EH_RESET; 1684c6fd2807SJeff Garzik } 1685c6fd2807SJeff Garzik 1686a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1687a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1688a51d644aSTejun Heo else 1689c6fd2807SJeff Garzik return 0; 1690c6fd2807SJeff Garzik 1691c6fd2807SJeff Garzik switch (qc->dev->class) { 1692c6fd2807SJeff Garzik case ATA_DEV_ATA: 1693c6fd2807SJeff Garzik if (err & ATA_ICRC) 1694c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1695c6fd2807SJeff Garzik if (err & ATA_UNC) 1696c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1697c6fd2807SJeff Garzik if (err & ATA_IDNF) 1698c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1699c6fd2807SJeff Garzik break; 1700c6fd2807SJeff Garzik 1701c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1702a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 17033eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 17043eabddb8STejun Heo qc->scsicmd->sense_buffer, 17053eabddb8STejun Heo qc->result_tf.feature >> 4); 1706c6fd2807SJeff Garzik if (!tmp) { 1707a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1708a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1709a569a30dSTejun Heo * data is already valid. 1710c6fd2807SJeff Garzik * 1711c6fd2807SJeff Garzik * TODO: interpret sense data and set 1712c6fd2807SJeff Garzik * appropriate err_mask. 
1713c6fd2807SJeff Garzik */ 1714c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1715c6fd2807SJeff Garzik } else 1716c6fd2807SJeff Garzik qc->err_mask |= tmp; 1717c6fd2807SJeff Garzik } 1718a569a30dSTejun Heo } 1719c6fd2807SJeff Garzik 1720c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1721cf480626STejun Heo action |= ATA_EH_RESET; 1722c6fd2807SJeff Garzik 1723c6fd2807SJeff Garzik return action; 1724c6fd2807SJeff Garzik } 1725c6fd2807SJeff Garzik 172676326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 172776326ac1STejun Heo int *xfer_ok) 1728c6fd2807SJeff Garzik { 172976326ac1STejun Heo int base = 0; 173076326ac1STejun Heo 173176326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 173276326ac1STejun Heo *xfer_ok = 1; 173376326ac1STejun Heo 173476326ac1STejun Heo if (!*xfer_ok) 173575f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 173676326ac1STejun Heo 17377d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 173876326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1739c6fd2807SJeff Garzik 17407d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 174176326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17427d47e8d4STejun Heo 17433884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 17447d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 174576326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17467d47e8d4STejun Heo if ((err_mask & 17477d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 174876326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1749c6fd2807SJeff Garzik } 1750c6fd2807SJeff Garzik 1751c6fd2807SJeff Garzik return 0; 1752c6fd2807SJeff Garzik } 1753c6fd2807SJeff Garzik 17547d47e8d4STejun Heo struct speed_down_verdict_arg { 1755c6fd2807SJeff Garzik u64 since; 175676326ac1STejun Heo int xfer_ok; 17573884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1758c6fd2807SJeff Garzik }; 1759c6fd2807SJeff Garzik 17607d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1761c6fd2807SJeff Garzik { 17627d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 176376326ac1STejun Heo int cat; 1764c6fd2807SJeff Garzik 1765d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1766c6fd2807SJeff Garzik return -1; 1767c6fd2807SJeff Garzik 176876326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 176976326ac1STejun Heo &arg->xfer_ok); 17707d47e8d4STejun Heo arg->nr_errors[cat]++; 177176326ac1STejun Heo 1772c6fd2807SJeff Garzik return 0; 1773c6fd2807SJeff Garzik } 1774c6fd2807SJeff Garzik 1775c6fd2807SJeff Garzik /** 17767d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1777c6fd2807SJeff Garzik * @dev: Device of interest 1778c6fd2807SJeff Garzik * 1779c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 17807d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 17817d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
1782c6fd2807SJeff Garzik *
17833884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command
1784c6fd2807SJeff Garzik *
17853884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
17863884f7b0STejun Heo * IO commands
17877d47e8d4STejun Heo *
17883884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands
1789c6fd2807SJeff Garzik *
179076326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while
179176326ac1STejun Heo * data transfer hasn't been verified.
179276326ac1STejun Heo *
17933884f7b0STejun Heo * Verdicts are
17947d47e8d4STejun Heo *
17953884f7b0STejun Heo * NCQ_OFF : Turn off NCQ.
17967d47e8d4STejun Heo *
17973884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back
17983884f7b0STejun Heo * to PIO.
17993884f7b0STejun Heo *
18003884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO.
18013884f7b0STejun Heo *
18023884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is
180376326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors
180476326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't.
180576326ac1STejun Heo * This is to expedite speed down decisions right after device is
180676326ac1STejun Heo * initially configured.
18073884f7b0STejun Heo *
180876326ac1STejun Heo * The following are the speed down rules. #1 and #2 deal with
180976326ac1STejun Heo * DUBIOUS errors.
181076326ac1STejun Heo *
181176326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
181276326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
181376326ac1STejun Heo *
181476326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
181576326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF.
181676326ac1STejun Heo *
181776326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
18183884f7b0STejun Heo * occurred during last 5 mins, FALLBACK_TO_PIO.
18193884f7b0STejun Heo *
182076326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
18213884f7b0STejun Heo * during last 10 mins, NCQ_OFF.
18223884f7b0STejun Heo *
182376326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
18243884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
18257d47e8d4STejun Heo *
1826c6fd2807SJeff Garzik * LOCKING:
1827c6fd2807SJeff Garzik * Inherited from caller.
1828c6fd2807SJeff Garzik *
1829c6fd2807SJeff Garzik * RETURNS:
18307d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
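 *
 * Condensed, for illustration, from ata_eh_speed_down() below (the real
 * caller applies at most one action per error and checks more
 * conditions than shown here):
 *
 *	verdict = ata_eh_speed_down_verdict(dev);
 *	if (verdict & ATA_EH_SPDN_NCQ_OFF)
 *		dev->flags |= ATA_DFLAG_NCQ_OFF;
 *	else if (verdict & ATA_EH_SPDN_SPEED_DOWN)
 *		sata_down_spd_limit(link, 0);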
1831c6fd2807SJeff Garzik */ 18327d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1833c6fd2807SJeff Garzik { 18347d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 18357d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 18367d47e8d4STejun Heo struct speed_down_verdict_arg arg; 18377d47e8d4STejun Heo unsigned int verdict = 0; 1838c6fd2807SJeff Garzik 18393884f7b0STejun Heo /* scan past 5 mins of error history */ 18403884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 18413884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 18423884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 18433884f7b0STejun Heo 184476326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 184576326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 184676326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 184776326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 184876326ac1STejun Heo 184976326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 185076326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 185176326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 185276326ac1STejun Heo 18533884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18543884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1855663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18563884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 18573884f7b0STejun Heo 18587d47e8d4STejun Heo /* scan past 10 mins of error history */ 1859c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 18607d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 18617d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1862c6fd2807SJeff Garzik 18633884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 18643884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 18657d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 18663884f7b0STejun Heo 18673884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18683884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1869663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18707d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1871c6fd2807SJeff Garzik 18727d47e8d4STejun Heo return verdict; 1873c6fd2807SJeff Garzik } 1874c6fd2807SJeff Garzik 1875c6fd2807SJeff Garzik /** 1876c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1877c6fd2807SJeff Garzik * @dev: Failed device 18783884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1879c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1880c6fd2807SJeff Garzik * 1881c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1882c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1883c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1884c6fd2807SJeff Garzik * necessary. 1885c6fd2807SJeff Garzik * 1886c6fd2807SJeff Garzik * LOCKING: 1887c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1888c6fd2807SJeff Garzik * 1889c6fd2807SJeff Garzik * RETURNS: 18907d47e8d4STejun Heo * Determined recovery action. 
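 *
 * A usage sketch mirroring the call site in ata_eh_link_autopsy()
 * below; dev, eflags and all_err_mask are assumed to have been
 * collected by the caller:
 *
 *	if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
 *		eflags |= ATA_EFLAG_DUBIOUS_XFER;
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);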
1891c6fd2807SJeff Garzik */ 18923884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 18933884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1894c6fd2807SJeff Garzik { 1895b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 189676326ac1STejun Heo int xfer_ok = 0; 18977d47e8d4STejun Heo unsigned int verdict; 18987d47e8d4STejun Heo unsigned int action = 0; 18997d47e8d4STejun Heo 19007d47e8d4STejun Heo /* don't bother if Cat-0 error */ 190176326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1902c6fd2807SJeff Garzik return 0; 1903c6fd2807SJeff Garzik 1904c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 19053884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 19067d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1907c6fd2807SJeff Garzik 19087d47e8d4STejun Heo /* turn off NCQ? */ 19097d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 19107d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 19117d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 19127d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 19137d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 19147d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 19157d47e8d4STejun Heo goto done; 19167d47e8d4STejun Heo } 1917c6fd2807SJeff Garzik 19187d47e8d4STejun Heo /* speed down? */ 19197d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1920c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1921a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1922cf480626STejun Heo action |= ATA_EH_RESET; 19237d47e8d4STejun Heo goto done; 19247d47e8d4STejun Heo } 1925c6fd2807SJeff Garzik 1926c6fd2807SJeff Garzik /* lower transfer mode */ 19277d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 19287d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 19297d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 19307d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 19317d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 19327d47e8d4STejun Heo int sel; 1933c6fd2807SJeff Garzik 19347d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 19357d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 19367d47e8d4STejun Heo else 19377d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 19387d47e8d4STejun Heo 19397d47e8d4STejun Heo dev->spdn_cnt++; 19407d47e8d4STejun Heo 19417d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1942cf480626STejun Heo action |= ATA_EH_RESET; 19437d47e8d4STejun Heo goto done; 19447d47e8d4STejun Heo } 19457d47e8d4STejun Heo } 19467d47e8d4STejun Heo } 19477d47e8d4STejun Heo 19487d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1949663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
19507d47e8d4STejun Heo */ 19517d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1952663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 19537d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 19547d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 19557d47e8d4STejun Heo dev->spdn_cnt = 0; 1956cf480626STejun Heo action |= ATA_EH_RESET; 19577d47e8d4STejun Heo goto done; 19587d47e8d4STejun Heo } 19597d47e8d4STejun Heo } 19607d47e8d4STejun Heo 1961c6fd2807SJeff Garzik return 0; 19627d47e8d4STejun Heo done: 19637d47e8d4STejun Heo /* device has been slowed down, blow error history */ 196476326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 19657d47e8d4STejun Heo ata_ering_clear(&dev->ering); 19667d47e8d4STejun Heo return action; 1967c6fd2807SJeff Garzik } 1968c6fd2807SJeff Garzik 1969c6fd2807SJeff Garzik /** 19709b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19719b1e2658STejun Heo * @link: host link to perform autopsy on 1972c6fd2807SJeff Garzik * 19730260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19740260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19750260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1976c6fd2807SJeff Garzik * 1977c6fd2807SJeff Garzik * LOCKING: 1978c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1979c6fd2807SJeff Garzik */ 19809b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1981c6fd2807SJeff Garzik { 19820260731fSTejun Heo struct ata_port *ap = link->ap; 1983936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1984dfcc173dSTejun Heo struct ata_device *dev; 19853884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 19863884f7b0STejun Heo int tag; 1987c6fd2807SJeff Garzik u32 serror; 1988c6fd2807SJeff Garzik int rc; 1989c6fd2807SJeff Garzik 1990c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1991c6fd2807SJeff Garzik 1992c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1993c6fd2807SJeff Garzik return; 1994c6fd2807SJeff Garzik 1995c6fd2807SJeff Garzik /* obtain and analyze SError */ 1996936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1997c6fd2807SJeff Garzik if (rc == 0) { 1998c6fd2807SJeff Garzik ehc->i.serror |= serror; 19990260731fSTejun Heo ata_eh_analyze_serror(link); 20004e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2001cf480626STejun Heo /* SError read failed, force reset and probing */ 2002b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2003cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20044e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 20054e57c517STejun Heo } 2006c6fd2807SJeff Garzik 2007c6fd2807SJeff Garzik /* analyze NCQ failure */ 20080260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2009c6fd2807SJeff Garzik 2010c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2011c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2012c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2013c6fd2807SJeff Garzik 2014c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2015c6fd2807SJeff Garzik 2016c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2017c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2018c6fd2807SJeff Garzik 2019b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2020b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2021c6fd2807SJeff Garzik continue; 
2022c6fd2807SJeff Garzik 2023c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2024c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2025c6fd2807SJeff Garzik 2026c6fd2807SJeff Garzik /* analyze TF */ 2027c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2028c6fd2807SJeff Garzik 2029c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2030c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2031c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2032c6fd2807SJeff Garzik AC_ERR_INVALID); 2033c6fd2807SJeff Garzik 2034c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2035c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2036c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2037c6fd2807SJeff Garzik 2038c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2039f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2040c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2041c6fd2807SJeff Garzik 204203faab78STejun Heo /* determine whether the command is worth retrying */ 2043534ead70STejun Heo if (qc->flags & ATA_QCFLAG_IO || 2044534ead70STejun Heo (!(qc->err_mask & AC_ERR_INVALID) && 2045534ead70STejun Heo qc->err_mask != AC_ERR_DEV)) 204603faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 204703faab78STejun Heo 2048c6fd2807SJeff Garzik /* accumulate error info */ 2049c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2050c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2051c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 20523884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2053c6fd2807SJeff Garzik } 2054c6fd2807SJeff Garzik 2055c6fd2807SJeff Garzik /* enforce default EH actions */ 2056c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2057c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2058cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20593884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20603884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2061c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2062c6fd2807SJeff Garzik 2063dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2064dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
2065dfcc173dSTejun Heo */ 2066c6fd2807SJeff Garzik if (ehc->i.dev) { 2067c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2068c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2069c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2070c6fd2807SJeff Garzik } 2071c6fd2807SJeff Garzik 20722695e366STejun Heo /* propagate timeout to host link */ 20732695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 20742695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 20752695e366STejun Heo 20762695e366STejun Heo /* record error and consider speeding down */ 2077dfcc173dSTejun Heo dev = ehc->i.dev; 20782695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 20792695e366STejun Heo ata_dev_enabled(link->device)))) 2080dfcc173dSTejun Heo dev = link->device; 2081dfcc173dSTejun Heo 208276326ac1STejun Heo if (dev) { 208376326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 208476326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 20853884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 208676326ac1STejun Heo } 2087dfcc173dSTejun Heo 2088c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2089c6fd2807SJeff Garzik } 2090c6fd2807SJeff Garzik 2091c6fd2807SJeff Garzik /** 20929b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 20939b1e2658STejun Heo * @ap: host port to perform autopsy on 20949b1e2658STejun Heo * 20959b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 20969b1e2658STejun Heo * which recovery actions are needed. 20979b1e2658STejun Heo * 20989b1e2658STejun Heo * LOCKING: 20999b1e2658STejun Heo * Kernel thread context (may sleep). 21009b1e2658STejun Heo */ 2101fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 21029b1e2658STejun Heo { 21039b1e2658STejun Heo struct ata_link *link; 21049b1e2658STejun Heo 21051eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 21069b1e2658STejun Heo ata_eh_link_autopsy(link); 21072695e366STejun Heo 2108b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2109b1c72916STejun Heo * but actions and flags are transferred over to the master 2110b1c72916STejun Heo * link and handled from there. 2111b1c72916STejun Heo */ 2112b1c72916STejun Heo if (ap->slave_link) { 2113b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2114b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2115b1c72916STejun Heo 2116848e4c68STejun Heo /* transfer control flags from master to slave */ 2117848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2118848e4c68STejun Heo 2119848e4c68STejun Heo /* perform autopsy on the slave link */ 2120b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2121b1c72916STejun Heo 2122848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2123b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2124b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2125b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2126b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2127b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2128b1c72916STejun Heo } 2129b1c72916STejun Heo 21302695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 21312695e366STejun Heo * Perform host link autopsy last. 
21322695e366STejun Heo */ 2133071f44b1STejun Heo if (sata_pmp_attached(ap)) 21342695e366STejun Heo ata_eh_link_autopsy(&ap->link); 21359b1e2658STejun Heo } 21369b1e2658STejun Heo 21379b1e2658STejun Heo /** 21386521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 21396521148cSRobert Hancock * @command: ATA command code to get description for 21406521148cSRobert Hancock * 21416521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 21426521148cSRobert Hancock * command is not known. 21436521148cSRobert Hancock * 21446521148cSRobert Hancock * LOCKING: 21456521148cSRobert Hancock * None 21466521148cSRobert Hancock */ 21476521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 21486521148cSRobert Hancock { 21496521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 21506521148cSRobert Hancock static const struct 21516521148cSRobert Hancock { 21526521148cSRobert Hancock u8 command; 21536521148cSRobert Hancock const char *text; 21546521148cSRobert Hancock } cmd_descr[] = { 21556521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 21566521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 21576521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 21586521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 21596521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 21606521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 21616521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 21626521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 21636521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 21646521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 21656521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 21666521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 21676521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 21686521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 21696521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 21706521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 21716521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 21726521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 21736521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 21746521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 21756521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 21766521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 21776521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 21786521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 21796521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 21806521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 21816521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 21826521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 21836521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 21846521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 21856521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 21866521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 21876521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 21886521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 21896521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA 
EXT" }, 21906521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 21916521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 21926521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 21936521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 21946521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 21956521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 21966521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 21976521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 21986521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 21996521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 22006521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 22016521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 22026521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 22036521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 22046521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 22056521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 22066521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 22076521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 22086521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 22096521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 22106521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 22116521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 22126521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 22136521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 22146521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 22156521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 22166521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 22176521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 22186521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 22196521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 22206521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 22216521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 22226521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 22236521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2224acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 22256521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 22266521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 22276521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 22286521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 22296521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 22306521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 22316521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 22326521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 22336521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 22346521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 22356521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 22366521148cSRobert 
Hancock { 0, NULL } /* terminate list */ 22376521148cSRobert Hancock }; 22386521148cSRobert Hancock 22396521148cSRobert Hancock unsigned int i; 22406521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 22416521148cSRobert Hancock if (cmd_descr[i].command == command) 22426521148cSRobert Hancock return cmd_descr[i].text; 22436521148cSRobert Hancock #endif 22446521148cSRobert Hancock 22456521148cSRobert Hancock return NULL; 22466521148cSRobert Hancock } 22476521148cSRobert Hancock 22486521148cSRobert Hancock /** 22499b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22500260731fSTejun Heo * @link: ATA link EH is going on 2251c6fd2807SJeff Garzik * 2252c6fd2807SJeff Garzik * Report EH to user. 2253c6fd2807SJeff Garzik * 2254c6fd2807SJeff Garzik * LOCKING: 2255c6fd2807SJeff Garzik * None. 2256c6fd2807SJeff Garzik */ 22579b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2258c6fd2807SJeff Garzik { 22590260731fSTejun Heo struct ata_port *ap = link->ap; 22600260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2261c6fd2807SJeff Garzik const char *frozen, *desc; 2262a1e10f7eSTejun Heo char tries_buf[6]; 2263c6fd2807SJeff Garzik int tag, nr_failed = 0; 2264c6fd2807SJeff Garzik 226594ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 226694ff3d54STejun Heo return; 226794ff3d54STejun Heo 2268c6fd2807SJeff Garzik desc = NULL; 2269c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2270c6fd2807SJeff Garzik desc = ehc->i.desc; 2271c6fd2807SJeff Garzik 2272c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2273c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2274c6fd2807SJeff Garzik 2275b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2276b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2277e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2278e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2279c6fd2807SJeff Garzik continue; 2280c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2281c6fd2807SJeff Garzik continue; 2282c6fd2807SJeff Garzik 2283c6fd2807SJeff Garzik nr_failed++; 2284c6fd2807SJeff Garzik } 2285c6fd2807SJeff Garzik 2286c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2287c6fd2807SJeff Garzik return; 2288c6fd2807SJeff Garzik 2289c6fd2807SJeff Garzik frozen = ""; 2290c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2291c6fd2807SJeff Garzik frozen = " frozen"; 2292c6fd2807SJeff Garzik 2293a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2294a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2295a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2296a1e10f7eSTejun Heo ap->eh_tries); 2297a1e10f7eSTejun Heo 2298c6fd2807SJeff Garzik if (ehc->i.dev) { 2299c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2300a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2301a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2302a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2303c6fd2807SJeff Garzik if (desc) 2304b64bbc39STejun Heo ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2305c6fd2807SJeff Garzik } else { 23060260731fSTejun Heo ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2307a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2308a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2309a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2310c6fd2807SJeff Garzik if (desc) 23110260731fSTejun Heo 
ata_link_printk(link, KERN_ERR, "%s\n", desc); 2312c6fd2807SJeff Garzik } 2313c6fd2807SJeff Garzik 23146521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23151333e194SRobert Hancock if (ehc->i.serror) 2316da0e21d3STejun Heo ata_link_printk(link, KERN_ERR, 23171333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 23181333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 23191333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 23201333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 23211333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 23221333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 23231333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 23241333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 23251333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 23261333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 23271333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 23281333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 23291333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 23301333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 23311333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 23321333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 23331333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 23341333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 23356521148cSRobert Hancock #endif 23361333e194SRobert Hancock 2337c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2338c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 23398a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2340abb6a889STejun Heo const u8 *cdb = qc->cdb; 2341abb6a889STejun Heo char data_buf[20] = ""; 2342abb6a889STejun Heo char cdb_buf[70] = ""; 2343c6fd2807SJeff Garzik 23440260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2345b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2346c6fd2807SJeff Garzik continue; 2347c6fd2807SJeff Garzik 2348abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2349abb6a889STejun Heo static const char *dma_str[] = { 2350abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2351abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2352abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2353abb6a889STejun Heo }; 2354abb6a889STejun Heo static const char *prot_str[] = { 2355abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2356abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2357abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 23580dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 23590dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2360abb6a889STejun Heo }; 2361abb6a889STejun Heo 2362abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2363abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2364abb6a889STejun Heo dma_str[qc->dma_dir]); 2365abb6a889STejun Heo } 2366abb6a889STejun Heo 23676521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 23686521148cSRobert Hancock if (qc->scsicmd) 23696521148cSRobert Hancock scsi_print_command(qc->scsicmd); 23706521148cSRobert Hancock else 2371abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2372abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 
2373abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2374abb6a889STejun Heo cdb[0], cdb[1], cdb[2], cdb[3], 2375abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2376abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2377abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 23786521148cSRobert Hancock } else { 23796521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 23806521148cSRobert Hancock if (descr) 23816521148cSRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23826521148cSRobert Hancock "failed command: %s\n", descr); 23836521148cSRobert Hancock } 2384abb6a889STejun Heo 23858a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 23868a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2387abb6a889STejun Heo "tag %d%s\n %s" 23888a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 23895335b729STejun Heo "Emask 0x%x (%s)%s\n", 23908a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 23918a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 23928a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 23938a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2394abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 23958a937581STejun Heo res->command, res->feature, res->nsect, 23968a937581STejun Heo res->lbal, res->lbam, res->lbah, 23978a937581STejun Heo res->hob_feature, res->hob_nsect, 23988a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 23995335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 24005335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 24011333e194SRobert Hancock 24026521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 24031333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 24041333e194SRobert Hancock ATA_ERR)) { 24051333e194SRobert Hancock if (res->command & ATA_BUSY) 24061333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24071333e194SRobert Hancock "status: { Busy }\n"); 24081333e194SRobert Hancock else 24091333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24101333e194SRobert Hancock "status: { %s%s%s%s}\n", 24111333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 24121333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 24131333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 24141333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 24151333e194SRobert Hancock } 24161333e194SRobert Hancock 24171333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 24181333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 24191333e194SRobert Hancock ATA_ABORTED))) 24201333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24211333e194SRobert Hancock "error: { %s%s%s%s}\n", 24221333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 24231333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 24241333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 24251333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 24266521148cSRobert Hancock #endif 2427c6fd2807SJeff Garzik } 2428c6fd2807SJeff Garzik } 2429c6fd2807SJeff Garzik 24309b1e2658STejun Heo /** 24319b1e2658STejun Heo * ata_eh_report - report error handling to user 24329b1e2658STejun Heo * @ap: ATA port to report EH about 24339b1e2658STejun Heo * 24349b1e2658STejun Heo * Report EH to user. 24359b1e2658STejun Heo * 24369b1e2658STejun Heo * LOCKING: 24379b1e2658STejun Heo * None. 
24389b1e2658STejun Heo */ 2439fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24409b1e2658STejun Heo { 24419b1e2658STejun Heo struct ata_link *link; 24429b1e2658STejun Heo 24431eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 24449b1e2658STejun Heo ata_eh_link_report(link); 24459b1e2658STejun Heo } 24469b1e2658STejun Heo 2447cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2448b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2449b1c72916STejun Heo bool clear_classes) 2450c6fd2807SJeff Garzik { 2451f58229f8STejun Heo struct ata_device *dev; 2452c6fd2807SJeff Garzik 2453b1c72916STejun Heo if (clear_classes) 24541eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2455f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2456c6fd2807SJeff Garzik 2457f046519fSTejun Heo return reset(link, classes, deadline); 2458c6fd2807SJeff Garzik } 2459c6fd2807SJeff Garzik 2460ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link, 24615dbfc9cbSTejun Heo int rc, const unsigned int *classes) 2462c6fd2807SJeff Garzik { 246345db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2464ae791c05STejun Heo return 0; 24655dbfc9cbSTejun Heo if (rc == -EAGAIN) 2466c6fd2807SJeff Garzik return 1; 2467071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 24683495de73STejun Heo return 1; 2469c6fd2807SJeff Garzik return 0; 2470c6fd2807SJeff Garzik } 2471c6fd2807SJeff Garzik 2472fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2473c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2474c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2475c6fd2807SJeff Garzik { 2476afaa5c37STejun Heo struct ata_port *ap = link->ap; 2477b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2478936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2479705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2480c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2481416dc9edSTejun Heo unsigned int lflags = link->flags; 2482c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2483d8af0eb6STejun Heo int max_tries = 0, try = 0; 2484b1c72916STejun Heo struct ata_link *failed_link; 2485f58229f8STejun Heo struct ata_device *dev; 2486416dc9edSTejun Heo unsigned long deadline, now; 2487c6fd2807SJeff Garzik ata_reset_fn_t reset; 2488afaa5c37STejun Heo unsigned long flags; 2489416dc9edSTejun Heo u32 sstatus; 2490b1c72916STejun Heo int nr_unknown, rc; 2491c6fd2807SJeff Garzik 2492932648b0STejun Heo /* 2493932648b0STejun Heo * Prepare to reset 2494932648b0STejun Heo */ 2495d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2496d8af0eb6STejun Heo max_tries++; 249705944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 249805944bdfSTejun Heo hardreset = NULL; 249905944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 250005944bdfSTejun Heo softreset = NULL; 2501d8af0eb6STejun Heo 250219b72321STejun Heo /* make sure each reset attempt is at least COOL_DOWN apart */ 250319b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 25040a2c0f56STejun Heo now = jiffies; 250519b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 250619b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 250719b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 25080a2c0f56STejun Heo if (time_before(now, deadline)) 25090a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 251019b72321STejun Heo } 25110a2c0f56STejun Heo 2512afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2513afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2514afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2515afaa5c37STejun Heo 2516cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2517c6fd2807SJeff Garzik 25181eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2519cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2520cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2521cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2522cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2523cdeab114STejun Heo * suitable controller mode we should not touch the 2524cdeab114STejun Heo * bus as we may be talking too fast. 2525cdeab114STejun Heo */ 2526cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2527cdeab114STejun Heo 2528cdeab114STejun Heo /* If the controller has a pio mode setup function 2529cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2530cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2531cdeab114STejun Heo * configuring devices.
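 * (The ->set_piomode hook used below is the host driver's
 * struct ata_port_operations callback; a PATA driver would typically
 * wire it up as, e.g., .set_piomode = foo_set_piomode, where
 * foo_set_piomode is a purely illustrative, hypothetical name.)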
2532cdeab114STejun Heo */ 2533cdeab114STejun Heo if (ap->ops->set_piomode) 2534cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2535cdeab114STejun Heo } 2536cdeab114STejun Heo 2537cf480626STejun Heo /* prefer hardreset */ 2538932648b0STejun Heo reset = NULL; 2539cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2540cf480626STejun Heo if (hardreset) { 2541cf480626STejun Heo reset = hardreset; 2542a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 25434f7faa3fSTejun Heo } else if (softreset) { 2544cf480626STejun Heo reset = softreset; 2545a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2546cf480626STejun Heo } 2547c6fd2807SJeff Garzik 2548c6fd2807SJeff Garzik if (prereset) { 2549b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2550b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2551b1c72916STejun Heo 2552b1c72916STejun Heo if (slave) { 2553b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2554b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2555b1c72916STejun Heo } 2556b1c72916STejun Heo 2557b1c72916STejun Heo rc = prereset(link, deadline); 2558b1c72916STejun Heo 2559b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2560b1c72916STejun Heo * is skipped iff both master and slave links report 2561b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2562b1c72916STejun Heo */ 2563b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2564b1c72916STejun Heo int tmp; 2565b1c72916STejun Heo 2566b1c72916STejun Heo tmp = prereset(slave, deadline); 2567b1c72916STejun Heo if (tmp != -ENOENT) 2568b1c72916STejun Heo rc = tmp; 2569b1c72916STejun Heo 2570b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2571b1c72916STejun Heo } 2572b1c72916STejun Heo 2573c6fd2807SJeff Garzik if (rc) { 2574c961922bSAlan Cox if (rc == -ENOENT) { 2575cc0680a5STejun Heo ata_link_printk(link, KERN_DEBUG, 25764aa9ab67STejun Heo "port disabled. ignoring.\n"); 2577cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 25784aa9ab67STejun Heo 25791eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2580f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 25814aa9ab67STejun Heo 25824aa9ab67STejun Heo rc = 0; 2583c961922bSAlan Cox } else 2584cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2585c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 2586fccb6ea5STejun Heo goto out; 2587c6fd2807SJeff Garzik } 2588c6fd2807SJeff Garzik 2589932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2590d6515e6fSTejun Heo * bang classes, thaw and return. 
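 * ("Banging classes" here just means forcing every device class on the
 * link to ATA_DEV_NONE, which is exactly what the loop below does before
 * thawing a frozen host port and returning success.)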
2591932648b0STejun Heo */ 2592932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 25931eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2594f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2595d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2596d6515e6fSTejun Heo ata_is_host_link(link)) 2597d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2598fccb6ea5STejun Heo rc = 0; 2599fccb6ea5STejun Heo goto out; 2600c6fd2807SJeff Garzik } 2601932648b0STejun Heo } 2602c6fd2807SJeff Garzik 2603c6fd2807SJeff Garzik retry: 2604932648b0STejun Heo /* 2605932648b0STejun Heo * Perform reset 2606932648b0STejun Heo */ 2607dc98c32cSTejun Heo if (ata_is_host_link(link)) 2608dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2609dc98c32cSTejun Heo 2610341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 261131daabdaSTejun Heo 2612932648b0STejun Heo if (reset) { 2613c6fd2807SJeff Garzik if (verbose) 2614cc0680a5STejun Heo ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2615c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard"); 2616c6fd2807SJeff Garzik 2617c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 261819b72321STejun Heo ehc->last_reset = jiffies; 26190d64a233STejun Heo if (reset == hardreset) 26200d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 26210d64a233STejun Heo else 26220d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2623c6fd2807SJeff Garzik 2624b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2625b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2626b1c72916STejun Heo failed_link = link; 26275dbfc9cbSTejun Heo goto fail; 2628b1c72916STejun Heo } 2629c6fd2807SJeff Garzik 2630b1c72916STejun Heo /* hardreset slave link if existent */ 2631b1c72916STejun Heo if (slave && reset == hardreset) { 2632b1c72916STejun Heo int tmp; 2633b1c72916STejun Heo 2634b1c72916STejun Heo if (verbose) 2635b1c72916STejun Heo ata_link_printk(slave, KERN_INFO, 2636b1c72916STejun Heo "hard resetting link\n"); 2637b1c72916STejun Heo 2638b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2639b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2640b1c72916STejun Heo false); 2641b1c72916STejun Heo switch (tmp) { 2642b1c72916STejun Heo case -EAGAIN: 2643b1c72916STejun Heo rc = -EAGAIN; 2644b1c72916STejun Heo case 0: 2645b1c72916STejun Heo break; 2646b1c72916STejun Heo default: 2647b1c72916STejun Heo failed_link = slave; 2648b1c72916STejun Heo rc = tmp; 2649b1c72916STejun Heo goto fail; 2650b1c72916STejun Heo } 2651b1c72916STejun Heo } 2652b1c72916STejun Heo 2653b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2654c6fd2807SJeff Garzik if (reset == hardreset && 26555dbfc9cbSTejun Heo ata_eh_followup_srst_needed(link, rc, classes)) { 2656c6fd2807SJeff Garzik reset = softreset; 2657c6fd2807SJeff Garzik 2658c6fd2807SJeff Garzik if (!reset) { 2659cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2660c6fd2807SJeff Garzik "follow-up softreset required " 2661c6fd2807SJeff Garzik "but no softreset available\n"); 2662b1c72916STejun Heo failed_link = link; 2663fccb6ea5STejun Heo rc = -EINVAL; 266408cf69d0STejun Heo goto fail; 2665c6fd2807SJeff Garzik } 2666c6fd2807SJeff Garzik 2667cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2668b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2669fe2c4d01STejun Heo if (rc) { 2670fe2c4d01STejun Heo failed_link = link; 2671fe2c4d01STejun Heo goto fail; 2672fe2c4d01STejun Heo } 2673c6fd2807SJeff
Garzik } 2674932648b0STejun Heo } else { 2675932648b0STejun Heo if (verbose) 2676932648b0STejun Heo ata_link_printk(link, KERN_INFO, "no reset method " 2677932648b0STejun Heo "available, skipping reset\n"); 2678932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2679932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2680932648b0STejun Heo } 2681008a7896STejun Heo 2682932648b0STejun Heo /* 2683932648b0STejun Heo * Post-reset processing 2684932648b0STejun Heo */ 26851eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2686416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2687416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2688416dc9edSTejun Heo * drives from sleeping mode. 2689c6fd2807SJeff Garzik */ 2690f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2691054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2692c6fd2807SJeff Garzik 26933b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 26943b761d3dSTejun Heo continue; 26953b761d3dSTejun Heo 26964ccd3329STejun Heo /* apply class override */ 2697416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2698ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2699416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2700816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2701ae791c05STejun Heo } 2702ae791c05STejun Heo 2703008a7896STejun Heo /* record current link speed */ 2704936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2705936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2706b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2707b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2708008a7896STejun Heo 2709dc98c32cSTejun Heo /* thaw the port */ 2710dc98c32cSTejun Heo if (ata_is_host_link(link)) 2711dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2712dc98c32cSTejun Heo 2713f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2714f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2715f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2716f046519fSTejun Heo * This introduces a race condition where hotplug occurs between 2717f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2718f046519fSTejun Heo * link onlineness and classification result later. 2719f046519fSTejun Heo */ 2720b1c72916STejun Heo if (postreset) { 2721cc0680a5STejun Heo postreset(link, classes); 2722b1c72916STejun Heo if (slave) 2723b1c72916STejun Heo postreset(slave, classes); 2724b1c72916STejun Heo } 2725c6fd2807SJeff Garzik 27261e641060STejun Heo /* 27271e641060STejun Heo * Some controllers can't be frozen very well and may set 27281e641060STejun Heo * spurious error conditions during reset. Clear accumulated 27291e641060STejun Heo * error information. As reset is the final recovery action, 27301e641060STejun Heo * nothing is lost by doing this. 27311e641060STejun Heo */ 2732f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 27331e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2734b1c72916STejun Heo if (slave) 27351e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 27361e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2737f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2738f046519fSTejun Heo 27393b761d3dSTejun Heo /* 27403b761d3dSTejun Heo * Make sure onlineness and classification result correspond.
2741f046519fSTejun Heo * Hotplug could have happened during reset and some 2742f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2743f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 27443b761d3dSTejun Heo * link on/offlineness and classification result, those 27453b761d3dSTejun Heo * conditions can be reliably detected and retried. 2746f046519fSTejun Heo */ 2747b1c72916STejun Heo nr_unknown = 0; 27481eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 27493b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2750b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 27513b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link online " 27523b761d3dSTejun Heo "but device misclassified\n"); 2753f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2754b1c72916STejun Heo nr_unknown++; 2755b1c72916STejun Heo } 27563b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 27573b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 27583b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link offline, " 27593b761d3dSTejun Heo "clearing class %d to NONE\n", 27603b761d3dSTejun Heo classes[dev->devno]); 27613b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27623b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 27633b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link status unknown, " 27643b761d3dSTejun Heo "clearing UNKNOWN to NONE\n"); 27653b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27663b761d3dSTejun Heo } 2767f046519fSTejun Heo } 2768f046519fSTejun Heo 2769b1c72916STejun Heo if (classify && nr_unknown) { 2770f046519fSTejun Heo if (try < max_tries) { 2771f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, "link online but " 27723b761d3dSTejun Heo "%d devices misclassified, retrying\n", 27733b761d3dSTejun Heo nr_unknown); 2774b1c72916STejun Heo failed_link = link; 2775f046519fSTejun Heo rc = -EAGAIN; 2776f046519fSTejun Heo goto fail; 2777f046519fSTejun Heo } 2778f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, 27793b761d3dSTejun Heo "link online but %d devices misclassified, " 27803b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2781f046519fSTejun Heo } 2782f046519fSTejun Heo 2783c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2784cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2785b1c72916STejun Heo if (slave) 2786b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 278719b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2788c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 27896b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2790416dc9edSTejun Heo 2791416dc9edSTejun Heo rc = 0; 2792fccb6ea5STejun Heo out: 2793fccb6ea5STejun Heo /* clear hotplug flag */ 2794fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2795b1c72916STejun Heo if (slave) 2796b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2797afaa5c37STejun Heo 2798afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2799afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2800afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2801afaa5c37STejun Heo 2802c6fd2807SJeff Garzik return rc; 2803416dc9edSTejun Heo 2804416dc9edSTejun Heo fail: 28055958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 28065958e302STejun Heo if (!ata_is_host_link(link) && 28075958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus))
28085958e302STejun Heo rc = -ERESTART; 28095958e302STejun Heo 2810416dc9edSTejun Heo if (rc == -ERESTART || try >= max_tries) 2811416dc9edSTejun Heo goto out; 2812416dc9edSTejun Heo 2813416dc9edSTejun Heo now = jiffies; 2814416dc9edSTejun Heo if (time_before(now, deadline)) { 2815416dc9edSTejun Heo unsigned long delta = deadline - now; 2816416dc9edSTejun Heo 2817b1c72916STejun Heo ata_link_printk(failed_link, KERN_WARNING, 28180a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 28190a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2820416dc9edSTejun Heo 2821416dc9edSTejun Heo while (delta) 2822416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2823416dc9edSTejun Heo } 2824416dc9edSTejun Heo 2825b1c72916STejun Heo if (try == max_tries - 1) { 2826a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2827b1c72916STejun Heo if (slave) 2828a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2829b1c72916STejun Heo } else if (rc == -EPIPE) 2830a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2831b1c72916STejun Heo 2832416dc9edSTejun Heo if (hardreset) 2833416dc9edSTejun Heo reset = hardreset; 2834416dc9edSTejun Heo goto retry; 2835c6fd2807SJeff Garzik } 2836c6fd2807SJeff Garzik 283745fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 283845fabbb7SElias Oltmanns { 283945fabbb7SElias Oltmanns struct ata_link *link; 284045fabbb7SElias Oltmanns struct ata_device *dev; 284145fabbb7SElias Oltmanns unsigned long flags; 284245fabbb7SElias Oltmanns 284345fabbb7SElias Oltmanns /* 284445fabbb7SElias Oltmanns * This function can be thought of as an extended version of 284545fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 284645fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 284745fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 284845fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 284945fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 285045fabbb7SElias Oltmanns * up park requests to other devices on the same port or 285145fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 285245fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 285345fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 285445fabbb7SElias Oltmanns * 285545fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 285645fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 285745fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 285845fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 285945fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for 286045fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 286145fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 286245fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 286345fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 286445fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 286545fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 286645fabbb7SElias Oltmanns * ata_eh_recover() again. 
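 *
 * The producer side lives in ata_scsi_park_store(); under ap->lock it
 * does roughly the following (an illustrative sketch, not a verbatim
 * copy of that function; "timeout" stands in for the requested value):
 *
 *	dev->unpark_deadline = ata_deadline(jiffies, timeout);
 *	dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
 *	ata_port_schedule_eh(ap);
 *	complete_all(&ap->park_req_pending);
 *
 * and that complete_all() is what satisfies the
 * wait_for_completion_timeout() mentioned above.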
286745fabbb7SElias Oltmanns */ 286845fabbb7SElias Oltmanns 286945fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 287045fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 28711eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 28721eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 287345fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 287445fabbb7SElias Oltmanns 287545fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 287645fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 287745fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 287845fabbb7SElias Oltmanns } 287945fabbb7SElias Oltmanns } 288045fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 288145fabbb7SElias Oltmanns } 288245fabbb7SElias Oltmanns 288345fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 288445fabbb7SElias Oltmanns { 288545fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 288645fabbb7SElias Oltmanns struct ata_taskfile tf; 288745fabbb7SElias Oltmanns unsigned int err_mask; 288845fabbb7SElias Oltmanns 288945fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 289045fabbb7SElias Oltmanns if (park) { 289145fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 289245fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 289345fabbb7SElias Oltmanns tf.feature = 0x44; 289445fabbb7SElias Oltmanns tf.lbal = 0x4c; 289545fabbb7SElias Oltmanns tf.lbam = 0x4e; 289645fabbb7SElias Oltmanns tf.lbah = 0x55; 289745fabbb7SElias Oltmanns } else { 289845fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 289945fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 290045fabbb7SElias Oltmanns } 290145fabbb7SElias Oltmanns 290245fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 290345fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 290445fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 290545fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 290645fabbb7SElias Oltmanns ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 290745fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 290845fabbb7SElias Oltmanns } 290945fabbb7SElias Oltmanns } 291045fabbb7SElias Oltmanns 29110260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2912c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2913c6fd2807SJeff Garzik { 29140260731fSTejun Heo struct ata_port *ap = link->ap; 29150260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2916c6fd2807SJeff Garzik struct ata_device *dev; 29178c3c52a8STejun Heo unsigned int new_mask = 0; 2918c6fd2807SJeff Garzik unsigned long flags; 2919f58229f8STejun Heo int rc = 0; 2920c6fd2807SJeff Garzik 2921c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2922c6fd2807SJeff Garzik 29238c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 29248c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 29258c3c52a8STejun Heo * device before the master device is identified. 
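 * That is why the loop below walks the devices with ALL_REVERSE
 * (slave first); the forward pass that configures the new devices is
 * done later so detection messages still appear in master-then-slave
 * order.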
29268c3c52a8STejun Heo */ 29271eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2928f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2929f58229f8STejun Heo unsigned int readid_flags = 0; 2930c6fd2807SJeff Garzik 2931bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2932bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2933bff04647STejun Heo 29349666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2935633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2936633273a3STejun Heo 2937b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2938c6fd2807SJeff Garzik rc = -EIO; 29398c3c52a8STejun Heo goto err; 2940c6fd2807SJeff Garzik } 2941c6fd2807SJeff Garzik 29420260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2943422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2944422c9daaSTejun Heo readid_flags); 2945c6fd2807SJeff Garzik if (rc) 29468c3c52a8STejun Heo goto err; 2947c6fd2807SJeff Garzik 29480260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2949c6fd2807SJeff Garzik 2950baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2951baa1e78aSTejun Heo * transfer mode. 2952baa1e78aSTejun Heo */ 2953baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2954baa1e78aSTejun Heo 2955c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2956ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 2957c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2958c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2959c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2960842faa6cSTejun Heo /* Temporarily set dev->class, it will be 2961842faa6cSTejun Heo * permanently set once all configurations are 2962842faa6cSTejun Heo * complete. This is necessary because new 2963842faa6cSTejun Heo * device configuration is done in two 2964842faa6cSTejun Heo * separate loops. 2965842faa6cSTejun Heo */ 2966c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2967c6fd2807SJeff Garzik 2968633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2969633273a3STejun Heo rc = sata_pmp_attach(dev); 2970633273a3STejun Heo else 2971633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2972633273a3STejun Heo readid_flags, dev->id); 2973842faa6cSTejun Heo 2974842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 2975842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 2976842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 2977842faa6cSTejun Heo 29788c3c52a8STejun Heo switch (rc) { 29798c3c52a8STejun Heo case 0: 298099cf610aSTejun Heo /* clear error info accumulated during probe */ 298199cf610aSTejun Heo ata_ering_clear(&dev->ering); 2982f58229f8STejun Heo new_mask |= 1 << dev->devno; 29838c3c52a8STejun Heo break; 29848c3c52a8STejun Heo case -ENOENT: 298555a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 298655a8e2c8STejun Heo * device. No need to reset. Just 2987842faa6cSTejun Heo * thaw and ignore the device. 
298855a8e2c8STejun Heo */ 298955a8e2c8STejun Heo ata_eh_thaw_port(ap); 2990c6fd2807SJeff Garzik break; 29918c3c52a8STejun Heo default: 29928c3c52a8STejun Heo goto err; 29938c3c52a8STejun Heo } 29948c3c52a8STejun Heo } 2995c6fd2807SJeff Garzik } 2996c6fd2807SJeff Garzik 2997c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 299833267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 299933267325STejun Heo if (ap->ops->cable_detect) 3000c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 300133267325STejun Heo ata_force_cbl(ap); 300233267325STejun Heo } 3003c1c4e8d5STejun Heo 30048c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 30058c3c52a8STejun Heo * device detection messages backwards. 30068c3c52a8STejun Heo */ 30071eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 30084f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 30098c3c52a8STejun Heo continue; 30108c3c52a8STejun Heo 3011842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3012842faa6cSTejun Heo 30134f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 30144f7c2874STejun Heo continue; 30154f7c2874STejun Heo 30168c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 30178c3c52a8STejun Heo rc = ata_dev_configure(dev); 30188c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3019842faa6cSTejun Heo if (rc) { 3020842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 30218c3c52a8STejun Heo goto err; 3022842faa6cSTejun Heo } 30238c3c52a8STejun Heo 3024c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3025c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3026c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3027baa1e78aSTejun Heo 302855a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3029baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3030c6fd2807SJeff Garzik } 3031c6fd2807SJeff Garzik 30328c3c52a8STejun Heo return 0; 30338c3c52a8STejun Heo 30348c3c52a8STejun Heo err: 3035c6fd2807SJeff Garzik *r_failed_dev = dev; 30368c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3037c6fd2807SJeff Garzik return rc; 3038c6fd2807SJeff Garzik } 3039c6fd2807SJeff Garzik 30406f1d1e3aSTejun Heo /** 30416f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 30426f1d1e3aSTejun Heo * @link: link on which timings will be programmed 304398a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 30446f1d1e3aSTejun Heo * 30456f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 30466f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 30476f1d1e3aSTejun Heo * returned in @r_failed_dev. 30486f1d1e3aSTejun Heo * 30496f1d1e3aSTejun Heo * LOCKING: 30506f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
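 * (Called from EH context; ata_eh_recover() below invokes it as
 * "rc = ata_set_mode(link, &dev);" whenever ATA_EHI_SETMODE is set,
 * with the out parameter reporting the device that failed, if any.)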
30516f1d1e3aSTejun Heo * 30526f1d1e3aSTejun Heo * RETURNS: 30536f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 30546f1d1e3aSTejun Heo */ 30556f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 30566f1d1e3aSTejun Heo { 30576f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 305800115e0fSTejun Heo struct ata_device *dev; 305900115e0fSTejun Heo int rc; 30606f1d1e3aSTejun Heo 306176326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 30621eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 306376326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 306476326ac1STejun Heo struct ata_ering_entry *ent; 306576326ac1STejun Heo 306676326ac1STejun Heo ent = ata_ering_top(&dev->ering); 306776326ac1STejun Heo if (ent) 306876326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 306976326ac1STejun Heo } 307076326ac1STejun Heo } 307176326ac1STejun Heo 30726f1d1e3aSTejun Heo /* has private set_mode? */ 30736f1d1e3aSTejun Heo if (ap->ops->set_mode) 307400115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 307500115e0fSTejun Heo else 307600115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 307700115e0fSTejun Heo 307800115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 30791eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 308000115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 308100115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 308200115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 308300115e0fSTejun Heo 308400115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 308500115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 308600115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 308700115e0fSTejun Heo } 308800115e0fSTejun Heo 308900115e0fSTejun Heo return rc; 30906f1d1e3aSTejun Heo } 30916f1d1e3aSTejun Heo 309211fc33daSTejun Heo /** 309311fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 309411fc33daSTejun Heo * @dev: ATAPI device to clear UA for 309511fc33daSTejun Heo * 309611fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 309711fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 309811fc33daSTejun Heo * function clears UA. 309911fc33daSTejun Heo * 310011fc33daSTejun Heo * LOCKING: 310111fc33daSTejun Heo * EH context (may sleep). 310211fc33daSTejun Heo * 310311fc33daSTejun Heo * RETURNS: 310411fc33daSTejun Heo * 0 on success, -errno on failure. 
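 *
 * Internally this issues TEST UNIT READY through atapi_eh_tur() and,
 * whenever the sense key comes back as UNIT ATTENTION, follows up with
 * REQUEST SENSE through atapi_eh_request_sense(), repeating the pair up
 * to ATA_EH_UA_TRIES times.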
310511fc33daSTejun Heo */ 310611fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 310711fc33daSTejun Heo { 310811fc33daSTejun Heo int i; 310911fc33daSTejun Heo 311011fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3111b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 311211fc33daSTejun Heo u8 sense_key = 0; 311311fc33daSTejun Heo unsigned int err_mask; 311411fc33daSTejun Heo 311511fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 311611fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 311711fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 311811fc33daSTejun Heo "failed (err_mask=0x%x)\n", err_mask); 311911fc33daSTejun Heo return -EIO; 312011fc33daSTejun Heo } 312111fc33daSTejun Heo 312211fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 312311fc33daSTejun Heo return 0; 312411fc33daSTejun Heo 312511fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 312611fc33daSTejun Heo if (err_mask) { 312711fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to clear " 312811fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 312911fc33daSTejun Heo return -EIO; 313011fc33daSTejun Heo } 313111fc33daSTejun Heo } 313211fc33daSTejun Heo 313311fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, 313411fc33daSTejun Heo "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 313511fc33daSTejun Heo 313611fc33daSTejun Heo return 0; 313711fc33daSTejun Heo } 313811fc33daSTejun Heo 31396013efd8STejun Heo /** 31406013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 31416013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 31426013efd8STejun Heo * 31436013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 31446013efd8STejun Heo * immediately as it means that @dev failed to remap and already 31456013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 31466013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 31476013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 31486013efd8STejun Heo * to be retried. 31496013efd8STejun Heo * 31506013efd8STejun Heo * This function determines whether FLUSH failure retry is 31516013efd8STejun Heo * necessary and performs it if so. 31526013efd8STejun Heo * 31536013efd8STejun Heo * RETURNS: 31546013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 31556013efd8STejun Heo */ 31566013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 31576013efd8STejun Heo { 31586013efd8STejun Heo struct ata_link *link = dev->link; 31596013efd8STejun Heo struct ata_port *ap = link->ap; 31606013efd8STejun Heo struct ata_queued_cmd *qc; 31616013efd8STejun Heo struct ata_taskfile tf; 31626013efd8STejun Heo unsigned int err_mask; 31636013efd8STejun Heo int rc = 0; 31646013efd8STejun Heo 31656013efd8STejun Heo /* did flush fail for this device? 
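 * Only the command recorded in link->active_tag is considered: it has
 * to be FLUSH CACHE (EXT) and must not have been rejected by the device
 * itself (AC_ERR_DEV), otherwise there is nothing useful to retry.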
*/ 31666013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 31676013efd8STejun Heo return 0; 31686013efd8STejun Heo 31696013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 31706013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 31716013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 31726013efd8STejun Heo return 0; 31736013efd8STejun Heo 31746013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 31756013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 31766013efd8STejun Heo return 0; 31776013efd8STejun Heo 31786013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 31796013efd8STejun Heo ata_tf_init(dev, &tf); 31806013efd8STejun Heo 31816013efd8STejun Heo tf.command = qc->tf.command; 31826013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 31836013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 31846013efd8STejun Heo 31856013efd8STejun Heo ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n", 31866013efd8STejun Heo tf.command, qc->err_mask); 31876013efd8STejun Heo 31886013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 31896013efd8STejun Heo if (!err_mask) { 31906013efd8STejun Heo /* 31916013efd8STejun Heo * FLUSH is complete but there's no way to 31926013efd8STejun Heo * successfully complete a failed command from EH. 31936013efd8STejun Heo * Making sure retry is allowed at least once and 31946013efd8STejun Heo * retrying it should do the trick - whatever was in 31956013efd8STejun Heo * the cache is already on the platter and this won't 31966013efd8STejun Heo * cause infinite loop. 31976013efd8STejun Heo */ 31986013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 31996013efd8STejun Heo } else { 32006013efd8STejun Heo ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", 32016013efd8STejun Heo err_mask); 32026013efd8STejun Heo rc = -EIO; 32036013efd8STejun Heo 32046013efd8STejun Heo /* if device failed it, report it to upper layers */ 32056013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 32066013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 32076013efd8STejun Heo qc->result_tf = tf; 32086013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 32096013efd8STejun Heo rc = 0; 32106013efd8STejun Heo } 32116013efd8STejun Heo } 32126013efd8STejun Heo return rc; 32136013efd8STejun Heo } 32146013efd8STejun Heo 32156b7ae954STejun Heo /** 32166b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 32176b7ae954STejun Heo * @link: link to configure power management 32186b7ae954STejun Heo * @policy: the link power management policy 32196b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 32206b7ae954STejun Heo * 32216b7ae954STejun Heo * Enable SATA Interface power management. This will enable 32226b7ae954STejun Heo * Device Interface Power Management (DIPM) for min_power 32236b7ae954STejun Heo * policy, and then call driver specific callbacks for 32246b7ae954STejun Heo * enabling Host Initiated Power management. 32256b7ae954STejun Heo * 32266b7ae954STejun Heo * LOCKING: 32276b7ae954STejun Heo * EH context. 32286b7ae954STejun Heo * 32296b7ae954STejun Heo * RETURNS: 32306b7ae954STejun Heo * 0 on success, -errno on failure. 32316b7ae954STejun Heo */ 32326b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 32336b7ae954STejun Heo struct ata_device **r_failed_dev) 32346b7ae954STejun Heo { 3235*6c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? 
link->ap : NULL; 32366b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 32376b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 32386b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 32396b7ae954STejun Heo unsigned int err_mask; 32406b7ae954STejun Heo int rc; 32416b7ae954STejun Heo 32426b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 32436b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 32446b7ae954STejun Heo return 0; 32456b7ae954STejun Heo 32466b7ae954STejun Heo /* 32476b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 32486b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 32496b7ae954STejun Heo * device and link configurations such that the host always 32506b7ae954STejun Heo * allows DIPM requests. 32516b7ae954STejun Heo */ 32526b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 32536b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 32546b7ae954STejun Heo bool dipm = ata_id_has_dipm(dev->id); 32556b7ae954STejun Heo 32566b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 32576b7ae954STejun Heo if (!link_dev) 32586b7ae954STejun Heo link_dev = dev; 32596b7ae954STejun Heo 32606b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 32616b7ae954STejun Heo lpm_dev = dev; 32626b7ae954STejun Heo 32636b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 32646b7ae954STejun Heo if (!hipm) 32656b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 32666b7ae954STejun Heo 32676b7ae954STejun Heo /* disable DIPM before changing link config */ 32686b7ae954STejun Heo if (policy != ATA_LPM_MIN_POWER && dipm) { 32696b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 32706b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 32716b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 32726b7ae954STejun Heo ata_dev_printk(dev, KERN_WARNING, 32736b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 32746b7ae954STejun Heo err_mask); 32756b7ae954STejun Heo rc = -EIO; 32766b7ae954STejun Heo goto fail; 32776b7ae954STejun Heo } 32786b7ae954STejun Heo } 32796b7ae954STejun Heo } 32806b7ae954STejun Heo 3281*6c8ea89cSTejun Heo if (ap) { 32826b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 32836b7ae954STejun Heo if (!rc && ap->slave_link) 32846b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 3285*6c8ea89cSTejun Heo } else 3286*6c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 32876b7ae954STejun Heo 32886b7ae954STejun Heo /* 32896b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 32906b7ae954STejun Heo * device on the link. 32916b7ae954STejun Heo */ 32926b7ae954STejun Heo if (rc) { 32936b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 32946b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 32956b7ae954STejun Heo return 0; 32966b7ae954STejun Heo } 32976b7ae954STejun Heo dev = lpm_dev ? 
lpm_dev : link_dev; 32986b7ae954STejun Heo goto fail; 32996b7ae954STejun Heo } 33006b7ae954STejun Heo 33016b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 33026b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 33036b7ae954STejun Heo if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { 33046b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33056b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 33066b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 33076b7ae954STejun Heo ata_dev_printk(dev, KERN_WARNING, 33086b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 33096b7ae954STejun Heo err_mask); 33106b7ae954STejun Heo rc = -EIO; 33116b7ae954STejun Heo goto fail; 33126b7ae954STejun Heo } 33136b7ae954STejun Heo } 33146b7ae954STejun Heo } 33156b7ae954STejun Heo 33166b7ae954STejun Heo link->lpm_policy = policy; 33176b7ae954STejun Heo if (ap && ap->slave_link) 33186b7ae954STejun Heo ap->slave_link->lpm_policy = policy; 33196b7ae954STejun Heo return 0; 33206b7ae954STejun Heo 33216b7ae954STejun Heo fail: 33226b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 33236b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 33246b7ae954STejun Heo ata_link_printk(link, KERN_WARNING, 33256b7ae954STejun Heo "disabling LPM on the link\n"); 33266b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33276b7ae954STejun Heo } 33286b7ae954STejun Heo if (r_failed_dev) 33296b7ae954STejun Heo *r_failed_dev = dev; 33306b7ae954STejun Heo return rc; 33316b7ae954STejun Heo } 33326b7ae954STejun Heo 33330260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link) 3334c6fd2807SJeff Garzik { 3335f58229f8STejun Heo struct ata_device *dev; 3336f58229f8STejun Heo int cnt = 0; 3337c6fd2807SJeff Garzik 33381eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3339c6fd2807SJeff Garzik cnt++; 3340c6fd2807SJeff Garzik return cnt; 3341c6fd2807SJeff Garzik } 3342c6fd2807SJeff Garzik 33430260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3344c6fd2807SJeff Garzik { 3345f58229f8STejun Heo struct ata_device *dev; 3346f58229f8STejun Heo int cnt = 0; 3347c6fd2807SJeff Garzik 33481eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3349f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3350c6fd2807SJeff Garzik cnt++; 3351c6fd2807SJeff Garzik return cnt; 3352c6fd2807SJeff Garzik } 3353c6fd2807SJeff Garzik 33540260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3355c6fd2807SJeff Garzik { 3356672b2d65STejun Heo struct ata_port *ap = link->ap; 33570260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3358f58229f8STejun Heo struct ata_device *dev; 3359c6fd2807SJeff Garzik 3360f9df58cbSTejun Heo /* skip disabled links */ 3361f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3362f9df58cbSTejun Heo return 1; 3363f9df58cbSTejun Heo 3364e2f3d75fSTejun Heo /* skip if explicitly requested */ 3365e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3366e2f3d75fSTejun Heo return 1; 3367e2f3d75fSTejun Heo 3368672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3369672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3370672b2d65STejun Heo return 0; 3371672b2d65STejun Heo 3372672b2d65STejun Heo /* reset at least once if reset is requested */ 3373672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3374672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3375c6fd2807SJeff Garzik return 0; 3376c6fd2807SJeff 
Garzik 3377c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 33781eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3379c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3380c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3381c6fd2807SJeff Garzik return 0; 3382c6fd2807SJeff Garzik } 3383c6fd2807SJeff Garzik 3384c6fd2807SJeff Garzik return 1; 3385c6fd2807SJeff Garzik } 3386c6fd2807SJeff Garzik 3387c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3388c2c7a89cSTejun Heo { 3389c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3390c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3391c2c7a89cSTejun Heo int *trials = void_arg; 3392c2c7a89cSTejun Heo 3393c2c7a89cSTejun Heo if (ent->timestamp < now - min(now, interval)) 3394c2c7a89cSTejun Heo return -1; 3395c2c7a89cSTejun Heo 3396c2c7a89cSTejun Heo (*trials)++; 3397c2c7a89cSTejun Heo return 0; 3398c2c7a89cSTejun Heo } 3399c2c7a89cSTejun Heo 340002c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 340102c05a27STejun Heo { 340202c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3403c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3404c2c7a89cSTejun Heo int trials = 0; 340502c05a27STejun Heo 340602c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 340702c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 340802c05a27STejun Heo return 0; 340902c05a27STejun Heo 341002c05a27STejun Heo ata_eh_detach_dev(dev); 341102c05a27STejun Heo ata_dev_init(dev); 341202c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3413cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 341400115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 341500115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 341602c05a27STejun Heo 34176b7ae954STejun Heo /* the link may be in a deep sleep, wake it up */ 3418*6c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 3419*6c8ea89cSTejun Heo if (ata_is_host_link(link)) 3420*6c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 3421*6c8ea89cSTejun Heo ATA_LPM_EMPTY); 3422*6c8ea89cSTejun Heo else 3423*6c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 3424*6c8ea89cSTejun Heo ATA_LPM_EMPTY); 3425*6c8ea89cSTejun Heo } 34266b7ae954STejun Heo 3427c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3428c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3429c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3430c2c7a89cSTejun Heo * there are consecutive failed probes. 3431c2c7a89cSTejun Heo * 3432c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3433c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3434c2c7a89cSTejun Heo * forced to 1.5Gbps. 3435c2c7a89cSTejun Heo * 3436c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3437c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3438c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events.
3439c2c7a89cSTejun Heo */ 3440c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3441c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3442c2c7a89cSTejun Heo 3443c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3444c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3445c2c7a89cSTejun Heo 344602c05a27STejun Heo return 1; 344702c05a27STejun Heo } 344802c05a27STejun Heo 34499b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3450fee7ca72STejun Heo { 34519af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3452fee7ca72STejun Heo 3453cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3454cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3455cf9a590aSTejun Heo */ 3456cf9a590aSTejun Heo if (err != -EAGAIN) 3457fee7ca72STejun Heo ehc->tries[dev->devno]--; 3458fee7ca72STejun Heo 3459fee7ca72STejun Heo switch (err) { 3460fee7ca72STejun Heo case -ENODEV: 3461fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3462fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3463fee7ca72STejun Heo case -EINVAL: 3464fee7ca72STejun Heo /* give it just one more chance */ 3465fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3466fee7ca72STejun Heo case -EIO: 3467d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3468fee7ca72STejun Heo /* This is the last chance, better to slow 3469fee7ca72STejun Heo * down than lose it. 3470fee7ca72STejun Heo */ 3471a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3472d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3473fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3474fee7ca72STejun Heo } 3475fee7ca72STejun Heo } 3476fee7ca72STejun Heo 3477fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3478fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3479fee7ca72STejun Heo ata_dev_disable(dev); 3480fee7ca72STejun Heo 3481fee7ca72STejun Heo /* detach if offline */ 3482b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3483fee7ca72STejun Heo ata_eh_detach_dev(dev); 3484fee7ca72STejun Heo 348502c05a27STejun Heo /* schedule probe if necessary */ 348687fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3487fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 348887fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 348987fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 349087fbc5a0STejun Heo } 34919b1e2658STejun Heo 34929b1e2658STejun Heo return 1; 3493fee7ca72STejun Heo } else { 3494cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 34959b1e2658STejun Heo return 0; 3496fee7ca72STejun Heo } 3497fee7ca72STejun Heo } 3498fee7ca72STejun Heo 3499c6fd2807SJeff Garzik /** 3500c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3501c6fd2807SJeff Garzik * @ap: host port to recover 3502c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3503c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3504c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3505c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 35069b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3507c6fd2807SJeff Garzik * 3508c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3509c6fd2807SJeff Garzik * libata exception handling. 
On entry, actions required to 35109b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 35119b1e2658STejun Heo * link's eh_context. This function executes all the operations 35129b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3513c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3514c6fd2807SJeff Garzik * 3515c6fd2807SJeff Garzik * LOCKING: 3516c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3517c6fd2807SJeff Garzik * 3518c6fd2807SJeff Garzik * RETURNS: 3519c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3520c6fd2807SJeff Garzik */ 3521fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3522c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 35239b1e2658STejun Heo ata_postreset_fn_t postreset, 35249b1e2658STejun Heo struct ata_link **r_failed_link) 3525c6fd2807SJeff Garzik { 35269b1e2658STejun Heo struct ata_link *link; 3527c6fd2807SJeff Garzik struct ata_device *dev; 35286b7ae954STejun Heo int rc, nr_fails; 352945fabbb7SElias Oltmanns unsigned long flags, deadline; 3530c6fd2807SJeff Garzik 3531c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3532c6fd2807SJeff Garzik 3533c6fd2807SJeff Garzik /* prep for recovery */ 35341eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35359b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35369b1e2658STejun Heo 3537f9df58cbSTejun Heo /* re-enable link? */ 3538f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3539f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3540f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3541f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3542f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3543f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3544f9df58cbSTejun Heo } 3545f9df58cbSTejun Heo 35461eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3547fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3548fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3549fd995f70STejun Heo else 3550c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3551c6fd2807SJeff Garzik 355279a55b72STejun Heo /* collect port action mask recorded in dev actions */ 35539b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 35549b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3555f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 355679a55b72STejun Heo 3557c6fd2807SJeff Garzik /* process hotplug request */ 3558c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3559c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3560c6fd2807SJeff Garzik 356102c05a27STejun Heo /* schedule probe if necessary */ 356202c05a27STejun Heo if (!ata_dev_enabled(dev)) 356302c05a27STejun Heo ata_eh_schedule_probe(dev); 3564c6fd2807SJeff Garzik } 35659b1e2658STejun Heo } 3566c6fd2807SJeff Garzik 3567c6fd2807SJeff Garzik retry: 3568c6fd2807SJeff Garzik rc = 0; 3569c6fd2807SJeff Garzik 3570c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3571c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3572c6fd2807SJeff Garzik goto out; 3573c6fd2807SJeff Garzik 35749b1e2658STejun Heo /* prep for EH */ 35751eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35769b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35779b1e2658STejun Heo 3578c6fd2807SJeff Garzik /* skip EH if possible. 
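 * ata_eh_skip_recovery() above encodes the conditions: the link is
 * disabled or explicitly excluded, or the port is not frozen, no
 * requested reset is still outstanding, no device is enabled and every
 * vacant slot is already classed ATA_DEV_NONE.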
*/ 35790260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3580c6fd2807SJeff Garzik ehc->i.action = 0; 3581c6fd2807SJeff Garzik 35821eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3583f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 35849b1e2658STejun Heo } 3585c6fd2807SJeff Garzik 3586c6fd2807SJeff Garzik /* reset */ 35871eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35889b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35899b1e2658STejun Heo 3590cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 35919b1e2658STejun Heo continue; 35929b1e2658STejun Heo 35939b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3594dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3595c6fd2807SJeff Garzik if (rc) { 35960260731fSTejun Heo ata_link_printk(link, KERN_ERR, 3597c6fd2807SJeff Garzik "reset failed, giving up\n"); 3598c6fd2807SJeff Garzik goto out; 3599c6fd2807SJeff Garzik } 36009b1e2658STejun Heo } 3601c6fd2807SJeff Garzik 360245fabbb7SElias Oltmanns do { 360345fabbb7SElias Oltmanns unsigned long now; 360445fabbb7SElias Oltmanns 360545fabbb7SElias Oltmanns /* 360645fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 360745fabbb7SElias Oltmanns * ap->park_req_pending 360845fabbb7SElias Oltmanns */ 360945fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 361045fabbb7SElias Oltmanns 361145fabbb7SElias Oltmanns deadline = jiffies; 36121eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36131eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 361445fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 361545fabbb7SElias Oltmanns unsigned long tmp; 361645fabbb7SElias Oltmanns 361745fabbb7SElias Oltmanns if (dev->class != ATA_DEV_ATA) 361845fabbb7SElias Oltmanns continue; 361945fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 362045fabbb7SElias Oltmanns ATA_EH_PARK)) 362145fabbb7SElias Oltmanns continue; 362245fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 362345fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 362445fabbb7SElias Oltmanns deadline = tmp; 362545fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 362645fabbb7SElias Oltmanns continue; 362745fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 362845fabbb7SElias Oltmanns continue; 362945fabbb7SElias Oltmanns 363045fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 363145fabbb7SElias Oltmanns } 363245fabbb7SElias Oltmanns } 363345fabbb7SElias Oltmanns 363445fabbb7SElias Oltmanns now = jiffies; 363545fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 363645fabbb7SElias Oltmanns break; 363745fabbb7SElias Oltmanns 363845fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 363945fabbb7SElias Oltmanns deadline - now); 364045fabbb7SElias Oltmanns } while (deadline); 36411eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36421eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 364345fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 364445fabbb7SElias Oltmanns (1 << dev->devno))) 364545fabbb7SElias Oltmanns continue; 364645fabbb7SElias Oltmanns 364745fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 364845fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 364945fabbb7SElias Oltmanns } 365045fabbb7SElias Oltmanns } 365145fabbb7SElias Oltmanns 36529b1e2658STejun Heo /* the rest */ 36536b7ae954STejun Heo nr_fails = 0; 36546b7ae954STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 36559b1e2658STejun Heo struct ata_eh_context *ehc = 
&link->eh_context; 36569b1e2658STejun Heo 36576b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 36586b7ae954STejun Heo goto config_lpm; 36596b7ae954STejun Heo 3660c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 36610260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3662c6fd2807SJeff Garzik if (rc) 36636b7ae954STejun Heo goto rest_fail; 3664c6fd2807SJeff Garzik 3665633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3666633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3667633273a3STejun Heo ehc->i.action = 0; 3668633273a3STejun Heo return 0; 3669633273a3STejun Heo } 3670633273a3STejun Heo 3671baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3672baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 36730260731fSTejun Heo rc = ata_set_mode(link, &dev); 36744ae72a1eSTejun Heo if (rc) 36756b7ae954STejun Heo goto rest_fail; 3676baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3677c6fd2807SJeff Garzik } 3678c6fd2807SJeff Garzik 367911fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 368011fc33daSTejun Heo * disrupting the current users of the device. 368111fc33daSTejun Heo */ 368211fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 36831eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 368411fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 368511fc33daSTejun Heo continue; 368611fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 368711fc33daSTejun Heo if (rc) 36886b7ae954STejun Heo goto rest_fail; 368911fc33daSTejun Heo } 369011fc33daSTejun Heo } 369111fc33daSTejun Heo 36926013efd8STejun Heo /* retry flush if necessary */ 36936013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 36946013efd8STejun Heo if (dev->class != ATA_DEV_ATA) 36956013efd8STejun Heo continue; 36966013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 36976013efd8STejun Heo if (rc) 36986b7ae954STejun Heo goto rest_fail; 36996013efd8STejun Heo } 37006013efd8STejun Heo 37016b7ae954STejun Heo config_lpm: 370211fc33daSTejun Heo /* configure link power saving */ 37036b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 37046b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 37056b7ae954STejun Heo if (rc) 37066b7ae954STejun Heo goto rest_fail; 37076b7ae954STejun Heo } 3708ca77329fSKristen Carlson Accardi 37099b1e2658STejun Heo /* this link is okay now */ 37109b1e2658STejun Heo ehc->i.flags = 0; 37119b1e2658STejun Heo continue; 3712c6fd2807SJeff Garzik 37136b7ae954STejun Heo rest_fail: 37146b7ae954STejun Heo nr_fails++; 37156b7ae954STejun Heo if (dev) 37160a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3717c6fd2807SJeff Garzik 3718b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3719b06ce3e5STejun Heo /* PMP reset requires working host port. 3720b06ce3e5STejun Heo * Can't retry if it's frozen. 
3721b06ce3e5STejun Heo */ 3722071f44b1STejun Heo if (sata_pmp_attached(ap)) 3723b06ce3e5STejun Heo goto out; 37249b1e2658STejun Heo break; 37259b1e2658STejun Heo } 3726b06ce3e5STejun Heo } 37279b1e2658STejun Heo 37286b7ae954STejun Heo if (nr_fails) 3729c6fd2807SJeff Garzik goto retry; 3730c6fd2807SJeff Garzik 3731c6fd2807SJeff Garzik out: 37329b1e2658STejun Heo if (rc && r_failed_link) 37339b1e2658STejun Heo *r_failed_link = link; 3734c6fd2807SJeff Garzik 3735c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3736c6fd2807SJeff Garzik return rc; 3737c6fd2807SJeff Garzik } 3738c6fd2807SJeff Garzik 3739c6fd2807SJeff Garzik /** 3740c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3741c6fd2807SJeff Garzik * @ap: host port to finish EH for 3742c6fd2807SJeff Garzik * 3743c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3744c6fd2807SJeff Garzik * failed qcs. 3745c6fd2807SJeff Garzik * 3746c6fd2807SJeff Garzik * LOCKING: 3747c6fd2807SJeff Garzik * None. 3748c6fd2807SJeff Garzik */ 3749fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3750c6fd2807SJeff Garzik { 3751c6fd2807SJeff Garzik int tag; 3752c6fd2807SJeff Garzik 3753c6fd2807SJeff Garzik /* retry or finish qcs */ 3754c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3755c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3756c6fd2807SJeff Garzik 3757c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3758c6fd2807SJeff Garzik continue; 3759c6fd2807SJeff Garzik 3760c6fd2807SJeff Garzik if (qc->err_mask) { 3761c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3762c6fd2807SJeff Garzik * generate sense data in this function, 3763c6fd2807SJeff Garzik * considering both err_mask and tf. 3764c6fd2807SJeff Garzik */ 376503faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3766c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 376703faab78STejun Heo else 376803faab78STejun Heo ata_eh_qc_complete(qc); 3769c6fd2807SJeff Garzik } else { 3770c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3771c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3772c6fd2807SJeff Garzik } else { 3773c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3774c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3775c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3776c6fd2807SJeff Garzik } 3777c6fd2807SJeff Garzik } 3778c6fd2807SJeff Garzik } 3779da917d69STejun Heo 3780da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3781da917d69STejun Heo WARN_ON(ap->nr_active_links); 3782da917d69STejun Heo ap->nr_active_links = 0; 3783c6fd2807SJeff Garzik } 3784c6fd2807SJeff Garzik 3785c6fd2807SJeff Garzik /** 3786c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3787c6fd2807SJeff Garzik * @ap: host port to handle error for 3788a1efdabaSTejun Heo * 3789c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3790c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3791c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3792c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3793c6fd2807SJeff Garzik * 3794c6fd2807SJeff Garzik * Perform standard error handling sequence. 3795c6fd2807SJeff Garzik * 3796c6fd2807SJeff Garzik * LOCKING: 3797c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
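 *
 * Typical use is from a low-level driver's ->error_handler callback,
 * passing the driver's own reset methods.  A minimal sketch, assuming a
 * hypothetical driver routine foo_hardreset() that is not part of
 * libata:
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, NULL, foo_hardreset,
 *			  ata_std_postreset);
 *	}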
3798c6fd2807SJeff Garzik */ 3799c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3800c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3801c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3802c6fd2807SJeff Garzik { 38039b1e2658STejun Heo struct ata_device *dev; 38049b1e2658STejun Heo int rc; 38059b1e2658STejun Heo 38069b1e2658STejun Heo ata_eh_autopsy(ap); 38079b1e2658STejun Heo ata_eh_report(ap); 38089b1e2658STejun Heo 38099b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 38109b1e2658STejun Heo NULL); 38119b1e2658STejun Heo if (rc) { 38121eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 38139b1e2658STejun Heo ata_dev_disable(dev); 38149b1e2658STejun Heo } 38159b1e2658STejun Heo 3816c6fd2807SJeff Garzik ata_eh_finish(ap); 3817c6fd2807SJeff Garzik } 3818c6fd2807SJeff Garzik 3819a1efdabaSTejun Heo /** 3820a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3821a1efdabaSTejun Heo * @ap: host port to handle error for 3822a1efdabaSTejun Heo * 3823a1efdabaSTejun Heo * Standard error handler 3824a1efdabaSTejun Heo * 3825a1efdabaSTejun Heo * LOCKING: 3826a1efdabaSTejun Heo * Kernel thread context (may sleep). 3827a1efdabaSTejun Heo */ 3828a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3829a1efdabaSTejun Heo { 3830a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3831a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3832a1efdabaSTejun Heo 383357c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 3834fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3835a1efdabaSTejun Heo hardreset = NULL; 3836a1efdabaSTejun Heo 3837a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3838a1efdabaSTejun Heo } 3839a1efdabaSTejun Heo 38406ffa01d8STejun Heo #ifdef CONFIG_PM 3841c6fd2807SJeff Garzik /** 3842c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 3843c6fd2807SJeff Garzik * @ap: port to suspend 3844c6fd2807SJeff Garzik * 3845c6fd2807SJeff Garzik * Suspend @ap. 3846c6fd2807SJeff Garzik * 3847c6fd2807SJeff Garzik * LOCKING: 3848c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3849c6fd2807SJeff Garzik */ 3850c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 3851c6fd2807SJeff Garzik { 3852c6fd2807SJeff Garzik unsigned long flags; 3853c6fd2807SJeff Garzik int rc = 0; 3854c6fd2807SJeff Garzik 3855c6fd2807SJeff Garzik /* are we suspending? 
*/ 3856c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3857c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3858c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) { 3859c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3860c6fd2807SJeff Garzik return; 3861c6fd2807SJeff Garzik } 3862c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3863c6fd2807SJeff Garzik 3864c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3865c6fd2807SJeff Garzik 386664578a3dSTejun Heo /* tell ACPI we're suspending */ 386764578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 386864578a3dSTejun Heo if (rc) 386964578a3dSTejun Heo goto out; 387064578a3dSTejun Heo 3871c6fd2807SJeff Garzik /* suspend */ 3872c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 3873c6fd2807SJeff Garzik 3874c6fd2807SJeff Garzik if (ap->ops->port_suspend) 3875c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 3876c6fd2807SJeff Garzik 3877bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_SUSPEND); 387864578a3dSTejun Heo out: 3879c6fd2807SJeff Garzik /* report result */ 3880c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3881c6fd2807SJeff Garzik 3882c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 3883c6fd2807SJeff Garzik if (rc == 0) 3884c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 388564578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 3886c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 3887c6fd2807SJeff Garzik 3888c6fd2807SJeff Garzik if (ap->pm_result) { 3889c6fd2807SJeff Garzik *ap->pm_result = rc; 3890c6fd2807SJeff Garzik ap->pm_result = NULL; 3891c6fd2807SJeff Garzik } 3892c6fd2807SJeff Garzik 3893c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3894c6fd2807SJeff Garzik 3895c6fd2807SJeff Garzik return; 3896c6fd2807SJeff Garzik } 3897c6fd2807SJeff Garzik 3898c6fd2807SJeff Garzik /** 3899c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 3900c6fd2807SJeff Garzik * @ap: port to resume 3901c6fd2807SJeff Garzik * 3902c6fd2807SJeff Garzik * Resume @ap. 3903c6fd2807SJeff Garzik * 3904c6fd2807SJeff Garzik * LOCKING: 3905c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3906c6fd2807SJeff Garzik */ 3907c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 3908c6fd2807SJeff Garzik { 39096f9c1ea2STejun Heo struct ata_link *link; 39106f9c1ea2STejun Heo struct ata_device *dev; 3911c6fd2807SJeff Garzik unsigned long flags; 39129666f400STejun Heo int rc = 0; 3913c6fd2807SJeff Garzik 3914c6fd2807SJeff Garzik /* are we resuming? */ 3915c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3916c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3917c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) { 3918c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3919c6fd2807SJeff Garzik return; 3920c6fd2807SJeff Garzik } 3921c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3922c6fd2807SJeff Garzik 39239666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 3924c6fd2807SJeff Garzik 39256f9c1ea2STejun Heo /* 39266f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while 39276f9c1ea2STejun Heo * suspended and PHY events during resume isn't too uncommon. 39286f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed 39296f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly. 39306f9c1ea2STejun Heo * Clear error history. 
39316f9c1ea2STejun Heo */ 39326f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 39336f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 39346f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 39356f9c1ea2STejun Heo 3936bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_ON); 3937bd3adca5SShaohua Li 3938c6fd2807SJeff Garzik if (ap->ops->port_resume) 3939c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 3940c6fd2807SJeff Garzik 39416746544cSTejun Heo /* tell ACPI that we're resuming */ 39426746544cSTejun Heo ata_acpi_on_resume(ap); 39436746544cSTejun Heo 39449666f400STejun Heo /* report result */ 3945c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3946c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3947c6fd2807SJeff Garzik if (ap->pm_result) { 3948c6fd2807SJeff Garzik *ap->pm_result = rc; 3949c6fd2807SJeff Garzik ap->pm_result = NULL; 3950c6fd2807SJeff Garzik } 3951c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3952c6fd2807SJeff Garzik } 39536ffa01d8STejun Heo #endif /* CONFIG_PM */ 3954
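/*
 * Illustrative sketch only, not part of libata-eh.c: how a hypothetical
 * SATA low-level driver ("foo"; all foo_* names are made up) would hook
 * into the EH and PM paths implemented above.  The standard error
 * handler comes in through ata_port_operations, and ->port_suspend /
 * ->port_resume are the hooks that ata_eh_handle_port_suspend() and
 * ata_eh_handle_port_resume() invoke.  A real driver would carry this
 * in its own source file.
 */
#include <linux/libata.h>

static int foo_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	/* quiesce controller-specific state before the port suspends */
	return 0;
}

static int foo_port_resume(struct ata_port *ap)
{
	/* restore controller state that was lost across suspend */
	return 0;
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
	/* ata_std_error_handler is already the inherited default;
	 * spelled out here to show where EH enters the driver. */
	.error_handler	= ata_std_error_handler,
	.port_suspend	= foo_port_suspend,
	.port_resume	= foo_port_resume,
};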