1c6fd2807SJeff Garzik /* 2c6fd2807SJeff Garzik * libata-eh.c - libata error handling 3c6fd2807SJeff Garzik * 4c6fd2807SJeff Garzik * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5c6fd2807SJeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org 6c6fd2807SJeff Garzik * on emails. 7c6fd2807SJeff Garzik * 8c6fd2807SJeff Garzik * Copyright 2006 Tejun Heo <htejun@gmail.com> 9c6fd2807SJeff Garzik * 10c6fd2807SJeff Garzik * 11c6fd2807SJeff Garzik * This program is free software; you can redistribute it and/or 12c6fd2807SJeff Garzik * modify it under the terms of the GNU General Public License as 13c6fd2807SJeff Garzik * published by the Free Software Foundation; either version 2, or 14c6fd2807SJeff Garzik * (at your option) any later version. 15c6fd2807SJeff Garzik * 16c6fd2807SJeff Garzik * This program is distributed in the hope that it will be useful, 17c6fd2807SJeff Garzik * but WITHOUT ANY WARRANTY; without even the implied warranty of 18c6fd2807SJeff Garzik * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19c6fd2807SJeff Garzik * General Public License for more details. 20c6fd2807SJeff Garzik * 21c6fd2807SJeff Garzik * You should have received a copy of the GNU General Public License 22c6fd2807SJeff Garzik * along with this program; see the file COPYING. If not, write to 23c6fd2807SJeff Garzik * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 24c6fd2807SJeff Garzik * USA. 25c6fd2807SJeff Garzik * 26c6fd2807SJeff Garzik * 27c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 28c6fd2807SJeff Garzik * as Documentation/DocBook/libata.* 29c6fd2807SJeff Garzik * 30c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 31c6fd2807SJeff Garzik * http://www.sata-io.org/ 32c6fd2807SJeff Garzik * 33c6fd2807SJeff Garzik */ 34c6fd2807SJeff Garzik 35c6fd2807SJeff Garzik #include <linux/kernel.h> 36242f9dcbSJens Axboe #include <linux/blkdev.h> 372855568bSJeff Garzik #include <linux/pci.h> 38c6fd2807SJeff Garzik #include <scsi/scsi.h> 39c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 40c6fd2807SJeff Garzik #include <scsi/scsi_eh.h> 41c6fd2807SJeff Garzik #include <scsi/scsi_device.h> 42c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 436521148cSRobert Hancock #include <scsi/scsi_dbg.h> 44c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h" 45c6fd2807SJeff Garzik 46c6fd2807SJeff Garzik #include <linux/libata.h> 47c6fd2807SJeff Garzik 48c6fd2807SJeff Garzik #include "libata.h" 49c6fd2807SJeff Garzik 507d47e8d4STejun Heo enum { 513884f7b0STejun Heo /* speed down verdicts */ 527d47e8d4STejun Heo ATA_EH_SPDN_NCQ_OFF = (1 << 0), 537d47e8d4STejun Heo ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 547d47e8d4STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 5576326ac1STejun Heo ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 563884f7b0STejun Heo 573884f7b0STejun Heo /* error flags */ 583884f7b0STejun Heo ATA_EFLAG_IS_IO = (1 << 0), 5976326ac1STejun Heo ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 60d9027470SGwendal Grignou ATA_EFLAG_OLD_ER = (1 << 31), 613884f7b0STejun Heo 623884f7b0STejun Heo /* error categories */ 633884f7b0STejun Heo ATA_ECAT_NONE = 0, 643884f7b0STejun Heo ATA_ECAT_ATA_BUS = 1, 653884f7b0STejun Heo ATA_ECAT_TOUT_HSM = 2, 663884f7b0STejun Heo ATA_ECAT_UNK_DEV = 3, 6775f9cafcSTejun Heo ATA_ECAT_DUBIOUS_NONE = 4, 6875f9cafcSTejun Heo ATA_ECAT_DUBIOUS_ATA_BUS = 5, 6975f9cafcSTejun Heo ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 7075f9cafcSTejun Heo ATA_ECAT_DUBIOUS_UNK_DEV = 7, 7175f9cafcSTejun Heo ATA_ECAT_NR = 8, 727d47e8d4STejun Heo 7387fbc5a0STejun Heo ATA_EH_CMD_DFL_TIMEOUT = 5000, 7487fbc5a0STejun Heo 750a2c0f56STejun Heo /* always put at least this amount of time between resets */ 760a2c0f56STejun Heo ATA_EH_RESET_COOL_DOWN = 5000, 770a2c0f56STejun Heo 78341c2c95STejun Heo /* Waiting in ->prereset can never be reliable. It's 79341c2c95STejun Heo * sometimes nice to wait there but it can't be depended upon; 80341c2c95STejun Heo * otherwise, we wouldn't be resetting. Just give it enough 81341c2c95STejun Heo * time for most drives to spin up. 8231daabdaSTejun Heo */ 83341c2c95STejun Heo ATA_EH_PRERESET_TIMEOUT = 10000, 84341c2c95STejun Heo ATA_EH_FASTDRAIN_INTERVAL = 3000, 8511fc33daSTejun Heo 8611fc33daSTejun Heo ATA_EH_UA_TRIES = 5, 87c2c7a89cSTejun Heo 88c2c7a89cSTejun Heo /* probe speed down parameters, see ata_eh_schedule_probe() */ 89c2c7a89cSTejun Heo ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 90c2c7a89cSTejun Heo ATA_EH_PROBE_TRIALS = 2, 9131daabdaSTejun Heo }; 9231daabdaSTejun Heo 9331daabdaSTejun Heo /* The following table determines how we sequence resets. Each entry 9431daabdaSTejun Heo * represents timeout for that try. The first try can be soft or 9531daabdaSTejun Heo * hardreset. All others are hardreset if available. In most cases 9631daabdaSTejun Heo * the first reset w/ 10sec timeout should succeed. Following entries 9731daabdaSTejun Heo * are mostly for error handling, hotplug and retarded devices. 9831daabdaSTejun Heo */ 9931daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = { 100341c2c95STejun Heo 10000, /* most drives spin up by 10sec */ 101341c2c95STejun Heo 10000, /* > 99% working drives spin up before 20sec */ 102341c2c95STejun Heo 35000, /* give > 30 secs of idleness for retarded devices */ 103341c2c95STejun Heo 5000, /* and sweet one last chance */ 104d8af0eb6STejun Heo ULONG_MAX, /* > 1 min has elapsed, give up */ 10531daabdaSTejun Heo }; 10631daabdaSTejun Heo 10787fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = { 10887fbc5a0STejun Heo 5000, /* covers > 99% of successes and not too boring on failures */ 10987fbc5a0STejun Heo 10000, /* combined time till here is enough even for media access */ 11087fbc5a0STejun Heo 30000, /* for true idiots */ 11187fbc5a0STejun Heo ULONG_MAX, 11287fbc5a0STejun Heo }; 11387fbc5a0STejun Heo 1146013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = { 1156013efd8STejun Heo 15000, /* be generous with flush */ 1166013efd8STejun Heo 15000, /* ditto */ 1176013efd8STejun Heo 30000, /* and even more generous */ 1186013efd8STejun Heo ULONG_MAX, 1196013efd8STejun Heo }; 1206013efd8STejun Heo 12187fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = { 12287fbc5a0STejun Heo 5000, /* same rationale as identify timeout */ 12387fbc5a0STejun Heo 10000, /* ditto */ 12487fbc5a0STejun Heo /* but no merciful 30sec for other commands, it just isn't worth it */ 12587fbc5a0STejun Heo ULONG_MAX, 12687fbc5a0STejun Heo }; 12787fbc5a0STejun Heo 12887fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent { 12987fbc5a0STejun Heo const u8 *commands; 13087fbc5a0STejun Heo const unsigned long *timeouts; 13187fbc5a0STejun Heo }; 13287fbc5a0STejun Heo 13387fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal 13487fbc5a0STejun Heo * commands. Each table entry is a command class and matches the 13587fbc5a0STejun Heo * commands the entry applies to and the timeout table to use. 13687fbc5a0STejun Heo * 13787fbc5a0STejun Heo * On the retry after a command timed out, the next timeout value from 13887fbc5a0STejun Heo * the table is used. If the table doesn't contain further entries, 13987fbc5a0STejun Heo * the last value is used. 14087fbc5a0STejun Heo * 14187fbc5a0STejun Heo * ehc->cmd_timeout_idx keeps track of which timeout to use per 14287fbc5a0STejun Heo * command class, so if SET_FEATURES times out on the first try, the 14387fbc5a0STejun Heo * next try will use the second timeout value only for that class. 14487fbc5a0STejun Heo */ 14587fbc5a0STejun Heo #define CMDS(cmds...) (const u8 []){ cmds, 0 } 14687fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent 14787fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 14887fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 14987fbc5a0STejun Heo .timeouts = ata_eh_identify_timeouts, }, 15087fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 15187fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15287fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 15387fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15487fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_FEATURES), 15587fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15687fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 15787fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 1586013efd8STejun Heo { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 1596013efd8STejun Heo .timeouts = ata_eh_flush_timeouts }, 16087fbc5a0STejun Heo }; 16187fbc5a0STejun Heo #undef CMDS 16287fbc5a0STejun Heo 163c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap); 1646ffa01d8STejun Heo #ifdef CONFIG_PM 165c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap); 166c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap); 1676ffa01d8STejun Heo #else /* CONFIG_PM */ 1686ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap) 1696ffa01d8STejun Heo { } 1706ffa01d8STejun Heo 1716ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap) 1726ffa01d8STejun Heo { } 1736ffa01d8STejun Heo #endif /* CONFIG_PM */ 174c6fd2807SJeff Garzik 175b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 176b64bbc39STejun Heo va_list args) 177b64bbc39STejun Heo { 178b64bbc39STejun Heo ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 179b64bbc39STejun Heo ATA_EH_DESC_LEN - ehi->desc_len, 180b64bbc39STejun Heo fmt, args); 181b64bbc39STejun Heo } 182b64bbc39STejun Heo 183b64bbc39STejun Heo /** 184b64bbc39STejun Heo * __ata_ehi_push_desc - push error description without adding separator 185b64bbc39STejun Heo * @ehi: target EHI 186b64bbc39STejun Heo * @fmt: printf format string 187b64bbc39STejun Heo * 188b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 189b64bbc39STejun Heo * 190b64bbc39STejun Heo * LOCKING: 191b64bbc39STejun Heo * spin_lock_irqsave(host lock) 192b64bbc39STejun Heo */ 193b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 194b64bbc39STejun Heo { 195b64bbc39STejun Heo va_list args; 196b64bbc39STejun Heo 197b64bbc39STejun Heo va_start(args, fmt); 198b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 199b64bbc39STejun Heo va_end(args); 200b64bbc39STejun Heo } 201b64bbc39STejun Heo 202b64bbc39STejun Heo /** 203b64bbc39STejun Heo * ata_ehi_push_desc - push error description with separator 204b64bbc39STejun Heo * @ehi: target EHI 205b64bbc39STejun Heo * @fmt: printf format string 206b64bbc39STejun Heo * 207b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 208b64bbc39STejun Heo * If @ehi->desc is not empty, ", " is added in-between. 209b64bbc39STejun Heo * 210b64bbc39STejun Heo * LOCKING: 211b64bbc39STejun Heo * spin_lock_irqsave(host lock) 212b64bbc39STejun Heo */ 213b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 214b64bbc39STejun Heo { 215b64bbc39STejun Heo va_list args; 216b64bbc39STejun Heo 217b64bbc39STejun Heo if (ehi->desc_len) 218b64bbc39STejun Heo __ata_ehi_push_desc(ehi, ", "); 219b64bbc39STejun Heo 220b64bbc39STejun Heo va_start(args, fmt); 221b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 222b64bbc39STejun Heo va_end(args); 223b64bbc39STejun Heo } 224b64bbc39STejun Heo 225b64bbc39STejun Heo /** 226b64bbc39STejun Heo * ata_ehi_clear_desc - clean error description 227b64bbc39STejun Heo * @ehi: target EHI 228b64bbc39STejun Heo * 229b64bbc39STejun Heo * Clear @ehi->desc. 230b64bbc39STejun Heo * 231b64bbc39STejun Heo * LOCKING: 232b64bbc39STejun Heo * spin_lock_irqsave(host lock) 233b64bbc39STejun Heo */ 234b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi) 235b64bbc39STejun Heo { 236b64bbc39STejun Heo ehi->desc[0] = '\0'; 237b64bbc39STejun Heo ehi->desc_len = 0; 238b64bbc39STejun Heo } 239b64bbc39STejun Heo 240cbcdd875STejun Heo /** 241cbcdd875STejun Heo * ata_port_desc - append port description 242cbcdd875STejun Heo * @ap: target ATA port 243cbcdd875STejun Heo * @fmt: printf format string 244cbcdd875STejun Heo * 245cbcdd875STejun Heo * Format string according to @fmt and append it to port 246cbcdd875STejun Heo * description. If port description is not empty, " " is added 247cbcdd875STejun Heo * in-between. This function is to be used while initializing 248cbcdd875STejun Heo * ata_host. The description is printed on host registration. 249cbcdd875STejun Heo * 250cbcdd875STejun Heo * LOCKING: 251cbcdd875STejun Heo * None. 252cbcdd875STejun Heo */ 253cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 254cbcdd875STejun Heo { 255cbcdd875STejun Heo va_list args; 256cbcdd875STejun Heo 257cbcdd875STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 258cbcdd875STejun Heo 259cbcdd875STejun Heo if (ap->link.eh_info.desc_len) 260cbcdd875STejun Heo __ata_ehi_push_desc(&ap->link.eh_info, " "); 261cbcdd875STejun Heo 262cbcdd875STejun Heo va_start(args, fmt); 263cbcdd875STejun Heo __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 264cbcdd875STejun Heo va_end(args); 265cbcdd875STejun Heo } 266cbcdd875STejun Heo 267cbcdd875STejun Heo #ifdef CONFIG_PCI 268cbcdd875STejun Heo 269cbcdd875STejun Heo /** 270cbcdd875STejun Heo * ata_port_pbar_desc - append PCI BAR description 271cbcdd875STejun Heo * @ap: target ATA port 272cbcdd875STejun Heo * @bar: target PCI BAR 273cbcdd875STejun Heo * @offset: offset into PCI BAR 274cbcdd875STejun Heo * @name: name of the area 275cbcdd875STejun Heo * 276cbcdd875STejun Heo * If @offset is negative, this function formats a string which 277cbcdd875STejun Heo * contains the name, address, size and type of the BAR and 278cbcdd875STejun Heo * appends it to the port description. If @offset is zero or 279cbcdd875STejun Heo * positive, only name and offsetted address is appended. 280cbcdd875STejun Heo * 281cbcdd875STejun Heo * LOCKING: 282cbcdd875STejun Heo * None. 283cbcdd875STejun Heo */ 284cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 285cbcdd875STejun Heo const char *name) 286cbcdd875STejun Heo { 287cbcdd875STejun Heo struct pci_dev *pdev = to_pci_dev(ap->host->dev); 288cbcdd875STejun Heo char *type = ""; 289cbcdd875STejun Heo unsigned long long start, len; 290cbcdd875STejun Heo 291cbcdd875STejun Heo if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 292cbcdd875STejun Heo type = "m"; 293cbcdd875STejun Heo else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 294cbcdd875STejun Heo type = "i"; 295cbcdd875STejun Heo 296cbcdd875STejun Heo start = (unsigned long long)pci_resource_start(pdev, bar); 297cbcdd875STejun Heo len = (unsigned long long)pci_resource_len(pdev, bar); 298cbcdd875STejun Heo 299cbcdd875STejun Heo if (offset < 0) 300cbcdd875STejun Heo ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 301cbcdd875STejun Heo else 302e6a73ab1SAndrew Morton ata_port_desc(ap, "%s 0x%llx", name, 303e6a73ab1SAndrew Morton start + (unsigned long long)offset); 304cbcdd875STejun Heo } 305cbcdd875STejun Heo 306cbcdd875STejun Heo #endif /* CONFIG_PCI */ 307cbcdd875STejun Heo 30887fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd) 30987fbc5a0STejun Heo { 31087fbc5a0STejun Heo int i; 31187fbc5a0STejun Heo 31287fbc5a0STejun Heo for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 31387fbc5a0STejun Heo const u8 *cur; 31487fbc5a0STejun Heo 31587fbc5a0STejun Heo for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 31687fbc5a0STejun Heo if (*cur == cmd) 31787fbc5a0STejun Heo return i; 31887fbc5a0STejun Heo } 31987fbc5a0STejun Heo 32087fbc5a0STejun Heo return -1; 32187fbc5a0STejun Heo } 32287fbc5a0STejun Heo 32387fbc5a0STejun Heo /** 32487fbc5a0STejun Heo * ata_internal_cmd_timeout - determine timeout for an internal command 32587fbc5a0STejun Heo * @dev: target device 32687fbc5a0STejun Heo * @cmd: internal command to be issued 32787fbc5a0STejun Heo * 32887fbc5a0STejun Heo * Determine timeout for internal command @cmd for @dev. 32987fbc5a0STejun Heo * 33087fbc5a0STejun Heo * LOCKING: 33187fbc5a0STejun Heo * EH context. 33287fbc5a0STejun Heo * 33387fbc5a0STejun Heo * RETURNS: 33487fbc5a0STejun Heo * Determined timeout. 33587fbc5a0STejun Heo */ 33687fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 33787fbc5a0STejun Heo { 33887fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 33987fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 34087fbc5a0STejun Heo int idx; 34187fbc5a0STejun Heo 34287fbc5a0STejun Heo if (ent < 0) 34387fbc5a0STejun Heo return ATA_EH_CMD_DFL_TIMEOUT; 34487fbc5a0STejun Heo 34587fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 34687fbc5a0STejun Heo return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 34787fbc5a0STejun Heo } 34887fbc5a0STejun Heo 34987fbc5a0STejun Heo /** 35087fbc5a0STejun Heo * ata_internal_cmd_timed_out - notification for internal command timeout 35187fbc5a0STejun Heo * @dev: target device 35287fbc5a0STejun Heo * @cmd: internal command which timed out 35387fbc5a0STejun Heo * 35487fbc5a0STejun Heo * Notify EH that internal command @cmd for @dev timed out. This 35587fbc5a0STejun Heo * function should be called only for commands whose timeouts are 35687fbc5a0STejun Heo * determined using ata_internal_cmd_timeout(). 35787fbc5a0STejun Heo * 35887fbc5a0STejun Heo * LOCKING: 35987fbc5a0STejun Heo * EH context. 36087fbc5a0STejun Heo */ 36187fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 36287fbc5a0STejun Heo { 36387fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 36487fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 36587fbc5a0STejun Heo int idx; 36687fbc5a0STejun Heo 36787fbc5a0STejun Heo if (ent < 0) 36887fbc5a0STejun Heo return; 36987fbc5a0STejun Heo 37087fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 37187fbc5a0STejun Heo if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) 37287fbc5a0STejun Heo ehc->cmd_timeout_idx[dev->devno][ent]++; 37387fbc5a0STejun Heo } 37487fbc5a0STejun Heo 3753884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 376c6fd2807SJeff Garzik unsigned int err_mask) 377c6fd2807SJeff Garzik { 378c6fd2807SJeff Garzik struct ata_ering_entry *ent; 379c6fd2807SJeff Garzik 380c6fd2807SJeff Garzik WARN_ON(!err_mask); 381c6fd2807SJeff Garzik 382c6fd2807SJeff Garzik ering->cursor++; 383c6fd2807SJeff Garzik ering->cursor %= ATA_ERING_SIZE; 384c6fd2807SJeff Garzik 385c6fd2807SJeff Garzik ent = &ering->ring[ering->cursor]; 3863884f7b0STejun Heo ent->eflags = eflags; 387c6fd2807SJeff Garzik ent->err_mask = err_mask; 388c6fd2807SJeff Garzik ent->timestamp = get_jiffies_64(); 389c6fd2807SJeff Garzik } 390c6fd2807SJeff Garzik 39176326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 39276326ac1STejun Heo { 39376326ac1STejun Heo struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 39476326ac1STejun Heo 39576326ac1STejun Heo if (ent->err_mask) 39676326ac1STejun Heo return ent; 39776326ac1STejun Heo return NULL; 39876326ac1STejun Heo } 39976326ac1STejun Heo 400d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering, 401c6fd2807SJeff Garzik int (*map_fn)(struct ata_ering_entry *, void *), 402c6fd2807SJeff Garzik void *arg) 403c6fd2807SJeff Garzik { 404c6fd2807SJeff Garzik int idx, rc = 0; 405c6fd2807SJeff Garzik struct ata_ering_entry *ent; 406c6fd2807SJeff Garzik 407c6fd2807SJeff Garzik idx = ering->cursor; 408c6fd2807SJeff Garzik do { 409c6fd2807SJeff Garzik ent = &ering->ring[idx]; 410c6fd2807SJeff Garzik if (!ent->err_mask) 411c6fd2807SJeff Garzik break; 412c6fd2807SJeff Garzik rc = map_fn(ent, arg); 413c6fd2807SJeff Garzik if (rc) 414c6fd2807SJeff Garzik break; 415c6fd2807SJeff Garzik idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 416c6fd2807SJeff Garzik } while (idx != ering->cursor); 417c6fd2807SJeff Garzik 418c6fd2807SJeff Garzik return rc; 419c6fd2807SJeff Garzik } 420c6fd2807SJeff Garzik 421d9027470SGwendal Grignou int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) 422d9027470SGwendal Grignou { 423d9027470SGwendal Grignou ent->eflags |= ATA_EFLAG_OLD_ER; 424d9027470SGwendal Grignou return 0; 425d9027470SGwendal Grignou } 426d9027470SGwendal Grignou 427d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering) 428d9027470SGwendal Grignou { 429d9027470SGwendal Grignou ata_ering_map(ering, ata_ering_clear_cb, NULL); 430d9027470SGwendal Grignou } 431d9027470SGwendal Grignou 432c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev) 433c6fd2807SJeff Garzik { 4349af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 435c6fd2807SJeff Garzik 436c6fd2807SJeff Garzik return ehc->i.action | ehc->i.dev_action[dev->devno]; 437c6fd2807SJeff Garzik } 438c6fd2807SJeff Garzik 439f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 440c6fd2807SJeff Garzik struct ata_eh_info *ehi, unsigned int action) 441c6fd2807SJeff Garzik { 442f58229f8STejun Heo struct ata_device *tdev; 443c6fd2807SJeff Garzik 444c6fd2807SJeff Garzik if (!dev) { 445c6fd2807SJeff Garzik ehi->action &= ~action; 4461eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 447f58229f8STejun Heo ehi->dev_action[tdev->devno] &= ~action; 448c6fd2807SJeff Garzik } else { 449c6fd2807SJeff Garzik /* doesn't make sense for port-wide EH actions */ 450c6fd2807SJeff Garzik WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 451c6fd2807SJeff Garzik 452c6fd2807SJeff Garzik /* break ehi->action into ehi->dev_action */ 453c6fd2807SJeff Garzik if (ehi->action & action) { 4541eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 455f58229f8STejun Heo ehi->dev_action[tdev->devno] |= 456f58229f8STejun Heo ehi->action & action; 457c6fd2807SJeff Garzik ehi->action &= ~action; 458c6fd2807SJeff Garzik } 459c6fd2807SJeff Garzik 460c6fd2807SJeff Garzik /* turn off the specified per-dev action */ 461c6fd2807SJeff Garzik ehi->dev_action[dev->devno] &= ~action; 462c6fd2807SJeff Garzik } 463c6fd2807SJeff Garzik } 464c6fd2807SJeff Garzik 465c6fd2807SJeff Garzik /** 466*c0c362b6STejun Heo * ata_eh_acquire - acquire EH ownership 467*c0c362b6STejun Heo * @ap: ATA port to acquire EH ownership for 468*c0c362b6STejun Heo * 469*c0c362b6STejun Heo * Acquire EH ownership for @ap. This is the basic exclusion 470*c0c362b6STejun Heo * mechanism for ports sharing a host. Only one port hanging off 471*c0c362b6STejun Heo * the same host can claim the ownership of EH. 472*c0c362b6STejun Heo * 473*c0c362b6STejun Heo * LOCKING: 474*c0c362b6STejun Heo * EH context. 475*c0c362b6STejun Heo */ 476*c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap) 477*c0c362b6STejun Heo { 478*c0c362b6STejun Heo mutex_lock(&ap->host->eh_mutex); 479*c0c362b6STejun Heo WARN_ON_ONCE(ap->host->eh_owner); 480*c0c362b6STejun Heo ap->host->eh_owner = current; 481*c0c362b6STejun Heo } 482*c0c362b6STejun Heo 483*c0c362b6STejun Heo /** 484*c0c362b6STejun Heo * ata_eh_release - release EH ownership 485*c0c362b6STejun Heo * @ap: ATA port to release EH ownership for 486*c0c362b6STejun Heo * 487*c0c362b6STejun Heo * Release EH ownership for @ap if the caller. The caller must 488*c0c362b6STejun Heo * have acquired EH ownership using ata_eh_acquire() previously. 489*c0c362b6STejun Heo * 490*c0c362b6STejun Heo * LOCKING: 491*c0c362b6STejun Heo * EH context. 492*c0c362b6STejun Heo */ 493*c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap) 494*c0c362b6STejun Heo { 495*c0c362b6STejun Heo WARN_ON_ONCE(ap->host->eh_owner != current); 496*c0c362b6STejun Heo ap->host->eh_owner = NULL; 497*c0c362b6STejun Heo mutex_unlock(&ap->host->eh_mutex); 498*c0c362b6STejun Heo } 499*c0c362b6STejun Heo 500*c0c362b6STejun Heo /** 501c6fd2807SJeff Garzik * ata_scsi_timed_out - SCSI layer time out callback 502c6fd2807SJeff Garzik * @cmd: timed out SCSI command 503c6fd2807SJeff Garzik * 504c6fd2807SJeff Garzik * Handles SCSI layer timeout. We race with normal completion of 505c6fd2807SJeff Garzik * the qc for @cmd. If the qc is already gone, we lose and let 506c6fd2807SJeff Garzik * the scsi command finish (EH_HANDLED). Otherwise, the qc has 507c6fd2807SJeff Garzik * timed out and EH should be invoked. Prevent ata_qc_complete() 508c6fd2807SJeff Garzik * from finishing it by setting EH_SCHEDULED and return 509c6fd2807SJeff Garzik * EH_NOT_HANDLED. 510c6fd2807SJeff Garzik * 511c6fd2807SJeff Garzik * TODO: kill this function once old EH is gone. 512c6fd2807SJeff Garzik * 513c6fd2807SJeff Garzik * LOCKING: 514c6fd2807SJeff Garzik * Called from timer context 515c6fd2807SJeff Garzik * 516c6fd2807SJeff Garzik * RETURNS: 517c6fd2807SJeff Garzik * EH_HANDLED or EH_NOT_HANDLED 518c6fd2807SJeff Garzik */ 519242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 520c6fd2807SJeff Garzik { 521c6fd2807SJeff Garzik struct Scsi_Host *host = cmd->device->host; 522c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 523c6fd2807SJeff Garzik unsigned long flags; 524c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 525242f9dcbSJens Axboe enum blk_eh_timer_return ret; 526c6fd2807SJeff Garzik 527c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 528c6fd2807SJeff Garzik 529c6fd2807SJeff Garzik if (ap->ops->error_handler) { 530242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 531c6fd2807SJeff Garzik goto out; 532c6fd2807SJeff Garzik } 533c6fd2807SJeff Garzik 534242f9dcbSJens Axboe ret = BLK_EH_HANDLED; 535c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 5369af5c9c9STejun Heo qc = ata_qc_from_tag(ap, ap->link.active_tag); 537c6fd2807SJeff Garzik if (qc) { 538c6fd2807SJeff Garzik WARN_ON(qc->scsicmd != cmd); 539c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 540c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 541242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 542c6fd2807SJeff Garzik } 543c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 544c6fd2807SJeff Garzik 545c6fd2807SJeff Garzik out: 546c6fd2807SJeff Garzik DPRINTK("EXIT, ret=%d\n", ret); 547c6fd2807SJeff Garzik return ret; 548c6fd2807SJeff Garzik } 549c6fd2807SJeff Garzik 550ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap) 551ece180d1STejun Heo { 552ece180d1STejun Heo struct ata_link *link; 553ece180d1STejun Heo struct ata_device *dev; 554ece180d1STejun Heo unsigned long flags; 555ece180d1STejun Heo 556ece180d1STejun Heo /* Restore SControl IPM and SPD for the next driver and 557ece180d1STejun Heo * disable attached devices. 558ece180d1STejun Heo */ 559ece180d1STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 560ece180d1STejun Heo sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); 561ece180d1STejun Heo ata_for_each_dev(dev, link, ALL) 562ece180d1STejun Heo ata_dev_disable(dev); 563ece180d1STejun Heo } 564ece180d1STejun Heo 565ece180d1STejun Heo /* freeze and set UNLOADED */ 566ece180d1STejun Heo spin_lock_irqsave(ap->lock, flags); 567ece180d1STejun Heo 568ece180d1STejun Heo ata_port_freeze(ap); /* won't be thawed */ 569ece180d1STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 570ece180d1STejun Heo ap->pflags |= ATA_PFLAG_UNLOADED; 571ece180d1STejun Heo 572ece180d1STejun Heo spin_unlock_irqrestore(ap->lock, flags); 573ece180d1STejun Heo } 574ece180d1STejun Heo 575c6fd2807SJeff Garzik /** 576c6fd2807SJeff Garzik * ata_scsi_error - SCSI layer error handler callback 577c6fd2807SJeff Garzik * @host: SCSI host on which error occurred 578c6fd2807SJeff Garzik * 579c6fd2807SJeff Garzik * Handles SCSI-layer-thrown error events. 580c6fd2807SJeff Garzik * 581c6fd2807SJeff Garzik * LOCKING: 582c6fd2807SJeff Garzik * Inherited from SCSI layer (none, can sleep) 583c6fd2807SJeff Garzik * 584c6fd2807SJeff Garzik * RETURNS: 585c6fd2807SJeff Garzik * Zero. 586c6fd2807SJeff Garzik */ 587c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host) 588c6fd2807SJeff Garzik { 589c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 590a1e10f7eSTejun Heo int i; 591c6fd2807SJeff Garzik unsigned long flags; 592c6fd2807SJeff Garzik 593c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 594c6fd2807SJeff Garzik 595c429137aSTejun Heo /* make sure sff pio task is not running */ 596c429137aSTejun Heo ata_sff_flush_pio_task(ap); 597c6fd2807SJeff Garzik 598cca3974eSJeff Garzik /* synchronize with host lock and sort out timeouts */ 599c6fd2807SJeff Garzik 600c6fd2807SJeff Garzik /* For new EH, all qcs are finished in one of three ways - 601c6fd2807SJeff Garzik * normal completion, error completion, and SCSI timeout. 602c96f1732SAlan Cox * Both completions can race against SCSI timeout. When normal 603c6fd2807SJeff Garzik * completion wins, the qc never reaches EH. When error 604c6fd2807SJeff Garzik * completion wins, the qc has ATA_QCFLAG_FAILED set. 605c6fd2807SJeff Garzik * 606c6fd2807SJeff Garzik * When SCSI timeout wins, things are a bit more complex. 607c6fd2807SJeff Garzik * Normal or error completion can occur after the timeout but 608c6fd2807SJeff Garzik * before this point. In such cases, both types of 609c6fd2807SJeff Garzik * completions are honored. A scmd is determined to have 610c6fd2807SJeff Garzik * timed out iff its associated qc is active and not failed. 611c6fd2807SJeff Garzik */ 612c6fd2807SJeff Garzik if (ap->ops->error_handler) { 613c6fd2807SJeff Garzik struct scsi_cmnd *scmd, *tmp; 614c6fd2807SJeff Garzik int nr_timedout = 0; 615c6fd2807SJeff Garzik 616c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 617c6fd2807SJeff Garzik 618c96f1732SAlan Cox /* This must occur under the ap->lock as we don't want 619c96f1732SAlan Cox a polled recovery to race the real interrupt handler 620c96f1732SAlan Cox 621c96f1732SAlan Cox The lost_interrupt handler checks for any completed but 622c96f1732SAlan Cox non-notified command and completes much like an IRQ handler. 623c96f1732SAlan Cox 624c96f1732SAlan Cox We then fall into the error recovery code which will treat 625c96f1732SAlan Cox this as if normal completion won the race */ 626c96f1732SAlan Cox 627c96f1732SAlan Cox if (ap->ops->lost_interrupt) 628c96f1732SAlan Cox ap->ops->lost_interrupt(ap); 629c96f1732SAlan Cox 630c6fd2807SJeff Garzik list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 631c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 632c6fd2807SJeff Garzik 633c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE; i++) { 634c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, i); 635c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE && 636c6fd2807SJeff Garzik qc->scsicmd == scmd) 637c6fd2807SJeff Garzik break; 638c6fd2807SJeff Garzik } 639c6fd2807SJeff Garzik 640c6fd2807SJeff Garzik if (i < ATA_MAX_QUEUE) { 641c6fd2807SJeff Garzik /* the scmd has an associated qc */ 642c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) { 643c6fd2807SJeff Garzik /* which hasn't failed yet, timeout */ 644c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 645c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 646c6fd2807SJeff Garzik nr_timedout++; 647c6fd2807SJeff Garzik } 648c6fd2807SJeff Garzik } else { 649c6fd2807SJeff Garzik /* Normal completion occurred after 650c6fd2807SJeff Garzik * SCSI timeout but before this point. 651c6fd2807SJeff Garzik * Successfully complete it. 652c6fd2807SJeff Garzik */ 653c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 654c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 655c6fd2807SJeff Garzik } 656c6fd2807SJeff Garzik } 657c6fd2807SJeff Garzik 658c6fd2807SJeff Garzik /* If we have timed out qcs. They belong to EH from 659c6fd2807SJeff Garzik * this point but the state of the controller is 660c6fd2807SJeff Garzik * unknown. Freeze the port to make sure the IRQ 661c6fd2807SJeff Garzik * handler doesn't diddle with those qcs. This must 662c6fd2807SJeff Garzik * be done atomically w.r.t. setting QCFLAG_FAILED. 663c6fd2807SJeff Garzik */ 664c6fd2807SJeff Garzik if (nr_timedout) 665c6fd2807SJeff Garzik __ata_port_freeze(ap); 666c6fd2807SJeff Garzik 667c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 668a1e10f7eSTejun Heo 669a1e10f7eSTejun Heo /* initialize eh_tries */ 670a1e10f7eSTejun Heo ap->eh_tries = ATA_EH_MAX_TRIES; 671c6fd2807SJeff Garzik } else 672c6fd2807SJeff Garzik spin_unlock_wait(ap->lock); 673c6fd2807SJeff Garzik 674c96f1732SAlan Cox /* If we timed raced normal completion and there is nothing to 675c96f1732SAlan Cox recover nr_timedout == 0 why exactly are we doing error recovery ? */ 676c96f1732SAlan Cox 677c6fd2807SJeff Garzik /* invoke error handler */ 678c6fd2807SJeff Garzik if (ap->ops->error_handler) { 679cf1b86c8STejun Heo struct ata_link *link; 680cf1b86c8STejun Heo 681*c0c362b6STejun Heo /* acquire EH ownership */ 682*c0c362b6STejun Heo ata_eh_acquire(ap); 683*c0c362b6STejun Heo repeat: 6845ddf24c5STejun Heo /* kill fast drain timer */ 6855ddf24c5STejun Heo del_timer_sync(&ap->fastdrain_timer); 6865ddf24c5STejun Heo 687c6fd2807SJeff Garzik /* process port resume request */ 688c6fd2807SJeff Garzik ata_eh_handle_port_resume(ap); 689c6fd2807SJeff Garzik 690c6fd2807SJeff Garzik /* fetch & clear EH info */ 691c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 692c6fd2807SJeff Garzik 6931eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) { 69400115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 69500115e0fSTejun Heo struct ata_device *dev; 69600115e0fSTejun Heo 697cf1b86c8STejun Heo memset(&link->eh_context, 0, sizeof(link->eh_context)); 698cf1b86c8STejun Heo link->eh_context.i = link->eh_info; 699cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 70000115e0fSTejun Heo 7011eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 70200115e0fSTejun Heo int devno = dev->devno; 70300115e0fSTejun Heo 70400115e0fSTejun Heo ehc->saved_xfer_mode[devno] = dev->xfer_mode; 70500115e0fSTejun Heo if (ata_ncq_enabled(dev)) 70600115e0fSTejun Heo ehc->saved_ncq_enabled |= 1 << devno; 70700115e0fSTejun Heo } 708cf1b86c8STejun Heo } 709c6fd2807SJeff Garzik 710c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 711c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_PENDING; 712da917d69STejun Heo ap->excl_link = NULL; /* don't maintain exclusion over EH */ 713c6fd2807SJeff Garzik 714c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 715c6fd2807SJeff Garzik 716c6fd2807SJeff Garzik /* invoke EH, skip if unloading or suspended */ 717c6fd2807SJeff Garzik if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 718c6fd2807SJeff Garzik ap->ops->error_handler(ap); 719ece180d1STejun Heo else { 720ece180d1STejun Heo /* if unloading, commence suicide */ 721ece180d1STejun Heo if ((ap->pflags & ATA_PFLAG_UNLOADING) && 722ece180d1STejun Heo !(ap->pflags & ATA_PFLAG_UNLOADED)) 723ece180d1STejun Heo ata_eh_unload(ap); 724c6fd2807SJeff Garzik ata_eh_finish(ap); 725ece180d1STejun Heo } 726c6fd2807SJeff Garzik 727c6fd2807SJeff Garzik /* process port suspend request */ 728c6fd2807SJeff Garzik ata_eh_handle_port_suspend(ap); 729c6fd2807SJeff Garzik 730c6fd2807SJeff Garzik /* Exception might have happend after ->error_handler 731c6fd2807SJeff Garzik * recovered the port but before this point. Repeat 732c6fd2807SJeff Garzik * EH in such case. 733c6fd2807SJeff Garzik */ 734c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 735c6fd2807SJeff Garzik 736c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_EH_PENDING) { 737a1e10f7eSTejun Heo if (--ap->eh_tries) { 738c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 739c6fd2807SJeff Garzik goto repeat; 740c6fd2807SJeff Garzik } 741c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "EH pending after %d " 742a1e10f7eSTejun Heo "tries, giving up\n", ATA_EH_MAX_TRIES); 743914616a3STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 744c6fd2807SJeff Garzik } 745c6fd2807SJeff Garzik 746c6fd2807SJeff Garzik /* this run is complete, make sure EH info is clear */ 7471eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 748cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 749c6fd2807SJeff Garzik 750c6fd2807SJeff Garzik /* Clear host_eh_scheduled while holding ap->lock such 751c6fd2807SJeff Garzik * that if exception occurs after this point but 752c6fd2807SJeff Garzik * before EH completion, SCSI midlayer will 753c6fd2807SJeff Garzik * re-initiate EH. 754c6fd2807SJeff Garzik */ 755c6fd2807SJeff Garzik host->host_eh_scheduled = 0; 756c6fd2807SJeff Garzik 757c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 758*c0c362b6STejun Heo ata_eh_release(ap); 759c6fd2807SJeff Garzik } else { 7609af5c9c9STejun Heo WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 761c6fd2807SJeff Garzik ap->ops->eng_timeout(ap); 762c6fd2807SJeff Garzik } 763c6fd2807SJeff Garzik 764c6fd2807SJeff Garzik /* finish or retry handled scmd's and clean up */ 765c6fd2807SJeff Garzik WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); 766c6fd2807SJeff Garzik 767c6fd2807SJeff Garzik scsi_eh_flush_done_q(&ap->eh_done_q); 768c6fd2807SJeff Garzik 769c6fd2807SJeff Garzik /* clean up */ 770c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 771c6fd2807SJeff Garzik 772c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_LOADING) 773c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_LOADING; 774c6fd2807SJeff Garzik else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 775ad72cf98STejun Heo schedule_delayed_work(&ap->hotplug_task, 0); 776c6fd2807SJeff Garzik 777c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_RECOVERED) 778c6fd2807SJeff Garzik ata_port_printk(ap, KERN_INFO, "EH complete\n"); 779c6fd2807SJeff Garzik 780c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 781c6fd2807SJeff Garzik 782c6fd2807SJeff Garzik /* tell wait_eh that we're done */ 783c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 784c6fd2807SJeff Garzik wake_up_all(&ap->eh_wait_q); 785c6fd2807SJeff Garzik 786c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 787c6fd2807SJeff Garzik 788c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 789c6fd2807SJeff Garzik } 790c6fd2807SJeff Garzik 791c6fd2807SJeff Garzik /** 792c6fd2807SJeff Garzik * ata_port_wait_eh - Wait for the currently pending EH to complete 793c6fd2807SJeff Garzik * @ap: Port to wait EH for 794c6fd2807SJeff Garzik * 795c6fd2807SJeff Garzik * Wait until the currently pending EH is complete. 796c6fd2807SJeff Garzik * 797c6fd2807SJeff Garzik * LOCKING: 798c6fd2807SJeff Garzik * Kernel thread context (may sleep). 799c6fd2807SJeff Garzik */ 800c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap) 801c6fd2807SJeff Garzik { 802c6fd2807SJeff Garzik unsigned long flags; 803c6fd2807SJeff Garzik DEFINE_WAIT(wait); 804c6fd2807SJeff Garzik 805c6fd2807SJeff Garzik retry: 806c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 807c6fd2807SJeff Garzik 808c6fd2807SJeff Garzik while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 809c6fd2807SJeff Garzik prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 810c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 811c6fd2807SJeff Garzik schedule(); 812c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 813c6fd2807SJeff Garzik } 814c6fd2807SJeff Garzik finish_wait(&ap->eh_wait_q, &wait); 815c6fd2807SJeff Garzik 816c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 817c6fd2807SJeff Garzik 818c6fd2807SJeff Garzik /* make sure SCSI EH is complete */ 819cca3974eSJeff Garzik if (scsi_host_in_recovery(ap->scsi_host)) { 82097750cebSTejun Heo ata_msleep(ap, 10); 821c6fd2807SJeff Garzik goto retry; 822c6fd2807SJeff Garzik } 823c6fd2807SJeff Garzik } 824c6fd2807SJeff Garzik 8255ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap) 8265ddf24c5STejun Heo { 8275ddf24c5STejun Heo unsigned int tag; 8285ddf24c5STejun Heo int nr = 0; 8295ddf24c5STejun Heo 8305ddf24c5STejun Heo /* count only non-internal commands */ 8315ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 8325ddf24c5STejun Heo if (ata_qc_from_tag(ap, tag)) 8335ddf24c5STejun Heo nr++; 8345ddf24c5STejun Heo 8355ddf24c5STejun Heo return nr; 8365ddf24c5STejun Heo } 8375ddf24c5STejun Heo 8385ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg) 8395ddf24c5STejun Heo { 8405ddf24c5STejun Heo struct ata_port *ap = (void *)arg; 8415ddf24c5STejun Heo unsigned long flags; 8425ddf24c5STejun Heo int cnt; 8435ddf24c5STejun Heo 8445ddf24c5STejun Heo spin_lock_irqsave(ap->lock, flags); 8455ddf24c5STejun Heo 8465ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8475ddf24c5STejun Heo 8485ddf24c5STejun Heo /* are we done? */ 8495ddf24c5STejun Heo if (!cnt) 8505ddf24c5STejun Heo goto out_unlock; 8515ddf24c5STejun Heo 8525ddf24c5STejun Heo if (cnt == ap->fastdrain_cnt) { 8535ddf24c5STejun Heo unsigned int tag; 8545ddf24c5STejun Heo 8555ddf24c5STejun Heo /* No progress during the last interval, tag all 8565ddf24c5STejun Heo * in-flight qcs as timed out and freeze the port. 8575ddf24c5STejun Heo */ 8585ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { 8595ddf24c5STejun Heo struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 8605ddf24c5STejun Heo if (qc) 8615ddf24c5STejun Heo qc->err_mask |= AC_ERR_TIMEOUT; 8625ddf24c5STejun Heo } 8635ddf24c5STejun Heo 8645ddf24c5STejun Heo ata_port_freeze(ap); 8655ddf24c5STejun Heo } else { 8665ddf24c5STejun Heo /* some qcs have finished, give it another chance */ 8675ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 8685ddf24c5STejun Heo ap->fastdrain_timer.expires = 869341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8705ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8715ddf24c5STejun Heo } 8725ddf24c5STejun Heo 8735ddf24c5STejun Heo out_unlock: 8745ddf24c5STejun Heo spin_unlock_irqrestore(ap->lock, flags); 8755ddf24c5STejun Heo } 8765ddf24c5STejun Heo 8775ddf24c5STejun Heo /** 8785ddf24c5STejun Heo * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 8795ddf24c5STejun Heo * @ap: target ATA port 8805ddf24c5STejun Heo * @fastdrain: activate fast drain 8815ddf24c5STejun Heo * 8825ddf24c5STejun Heo * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 8835ddf24c5STejun Heo * is non-zero and EH wasn't pending before. Fast drain ensures 8845ddf24c5STejun Heo * that EH kicks in in timely manner. 8855ddf24c5STejun Heo * 8865ddf24c5STejun Heo * LOCKING: 8875ddf24c5STejun Heo * spin_lock_irqsave(host lock) 8885ddf24c5STejun Heo */ 8895ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 8905ddf24c5STejun Heo { 8915ddf24c5STejun Heo int cnt; 8925ddf24c5STejun Heo 8935ddf24c5STejun Heo /* already scheduled? */ 8945ddf24c5STejun Heo if (ap->pflags & ATA_PFLAG_EH_PENDING) 8955ddf24c5STejun Heo return; 8965ddf24c5STejun Heo 8975ddf24c5STejun Heo ap->pflags |= ATA_PFLAG_EH_PENDING; 8985ddf24c5STejun Heo 8995ddf24c5STejun Heo if (!fastdrain) 9005ddf24c5STejun Heo return; 9015ddf24c5STejun Heo 9025ddf24c5STejun Heo /* do we have in-flight qcs? */ 9035ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 9045ddf24c5STejun Heo if (!cnt) 9055ddf24c5STejun Heo return; 9065ddf24c5STejun Heo 9075ddf24c5STejun Heo /* activate fast drain */ 9085ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 909341c2c95STejun Heo ap->fastdrain_timer.expires = 910341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 9115ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 9125ddf24c5STejun Heo } 9135ddf24c5STejun Heo 914c6fd2807SJeff Garzik /** 915c6fd2807SJeff Garzik * ata_qc_schedule_eh - schedule qc for error handling 916c6fd2807SJeff Garzik * @qc: command to schedule error handling for 917c6fd2807SJeff Garzik * 918c6fd2807SJeff Garzik * Schedule error handling for @qc. EH will kick in as soon as 919c6fd2807SJeff Garzik * other commands are drained. 920c6fd2807SJeff Garzik * 921c6fd2807SJeff Garzik * LOCKING: 922cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 923c6fd2807SJeff Garzik */ 924c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 925c6fd2807SJeff Garzik { 926c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 927fa41efdaSTejun Heo struct request_queue *q = qc->scsicmd->device->request_queue; 928fa41efdaSTejun Heo unsigned long flags; 929c6fd2807SJeff Garzik 930c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 931c6fd2807SJeff Garzik 932c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 9335ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 934c6fd2807SJeff Garzik 935c6fd2807SJeff Garzik /* The following will fail if timeout has already expired. 936c6fd2807SJeff Garzik * ata_scsi_error() takes care of such scmds on EH entry. 937c6fd2807SJeff Garzik * Note that ATA_QCFLAG_FAILED is unconditionally set after 938c6fd2807SJeff Garzik * this function completes. 939c6fd2807SJeff Garzik */ 940fa41efdaSTejun Heo spin_lock_irqsave(q->queue_lock, flags); 941242f9dcbSJens Axboe blk_abort_request(qc->scsicmd->request); 942fa41efdaSTejun Heo spin_unlock_irqrestore(q->queue_lock, flags); 943c6fd2807SJeff Garzik } 944c6fd2807SJeff Garzik 945c6fd2807SJeff Garzik /** 946c6fd2807SJeff Garzik * ata_port_schedule_eh - schedule error handling without a qc 947c6fd2807SJeff Garzik * @ap: ATA port to schedule EH for 948c6fd2807SJeff Garzik * 949c6fd2807SJeff Garzik * Schedule error handling for @ap. EH will kick in as soon as 950c6fd2807SJeff Garzik * all commands are drained. 951c6fd2807SJeff Garzik * 952c6fd2807SJeff Garzik * LOCKING: 953cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 954c6fd2807SJeff Garzik */ 955c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap) 956c6fd2807SJeff Garzik { 957c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 958c6fd2807SJeff Garzik 959f4d6d004STejun Heo if (ap->pflags & ATA_PFLAG_INITIALIZING) 960f4d6d004STejun Heo return; 961f4d6d004STejun Heo 9625ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 963cca3974eSJeff Garzik scsi_schedule_eh(ap->scsi_host); 964c6fd2807SJeff Garzik 965c6fd2807SJeff Garzik DPRINTK("port EH scheduled\n"); 966c6fd2807SJeff Garzik } 967c6fd2807SJeff Garzik 968dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 969c6fd2807SJeff Garzik { 970c6fd2807SJeff Garzik int tag, nr_aborted = 0; 971c6fd2807SJeff Garzik 972c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 973c6fd2807SJeff Garzik 9745ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */ 9755ddf24c5STejun Heo ata_eh_set_pending(ap, 0); 9765ddf24c5STejun Heo 977c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 978c6fd2807SJeff Garzik struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 979c6fd2807SJeff Garzik 980dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) { 981c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 982c6fd2807SJeff Garzik ata_qc_complete(qc); 983c6fd2807SJeff Garzik nr_aborted++; 984c6fd2807SJeff Garzik } 985c6fd2807SJeff Garzik } 986c6fd2807SJeff Garzik 987c6fd2807SJeff Garzik if (!nr_aborted) 988c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 989c6fd2807SJeff Garzik 990c6fd2807SJeff Garzik return nr_aborted; 991c6fd2807SJeff Garzik } 992c6fd2807SJeff Garzik 993c6fd2807SJeff Garzik /** 994dbd82616STejun Heo * ata_link_abort - abort all qc's on the link 995dbd82616STejun Heo * @link: ATA link to abort qc's for 996dbd82616STejun Heo * 997dbd82616STejun Heo * Abort all active qc's active on @link and schedule EH. 998dbd82616STejun Heo * 999dbd82616STejun Heo * LOCKING: 1000dbd82616STejun Heo * spin_lock_irqsave(host lock) 1001dbd82616STejun Heo * 1002dbd82616STejun Heo * RETURNS: 1003dbd82616STejun Heo * Number of aborted qc's. 1004dbd82616STejun Heo */ 1005dbd82616STejun Heo int ata_link_abort(struct ata_link *link) 1006dbd82616STejun Heo { 1007dbd82616STejun Heo return ata_do_link_abort(link->ap, link); 1008dbd82616STejun Heo } 1009dbd82616STejun Heo 1010dbd82616STejun Heo /** 1011dbd82616STejun Heo * ata_port_abort - abort all qc's on the port 1012dbd82616STejun Heo * @ap: ATA port to abort qc's for 1013dbd82616STejun Heo * 1014dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH. 1015dbd82616STejun Heo * 1016dbd82616STejun Heo * LOCKING: 1017dbd82616STejun Heo * spin_lock_irqsave(host_set lock) 1018dbd82616STejun Heo * 1019dbd82616STejun Heo * RETURNS: 1020dbd82616STejun Heo * Number of aborted qc's. 1021dbd82616STejun Heo */ 1022dbd82616STejun Heo int ata_port_abort(struct ata_port *ap) 1023dbd82616STejun Heo { 1024dbd82616STejun Heo return ata_do_link_abort(ap, NULL); 1025dbd82616STejun Heo } 1026dbd82616STejun Heo 1027dbd82616STejun Heo /** 1028c6fd2807SJeff Garzik * __ata_port_freeze - freeze port 1029c6fd2807SJeff Garzik * @ap: ATA port to freeze 1030c6fd2807SJeff Garzik * 1031c6fd2807SJeff Garzik * This function is called when HSM violation or some other 1032c6fd2807SJeff Garzik * condition disrupts normal operation of the port. Frozen port 1033c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is 1034c6fd2807SJeff Garzik * thawed, which usually follows a successful reset. 1035c6fd2807SJeff Garzik * 1036c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port 1037c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 1038c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler 1039c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port 1040c6fd2807SJeff Garzik * is frozen. 1041c6fd2807SJeff Garzik * 1042c6fd2807SJeff Garzik * LOCKING: 1043cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1044c6fd2807SJeff Garzik */ 1045c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap) 1046c6fd2807SJeff Garzik { 1047c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1048c6fd2807SJeff Garzik 1049c6fd2807SJeff Garzik if (ap->ops->freeze) 1050c6fd2807SJeff Garzik ap->ops->freeze(ap); 1051c6fd2807SJeff Garzik 1052c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN; 1053c6fd2807SJeff Garzik 105444877b4eSTejun Heo DPRINTK("ata%u port frozen\n", ap->print_id); 1055c6fd2807SJeff Garzik } 1056c6fd2807SJeff Garzik 1057c6fd2807SJeff Garzik /** 1058c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port 1059c6fd2807SJeff Garzik * @ap: ATA port to freeze 1060c6fd2807SJeff Garzik * 106154c38444SJeff Garzik * Abort and freeze @ap. The freeze operation must be called 106254c38444SJeff Garzik * first, because some hardware requires special operations 106354c38444SJeff Garzik * before the taskfile registers are accessible. 1064c6fd2807SJeff Garzik * 1065c6fd2807SJeff Garzik * LOCKING: 1066cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1067c6fd2807SJeff Garzik * 1068c6fd2807SJeff Garzik * RETURNS: 1069c6fd2807SJeff Garzik * Number of aborted commands. 1070c6fd2807SJeff Garzik */ 1071c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap) 1072c6fd2807SJeff Garzik { 1073c6fd2807SJeff Garzik int nr_aborted; 1074c6fd2807SJeff Garzik 1075c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1076c6fd2807SJeff Garzik 1077c6fd2807SJeff Garzik __ata_port_freeze(ap); 107854c38444SJeff Garzik nr_aborted = ata_port_abort(ap); 1079c6fd2807SJeff Garzik 1080c6fd2807SJeff Garzik return nr_aborted; 1081c6fd2807SJeff Garzik } 1082c6fd2807SJeff Garzik 1083c6fd2807SJeff Garzik /** 10847d77b247STejun Heo * sata_async_notification - SATA async notification handler 10857d77b247STejun Heo * @ap: ATA port where async notification is received 10867d77b247STejun Heo * 10877d77b247STejun Heo * Handler to be called when async notification via SDB FIS is 10887d77b247STejun Heo * received. This function schedules EH if necessary. 10897d77b247STejun Heo * 10907d77b247STejun Heo * LOCKING: 10917d77b247STejun Heo * spin_lock_irqsave(host lock) 10927d77b247STejun Heo * 10937d77b247STejun Heo * RETURNS: 10947d77b247STejun Heo * 1 if EH is scheduled, 0 otherwise. 10957d77b247STejun Heo */ 10967d77b247STejun Heo int sata_async_notification(struct ata_port *ap) 10977d77b247STejun Heo { 10987d77b247STejun Heo u32 sntf; 10997d77b247STejun Heo int rc; 11007d77b247STejun Heo 11017d77b247STejun Heo if (!(ap->flags & ATA_FLAG_AN)) 11027d77b247STejun Heo return 0; 11037d77b247STejun Heo 11047d77b247STejun Heo rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 11057d77b247STejun Heo if (rc == 0) 11067d77b247STejun Heo sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 11077d77b247STejun Heo 1108071f44b1STejun Heo if (!sata_pmp_attached(ap) || rc) { 11097d77b247STejun Heo /* PMP is not attached or SNTF is not available */ 1110071f44b1STejun Heo if (!sata_pmp_attached(ap)) { 11117d77b247STejun Heo /* PMP is not attached. Check whether ATAPI 11127d77b247STejun Heo * AN is configured. If so, notify media 11137d77b247STejun Heo * change. 11147d77b247STejun Heo */ 11157d77b247STejun Heo struct ata_device *dev = ap->link.device; 11167d77b247STejun Heo 11177d77b247STejun Heo if ((dev->class == ATA_DEV_ATAPI) && 11187d77b247STejun Heo (dev->flags & ATA_DFLAG_AN)) 11197d77b247STejun Heo ata_scsi_media_change_notify(dev); 11207d77b247STejun Heo return 0; 11217d77b247STejun Heo } else { 11227d77b247STejun Heo /* PMP is attached but SNTF is not available. 11237d77b247STejun Heo * ATAPI async media change notification is 11247d77b247STejun Heo * not used. The PMP must be reporting PHY 11257d77b247STejun Heo * status change, schedule EH. 11267d77b247STejun Heo */ 11277d77b247STejun Heo ata_port_schedule_eh(ap); 11287d77b247STejun Heo return 1; 11297d77b247STejun Heo } 11307d77b247STejun Heo } else { 11317d77b247STejun Heo /* PMP is attached and SNTF is available */ 11327d77b247STejun Heo struct ata_link *link; 11337d77b247STejun Heo 11347d77b247STejun Heo /* check and notify ATAPI AN */ 11351eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 11367d77b247STejun Heo if (!(sntf & (1 << link->pmp))) 11377d77b247STejun Heo continue; 11387d77b247STejun Heo 11397d77b247STejun Heo if ((link->device->class == ATA_DEV_ATAPI) && 11407d77b247STejun Heo (link->device->flags & ATA_DFLAG_AN)) 11417d77b247STejun Heo ata_scsi_media_change_notify(link->device); 11427d77b247STejun Heo } 11437d77b247STejun Heo 11447d77b247STejun Heo /* If PMP is reporting that PHY status of some 11457d77b247STejun Heo * downstream ports has changed, schedule EH. 11467d77b247STejun Heo */ 11477d77b247STejun Heo if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 11487d77b247STejun Heo ata_port_schedule_eh(ap); 11497d77b247STejun Heo return 1; 11507d77b247STejun Heo } 11517d77b247STejun Heo 11527d77b247STejun Heo return 0; 11537d77b247STejun Heo } 11547d77b247STejun Heo } 11557d77b247STejun Heo 11567d77b247STejun Heo /** 1157c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port 1158c6fd2807SJeff Garzik * @ap: ATA port to freeze 1159c6fd2807SJeff Garzik * 1160c6fd2807SJeff Garzik * Freeze @ap. 1161c6fd2807SJeff Garzik * 1162c6fd2807SJeff Garzik * LOCKING: 1163c6fd2807SJeff Garzik * None. 1164c6fd2807SJeff Garzik */ 1165c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1166c6fd2807SJeff Garzik { 1167c6fd2807SJeff Garzik unsigned long flags; 1168c6fd2807SJeff Garzik 1169c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1170c6fd2807SJeff Garzik return; 1171c6fd2807SJeff Garzik 1172c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1173c6fd2807SJeff Garzik __ata_port_freeze(ap); 1174c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1175c6fd2807SJeff Garzik } 1176c6fd2807SJeff Garzik 1177c6fd2807SJeff Garzik /** 1178c6fd2807SJeff Garzik * ata_port_thaw_port - EH helper to thaw port 1179c6fd2807SJeff Garzik * @ap: ATA port to thaw 1180c6fd2807SJeff Garzik * 1181c6fd2807SJeff Garzik * Thaw frozen port @ap. 1182c6fd2807SJeff Garzik * 1183c6fd2807SJeff Garzik * LOCKING: 1184c6fd2807SJeff Garzik * None. 1185c6fd2807SJeff Garzik */ 1186c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1187c6fd2807SJeff Garzik { 1188c6fd2807SJeff Garzik unsigned long flags; 1189c6fd2807SJeff Garzik 1190c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1191c6fd2807SJeff Garzik return; 1192c6fd2807SJeff Garzik 1193c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1194c6fd2807SJeff Garzik 1195c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1196c6fd2807SJeff Garzik 1197c6fd2807SJeff Garzik if (ap->ops->thaw) 1198c6fd2807SJeff Garzik ap->ops->thaw(ap); 1199c6fd2807SJeff Garzik 1200c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1201c6fd2807SJeff Garzik 120244877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id); 1203c6fd2807SJeff Garzik } 1204c6fd2807SJeff Garzik 1205c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1206c6fd2807SJeff Garzik { 1207c6fd2807SJeff Garzik /* nada */ 1208c6fd2807SJeff Garzik } 1209c6fd2807SJeff Garzik 1210c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1211c6fd2807SJeff Garzik { 1212c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1213c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1214c6fd2807SJeff Garzik unsigned long flags; 1215c6fd2807SJeff Garzik 1216c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1217c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1218c6fd2807SJeff Garzik __ata_qc_complete(qc); 1219c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1220c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1221c6fd2807SJeff Garzik 1222c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1223c6fd2807SJeff Garzik } 1224c6fd2807SJeff Garzik 1225c6fd2807SJeff Garzik /** 1226c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1227c6fd2807SJeff Garzik * @qc: Command to complete 1228c6fd2807SJeff Garzik * 1229c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1230c6fd2807SJeff Garzik * completed. To be used from EH. 1231c6fd2807SJeff Garzik */ 1232c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1233c6fd2807SJeff Garzik { 1234c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1235c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1236c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1237c6fd2807SJeff Garzik } 1238c6fd2807SJeff Garzik 1239c6fd2807SJeff Garzik /** 1240c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1241c6fd2807SJeff Garzik * @qc: Command to retry 1242c6fd2807SJeff Garzik * 1243c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1244c6fd2807SJeff Garzik * should be retried. To be used from EH. 1245c6fd2807SJeff Garzik * 1246c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1247c6fd2807SJeff Garzik * scmd->retries is decremented for commands which get retried 1248c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1249c6fd2807SJeff Garzik */ 1250c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1251c6fd2807SJeff Garzik { 1252c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1253c6fd2807SJeff Garzik if (!qc->err_mask && scmd->retries) 1254c6fd2807SJeff Garzik scmd->retries--; 1255c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1256c6fd2807SJeff Garzik } 1257c6fd2807SJeff Garzik 1258c6fd2807SJeff Garzik /** 1259678afac6STejun Heo * ata_dev_disable - disable ATA device 1260678afac6STejun Heo * @dev: ATA device to disable 1261678afac6STejun Heo * 1262678afac6STejun Heo * Disable @dev. 1263678afac6STejun Heo * 1264678afac6STejun Heo * Locking: 1265678afac6STejun Heo * EH context. 1266678afac6STejun Heo */ 1267678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1268678afac6STejun Heo { 1269678afac6STejun Heo if (!ata_dev_enabled(dev)) 1270678afac6STejun Heo return; 1271678afac6STejun Heo 1272678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1273678afac6STejun Heo ata_dev_printk(dev, KERN_WARNING, "disabled\n"); 1274678afac6STejun Heo ata_acpi_on_disable(dev); 1275678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1276678afac6STejun Heo dev->class++; 127799cf610aSTejun Heo 127899cf610aSTejun Heo /* From now till the next successful probe, ering is used to 127999cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 128099cf610aSTejun Heo */ 128199cf610aSTejun Heo ata_ering_clear(&dev->ering); 1282678afac6STejun Heo } 1283678afac6STejun Heo 1284678afac6STejun Heo /** 1285c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1286c6fd2807SJeff Garzik * @dev: ATA device to detach 1287c6fd2807SJeff Garzik * 1288c6fd2807SJeff Garzik * Detach @dev. 1289c6fd2807SJeff Garzik * 1290c6fd2807SJeff Garzik * LOCKING: 1291c6fd2807SJeff Garzik * None. 1292c6fd2807SJeff Garzik */ 1293fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1294c6fd2807SJeff Garzik { 1295f58229f8STejun Heo struct ata_link *link = dev->link; 1296f58229f8STejun Heo struct ata_port *ap = link->ap; 129790484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1298c6fd2807SJeff Garzik unsigned long flags; 1299c6fd2807SJeff Garzik 1300c6fd2807SJeff Garzik ata_dev_disable(dev); 1301c6fd2807SJeff Garzik 1302c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1303c6fd2807SJeff Garzik 1304c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1305c6fd2807SJeff Garzik 1306c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1307c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1308c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1309c6fd2807SJeff Garzik } 1310c6fd2807SJeff Garzik 131190484ebfSTejun Heo /* clear per-dev EH info */ 1312f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1313f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 131490484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 131590484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1316c6fd2807SJeff Garzik 1317c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1318c6fd2807SJeff Garzik } 1319c6fd2807SJeff Garzik 1320c6fd2807SJeff Garzik /** 1321c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1322955e57dfSTejun Heo * @link: target ATA link 1323c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1324c6fd2807SJeff Garzik * @action: action about to be performed 1325c6fd2807SJeff Garzik * 1326c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1327955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1328955e57dfSTejun Heo * repeated. 1329c6fd2807SJeff Garzik * 1330c6fd2807SJeff Garzik * LOCKING: 1331c6fd2807SJeff Garzik * None. 1332c6fd2807SJeff Garzik */ 1333fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1334c6fd2807SJeff Garzik unsigned int action) 1335c6fd2807SJeff Garzik { 1336955e57dfSTejun Heo struct ata_port *ap = link->ap; 1337955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1338955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1339c6fd2807SJeff Garzik unsigned long flags; 1340c6fd2807SJeff Garzik 1341c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1342c6fd2807SJeff Garzik 1343955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1344c6fd2807SJeff Garzik 1345a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1346a568d1d2STejun Heo * slave links as master will do them again. 1347a568d1d2STejun Heo */ 1348a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1349c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1350c6fd2807SJeff Garzik 1351c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1352c6fd2807SJeff Garzik } 1353c6fd2807SJeff Garzik 1354c6fd2807SJeff Garzik /** 1355c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1356c6fd2807SJeff Garzik * @ap: target ATA port 1357c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1358c6fd2807SJeff Garzik * @action: action just completed 1359c6fd2807SJeff Garzik * 1360c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1361955e57dfSTejun Heo * in @link->eh_context. 1362c6fd2807SJeff Garzik * 1363c6fd2807SJeff Garzik * LOCKING: 1364c6fd2807SJeff Garzik * None. 1365c6fd2807SJeff Garzik */ 1366fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1367c6fd2807SJeff Garzik unsigned int action) 1368c6fd2807SJeff Garzik { 1369955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13709af5c9c9STejun Heo 1371955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1372c6fd2807SJeff Garzik } 1373c6fd2807SJeff Garzik 1374c6fd2807SJeff Garzik /** 1375c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1376c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1377c6fd2807SJeff Garzik * 1378c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1379c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1380c6fd2807SJeff Garzik * error is reported. 1381c6fd2807SJeff Garzik * 1382c6fd2807SJeff Garzik * LOCKING: 1383c6fd2807SJeff Garzik * None. 1384c6fd2807SJeff Garzik * 1385c6fd2807SJeff Garzik * RETURNS: 1386c6fd2807SJeff Garzik * Descriptive string for @err_mask 1387c6fd2807SJeff Garzik */ 1388c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1389c6fd2807SJeff Garzik { 1390c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1391c6fd2807SJeff Garzik return "host bus error"; 1392c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1393c6fd2807SJeff Garzik return "ATA bus error"; 1394c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1395c6fd2807SJeff Garzik return "timeout"; 1396c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1397c6fd2807SJeff Garzik return "HSM violation"; 1398c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1399c6fd2807SJeff Garzik return "internal error"; 1400c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1401c6fd2807SJeff Garzik return "media error"; 1402c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1403c6fd2807SJeff Garzik return "invalid argument"; 1404c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1405c6fd2807SJeff Garzik return "device error"; 1406c6fd2807SJeff Garzik return "unknown error"; 1407c6fd2807SJeff Garzik } 1408c6fd2807SJeff Garzik 1409c6fd2807SJeff Garzik /** 1410c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1411c6fd2807SJeff Garzik * @dev: target device 1412c6fd2807SJeff Garzik * @page: page to read 1413c6fd2807SJeff Garzik * @buf: buffer to store read page 1414c6fd2807SJeff Garzik * @sectors: number of sectors to read 1415c6fd2807SJeff Garzik * 1416c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1417c6fd2807SJeff Garzik * 1418c6fd2807SJeff Garzik * LOCKING: 1419c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1420c6fd2807SJeff Garzik * 1421c6fd2807SJeff Garzik * RETURNS: 1422c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 1423c6fd2807SJeff Garzik */ 1424c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1425c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1426c6fd2807SJeff Garzik { 1427c6fd2807SJeff Garzik struct ata_taskfile tf; 1428c6fd2807SJeff Garzik unsigned int err_mask; 1429c6fd2807SJeff Garzik 1430c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1431c6fd2807SJeff Garzik 1432c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1433c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1434c6fd2807SJeff Garzik tf.lbal = page; 1435c6fd2807SJeff Garzik tf.nsect = sectors; 1436c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1437c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1438c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1439c6fd2807SJeff Garzik 1440c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 14412b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1442c6fd2807SJeff Garzik 1443c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1444c6fd2807SJeff Garzik return err_mask; 1445c6fd2807SJeff Garzik } 1446c6fd2807SJeff Garzik 1447c6fd2807SJeff Garzik /** 1448c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1449c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1450c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1451c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1452c6fd2807SJeff Garzik * 1453c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1454c6fd2807SJeff Garzik * condition. 1455c6fd2807SJeff Garzik * 1456c6fd2807SJeff Garzik * LOCKING: 1457c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1458c6fd2807SJeff Garzik * 1459c6fd2807SJeff Garzik * RETURNS: 1460c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 1461c6fd2807SJeff Garzik */ 1462c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1463c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1464c6fd2807SJeff Garzik { 14659af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1466c6fd2807SJeff Garzik unsigned int err_mask; 1467c6fd2807SJeff Garzik u8 csum; 1468c6fd2807SJeff Garzik int i; 1469c6fd2807SJeff Garzik 1470c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1471c6fd2807SJeff Garzik if (err_mask) 1472c6fd2807SJeff Garzik return -EIO; 1473c6fd2807SJeff Garzik 1474c6fd2807SJeff Garzik csum = 0; 1475c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1476c6fd2807SJeff Garzik csum += buf[i]; 1477c6fd2807SJeff Garzik if (csum) 1478c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1479c6fd2807SJeff Garzik "invalid checksum 0x%x on log page 10h\n", csum); 1480c6fd2807SJeff Garzik 1481c6fd2807SJeff Garzik if (buf[0] & 0x80) 1482c6fd2807SJeff Garzik return -ENOENT; 1483c6fd2807SJeff Garzik 1484c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1485c6fd2807SJeff Garzik 1486c6fd2807SJeff Garzik tf->command = buf[2]; 1487c6fd2807SJeff Garzik tf->feature = buf[3]; 1488c6fd2807SJeff Garzik tf->lbal = buf[4]; 1489c6fd2807SJeff Garzik tf->lbam = buf[5]; 1490c6fd2807SJeff Garzik tf->lbah = buf[6]; 1491c6fd2807SJeff Garzik tf->device = buf[7]; 1492c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1493c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1494c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1495c6fd2807SJeff Garzik tf->nsect = buf[12]; 1496c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1497c6fd2807SJeff Garzik 1498c6fd2807SJeff Garzik return 0; 1499c6fd2807SJeff Garzik } 1500c6fd2807SJeff Garzik 1501c6fd2807SJeff Garzik /** 150211fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 150311fc33daSTejun Heo * @dev: target ATAPI device 150411fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 150511fc33daSTejun Heo * 150611fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 150711fc33daSTejun Heo * 150811fc33daSTejun Heo * LOCKING: 150911fc33daSTejun Heo * EH context (may sleep). 151011fc33daSTejun Heo * 151111fc33daSTejun Heo * RETURNS: 151211fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 151311fc33daSTejun Heo */ 151411fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 151511fc33daSTejun Heo { 151611fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 151711fc33daSTejun Heo struct ata_taskfile tf; 151811fc33daSTejun Heo unsigned int err_mask; 151911fc33daSTejun Heo 152011fc33daSTejun Heo ata_tf_init(dev, &tf); 152111fc33daSTejun Heo 152211fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 152311fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 152411fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 152511fc33daSTejun Heo 152611fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 152711fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 152811fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 152911fc33daSTejun Heo return err_mask; 153011fc33daSTejun Heo } 153111fc33daSTejun Heo 153211fc33daSTejun Heo /** 1533c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1534c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1535c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 15363eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1537c6fd2807SJeff Garzik * 1538c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1539c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1540c6fd2807SJeff Garzik * 1541c6fd2807SJeff Garzik * LOCKING: 1542c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1543c6fd2807SJeff Garzik * 1544c6fd2807SJeff Garzik * RETURNS: 1545c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1546c6fd2807SJeff Garzik */ 15473eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 15483eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1549c6fd2807SJeff Garzik { 15503eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 15513eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 15529af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1553c6fd2807SJeff Garzik struct ata_taskfile tf; 1554c6fd2807SJeff Garzik 1555c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1556c6fd2807SJeff Garzik 1557c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1558c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1559c6fd2807SJeff Garzik 156056287768SAlbert Lee /* initialize sense_buf with the error register, 156156287768SAlbert Lee * for the case where they are -not- overwritten 156256287768SAlbert Lee */ 1563c6fd2807SJeff Garzik sense_buf[0] = 0x70; 15643eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 156556287768SAlbert Lee 156656287768SAlbert Lee /* some devices time out if garbage left in tf */ 156756287768SAlbert Lee ata_tf_init(dev, &tf); 1568c6fd2807SJeff Garzik 1569c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1570c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1571c6fd2807SJeff Garzik 1572c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1573c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 15740dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1575c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1576c6fd2807SJeff Garzik } else { 15770dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1578f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1579f2dfc1a1STejun Heo tf.lbah = 0; 1580c6fd2807SJeff Garzik } 1581c6fd2807SJeff Garzik 1582c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 15832b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1584c6fd2807SJeff Garzik } 1585c6fd2807SJeff Garzik 1586c6fd2807SJeff Garzik /** 1587c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 15880260731fSTejun Heo * @link: ATA link to analyze SError for 1589c6fd2807SJeff Garzik * 1590c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1591c6fd2807SJeff Garzik * failure. 1592c6fd2807SJeff Garzik * 1593c6fd2807SJeff Garzik * LOCKING: 1594c6fd2807SJeff Garzik * None. 1595c6fd2807SJeff Garzik */ 15960260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1597c6fd2807SJeff Garzik { 15980260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1599c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1600c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1601f9df58cbSTejun Heo u32 hotplug_mask; 1602c6fd2807SJeff Garzik 1603e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1604c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1605cf480626STejun Heo action |= ATA_EH_RESET; 1606c6fd2807SJeff Garzik } 1607c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1608c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1609cf480626STejun Heo action |= ATA_EH_RESET; 1610c6fd2807SJeff Garzik } 1611c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1612c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1613cf480626STejun Heo action |= ATA_EH_RESET; 1614c6fd2807SJeff Garzik } 1615f9df58cbSTejun Heo 1616f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1617f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1618f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1619f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1620f9df58cbSTejun Heo */ 16216b7ae954STejun Heo if (link->lpm_policy != ATA_LPM_MAX_POWER) 16226b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 16236b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1624f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1625f9df58cbSTejun Heo else 1626f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1627f9df58cbSTejun Heo 1628f9df58cbSTejun Heo if (serror & hotplug_mask) 1629c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1630c6fd2807SJeff Garzik 1631c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1632c6fd2807SJeff Garzik ehc->i.action |= action; 1633c6fd2807SJeff Garzik } 1634c6fd2807SJeff Garzik 1635c6fd2807SJeff Garzik /** 1636c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 16370260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1638c6fd2807SJeff Garzik * 1639c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1640c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1641c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1642c6fd2807SJeff Garzik * care of the rest. 1643c6fd2807SJeff Garzik * 1644c6fd2807SJeff Garzik * LOCKING: 1645c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1646c6fd2807SJeff Garzik */ 164710acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1648c6fd2807SJeff Garzik { 16490260731fSTejun Heo struct ata_port *ap = link->ap; 16500260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 16510260731fSTejun Heo struct ata_device *dev = link->device; 1652c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1653c6fd2807SJeff Garzik struct ata_taskfile tf; 1654c6fd2807SJeff Garzik int tag, rc; 1655c6fd2807SJeff Garzik 1656c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1657c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1658c6fd2807SJeff Garzik return; 1659c6fd2807SJeff Garzik 1660c6fd2807SJeff Garzik /* is it NCQ device error? */ 16610260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1662c6fd2807SJeff Garzik return; 1663c6fd2807SJeff Garzik 1664c6fd2807SJeff Garzik /* has LLDD analyzed already? */ 1665c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1666c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1667c6fd2807SJeff Garzik 1668c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1669c6fd2807SJeff Garzik continue; 1670c6fd2807SJeff Garzik 1671c6fd2807SJeff Garzik if (qc->err_mask) 1672c6fd2807SJeff Garzik return; 1673c6fd2807SJeff Garzik } 1674c6fd2807SJeff Garzik 1675c6fd2807SJeff Garzik /* okay, this error is ours */ 1676a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf)); 1677c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1678c6fd2807SJeff Garzik if (rc) { 16790260731fSTejun Heo ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1680c6fd2807SJeff Garzik "(errno=%d)\n", rc); 1681c6fd2807SJeff Garzik return; 1682c6fd2807SJeff Garzik } 1683c6fd2807SJeff Garzik 16840260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 16850260731fSTejun Heo ata_link_printk(link, KERN_ERR, "log page 10h reported " 1686c6fd2807SJeff Garzik "inactive tag %d\n", tag); 1687c6fd2807SJeff Garzik return; 1688c6fd2807SJeff Garzik } 1689c6fd2807SJeff Garzik 1690c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1691c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1692c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1693a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 16945335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1695c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1696c6fd2807SJeff Garzik } 1697c6fd2807SJeff Garzik 1698c6fd2807SJeff Garzik /** 1699c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1700c6fd2807SJeff Garzik * @qc: qc to analyze 1701c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1702c6fd2807SJeff Garzik * 1703c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1704c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 1705c6fd2807SJeff Garzik * avaliable. 1706c6fd2807SJeff Garzik * 1707c6fd2807SJeff Garzik * LOCKING: 1708c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1709c6fd2807SJeff Garzik * 1710c6fd2807SJeff Garzik * RETURNS: 1711c6fd2807SJeff Garzik * Determined recovery action 1712c6fd2807SJeff Garzik */ 1713c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1714c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1715c6fd2807SJeff Garzik { 1716c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1717c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1718c6fd2807SJeff Garzik 1719c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1720c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1721cf480626STejun Heo return ATA_EH_RESET; 1722c6fd2807SJeff Garzik } 1723c6fd2807SJeff Garzik 1724a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1725a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1726a51d644aSTejun Heo else 1727c6fd2807SJeff Garzik return 0; 1728c6fd2807SJeff Garzik 1729c6fd2807SJeff Garzik switch (qc->dev->class) { 1730c6fd2807SJeff Garzik case ATA_DEV_ATA: 1731c6fd2807SJeff Garzik if (err & ATA_ICRC) 1732c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1733c6fd2807SJeff Garzik if (err & ATA_UNC) 1734c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1735c6fd2807SJeff Garzik if (err & ATA_IDNF) 1736c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1737c6fd2807SJeff Garzik break; 1738c6fd2807SJeff Garzik 1739c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1740a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 17413eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 17423eabddb8STejun Heo qc->scsicmd->sense_buffer, 17433eabddb8STejun Heo qc->result_tf.feature >> 4); 1744c6fd2807SJeff Garzik if (!tmp) { 1745a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1746a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1747a569a30dSTejun Heo * data is already valid. 1748c6fd2807SJeff Garzik * 1749c6fd2807SJeff Garzik * TODO: interpret sense data and set 1750c6fd2807SJeff Garzik * appropriate err_mask. 1751c6fd2807SJeff Garzik */ 1752c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1753c6fd2807SJeff Garzik } else 1754c6fd2807SJeff Garzik qc->err_mask |= tmp; 1755c6fd2807SJeff Garzik } 1756a569a30dSTejun Heo } 1757c6fd2807SJeff Garzik 1758c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1759cf480626STejun Heo action |= ATA_EH_RESET; 1760c6fd2807SJeff Garzik 1761c6fd2807SJeff Garzik return action; 1762c6fd2807SJeff Garzik } 1763c6fd2807SJeff Garzik 176476326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 176576326ac1STejun Heo int *xfer_ok) 1766c6fd2807SJeff Garzik { 176776326ac1STejun Heo int base = 0; 176876326ac1STejun Heo 176976326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 177076326ac1STejun Heo *xfer_ok = 1; 177176326ac1STejun Heo 177276326ac1STejun Heo if (!*xfer_ok) 177375f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 177476326ac1STejun Heo 17757d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 177676326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1777c6fd2807SJeff Garzik 17787d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 177976326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17807d47e8d4STejun Heo 17813884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 17827d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 178376326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17847d47e8d4STejun Heo if ((err_mask & 17857d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 178676326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1787c6fd2807SJeff Garzik } 1788c6fd2807SJeff Garzik 1789c6fd2807SJeff Garzik return 0; 1790c6fd2807SJeff Garzik } 1791c6fd2807SJeff Garzik 17927d47e8d4STejun Heo struct speed_down_verdict_arg { 1793c6fd2807SJeff Garzik u64 since; 179476326ac1STejun Heo int xfer_ok; 17953884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1796c6fd2807SJeff Garzik }; 1797c6fd2807SJeff Garzik 17987d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1799c6fd2807SJeff Garzik { 18007d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 180176326ac1STejun Heo int cat; 1802c6fd2807SJeff Garzik 1803d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1804c6fd2807SJeff Garzik return -1; 1805c6fd2807SJeff Garzik 180676326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 180776326ac1STejun Heo &arg->xfer_ok); 18087d47e8d4STejun Heo arg->nr_errors[cat]++; 180976326ac1STejun Heo 1810c6fd2807SJeff Garzik return 0; 1811c6fd2807SJeff Garzik } 1812c6fd2807SJeff Garzik 1813c6fd2807SJeff Garzik /** 18147d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1815c6fd2807SJeff Garzik * @dev: Device of interest 1816c6fd2807SJeff Garzik * 1817c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 18187d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 18197d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 1820c6fd2807SJeff Garzik * 18213884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1822c6fd2807SJeff Garzik * 18233884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 18243884f7b0STejun Heo * IO commands 18257d47e8d4STejun Heo * 18263884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1827c6fd2807SJeff Garzik * 182876326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 182976326ac1STejun Heo * data transfer hasn't been verified. 183076326ac1STejun Heo * 18313884f7b0STejun Heo * Verdicts are 18327d47e8d4STejun Heo * 18333884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 18347d47e8d4STejun Heo * 18353884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 18363884f7b0STejun Heo * to PIO. 18373884f7b0STejun Heo * 18383884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 18393884f7b0STejun Heo * 18403884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 184176326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 184276326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 184376326ac1STejun Heo * This is to expedite speed down decisions right after device is 184476326ac1STejun Heo * initially configured. 18453884f7b0STejun Heo * 184676326ac1STejun Heo * The followings are speed down rules. #1 and #2 deal with 184776326ac1STejun Heo * DUBIOUS errors. 184876326ac1STejun Heo * 184976326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 185076326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 185176326ac1STejun Heo * 185276326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 185376326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 185476326ac1STejun Heo * 185576326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 18563884f7b0STejun Heo * ocurred during last 5 mins, FALLBACK_TO_PIO 18573884f7b0STejun Heo * 185876326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 18593884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 18603884f7b0STejun Heo * 186176326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 18623884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 18637d47e8d4STejun Heo * 1864c6fd2807SJeff Garzik * LOCKING: 1865c6fd2807SJeff Garzik * Inherited from caller. 1866c6fd2807SJeff Garzik * 1867c6fd2807SJeff Garzik * RETURNS: 18687d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 1869c6fd2807SJeff Garzik */ 18707d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1871c6fd2807SJeff Garzik { 18727d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 18737d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 18747d47e8d4STejun Heo struct speed_down_verdict_arg arg; 18757d47e8d4STejun Heo unsigned int verdict = 0; 1876c6fd2807SJeff Garzik 18773884f7b0STejun Heo /* scan past 5 mins of error history */ 18783884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 18793884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 18803884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 18813884f7b0STejun Heo 188276326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 188376326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 188476326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 188576326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 188676326ac1STejun Heo 188776326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 188876326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 188976326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 189076326ac1STejun Heo 18913884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18923884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1893663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18943884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 18953884f7b0STejun Heo 18967d47e8d4STejun Heo /* scan past 10 mins of error history */ 1897c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 18987d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 18997d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1900c6fd2807SJeff Garzik 19013884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 19023884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 19037d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 19043884f7b0STejun Heo 19053884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 19063884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1907663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 19087d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1909c6fd2807SJeff Garzik 19107d47e8d4STejun Heo return verdict; 1911c6fd2807SJeff Garzik } 1912c6fd2807SJeff Garzik 1913c6fd2807SJeff Garzik /** 1914c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1915c6fd2807SJeff Garzik * @dev: Failed device 19163884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1917c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1918c6fd2807SJeff Garzik * 1919c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1920c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1921c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1922c6fd2807SJeff Garzik * necessary. 1923c6fd2807SJeff Garzik * 1924c6fd2807SJeff Garzik * LOCKING: 1925c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1926c6fd2807SJeff Garzik * 1927c6fd2807SJeff Garzik * RETURNS: 19287d47e8d4STejun Heo * Determined recovery action. 1929c6fd2807SJeff Garzik */ 19303884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 19313884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1932c6fd2807SJeff Garzik { 1933b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 193476326ac1STejun Heo int xfer_ok = 0; 19357d47e8d4STejun Heo unsigned int verdict; 19367d47e8d4STejun Heo unsigned int action = 0; 19377d47e8d4STejun Heo 19387d47e8d4STejun Heo /* don't bother if Cat-0 error */ 193976326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1940c6fd2807SJeff Garzik return 0; 1941c6fd2807SJeff Garzik 1942c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 19433884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 19447d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1945c6fd2807SJeff Garzik 19467d47e8d4STejun Heo /* turn off NCQ? */ 19477d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 19487d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 19497d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 19507d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 19517d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 19527d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 19537d47e8d4STejun Heo goto done; 19547d47e8d4STejun Heo } 1955c6fd2807SJeff Garzik 19567d47e8d4STejun Heo /* speed down? */ 19577d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1958c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1959a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1960cf480626STejun Heo action |= ATA_EH_RESET; 19617d47e8d4STejun Heo goto done; 19627d47e8d4STejun Heo } 1963c6fd2807SJeff Garzik 1964c6fd2807SJeff Garzik /* lower transfer mode */ 19657d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 19667d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 19677d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 19687d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 19697d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 19707d47e8d4STejun Heo int sel; 1971c6fd2807SJeff Garzik 19727d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 19737d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 19747d47e8d4STejun Heo else 19757d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 19767d47e8d4STejun Heo 19777d47e8d4STejun Heo dev->spdn_cnt++; 19787d47e8d4STejun Heo 19797d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1980cf480626STejun Heo action |= ATA_EH_RESET; 19817d47e8d4STejun Heo goto done; 19827d47e8d4STejun Heo } 19837d47e8d4STejun Heo } 19847d47e8d4STejun Heo } 19857d47e8d4STejun Heo 19867d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1987663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 19887d47e8d4STejun Heo */ 19897d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1990663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 19917d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 19927d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 19937d47e8d4STejun Heo dev->spdn_cnt = 0; 1994cf480626STejun Heo action |= ATA_EH_RESET; 19957d47e8d4STejun Heo goto done; 19967d47e8d4STejun Heo } 19977d47e8d4STejun Heo } 19987d47e8d4STejun Heo 1999c6fd2807SJeff Garzik return 0; 20007d47e8d4STejun Heo done: 20017d47e8d4STejun Heo /* device has been slowed down, blow error history */ 200276326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 20037d47e8d4STejun Heo ata_ering_clear(&dev->ering); 20047d47e8d4STejun Heo return action; 2005c6fd2807SJeff Garzik } 2006c6fd2807SJeff Garzik 2007c6fd2807SJeff Garzik /** 20089b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 20099b1e2658STejun Heo * @link: host link to perform autopsy on 2010c6fd2807SJeff Garzik * 20110260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 20120260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 20130260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 2014c6fd2807SJeff Garzik * 2015c6fd2807SJeff Garzik * LOCKING: 2016c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2017c6fd2807SJeff Garzik */ 20189b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 2019c6fd2807SJeff Garzik { 20200260731fSTejun Heo struct ata_port *ap = link->ap; 2021936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2022dfcc173dSTejun Heo struct ata_device *dev; 20233884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 20243884f7b0STejun Heo int tag; 2025c6fd2807SJeff Garzik u32 serror; 2026c6fd2807SJeff Garzik int rc; 2027c6fd2807SJeff Garzik 2028c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2029c6fd2807SJeff Garzik 2030c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2031c6fd2807SJeff Garzik return; 2032c6fd2807SJeff Garzik 2033c6fd2807SJeff Garzik /* obtain and analyze SError */ 2034936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2035c6fd2807SJeff Garzik if (rc == 0) { 2036c6fd2807SJeff Garzik ehc->i.serror |= serror; 20370260731fSTejun Heo ata_eh_analyze_serror(link); 20384e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2039cf480626STejun Heo /* SError read failed, force reset and probing */ 2040b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2041cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20424e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 20434e57c517STejun Heo } 2044c6fd2807SJeff Garzik 2045c6fd2807SJeff Garzik /* analyze NCQ failure */ 20460260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2047c6fd2807SJeff Garzik 2048c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2049c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2050c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2051c6fd2807SJeff Garzik 2052c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2053c6fd2807SJeff Garzik 2054c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2055c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2056c6fd2807SJeff Garzik 2057b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2058b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2059c6fd2807SJeff Garzik continue; 2060c6fd2807SJeff Garzik 2061c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2062c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2063c6fd2807SJeff Garzik 2064c6fd2807SJeff Garzik /* analyze TF */ 2065c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2066c6fd2807SJeff Garzik 2067c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2068c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2069c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2070c6fd2807SJeff Garzik AC_ERR_INVALID); 2071c6fd2807SJeff Garzik 2072c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2073c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2074c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2075c6fd2807SJeff Garzik 2076c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2077f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2078c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2079c6fd2807SJeff Garzik 208003faab78STejun Heo /* determine whether the command is worth retrying */ 2081534ead70STejun Heo if (qc->flags & ATA_QCFLAG_IO || 2082534ead70STejun Heo (!(qc->err_mask & AC_ERR_INVALID) && 2083534ead70STejun Heo qc->err_mask != AC_ERR_DEV)) 208403faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 208503faab78STejun Heo 2086c6fd2807SJeff Garzik /* accumulate error info */ 2087c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2088c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2089c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 20903884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2091c6fd2807SJeff Garzik } 2092c6fd2807SJeff Garzik 2093c6fd2807SJeff Garzik /* enforce default EH actions */ 2094c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2095c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2096cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20973884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20983884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2099c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2100c6fd2807SJeff Garzik 2101dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2102dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2103dfcc173dSTejun Heo */ 2104c6fd2807SJeff Garzik if (ehc->i.dev) { 2105c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2106c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2107c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2108c6fd2807SJeff Garzik } 2109c6fd2807SJeff Garzik 21102695e366STejun Heo /* propagate timeout to host link */ 21112695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 21122695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 21132695e366STejun Heo 21142695e366STejun Heo /* record error and consider speeding down */ 2115dfcc173dSTejun Heo dev = ehc->i.dev; 21162695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 21172695e366STejun Heo ata_dev_enabled(link->device)))) 2118dfcc173dSTejun Heo dev = link->device; 2119dfcc173dSTejun Heo 212076326ac1STejun Heo if (dev) { 212176326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 212276326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 21233884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 212476326ac1STejun Heo } 2125dfcc173dSTejun Heo 2126c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2127c6fd2807SJeff Garzik } 2128c6fd2807SJeff Garzik 2129c6fd2807SJeff Garzik /** 21309b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 21319b1e2658STejun Heo * @ap: host port to perform autopsy on 21329b1e2658STejun Heo * 21339b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 21349b1e2658STejun Heo * which recovery actions are needed. 21359b1e2658STejun Heo * 21369b1e2658STejun Heo * LOCKING: 21379b1e2658STejun Heo * Kernel thread context (may sleep). 21389b1e2658STejun Heo */ 2139fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 21409b1e2658STejun Heo { 21419b1e2658STejun Heo struct ata_link *link; 21429b1e2658STejun Heo 21431eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 21449b1e2658STejun Heo ata_eh_link_autopsy(link); 21452695e366STejun Heo 2146b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2147b1c72916STejun Heo * but actions and flags are transferred over to the master 2148b1c72916STejun Heo * link and handled from there. 2149b1c72916STejun Heo */ 2150b1c72916STejun Heo if (ap->slave_link) { 2151b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2152b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2153b1c72916STejun Heo 2154848e4c68STejun Heo /* transfer control flags from master to slave */ 2155848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2156848e4c68STejun Heo 2157848e4c68STejun Heo /* perform autopsy on the slave link */ 2158b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2159b1c72916STejun Heo 2160848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2161b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2162b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2163b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2164b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2165b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2166b1c72916STejun Heo } 2167b1c72916STejun Heo 21682695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 21692695e366STejun Heo * Perform host link autopsy last. 21702695e366STejun Heo */ 2171071f44b1STejun Heo if (sata_pmp_attached(ap)) 21722695e366STejun Heo ata_eh_link_autopsy(&ap->link); 21739b1e2658STejun Heo } 21749b1e2658STejun Heo 21759b1e2658STejun Heo /** 21766521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 21776521148cSRobert Hancock * @command: ATA command code to get description for 21786521148cSRobert Hancock * 21796521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 21806521148cSRobert Hancock * command is not known. 21816521148cSRobert Hancock * 21826521148cSRobert Hancock * LOCKING: 21836521148cSRobert Hancock * None 21846521148cSRobert Hancock */ 21856521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 21866521148cSRobert Hancock { 21876521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 21886521148cSRobert Hancock static const struct 21896521148cSRobert Hancock { 21906521148cSRobert Hancock u8 command; 21916521148cSRobert Hancock const char *text; 21926521148cSRobert Hancock } cmd_descr[] = { 21936521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 21946521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 21956521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 21966521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 21976521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 21986521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 21996521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 22006521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 22016521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 22026521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 22036521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 22046521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 22056521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 22066521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 22076521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 22086521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 22096521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 22106521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 22116521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 22126521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 22136521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 22146521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 22156521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 22166521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 22176521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 22186521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 22196521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 22206521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 22216521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 22226521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 22236521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 22246521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 22256521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 22266521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 22276521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 22286521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 22296521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 22306521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 22316521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 22326521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 22336521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 22346521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 22356521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 22366521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 22376521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 22386521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 22396521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 22406521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 22416521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 22426521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 22436521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 22446521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 22456521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 22466521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 22476521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 22486521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 22496521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 22506521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 22516521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 22526521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 22536521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 22546521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 22556521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 22566521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 22576521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 22586521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 22596521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 22606521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 22616521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2262acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 22636521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 22646521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 22656521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 22666521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 22676521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 22686521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 22696521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 22706521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 22716521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 22726521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 22736521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 22746521148cSRobert Hancock { 0, NULL } /* terminate list */ 22756521148cSRobert Hancock }; 22766521148cSRobert Hancock 22776521148cSRobert Hancock unsigned int i; 22786521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 22796521148cSRobert Hancock if (cmd_descr[i].command == command) 22806521148cSRobert Hancock return cmd_descr[i].text; 22816521148cSRobert Hancock #endif 22826521148cSRobert Hancock 22836521148cSRobert Hancock return NULL; 22846521148cSRobert Hancock } 22856521148cSRobert Hancock 22866521148cSRobert Hancock /** 22879b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22880260731fSTejun Heo * @link: ATA link EH is going on 2289c6fd2807SJeff Garzik * 2290c6fd2807SJeff Garzik * Report EH to user. 2291c6fd2807SJeff Garzik * 2292c6fd2807SJeff Garzik * LOCKING: 2293c6fd2807SJeff Garzik * None. 2294c6fd2807SJeff Garzik */ 22959b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2296c6fd2807SJeff Garzik { 22970260731fSTejun Heo struct ata_port *ap = link->ap; 22980260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2299c6fd2807SJeff Garzik const char *frozen, *desc; 2300a1e10f7eSTejun Heo char tries_buf[6]; 2301c6fd2807SJeff Garzik int tag, nr_failed = 0; 2302c6fd2807SJeff Garzik 230394ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 230494ff3d54STejun Heo return; 230594ff3d54STejun Heo 2306c6fd2807SJeff Garzik desc = NULL; 2307c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2308c6fd2807SJeff Garzik desc = ehc->i.desc; 2309c6fd2807SJeff Garzik 2310c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2311c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2312c6fd2807SJeff Garzik 2313b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2314b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2315e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2316e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2317c6fd2807SJeff Garzik continue; 2318c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2319c6fd2807SJeff Garzik continue; 2320c6fd2807SJeff Garzik 2321c6fd2807SJeff Garzik nr_failed++; 2322c6fd2807SJeff Garzik } 2323c6fd2807SJeff Garzik 2324c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2325c6fd2807SJeff Garzik return; 2326c6fd2807SJeff Garzik 2327c6fd2807SJeff Garzik frozen = ""; 2328c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2329c6fd2807SJeff Garzik frozen = " frozen"; 2330c6fd2807SJeff Garzik 2331a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2332a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2333a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2334a1e10f7eSTejun Heo ap->eh_tries); 2335a1e10f7eSTejun Heo 2336c6fd2807SJeff Garzik if (ehc->i.dev) { 2337c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2338a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2339a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2340a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2341c6fd2807SJeff Garzik if (desc) 2342b64bbc39STejun Heo ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2343c6fd2807SJeff Garzik } else { 23440260731fSTejun Heo ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2345a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2346a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2347a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2348c6fd2807SJeff Garzik if (desc) 23490260731fSTejun Heo ata_link_printk(link, KERN_ERR, "%s\n", desc); 2350c6fd2807SJeff Garzik } 2351c6fd2807SJeff Garzik 23526521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23531333e194SRobert Hancock if (ehc->i.serror) 2354da0e21d3STejun Heo ata_link_printk(link, KERN_ERR, 23551333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 23561333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 23571333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 23581333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 23591333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 23601333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 23611333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 23621333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 23631333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 23641333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 23651333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 23661333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 23671333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 23681333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 23691333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 23701333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 23711333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 23721333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 23736521148cSRobert Hancock #endif 23741333e194SRobert Hancock 2375c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2376c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 23778a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2378abb6a889STejun Heo const u8 *cdb = qc->cdb; 2379abb6a889STejun Heo char data_buf[20] = ""; 2380abb6a889STejun Heo char cdb_buf[70] = ""; 2381c6fd2807SJeff Garzik 23820260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2383b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2384c6fd2807SJeff Garzik continue; 2385c6fd2807SJeff Garzik 2386abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2387abb6a889STejun Heo static const char *dma_str[] = { 2388abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2389abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2390abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2391abb6a889STejun Heo }; 2392abb6a889STejun Heo static const char *prot_str[] = { 2393abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2394abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2395abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 23960dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 23970dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2398abb6a889STejun Heo }; 2399abb6a889STejun Heo 2400abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2401abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2402abb6a889STejun Heo dma_str[qc->dma_dir]); 2403abb6a889STejun Heo } 2404abb6a889STejun Heo 24056521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 24066521148cSRobert Hancock if (qc->scsicmd) 24076521148cSRobert Hancock scsi_print_command(qc->scsicmd); 24086521148cSRobert Hancock else 2409abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2410abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2411abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2412abb6a889STejun Heo cdb[0], cdb[1], cdb[2], cdb[3], 2413abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2414abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2415abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 24166521148cSRobert Hancock } else { 24176521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 24186521148cSRobert Hancock if (descr) 24196521148cSRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24206521148cSRobert Hancock "failed command: %s\n", descr); 24216521148cSRobert Hancock } 2422abb6a889STejun Heo 24238a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 24248a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2425abb6a889STejun Heo "tag %d%s\n %s" 24268a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 24275335b729STejun Heo "Emask 0x%x (%s)%s\n", 24288a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 24298a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 24308a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 24318a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2432abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 24338a937581STejun Heo res->command, res->feature, res->nsect, 24348a937581STejun Heo res->lbal, res->lbam, res->lbah, 24358a937581STejun Heo res->hob_feature, res->hob_nsect, 24368a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 24375335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 24385335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 24391333e194SRobert Hancock 24406521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 24411333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 24421333e194SRobert Hancock ATA_ERR)) { 24431333e194SRobert Hancock if (res->command & ATA_BUSY) 24441333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24451333e194SRobert Hancock "status: { Busy }\n"); 24461333e194SRobert Hancock else 24471333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24481333e194SRobert Hancock "status: { %s%s%s%s}\n", 24491333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 24501333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 24511333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 24521333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 24531333e194SRobert Hancock } 24541333e194SRobert Hancock 24551333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 24561333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 24571333e194SRobert Hancock ATA_ABORTED))) 24581333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24591333e194SRobert Hancock "error: { %s%s%s%s}\n", 24601333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 24611333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 24621333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 24631333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 24646521148cSRobert Hancock #endif 2465c6fd2807SJeff Garzik } 2466c6fd2807SJeff Garzik } 2467c6fd2807SJeff Garzik 24689b1e2658STejun Heo /** 24699b1e2658STejun Heo * ata_eh_report - report error handling to user 24709b1e2658STejun Heo * @ap: ATA port to report EH about 24719b1e2658STejun Heo * 24729b1e2658STejun Heo * Report EH to user. 24739b1e2658STejun Heo * 24749b1e2658STejun Heo * LOCKING: 24759b1e2658STejun Heo * None. 24769b1e2658STejun Heo */ 2477fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24789b1e2658STejun Heo { 24799b1e2658STejun Heo struct ata_link *link; 24809b1e2658STejun Heo 24811eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 24829b1e2658STejun Heo ata_eh_link_report(link); 24839b1e2658STejun Heo } 24849b1e2658STejun Heo 2485cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2486b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2487b1c72916STejun Heo bool clear_classes) 2488c6fd2807SJeff Garzik { 2489f58229f8STejun Heo struct ata_device *dev; 2490c6fd2807SJeff Garzik 2491b1c72916STejun Heo if (clear_classes) 24921eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2493f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2494c6fd2807SJeff Garzik 2495f046519fSTejun Heo return reset(link, classes, deadline); 2496c6fd2807SJeff Garzik } 2497c6fd2807SJeff Garzik 2498ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link, 24995dbfc9cbSTejun Heo int rc, const unsigned int *classes) 2500c6fd2807SJeff Garzik { 250145db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2502ae791c05STejun Heo return 0; 25035dbfc9cbSTejun Heo if (rc == -EAGAIN) 2504c6fd2807SJeff Garzik return 1; 2505071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 25063495de73STejun Heo return 1; 2507c6fd2807SJeff Garzik return 0; 2508c6fd2807SJeff Garzik } 2509c6fd2807SJeff Garzik 2510fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2511c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2512c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2513c6fd2807SJeff Garzik { 2514afaa5c37STejun Heo struct ata_port *ap = link->ap; 2515b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2516936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2517705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; 2518c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2519416dc9edSTejun Heo unsigned int lflags = link->flags; 2520c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2521d8af0eb6STejun Heo int max_tries = 0, try = 0; 2522b1c72916STejun Heo struct ata_link *failed_link; 2523f58229f8STejun Heo struct ata_device *dev; 2524416dc9edSTejun Heo unsigned long deadline, now; 2525c6fd2807SJeff Garzik ata_reset_fn_t reset; 2526afaa5c37STejun Heo unsigned long flags; 2527416dc9edSTejun Heo u32 sstatus; 2528b1c72916STejun Heo int nr_unknown, rc; 2529c6fd2807SJeff Garzik 2530932648b0STejun Heo /* 2531932648b0STejun Heo * Prepare to reset 2532932648b0STejun Heo */ 2533d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2534d8af0eb6STejun Heo max_tries++; 253505944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 253605944bdfSTejun Heo hardreset = NULL; 253705944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 253805944bdfSTejun Heo softreset = NULL; 2539d8af0eb6STejun Heo 254019b72321STejun Heo /* make sure each reset attemp is at least COOL_DOWN apart */ 254119b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 25420a2c0f56STejun Heo now = jiffies; 254319b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 254419b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 254519b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 25460a2c0f56STejun Heo if (time_before(now, deadline)) 25470a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 254819b72321STejun Heo } 25490a2c0f56STejun Heo 2550afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2551afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2552afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2553afaa5c37STejun Heo 2554cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2555c6fd2807SJeff Garzik 25561eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2557cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2558cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2559cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2560cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2561cdeab114STejun Heo * suitable controller mode we should not touch the 2562cdeab114STejun Heo * bus as we may be talking too fast. 2563cdeab114STejun Heo */ 2564cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2565cdeab114STejun Heo 2566cdeab114STejun Heo /* If the controller has a pio mode setup function 2567cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2568cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2569cdeab114STejun Heo * configuring devices. 2570cdeab114STejun Heo */ 2571cdeab114STejun Heo if (ap->ops->set_piomode) 2572cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2573cdeab114STejun Heo } 2574cdeab114STejun Heo 2575cf480626STejun Heo /* prefer hardreset */ 2576932648b0STejun Heo reset = NULL; 2577cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2578cf480626STejun Heo if (hardreset) { 2579cf480626STejun Heo reset = hardreset; 2580a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 25814f7faa3fSTejun Heo } else if (softreset) { 2582cf480626STejun Heo reset = softreset; 2583a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2584cf480626STejun Heo } 2585c6fd2807SJeff Garzik 2586c6fd2807SJeff Garzik if (prereset) { 2587b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2588b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2589b1c72916STejun Heo 2590b1c72916STejun Heo if (slave) { 2591b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2592b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2593b1c72916STejun Heo } 2594b1c72916STejun Heo 2595b1c72916STejun Heo rc = prereset(link, deadline); 2596b1c72916STejun Heo 2597b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2598b1c72916STejun Heo * is skipped iff both master and slave links report 2599b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2600b1c72916STejun Heo */ 2601b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2602b1c72916STejun Heo int tmp; 2603b1c72916STejun Heo 2604b1c72916STejun Heo tmp = prereset(slave, deadline); 2605b1c72916STejun Heo if (tmp != -ENOENT) 2606b1c72916STejun Heo rc = tmp; 2607b1c72916STejun Heo 2608b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2609b1c72916STejun Heo } 2610b1c72916STejun Heo 2611c6fd2807SJeff Garzik if (rc) { 2612c961922bSAlan Cox if (rc == -ENOENT) { 2613cc0680a5STejun Heo ata_link_printk(link, KERN_DEBUG, 26144aa9ab67STejun Heo "port disabled. ignoring.\n"); 2615cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 26164aa9ab67STejun Heo 26171eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2618f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 26194aa9ab67STejun Heo 26204aa9ab67STejun Heo rc = 0; 2621c961922bSAlan Cox } else 2622cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2623c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 2624fccb6ea5STejun Heo goto out; 2625c6fd2807SJeff Garzik } 2626c6fd2807SJeff Garzik 2627932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2628d6515e6fSTejun Heo * bang classes, thaw and return. 2629932648b0STejun Heo */ 2630932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 26311eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2632f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2633d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2634d6515e6fSTejun Heo ata_is_host_link(link)) 2635d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2636fccb6ea5STejun Heo rc = 0; 2637fccb6ea5STejun Heo goto out; 2638c6fd2807SJeff Garzik } 2639932648b0STejun Heo } 2640c6fd2807SJeff Garzik 2641c6fd2807SJeff Garzik retry: 2642932648b0STejun Heo /* 2643932648b0STejun Heo * Perform reset 2644932648b0STejun Heo */ 2645dc98c32cSTejun Heo if (ata_is_host_link(link)) 2646dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2647dc98c32cSTejun Heo 2648341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 264931daabdaSTejun Heo 2650932648b0STejun Heo if (reset) { 2651c6fd2807SJeff Garzik if (verbose) 2652cc0680a5STejun Heo ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2653c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard"); 2654c6fd2807SJeff Garzik 2655c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 265619b72321STejun Heo ehc->last_reset = jiffies; 26570d64a233STejun Heo if (reset == hardreset) 26580d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 26590d64a233STejun Heo else 26600d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2661c6fd2807SJeff Garzik 2662b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2663b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2664b1c72916STejun Heo failed_link = link; 26655dbfc9cbSTejun Heo goto fail; 2666b1c72916STejun Heo } 2667c6fd2807SJeff Garzik 2668b1c72916STejun Heo /* hardreset slave link if existent */ 2669b1c72916STejun Heo if (slave && reset == hardreset) { 2670b1c72916STejun Heo int tmp; 2671b1c72916STejun Heo 2672b1c72916STejun Heo if (verbose) 2673b1c72916STejun Heo ata_link_printk(slave, KERN_INFO, 2674b1c72916STejun Heo "hard resetting link\n"); 2675b1c72916STejun Heo 2676b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2677b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2678b1c72916STejun Heo false); 2679b1c72916STejun Heo switch (tmp) { 2680b1c72916STejun Heo case -EAGAIN: 2681b1c72916STejun Heo rc = -EAGAIN; 2682b1c72916STejun Heo case 0: 2683b1c72916STejun Heo break; 2684b1c72916STejun Heo default: 2685b1c72916STejun Heo failed_link = slave; 2686b1c72916STejun Heo rc = tmp; 2687b1c72916STejun Heo goto fail; 2688b1c72916STejun Heo } 2689b1c72916STejun Heo } 2690b1c72916STejun Heo 2691b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2692c6fd2807SJeff Garzik if (reset == hardreset && 26935dbfc9cbSTejun Heo ata_eh_followup_srst_needed(link, rc, classes)) { 2694c6fd2807SJeff Garzik reset = softreset; 2695c6fd2807SJeff Garzik 2696c6fd2807SJeff Garzik if (!reset) { 2697cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2698c6fd2807SJeff Garzik "follow-up softreset required " 2699c6fd2807SJeff Garzik "but no softreset avaliable\n"); 2700b1c72916STejun Heo failed_link = link; 2701fccb6ea5STejun Heo rc = -EINVAL; 270208cf69d0STejun Heo goto fail; 2703c6fd2807SJeff Garzik } 2704c6fd2807SJeff Garzik 2705cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2706b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2707fe2c4d01STejun Heo if (rc) { 2708fe2c4d01STejun Heo failed_link = link; 2709fe2c4d01STejun Heo goto fail; 2710fe2c4d01STejun Heo } 2711c6fd2807SJeff Garzik } 2712932648b0STejun Heo } else { 2713932648b0STejun Heo if (verbose) 2714932648b0STejun Heo ata_link_printk(link, KERN_INFO, "no reset method " 2715932648b0STejun Heo "available, skipping reset\n"); 2716932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2717932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2718932648b0STejun Heo } 2719008a7896STejun Heo 2720932648b0STejun Heo /* 2721932648b0STejun Heo * Post-reset processing 2722932648b0STejun Heo */ 27231eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2724416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2725416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2726416dc9edSTejun Heo * drives from sleeping mode. 2727c6fd2807SJeff Garzik */ 2728f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2729054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2730c6fd2807SJeff Garzik 27313b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 27323b761d3dSTejun Heo continue; 27333b761d3dSTejun Heo 27344ccd3329STejun Heo /* apply class override */ 2735416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2736ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2737416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2738816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2739ae791c05STejun Heo } 2740ae791c05STejun Heo 2741008a7896STejun Heo /* record current link speed */ 2742936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2743936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2744b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2745b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2746008a7896STejun Heo 2747dc98c32cSTejun Heo /* thaw the port */ 2748dc98c32cSTejun Heo if (ata_is_host_link(link)) 2749dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2750dc98c32cSTejun Heo 2751f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2752f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2753f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2754f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2755f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2756f046519fSTejun Heo * link onlineness and classification result later. 2757f046519fSTejun Heo */ 2758b1c72916STejun Heo if (postreset) { 2759cc0680a5STejun Heo postreset(link, classes); 2760b1c72916STejun Heo if (slave) 2761b1c72916STejun Heo postreset(slave, classes); 2762b1c72916STejun Heo } 2763c6fd2807SJeff Garzik 27641e641060STejun Heo /* 27651e641060STejun Heo * Some controllers can't be frozen very well and may set 27661e641060STejun Heo * spuruious error conditions during reset. Clear accumulated 27671e641060STejun Heo * error information. As reset is the final recovery action, 27681e641060STejun Heo * nothing is lost by doing this. 27691e641060STejun Heo */ 2770f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 27711e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2772b1c72916STejun Heo if (slave) 27731e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 27741e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2775f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2776f046519fSTejun Heo 27773b761d3dSTejun Heo /* 27783b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2779f046519fSTejun Heo * Hotplug could have happened during reset and some 2780f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2781f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 27823b761d3dSTejun Heo * link on/offlineness and classification result, those 27833b761d3dSTejun Heo * conditions can be reliably detected and retried. 2784f046519fSTejun Heo */ 2785b1c72916STejun Heo nr_unknown = 0; 27861eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 27873b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2788b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 27893b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link online " 27903b761d3dSTejun Heo "but device misclassifed\n"); 2791f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2792b1c72916STejun Heo nr_unknown++; 2793b1c72916STejun Heo } 27943b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 27953b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 27963b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link offline, " 27973b761d3dSTejun Heo "clearing class %d to NONE\n", 27983b761d3dSTejun Heo classes[dev->devno]); 27993b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28003b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 28013b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link status unknown, " 28023b761d3dSTejun Heo "clearing UNKNOWN to NONE\n"); 28033b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28043b761d3dSTejun Heo } 2805f046519fSTejun Heo } 2806f046519fSTejun Heo 2807b1c72916STejun Heo if (classify && nr_unknown) { 2808f046519fSTejun Heo if (try < max_tries) { 2809f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, "link online but " 28103b761d3dSTejun Heo "%d devices misclassified, retrying\n", 28113b761d3dSTejun Heo nr_unknown); 2812b1c72916STejun Heo failed_link = link; 2813f046519fSTejun Heo rc = -EAGAIN; 2814f046519fSTejun Heo goto fail; 2815f046519fSTejun Heo } 2816f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, 28173b761d3dSTejun Heo "link online but %d devices misclassified, " 28183b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2819f046519fSTejun Heo } 2820f046519fSTejun Heo 2821c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2822cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2823b1c72916STejun Heo if (slave) 2824b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 282519b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2826c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 28276b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2828416dc9edSTejun Heo 2829416dc9edSTejun Heo rc = 0; 2830fccb6ea5STejun Heo out: 2831fccb6ea5STejun Heo /* clear hotplug flag */ 2832fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2833b1c72916STejun Heo if (slave) 2834b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2835afaa5c37STejun Heo 2836afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2837afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2838afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2839afaa5c37STejun Heo 2840c6fd2807SJeff Garzik return rc; 2841416dc9edSTejun Heo 2842416dc9edSTejun Heo fail: 28435958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 28445958e302STejun Heo if (!ata_is_host_link(link) && 28455958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 28465958e302STejun Heo rc = -ERESTART; 28475958e302STejun Heo 2848416dc9edSTejun Heo if (rc == -ERESTART || try >= max_tries) 2849416dc9edSTejun Heo goto out; 2850416dc9edSTejun Heo 2851416dc9edSTejun Heo now = jiffies; 2852416dc9edSTejun Heo if (time_before(now, deadline)) { 2853416dc9edSTejun Heo unsigned long delta = deadline - now; 2854416dc9edSTejun Heo 2855b1c72916STejun Heo ata_link_printk(failed_link, KERN_WARNING, 28560a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 28570a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2858416dc9edSTejun Heo 2859*c0c362b6STejun Heo ata_eh_release(ap); 2860416dc9edSTejun Heo while (delta) 2861416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2862*c0c362b6STejun Heo ata_eh_acquire(ap); 2863416dc9edSTejun Heo } 2864416dc9edSTejun Heo 2865b1c72916STejun Heo if (try == max_tries - 1) { 2866a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2867b1c72916STejun Heo if (slave) 2868a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2869b1c72916STejun Heo } else if (rc == -EPIPE) 2870a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2871b1c72916STejun Heo 2872416dc9edSTejun Heo if (hardreset) 2873416dc9edSTejun Heo reset = hardreset; 2874416dc9edSTejun Heo goto retry; 2875c6fd2807SJeff Garzik } 2876c6fd2807SJeff Garzik 287745fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 287845fabbb7SElias Oltmanns { 287945fabbb7SElias Oltmanns struct ata_link *link; 288045fabbb7SElias Oltmanns struct ata_device *dev; 288145fabbb7SElias Oltmanns unsigned long flags; 288245fabbb7SElias Oltmanns 288345fabbb7SElias Oltmanns /* 288445fabbb7SElias Oltmanns * This function can be thought of as an extended version of 288545fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 288645fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 288745fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 288845fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 288945fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 289045fabbb7SElias Oltmanns * up park requests to other devices on the same port or 289145fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 289245fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 289345fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 289445fabbb7SElias Oltmanns * 289545fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 289645fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 289745fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 289845fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 289945fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for 290045fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 290145fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 290245fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 290345fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 290445fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 290545fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 290645fabbb7SElias Oltmanns * ata_eh_recover() again. 290745fabbb7SElias Oltmanns */ 290845fabbb7SElias Oltmanns 290945fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 291045fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 29111eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 29121eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 291345fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 291445fabbb7SElias Oltmanns 291545fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 291645fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 291745fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 291845fabbb7SElias Oltmanns } 291945fabbb7SElias Oltmanns } 292045fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 292145fabbb7SElias Oltmanns } 292245fabbb7SElias Oltmanns 292345fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 292445fabbb7SElias Oltmanns { 292545fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 292645fabbb7SElias Oltmanns struct ata_taskfile tf; 292745fabbb7SElias Oltmanns unsigned int err_mask; 292845fabbb7SElias Oltmanns 292945fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 293045fabbb7SElias Oltmanns if (park) { 293145fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 293245fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 293345fabbb7SElias Oltmanns tf.feature = 0x44; 293445fabbb7SElias Oltmanns tf.lbal = 0x4c; 293545fabbb7SElias Oltmanns tf.lbam = 0x4e; 293645fabbb7SElias Oltmanns tf.lbah = 0x55; 293745fabbb7SElias Oltmanns } else { 293845fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 293945fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 294045fabbb7SElias Oltmanns } 294145fabbb7SElias Oltmanns 294245fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 294345fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 294445fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 294545fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 294645fabbb7SElias Oltmanns ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 294745fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 294845fabbb7SElias Oltmanns } 294945fabbb7SElias Oltmanns } 295045fabbb7SElias Oltmanns 29510260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2952c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2953c6fd2807SJeff Garzik { 29540260731fSTejun Heo struct ata_port *ap = link->ap; 29550260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2956c6fd2807SJeff Garzik struct ata_device *dev; 29578c3c52a8STejun Heo unsigned int new_mask = 0; 2958c6fd2807SJeff Garzik unsigned long flags; 2959f58229f8STejun Heo int rc = 0; 2960c6fd2807SJeff Garzik 2961c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2962c6fd2807SJeff Garzik 29638c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 29648c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 29658c3c52a8STejun Heo * device before the master device is identified. 29668c3c52a8STejun Heo */ 29671eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2968f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2969f58229f8STejun Heo unsigned int readid_flags = 0; 2970c6fd2807SJeff Garzik 2971bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2972bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2973bff04647STejun Heo 29749666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2975633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2976633273a3STejun Heo 2977b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2978c6fd2807SJeff Garzik rc = -EIO; 29798c3c52a8STejun Heo goto err; 2980c6fd2807SJeff Garzik } 2981c6fd2807SJeff Garzik 29820260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2983422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2984422c9daaSTejun Heo readid_flags); 2985c6fd2807SJeff Garzik if (rc) 29868c3c52a8STejun Heo goto err; 2987c6fd2807SJeff Garzik 29880260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2989c6fd2807SJeff Garzik 2990baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2991baa1e78aSTejun Heo * transfer mode. 2992baa1e78aSTejun Heo */ 2993baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2994baa1e78aSTejun Heo 2995c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2996ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 2997c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2998c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2999c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3000842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3001842faa6cSTejun Heo * permanently set once all configurations are 3002842faa6cSTejun Heo * complete. This is necessary because new 3003842faa6cSTejun Heo * device configuration is done in two 3004842faa6cSTejun Heo * separate loops. 3005842faa6cSTejun Heo */ 3006c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3007c6fd2807SJeff Garzik 3008633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3009633273a3STejun Heo rc = sata_pmp_attach(dev); 3010633273a3STejun Heo else 3011633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3012633273a3STejun Heo readid_flags, dev->id); 3013842faa6cSTejun Heo 3014842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3015842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3016842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3017842faa6cSTejun Heo 30188c3c52a8STejun Heo switch (rc) { 30198c3c52a8STejun Heo case 0: 302099cf610aSTejun Heo /* clear error info accumulated during probe */ 302199cf610aSTejun Heo ata_ering_clear(&dev->ering); 3022f58229f8STejun Heo new_mask |= 1 << dev->devno; 30238c3c52a8STejun Heo break; 30248c3c52a8STejun Heo case -ENOENT: 302555a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 302655a8e2c8STejun Heo * device. No need to reset. Just 3027842faa6cSTejun Heo * thaw and ignore the device. 302855a8e2c8STejun Heo */ 302955a8e2c8STejun Heo ata_eh_thaw_port(ap); 3030c6fd2807SJeff Garzik break; 30318c3c52a8STejun Heo default: 30328c3c52a8STejun Heo goto err; 30338c3c52a8STejun Heo } 30348c3c52a8STejun Heo } 3035c6fd2807SJeff Garzik } 3036c6fd2807SJeff Garzik 3037c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 303833267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 303933267325STejun Heo if (ap->ops->cable_detect) 3040c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 304133267325STejun Heo ata_force_cbl(ap); 304233267325STejun Heo } 3043c1c4e8d5STejun Heo 30448c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 30458c3c52a8STejun Heo * device detection messages backwards. 30468c3c52a8STejun Heo */ 30471eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 30484f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 30498c3c52a8STejun Heo continue; 30508c3c52a8STejun Heo 3051842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3052842faa6cSTejun Heo 30534f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 30544f7c2874STejun Heo continue; 30554f7c2874STejun Heo 30568c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 30578c3c52a8STejun Heo rc = ata_dev_configure(dev); 30588c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3059842faa6cSTejun Heo if (rc) { 3060842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 30618c3c52a8STejun Heo goto err; 3062842faa6cSTejun Heo } 30638c3c52a8STejun Heo 3064c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3065c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3066c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3067baa1e78aSTejun Heo 306855a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3069baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3070c6fd2807SJeff Garzik } 3071c6fd2807SJeff Garzik 30728c3c52a8STejun Heo return 0; 30738c3c52a8STejun Heo 30748c3c52a8STejun Heo err: 3075c6fd2807SJeff Garzik *r_failed_dev = dev; 30768c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3077c6fd2807SJeff Garzik return rc; 3078c6fd2807SJeff Garzik } 3079c6fd2807SJeff Garzik 30806f1d1e3aSTejun Heo /** 30816f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 30826f1d1e3aSTejun Heo * @link: link on which timings will be programmed 308398a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 30846f1d1e3aSTejun Heo * 30856f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 30866f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 30876f1d1e3aSTejun Heo * returned in @r_failed_dev. 30886f1d1e3aSTejun Heo * 30896f1d1e3aSTejun Heo * LOCKING: 30906f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 30916f1d1e3aSTejun Heo * 30926f1d1e3aSTejun Heo * RETURNS: 30936f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 30946f1d1e3aSTejun Heo */ 30956f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 30966f1d1e3aSTejun Heo { 30976f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 309800115e0fSTejun Heo struct ata_device *dev; 309900115e0fSTejun Heo int rc; 31006f1d1e3aSTejun Heo 310176326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 31021eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 310376326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 310476326ac1STejun Heo struct ata_ering_entry *ent; 310576326ac1STejun Heo 310676326ac1STejun Heo ent = ata_ering_top(&dev->ering); 310776326ac1STejun Heo if (ent) 310876326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 310976326ac1STejun Heo } 311076326ac1STejun Heo } 311176326ac1STejun Heo 31126f1d1e3aSTejun Heo /* has private set_mode? */ 31136f1d1e3aSTejun Heo if (ap->ops->set_mode) 311400115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 311500115e0fSTejun Heo else 311600115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 311700115e0fSTejun Heo 311800115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 31191eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 312000115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 312100115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 312200115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 312300115e0fSTejun Heo 312400115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 312500115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 312600115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 312700115e0fSTejun Heo } 312800115e0fSTejun Heo 312900115e0fSTejun Heo return rc; 31306f1d1e3aSTejun Heo } 31316f1d1e3aSTejun Heo 313211fc33daSTejun Heo /** 313311fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 313411fc33daSTejun Heo * @dev: ATAPI device to clear UA for 313511fc33daSTejun Heo * 313611fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 313711fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 313811fc33daSTejun Heo * function clears UA. 313911fc33daSTejun Heo * 314011fc33daSTejun Heo * LOCKING: 314111fc33daSTejun Heo * EH context (may sleep). 314211fc33daSTejun Heo * 314311fc33daSTejun Heo * RETURNS: 314411fc33daSTejun Heo * 0 on success, -errno on failure. 314511fc33daSTejun Heo */ 314611fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 314711fc33daSTejun Heo { 314811fc33daSTejun Heo int i; 314911fc33daSTejun Heo 315011fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3151b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 315211fc33daSTejun Heo u8 sense_key = 0; 315311fc33daSTejun Heo unsigned int err_mask; 315411fc33daSTejun Heo 315511fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 315611fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 315711fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 315811fc33daSTejun Heo "failed (err_mask=0x%x)\n", err_mask); 315911fc33daSTejun Heo return -EIO; 316011fc33daSTejun Heo } 316111fc33daSTejun Heo 316211fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 316311fc33daSTejun Heo return 0; 316411fc33daSTejun Heo 316511fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 316611fc33daSTejun Heo if (err_mask) { 316711fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to clear " 316811fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 316911fc33daSTejun Heo return -EIO; 317011fc33daSTejun Heo } 317111fc33daSTejun Heo } 317211fc33daSTejun Heo 317311fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, 317411fc33daSTejun Heo "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 317511fc33daSTejun Heo 317611fc33daSTejun Heo return 0; 317711fc33daSTejun Heo } 317811fc33daSTejun Heo 31796013efd8STejun Heo /** 31806013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 31816013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 31826013efd8STejun Heo * 31836013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 31846013efd8STejun Heo * immediately as it means that @dev failed to remap and already 31856013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 31866013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 31876013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 31886013efd8STejun Heo * to be retried. 31896013efd8STejun Heo * 31906013efd8STejun Heo * This function determines whether FLUSH failure retry is 31916013efd8STejun Heo * necessary and performs it if so. 31926013efd8STejun Heo * 31936013efd8STejun Heo * RETURNS: 31946013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 31956013efd8STejun Heo */ 31966013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 31976013efd8STejun Heo { 31986013efd8STejun Heo struct ata_link *link = dev->link; 31996013efd8STejun Heo struct ata_port *ap = link->ap; 32006013efd8STejun Heo struct ata_queued_cmd *qc; 32016013efd8STejun Heo struct ata_taskfile tf; 32026013efd8STejun Heo unsigned int err_mask; 32036013efd8STejun Heo int rc = 0; 32046013efd8STejun Heo 32056013efd8STejun Heo /* did flush fail for this device? */ 32066013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 32076013efd8STejun Heo return 0; 32086013efd8STejun Heo 32096013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 32106013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 32116013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 32126013efd8STejun Heo return 0; 32136013efd8STejun Heo 32146013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 32156013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 32166013efd8STejun Heo return 0; 32176013efd8STejun Heo 32186013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 32196013efd8STejun Heo ata_tf_init(dev, &tf); 32206013efd8STejun Heo 32216013efd8STejun Heo tf.command = qc->tf.command; 32226013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 32236013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 32246013efd8STejun Heo 32256013efd8STejun Heo ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n", 32266013efd8STejun Heo tf.command, qc->err_mask); 32276013efd8STejun Heo 32286013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 32296013efd8STejun Heo if (!err_mask) { 32306013efd8STejun Heo /* 32316013efd8STejun Heo * FLUSH is complete but there's no way to 32326013efd8STejun Heo * successfully complete a failed command from EH. 32336013efd8STejun Heo * Making sure retry is allowed at least once and 32346013efd8STejun Heo * retrying it should do the trick - whatever was in 32356013efd8STejun Heo * the cache is already on the platter and this won't 32366013efd8STejun Heo * cause infinite loop. 32376013efd8STejun Heo */ 32386013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 32396013efd8STejun Heo } else { 32406013efd8STejun Heo ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", 32416013efd8STejun Heo err_mask); 32426013efd8STejun Heo rc = -EIO; 32436013efd8STejun Heo 32446013efd8STejun Heo /* if device failed it, report it to upper layers */ 32456013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 32466013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 32476013efd8STejun Heo qc->result_tf = tf; 32486013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 32496013efd8STejun Heo rc = 0; 32506013efd8STejun Heo } 32516013efd8STejun Heo } 32526013efd8STejun Heo return rc; 32536013efd8STejun Heo } 32546013efd8STejun Heo 32556b7ae954STejun Heo /** 32566b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 32576b7ae954STejun Heo * @link: link to configure power management 32586b7ae954STejun Heo * @policy: the link power management policy 32596b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 32606b7ae954STejun Heo * 32616b7ae954STejun Heo * Enable SATA Interface power management. This will enable 32626b7ae954STejun Heo * Device Interface Power Management (DIPM) for min_power 32636b7ae954STejun Heo * policy, and then call driver specific callbacks for 32646b7ae954STejun Heo * enabling Host Initiated Power management. 32656b7ae954STejun Heo * 32666b7ae954STejun Heo * LOCKING: 32676b7ae954STejun Heo * EH context. 32686b7ae954STejun Heo * 32696b7ae954STejun Heo * RETURNS: 32706b7ae954STejun Heo * 0 on success, -errno on failure. 32716b7ae954STejun Heo */ 32726b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 32736b7ae954STejun Heo struct ata_device **r_failed_dev) 32746b7ae954STejun Heo { 32756c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 32766b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 32776b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 32786b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 32796b7ae954STejun Heo unsigned int err_mask; 32806b7ae954STejun Heo int rc; 32816b7ae954STejun Heo 32826b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 32836b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 32846b7ae954STejun Heo return 0; 32856b7ae954STejun Heo 32866b7ae954STejun Heo /* 32876b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 32886b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 32896b7ae954STejun Heo * device and link configurations such that the host always 32906b7ae954STejun Heo * allows DIPM requests. 32916b7ae954STejun Heo */ 32926b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 32936b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 32946b7ae954STejun Heo bool dipm = ata_id_has_dipm(dev->id); 32956b7ae954STejun Heo 32966b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 32976b7ae954STejun Heo if (!link_dev) 32986b7ae954STejun Heo link_dev = dev; 32996b7ae954STejun Heo 33006b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 33016b7ae954STejun Heo lpm_dev = dev; 33026b7ae954STejun Heo 33036b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 33046b7ae954STejun Heo if (!hipm) 33056b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 33066b7ae954STejun Heo 33076b7ae954STejun Heo /* disable DIPM before changing link config */ 33086b7ae954STejun Heo if (policy != ATA_LPM_MIN_POWER && dipm) { 33096b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33106b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 33116b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 33126b7ae954STejun Heo ata_dev_printk(dev, KERN_WARNING, 33136b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 33146b7ae954STejun Heo err_mask); 33156b7ae954STejun Heo rc = -EIO; 33166b7ae954STejun Heo goto fail; 33176b7ae954STejun Heo } 33186b7ae954STejun Heo } 33196b7ae954STejun Heo } 33206b7ae954STejun Heo 33216c8ea89cSTejun Heo if (ap) { 33226b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 33236b7ae954STejun Heo if (!rc && ap->slave_link) 33246b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 33256c8ea89cSTejun Heo } else 33266c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 33276b7ae954STejun Heo 33286b7ae954STejun Heo /* 33296b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 33306b7ae954STejun Heo * device on the link. 33316b7ae954STejun Heo */ 33326b7ae954STejun Heo if (rc) { 33336b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 33346b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33356b7ae954STejun Heo return 0; 33366b7ae954STejun Heo } 33376b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 33386b7ae954STejun Heo goto fail; 33396b7ae954STejun Heo } 33406b7ae954STejun Heo 33416b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 33426b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 33436b7ae954STejun Heo if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { 33446b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33456b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 33466b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 33476b7ae954STejun Heo ata_dev_printk(dev, KERN_WARNING, 33486b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 33496b7ae954STejun Heo err_mask); 33506b7ae954STejun Heo rc = -EIO; 33516b7ae954STejun Heo goto fail; 33526b7ae954STejun Heo } 33536b7ae954STejun Heo } 33546b7ae954STejun Heo } 33556b7ae954STejun Heo 33566b7ae954STejun Heo link->lpm_policy = policy; 33576b7ae954STejun Heo if (ap && ap->slave_link) 33586b7ae954STejun Heo ap->slave_link->lpm_policy = policy; 33596b7ae954STejun Heo return 0; 33606b7ae954STejun Heo 33616b7ae954STejun Heo fail: 33626b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 33636b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 33646b7ae954STejun Heo ata_link_printk(link, KERN_WARNING, 33656b7ae954STejun Heo "disabling LPM on the link\n"); 33666b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33676b7ae954STejun Heo } 33686b7ae954STejun Heo if (r_failed_dev) 33696b7ae954STejun Heo *r_failed_dev = dev; 33706b7ae954STejun Heo return rc; 33716b7ae954STejun Heo } 33726b7ae954STejun Heo 33730260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link) 3374c6fd2807SJeff Garzik { 3375f58229f8STejun Heo struct ata_device *dev; 3376f58229f8STejun Heo int cnt = 0; 3377c6fd2807SJeff Garzik 33781eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3379c6fd2807SJeff Garzik cnt++; 3380c6fd2807SJeff Garzik return cnt; 3381c6fd2807SJeff Garzik } 3382c6fd2807SJeff Garzik 33830260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3384c6fd2807SJeff Garzik { 3385f58229f8STejun Heo struct ata_device *dev; 3386f58229f8STejun Heo int cnt = 0; 3387c6fd2807SJeff Garzik 33881eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3389f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3390c6fd2807SJeff Garzik cnt++; 3391c6fd2807SJeff Garzik return cnt; 3392c6fd2807SJeff Garzik } 3393c6fd2807SJeff Garzik 33940260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3395c6fd2807SJeff Garzik { 3396672b2d65STejun Heo struct ata_port *ap = link->ap; 33970260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3398f58229f8STejun Heo struct ata_device *dev; 3399c6fd2807SJeff Garzik 3400f9df58cbSTejun Heo /* skip disabled links */ 3401f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3402f9df58cbSTejun Heo return 1; 3403f9df58cbSTejun Heo 3404e2f3d75fSTejun Heo /* skip if explicitly requested */ 3405e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3406e2f3d75fSTejun Heo return 1; 3407e2f3d75fSTejun Heo 3408672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3409672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3410672b2d65STejun Heo return 0; 3411672b2d65STejun Heo 3412672b2d65STejun Heo /* reset at least once if reset is requested */ 3413672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3414672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3415c6fd2807SJeff Garzik return 0; 3416c6fd2807SJeff Garzik 3417c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 34181eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3419c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3420c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3421c6fd2807SJeff Garzik return 0; 3422c6fd2807SJeff Garzik } 3423c6fd2807SJeff Garzik 3424c6fd2807SJeff Garzik return 1; 3425c6fd2807SJeff Garzik } 3426c6fd2807SJeff Garzik 3427c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3428c2c7a89cSTejun Heo { 3429c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3430c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3431c2c7a89cSTejun Heo int *trials = void_arg; 3432c2c7a89cSTejun Heo 3433c2c7a89cSTejun Heo if (ent->timestamp < now - min(now, interval)) 3434c2c7a89cSTejun Heo return -1; 3435c2c7a89cSTejun Heo 3436c2c7a89cSTejun Heo (*trials)++; 3437c2c7a89cSTejun Heo return 0; 3438c2c7a89cSTejun Heo } 3439c2c7a89cSTejun Heo 344002c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 344102c05a27STejun Heo { 344202c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3443c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3444c2c7a89cSTejun Heo int trials = 0; 344502c05a27STejun Heo 344602c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 344702c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 344802c05a27STejun Heo return 0; 344902c05a27STejun Heo 345002c05a27STejun Heo ata_eh_detach_dev(dev); 345102c05a27STejun Heo ata_dev_init(dev); 345202c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3453cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 345400115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 345500115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 345602c05a27STejun Heo 34576b7ae954STejun Heo /* the link maybe in a deep sleep, wake it up */ 34586c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 34596c8ea89cSTejun Heo if (ata_is_host_link(link)) 34606c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 34616c8ea89cSTejun Heo ATA_LPM_EMPTY); 34626c8ea89cSTejun Heo else 34636c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 34646c8ea89cSTejun Heo ATA_LPM_EMPTY); 34656c8ea89cSTejun Heo } 34666b7ae954STejun Heo 3467c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3468c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3469c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3470c2c7a89cSTejun Heo * there are consecutive failed probes. 3471c2c7a89cSTejun Heo * 3472c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3473c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3474c2c7a89cSTejun Heo * forced to 1.5Gbps. 3475c2c7a89cSTejun Heo * 3476c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3477c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3478c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3479c2c7a89cSTejun Heo */ 3480c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3481c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3482c2c7a89cSTejun Heo 3483c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3484c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3485c2c7a89cSTejun Heo 348602c05a27STejun Heo return 1; 348702c05a27STejun Heo } 348802c05a27STejun Heo 34899b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3490fee7ca72STejun Heo { 34919af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3492fee7ca72STejun Heo 3493cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3494cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3495cf9a590aSTejun Heo */ 3496cf9a590aSTejun Heo if (err != -EAGAIN) 3497fee7ca72STejun Heo ehc->tries[dev->devno]--; 3498fee7ca72STejun Heo 3499fee7ca72STejun Heo switch (err) { 3500fee7ca72STejun Heo case -ENODEV: 3501fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3502fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3503fee7ca72STejun Heo case -EINVAL: 3504fee7ca72STejun Heo /* give it just one more chance */ 3505fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3506fee7ca72STejun Heo case -EIO: 3507d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3508fee7ca72STejun Heo /* This is the last chance, better to slow 3509fee7ca72STejun Heo * down than lose it. 3510fee7ca72STejun Heo */ 3511a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3512d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3513fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3514fee7ca72STejun Heo } 3515fee7ca72STejun Heo } 3516fee7ca72STejun Heo 3517fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3518fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3519fee7ca72STejun Heo ata_dev_disable(dev); 3520fee7ca72STejun Heo 3521fee7ca72STejun Heo /* detach if offline */ 3522b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3523fee7ca72STejun Heo ata_eh_detach_dev(dev); 3524fee7ca72STejun Heo 352502c05a27STejun Heo /* schedule probe if necessary */ 352687fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3527fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 352887fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 352987fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 353087fbc5a0STejun Heo } 35319b1e2658STejun Heo 35329b1e2658STejun Heo return 1; 3533fee7ca72STejun Heo } else { 3534cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 35359b1e2658STejun Heo return 0; 3536fee7ca72STejun Heo } 3537fee7ca72STejun Heo } 3538fee7ca72STejun Heo 3539c6fd2807SJeff Garzik /** 3540c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3541c6fd2807SJeff Garzik * @ap: host port to recover 3542c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3543c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3544c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3545c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 35469b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3547c6fd2807SJeff Garzik * 3548c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3549c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 35509b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 35519b1e2658STejun Heo * link's eh_context. This function executes all the operations 35529b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3553c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3554c6fd2807SJeff Garzik * 3555c6fd2807SJeff Garzik * LOCKING: 3556c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3557c6fd2807SJeff Garzik * 3558c6fd2807SJeff Garzik * RETURNS: 3559c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3560c6fd2807SJeff Garzik */ 3561fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3562c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 35639b1e2658STejun Heo ata_postreset_fn_t postreset, 35649b1e2658STejun Heo struct ata_link **r_failed_link) 3565c6fd2807SJeff Garzik { 35669b1e2658STejun Heo struct ata_link *link; 3567c6fd2807SJeff Garzik struct ata_device *dev; 35686b7ae954STejun Heo int rc, nr_fails; 356945fabbb7SElias Oltmanns unsigned long flags, deadline; 3570c6fd2807SJeff Garzik 3571c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3572c6fd2807SJeff Garzik 3573c6fd2807SJeff Garzik /* prep for recovery */ 35741eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35759b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35769b1e2658STejun Heo 3577f9df58cbSTejun Heo /* re-enable link? */ 3578f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3579f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3580f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3581f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3582f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3583f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3584f9df58cbSTejun Heo } 3585f9df58cbSTejun Heo 35861eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3587fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3588fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3589fd995f70STejun Heo else 3590c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3591c6fd2807SJeff Garzik 359279a55b72STejun Heo /* collect port action mask recorded in dev actions */ 35939b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 35949b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3595f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 359679a55b72STejun Heo 3597c6fd2807SJeff Garzik /* process hotplug request */ 3598c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3599c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3600c6fd2807SJeff Garzik 360102c05a27STejun Heo /* schedule probe if necessary */ 360202c05a27STejun Heo if (!ata_dev_enabled(dev)) 360302c05a27STejun Heo ata_eh_schedule_probe(dev); 3604c6fd2807SJeff Garzik } 36059b1e2658STejun Heo } 3606c6fd2807SJeff Garzik 3607c6fd2807SJeff Garzik retry: 3608c6fd2807SJeff Garzik rc = 0; 3609c6fd2807SJeff Garzik 3610c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3611c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3612c6fd2807SJeff Garzik goto out; 3613c6fd2807SJeff Garzik 36149b1e2658STejun Heo /* prep for EH */ 36151eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36169b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36179b1e2658STejun Heo 3618c6fd2807SJeff Garzik /* skip EH if possible. */ 36190260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3620c6fd2807SJeff Garzik ehc->i.action = 0; 3621c6fd2807SJeff Garzik 36221eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3623f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 36249b1e2658STejun Heo } 3625c6fd2807SJeff Garzik 3626c6fd2807SJeff Garzik /* reset */ 36271eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36289b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36299b1e2658STejun Heo 3630cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 36319b1e2658STejun Heo continue; 36329b1e2658STejun Heo 36339b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3634dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3635c6fd2807SJeff Garzik if (rc) { 36360260731fSTejun Heo ata_link_printk(link, KERN_ERR, 3637c6fd2807SJeff Garzik "reset failed, giving up\n"); 3638c6fd2807SJeff Garzik goto out; 3639c6fd2807SJeff Garzik } 36409b1e2658STejun Heo } 3641c6fd2807SJeff Garzik 364245fabbb7SElias Oltmanns do { 364345fabbb7SElias Oltmanns unsigned long now; 364445fabbb7SElias Oltmanns 364545fabbb7SElias Oltmanns /* 364645fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 364745fabbb7SElias Oltmanns * ap->park_req_pending 364845fabbb7SElias Oltmanns */ 364945fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 365045fabbb7SElias Oltmanns 365145fabbb7SElias Oltmanns deadline = jiffies; 36521eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36531eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 365445fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 365545fabbb7SElias Oltmanns unsigned long tmp; 365645fabbb7SElias Oltmanns 365745fabbb7SElias Oltmanns if (dev->class != ATA_DEV_ATA) 365845fabbb7SElias Oltmanns continue; 365945fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 366045fabbb7SElias Oltmanns ATA_EH_PARK)) 366145fabbb7SElias Oltmanns continue; 366245fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 366345fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 366445fabbb7SElias Oltmanns deadline = tmp; 366545fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 366645fabbb7SElias Oltmanns continue; 366745fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 366845fabbb7SElias Oltmanns continue; 366945fabbb7SElias Oltmanns 367045fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 367145fabbb7SElias Oltmanns } 367245fabbb7SElias Oltmanns } 367345fabbb7SElias Oltmanns 367445fabbb7SElias Oltmanns now = jiffies; 367545fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 367645fabbb7SElias Oltmanns break; 367745fabbb7SElias Oltmanns 3678*c0c362b6STejun Heo ata_eh_release(ap); 367945fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 368045fabbb7SElias Oltmanns deadline - now); 3681*c0c362b6STejun Heo ata_eh_acquire(ap); 368245fabbb7SElias Oltmanns } while (deadline); 36831eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36841eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 368545fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 368645fabbb7SElias Oltmanns (1 << dev->devno))) 368745fabbb7SElias Oltmanns continue; 368845fabbb7SElias Oltmanns 368945fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 369045fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 369145fabbb7SElias Oltmanns } 369245fabbb7SElias Oltmanns } 369345fabbb7SElias Oltmanns 36949b1e2658STejun Heo /* the rest */ 36956b7ae954STejun Heo nr_fails = 0; 36966b7ae954STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 36979b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36989b1e2658STejun Heo 36996b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 37006b7ae954STejun Heo goto config_lpm; 37016b7ae954STejun Heo 3702c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 37030260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3704c6fd2807SJeff Garzik if (rc) 37056b7ae954STejun Heo goto rest_fail; 3706c6fd2807SJeff Garzik 3707633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3708633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3709633273a3STejun Heo ehc->i.action = 0; 3710633273a3STejun Heo return 0; 3711633273a3STejun Heo } 3712633273a3STejun Heo 3713baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3714baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 37150260731fSTejun Heo rc = ata_set_mode(link, &dev); 37164ae72a1eSTejun Heo if (rc) 37176b7ae954STejun Heo goto rest_fail; 3718baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3719c6fd2807SJeff Garzik } 3720c6fd2807SJeff Garzik 372111fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 372211fc33daSTejun Heo * disrupting the current users of the device. 372311fc33daSTejun Heo */ 372411fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 37251eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 372611fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 372711fc33daSTejun Heo continue; 372811fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 372911fc33daSTejun Heo if (rc) 37306b7ae954STejun Heo goto rest_fail; 373111fc33daSTejun Heo } 373211fc33daSTejun Heo } 373311fc33daSTejun Heo 37346013efd8STejun Heo /* retry flush if necessary */ 37356013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 37366013efd8STejun Heo if (dev->class != ATA_DEV_ATA) 37376013efd8STejun Heo continue; 37386013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 37396013efd8STejun Heo if (rc) 37406b7ae954STejun Heo goto rest_fail; 37416013efd8STejun Heo } 37426013efd8STejun Heo 37436b7ae954STejun Heo config_lpm: 374411fc33daSTejun Heo /* configure link power saving */ 37456b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 37466b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 37476b7ae954STejun Heo if (rc) 37486b7ae954STejun Heo goto rest_fail; 37496b7ae954STejun Heo } 3750ca77329fSKristen Carlson Accardi 37519b1e2658STejun Heo /* this link is okay now */ 37529b1e2658STejun Heo ehc->i.flags = 0; 37539b1e2658STejun Heo continue; 3754c6fd2807SJeff Garzik 37556b7ae954STejun Heo rest_fail: 37566b7ae954STejun Heo nr_fails++; 37576b7ae954STejun Heo if (dev) 37580a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3759c6fd2807SJeff Garzik 3760b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3761b06ce3e5STejun Heo /* PMP reset requires working host port. 3762b06ce3e5STejun Heo * Can't retry if it's frozen. 3763b06ce3e5STejun Heo */ 3764071f44b1STejun Heo if (sata_pmp_attached(ap)) 3765b06ce3e5STejun Heo goto out; 37669b1e2658STejun Heo break; 37679b1e2658STejun Heo } 3768b06ce3e5STejun Heo } 37699b1e2658STejun Heo 37706b7ae954STejun Heo if (nr_fails) 3771c6fd2807SJeff Garzik goto retry; 3772c6fd2807SJeff Garzik 3773c6fd2807SJeff Garzik out: 37749b1e2658STejun Heo if (rc && r_failed_link) 37759b1e2658STejun Heo *r_failed_link = link; 3776c6fd2807SJeff Garzik 3777c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3778c6fd2807SJeff Garzik return rc; 3779c6fd2807SJeff Garzik } 3780c6fd2807SJeff Garzik 3781c6fd2807SJeff Garzik /** 3782c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3783c6fd2807SJeff Garzik * @ap: host port to finish EH for 3784c6fd2807SJeff Garzik * 3785c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3786c6fd2807SJeff Garzik * failed qcs. 3787c6fd2807SJeff Garzik * 3788c6fd2807SJeff Garzik * LOCKING: 3789c6fd2807SJeff Garzik * None. 3790c6fd2807SJeff Garzik */ 3791fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3792c6fd2807SJeff Garzik { 3793c6fd2807SJeff Garzik int tag; 3794c6fd2807SJeff Garzik 3795c6fd2807SJeff Garzik /* retry or finish qcs */ 3796c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3797c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3798c6fd2807SJeff Garzik 3799c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3800c6fd2807SJeff Garzik continue; 3801c6fd2807SJeff Garzik 3802c6fd2807SJeff Garzik if (qc->err_mask) { 3803c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3804c6fd2807SJeff Garzik * generate sense data in this function, 3805c6fd2807SJeff Garzik * considering both err_mask and tf. 3806c6fd2807SJeff Garzik */ 380703faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3808c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 380903faab78STejun Heo else 381003faab78STejun Heo ata_eh_qc_complete(qc); 3811c6fd2807SJeff Garzik } else { 3812c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3813c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3814c6fd2807SJeff Garzik } else { 3815c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3816c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3817c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3818c6fd2807SJeff Garzik } 3819c6fd2807SJeff Garzik } 3820c6fd2807SJeff Garzik } 3821da917d69STejun Heo 3822da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3823da917d69STejun Heo WARN_ON(ap->nr_active_links); 3824da917d69STejun Heo ap->nr_active_links = 0; 3825c6fd2807SJeff Garzik } 3826c6fd2807SJeff Garzik 3827c6fd2807SJeff Garzik /** 3828c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3829c6fd2807SJeff Garzik * @ap: host port to handle error for 3830a1efdabaSTejun Heo * 3831c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3832c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3833c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3834c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3835c6fd2807SJeff Garzik * 3836c6fd2807SJeff Garzik * Perform standard error handling sequence. 3837c6fd2807SJeff Garzik * 3838c6fd2807SJeff Garzik * LOCKING: 3839c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3840c6fd2807SJeff Garzik */ 3841c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3842c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3843c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3844c6fd2807SJeff Garzik { 38459b1e2658STejun Heo struct ata_device *dev; 38469b1e2658STejun Heo int rc; 38479b1e2658STejun Heo 38489b1e2658STejun Heo ata_eh_autopsy(ap); 38499b1e2658STejun Heo ata_eh_report(ap); 38509b1e2658STejun Heo 38519b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 38529b1e2658STejun Heo NULL); 38539b1e2658STejun Heo if (rc) { 38541eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 38559b1e2658STejun Heo ata_dev_disable(dev); 38569b1e2658STejun Heo } 38579b1e2658STejun Heo 3858c6fd2807SJeff Garzik ata_eh_finish(ap); 3859c6fd2807SJeff Garzik } 3860c6fd2807SJeff Garzik 3861a1efdabaSTejun Heo /** 3862a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3863a1efdabaSTejun Heo * @ap: host port to handle error for 3864a1efdabaSTejun Heo * 3865a1efdabaSTejun Heo * Standard error handler 3866a1efdabaSTejun Heo * 3867a1efdabaSTejun Heo * LOCKING: 3868a1efdabaSTejun Heo * Kernel thread context (may sleep). 3869a1efdabaSTejun Heo */ 3870a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3871a1efdabaSTejun Heo { 3872a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3873a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3874a1efdabaSTejun Heo 387557c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 3876fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3877a1efdabaSTejun Heo hardreset = NULL; 3878a1efdabaSTejun Heo 3879a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3880a1efdabaSTejun Heo } 3881a1efdabaSTejun Heo 38826ffa01d8STejun Heo #ifdef CONFIG_PM 3883c6fd2807SJeff Garzik /** 3884c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 3885c6fd2807SJeff Garzik * @ap: port to suspend 3886c6fd2807SJeff Garzik * 3887c6fd2807SJeff Garzik * Suspend @ap. 3888c6fd2807SJeff Garzik * 3889c6fd2807SJeff Garzik * LOCKING: 3890c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3891c6fd2807SJeff Garzik */ 3892c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 3893c6fd2807SJeff Garzik { 3894c6fd2807SJeff Garzik unsigned long flags; 3895c6fd2807SJeff Garzik int rc = 0; 3896c6fd2807SJeff Garzik 3897c6fd2807SJeff Garzik /* are we suspending? */ 3898c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3899c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3900c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) { 3901c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3902c6fd2807SJeff Garzik return; 3903c6fd2807SJeff Garzik } 3904c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3905c6fd2807SJeff Garzik 3906c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3907c6fd2807SJeff Garzik 390864578a3dSTejun Heo /* tell ACPI we're suspending */ 390964578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 391064578a3dSTejun Heo if (rc) 391164578a3dSTejun Heo goto out; 391264578a3dSTejun Heo 3913c6fd2807SJeff Garzik /* suspend */ 3914c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 3915c6fd2807SJeff Garzik 3916c6fd2807SJeff Garzik if (ap->ops->port_suspend) 3917c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 3918c6fd2807SJeff Garzik 3919bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_SUSPEND); 392064578a3dSTejun Heo out: 3921c6fd2807SJeff Garzik /* report result */ 3922c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3923c6fd2807SJeff Garzik 3924c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 3925c6fd2807SJeff Garzik if (rc == 0) 3926c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 392764578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 3928c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 3929c6fd2807SJeff Garzik 3930c6fd2807SJeff Garzik if (ap->pm_result) { 3931c6fd2807SJeff Garzik *ap->pm_result = rc; 3932c6fd2807SJeff Garzik ap->pm_result = NULL; 3933c6fd2807SJeff Garzik } 3934c6fd2807SJeff Garzik 3935c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3936c6fd2807SJeff Garzik 3937c6fd2807SJeff Garzik return; 3938c6fd2807SJeff Garzik } 3939c6fd2807SJeff Garzik 3940c6fd2807SJeff Garzik /** 3941c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 3942c6fd2807SJeff Garzik * @ap: port to resume 3943c6fd2807SJeff Garzik * 3944c6fd2807SJeff Garzik * Resume @ap. 3945c6fd2807SJeff Garzik * 3946c6fd2807SJeff Garzik * LOCKING: 3947c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3948c6fd2807SJeff Garzik */ 3949c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 3950c6fd2807SJeff Garzik { 39516f9c1ea2STejun Heo struct ata_link *link; 39526f9c1ea2STejun Heo struct ata_device *dev; 3953c6fd2807SJeff Garzik unsigned long flags; 39549666f400STejun Heo int rc = 0; 3955c6fd2807SJeff Garzik 3956c6fd2807SJeff Garzik /* are we resuming? */ 3957c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3958c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3959c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) { 3960c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3961c6fd2807SJeff Garzik return; 3962c6fd2807SJeff Garzik } 3963c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3964c6fd2807SJeff Garzik 39659666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 3966c6fd2807SJeff Garzik 39676f9c1ea2STejun Heo /* 39686f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while 39696f9c1ea2STejun Heo * suspended and PHY events during resume isn't too uncommon. 39706f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed 39716f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly. 39726f9c1ea2STejun Heo * Clear error history. 39736f9c1ea2STejun Heo */ 39746f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 39756f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 39766f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 39776f9c1ea2STejun Heo 3978bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_ON); 3979bd3adca5SShaohua Li 3980c6fd2807SJeff Garzik if (ap->ops->port_resume) 3981c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 3982c6fd2807SJeff Garzik 39836746544cSTejun Heo /* tell ACPI that we're resuming */ 39846746544cSTejun Heo ata_acpi_on_resume(ap); 39856746544cSTejun Heo 39869666f400STejun Heo /* report result */ 3987c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3988c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3989c6fd2807SJeff Garzik if (ap->pm_result) { 3990c6fd2807SJeff Garzik *ap->pm_result = rc; 3991c6fd2807SJeff Garzik ap->pm_result = NULL; 3992c6fd2807SJeff Garzik } 3993c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3994c6fd2807SJeff Garzik } 39956ffa01d8STejun Heo #endif /* CONFIG_PM */ 3996