/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we raced normal completion and there is nothing to recover
	   (nr_timedout == 0), why exactly are we doing error recovery? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
1234c6fd2807SJeff Garzik  */
1235fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1236c6fd2807SJeff Garzik {
1237f58229f8STejun Heo 	struct ata_link *link = dev->link;
1238f58229f8STejun Heo 	struct ata_port *ap = link->ap;
123990484ebfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1240c6fd2807SJeff Garzik 	unsigned long flags;
1241c6fd2807SJeff Garzik 
1242c6fd2807SJeff Garzik 	ata_dev_disable(dev);
1243c6fd2807SJeff Garzik 
1244c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1245c6fd2807SJeff Garzik 
1246c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_DETACH;
1247c6fd2807SJeff Garzik 
1248c6fd2807SJeff Garzik 	if (ata_scsi_offline_dev(dev)) {
1249c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_DETACHED;
1250c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1251c6fd2807SJeff Garzik 	}
1252c6fd2807SJeff Garzik 
125390484ebfSTejun Heo 	/* clear per-dev EH info */
1254f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1255f58229f8STejun Heo 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
125690484ebfSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
125790484ebfSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1258c6fd2807SJeff Garzik 
1259c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1260c6fd2807SJeff Garzik }
1261c6fd2807SJeff Garzik 
1262c6fd2807SJeff Garzik /**
1263c6fd2807SJeff Garzik  *	ata_eh_about_to_do - about to perform eh_action
1264955e57dfSTejun Heo  *	@link: target ATA link
1265c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1266c6fd2807SJeff Garzik  *	@action: action about to be performed
1267c6fd2807SJeff Garzik  *
1268c6fd2807SJeff Garzik  *	Called just before performing EH actions to clear related bits
1269955e57dfSTejun Heo  *	in @link->eh_info such that EH actions are not unnecessarily
1270955e57dfSTejun Heo  *	repeated.
1271c6fd2807SJeff Garzik  *
1272c6fd2807SJeff Garzik  *	LOCKING:
1273c6fd2807SJeff Garzik  *	None.
1274c6fd2807SJeff Garzik  */
1275fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1276c6fd2807SJeff Garzik 			unsigned int action)
1277c6fd2807SJeff Garzik {
1278955e57dfSTejun Heo 	struct ata_port *ap = link->ap;
1279955e57dfSTejun Heo 	struct ata_eh_info *ehi = &link->eh_info;
1280955e57dfSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
1281c6fd2807SJeff Garzik 	unsigned long flags;
1282c6fd2807SJeff Garzik 
1283c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1284c6fd2807SJeff Garzik 
1285955e57dfSTejun Heo 	ata_eh_clear_action(link, dev, ehi, action);
1286c6fd2807SJeff Garzik 
1287a568d1d2STejun Heo 	/* About to take EH action, set RECOVERED. Ignore actions on
1288a568d1d2STejun Heo 	 * slave links as master will do them again.
1289a568d1d2STejun Heo 	 */
1290a568d1d2STejun Heo 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1291c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_RECOVERED;
1292c6fd2807SJeff Garzik 
1293c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1294c6fd2807SJeff Garzik }
1295c6fd2807SJeff Garzik 
1296c6fd2807SJeff Garzik /**
1297c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
1298c6fd2807SJeff Garzik  *	@link: target ATA link
1299c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
1300c6fd2807SJeff Garzik  *	@action: action just completed
1301c6fd2807SJeff Garzik  *
1302c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
1303955e57dfSTejun Heo  *	in @link->eh_context.
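 *
 *	ata_eh_about_to_do() and ata_eh_done() bracket an EH action; a
 *	hedged sketch of the pattern (do_reset() below is a hypothetical
 *	stand-in for the real reset sequence in this file):
 *
 *		ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 *		rc = do_reset(link);
 *		if (rc == 0)
 *			ata_eh_done(link, NULL, ATA_EH_RESET);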
1304c6fd2807SJeff Garzik * 1305c6fd2807SJeff Garzik * LOCKING: 1306c6fd2807SJeff Garzik * None. 1307c6fd2807SJeff Garzik */ 1308fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1309c6fd2807SJeff Garzik unsigned int action) 1310c6fd2807SJeff Garzik { 1311955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13129af5c9c9STejun Heo 1313955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1314c6fd2807SJeff Garzik } 1315c6fd2807SJeff Garzik 1316c6fd2807SJeff Garzik /** 1317c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1318c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1319c6fd2807SJeff Garzik * 1320c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1321c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1322c6fd2807SJeff Garzik * error is reported. 1323c6fd2807SJeff Garzik * 1324c6fd2807SJeff Garzik * LOCKING: 1325c6fd2807SJeff Garzik * None. 1326c6fd2807SJeff Garzik * 1327c6fd2807SJeff Garzik * RETURNS: 1328c6fd2807SJeff Garzik * Descriptive string for @err_mask 1329c6fd2807SJeff Garzik */ 1330c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1331c6fd2807SJeff Garzik { 1332c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1333c6fd2807SJeff Garzik return "host bus error"; 1334c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1335c6fd2807SJeff Garzik return "ATA bus error"; 1336c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1337c6fd2807SJeff Garzik return "timeout"; 1338c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1339c6fd2807SJeff Garzik return "HSM violation"; 1340c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1341c6fd2807SJeff Garzik return "internal error"; 1342c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1343c6fd2807SJeff Garzik return "media error"; 1344c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1345c6fd2807SJeff Garzik return "invalid argument"; 1346c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1347c6fd2807SJeff Garzik return "device error"; 1348c6fd2807SJeff Garzik return "unknown error"; 1349c6fd2807SJeff Garzik } 1350c6fd2807SJeff Garzik 1351c6fd2807SJeff Garzik /** 1352c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1353c6fd2807SJeff Garzik * @dev: target device 1354c6fd2807SJeff Garzik * @page: page to read 1355c6fd2807SJeff Garzik * @buf: buffer to store read page 1356c6fd2807SJeff Garzik * @sectors: number of sectors to read 1357c6fd2807SJeff Garzik * 1358c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1359c6fd2807SJeff Garzik * 1360c6fd2807SJeff Garzik * LOCKING: 1361c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1362c6fd2807SJeff Garzik * 1363c6fd2807SJeff Garzik * RETURNS: 1364c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
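 *
 *	Example, mirroring the NCQ error path below (@buf must be at
 *	least @sectors * ATA_SECT_SIZE bytes):
 *
 *		err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
 *		if (err_mask)
 *			return -EIO;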
1365c6fd2807SJeff Garzik */ 1366c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1367c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1368c6fd2807SJeff Garzik { 1369c6fd2807SJeff Garzik struct ata_taskfile tf; 1370c6fd2807SJeff Garzik unsigned int err_mask; 1371c6fd2807SJeff Garzik 1372c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1373c6fd2807SJeff Garzik 1374c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1375c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1376c6fd2807SJeff Garzik tf.lbal = page; 1377c6fd2807SJeff Garzik tf.nsect = sectors; 1378c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1379c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1380c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1381c6fd2807SJeff Garzik 1382c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 13832b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1384c6fd2807SJeff Garzik 1385c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1386c6fd2807SJeff Garzik return err_mask; 1387c6fd2807SJeff Garzik } 1388c6fd2807SJeff Garzik 1389c6fd2807SJeff Garzik /** 1390c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1391c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1392c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1393c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1394c6fd2807SJeff Garzik * 1395c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1396c6fd2807SJeff Garzik * condition. 1397c6fd2807SJeff Garzik * 1398c6fd2807SJeff Garzik * LOCKING: 1399c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1400c6fd2807SJeff Garzik * 1401c6fd2807SJeff Garzik * RETURNS: 1402c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
1403c6fd2807SJeff Garzik */ 1404c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1405c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1406c6fd2807SJeff Garzik { 14079af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1408c6fd2807SJeff Garzik unsigned int err_mask; 1409c6fd2807SJeff Garzik u8 csum; 1410c6fd2807SJeff Garzik int i; 1411c6fd2807SJeff Garzik 1412c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1413c6fd2807SJeff Garzik if (err_mask) 1414c6fd2807SJeff Garzik return -EIO; 1415c6fd2807SJeff Garzik 1416c6fd2807SJeff Garzik csum = 0; 1417c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1418c6fd2807SJeff Garzik csum += buf[i]; 1419c6fd2807SJeff Garzik if (csum) 1420c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1421c6fd2807SJeff Garzik "invalid checksum 0x%x on log page 10h\n", csum); 1422c6fd2807SJeff Garzik 1423c6fd2807SJeff Garzik if (buf[0] & 0x80) 1424c6fd2807SJeff Garzik return -ENOENT; 1425c6fd2807SJeff Garzik 1426c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1427c6fd2807SJeff Garzik 1428c6fd2807SJeff Garzik tf->command = buf[2]; 1429c6fd2807SJeff Garzik tf->feature = buf[3]; 1430c6fd2807SJeff Garzik tf->lbal = buf[4]; 1431c6fd2807SJeff Garzik tf->lbam = buf[5]; 1432c6fd2807SJeff Garzik tf->lbah = buf[6]; 1433c6fd2807SJeff Garzik tf->device = buf[7]; 1434c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1435c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1436c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1437c6fd2807SJeff Garzik tf->nsect = buf[12]; 1438c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1439c6fd2807SJeff Garzik 1440c6fd2807SJeff Garzik return 0; 1441c6fd2807SJeff Garzik } 1442c6fd2807SJeff Garzik 1443c6fd2807SJeff Garzik /** 144411fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 144511fc33daSTejun Heo * @dev: target ATAPI device 144611fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 144711fc33daSTejun Heo * 144811fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 144911fc33daSTejun Heo * 145011fc33daSTejun Heo * LOCKING: 145111fc33daSTejun Heo * EH context (may sleep). 145211fc33daSTejun Heo * 145311fc33daSTejun Heo * RETURNS: 145411fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
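 *
 *	Hedged usage sketch (illustrative only; the real clear-UA loop in
 *	EH is more careful and also re-requests sense data between
 *	attempts):
 *
 *		u8 sense_key = 0;
 *		int tries = ATA_EH_UA_TRIES;
 *
 *		while (tries--) {
 *			unsigned int err = atapi_eh_tur(dev, &sense_key);
 *
 *			if (err != AC_ERR_DEV || sense_key != UNIT_ATTENTION)
 *				break;	/* ready, hard failure or other sense */
 *		}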
145511fc33daSTejun Heo */ 145611fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 145711fc33daSTejun Heo { 145811fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 145911fc33daSTejun Heo struct ata_taskfile tf; 146011fc33daSTejun Heo unsigned int err_mask; 146111fc33daSTejun Heo 146211fc33daSTejun Heo ata_tf_init(dev, &tf); 146311fc33daSTejun Heo 146411fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 146511fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 146611fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 146711fc33daSTejun Heo 146811fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 146911fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 147011fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 147111fc33daSTejun Heo return err_mask; 147211fc33daSTejun Heo } 147311fc33daSTejun Heo 147411fc33daSTejun Heo /** 1475c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1476c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1477c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14783eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1479c6fd2807SJeff Garzik * 1480c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1481c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1482c6fd2807SJeff Garzik * 1483c6fd2807SJeff Garzik * LOCKING: 1484c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1485c6fd2807SJeff Garzik * 1486c6fd2807SJeff Garzik * RETURNS: 1487c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1488c6fd2807SJeff Garzik */ 14893eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 14903eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1491c6fd2807SJeff Garzik { 14923eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 14933eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 14949af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1495c6fd2807SJeff Garzik struct ata_taskfile tf; 1496c6fd2807SJeff Garzik 1497c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1498c6fd2807SJeff Garzik 1499c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1500c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1501c6fd2807SJeff Garzik 150256287768SAlbert Lee /* initialize sense_buf with the error register, 150356287768SAlbert Lee * for the case where they are -not- overwritten 150456287768SAlbert Lee */ 1505c6fd2807SJeff Garzik sense_buf[0] = 0x70; 15063eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 150756287768SAlbert Lee 150856287768SAlbert Lee /* some devices time out if garbage left in tf */ 150956287768SAlbert Lee ata_tf_init(dev, &tf); 1510c6fd2807SJeff Garzik 1511c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1512c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1513c6fd2807SJeff Garzik 1514c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1515c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 15160dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1517c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1518c6fd2807SJeff Garzik } else { 15190dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1520f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1521f2dfc1a1STejun Heo tf.lbah = 0; 1522c6fd2807SJeff Garzik } 1523c6fd2807SJeff Garzik 1524c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 15252b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1526c6fd2807SJeff Garzik } 1527c6fd2807SJeff Garzik 1528c6fd2807SJeff Garzik /** 1529c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 15300260731fSTejun Heo * @link: ATA link to analyze SError for 1531c6fd2807SJeff Garzik * 1532c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1533c6fd2807SJeff Garzik * failure. 1534c6fd2807SJeff Garzik * 1535c6fd2807SJeff Garzik * LOCKING: 1536c6fd2807SJeff Garzik * None. 1537c6fd2807SJeff Garzik */ 15380260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1539c6fd2807SJeff Garzik { 15400260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1541c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1542c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1543f9df58cbSTejun Heo u32 hotplug_mask; 1544c6fd2807SJeff Garzik 1545e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1546c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1547cf480626STejun Heo action |= ATA_EH_RESET; 1548c6fd2807SJeff Garzik } 1549c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1550c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1551cf480626STejun Heo action |= ATA_EH_RESET; 1552c6fd2807SJeff Garzik } 1553c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1554c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1555cf480626STejun Heo action |= ATA_EH_RESET; 1556c6fd2807SJeff Garzik } 1557f9df58cbSTejun Heo 1558f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1559f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1560f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1561f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1562f9df58cbSTejun Heo */ 1563f9df58cbSTejun Heo hotplug_mask = 0; 1564f9df58cbSTejun Heo 1565f9df58cbSTejun Heo if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1566f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1567f9df58cbSTejun Heo else 1568f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1569f9df58cbSTejun Heo 1570f9df58cbSTejun Heo if (serror & hotplug_mask) 1571c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1572c6fd2807SJeff Garzik 1573c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1574c6fd2807SJeff Garzik ehc->i.action |= action; 1575c6fd2807SJeff Garzik } 1576c6fd2807SJeff Garzik 1577c6fd2807SJeff Garzik /** 1578c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 15790260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1580c6fd2807SJeff Garzik * 1581c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1582c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1583c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1584c6fd2807SJeff Garzik * care of the rest. 
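 *
 *	From the LLDD side the contract is small; a hedged sketch of the
 *	interrupt-handler half (whether to abort or freeze on top of this
 *	is driver specific):
 *
 *		struct ata_eh_info *ehi = &link->eh_info;
 *
 *		ehi->err_mask |= AC_ERR_DEV;	/* NCQ device error seen */
 *		ata_link_abort(link);		/* EH will read log page 10h */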
1585c6fd2807SJeff Garzik  *
1586c6fd2807SJeff Garzik  *	LOCKING:
1587c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1588c6fd2807SJeff Garzik  */
158910acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link)
1590c6fd2807SJeff Garzik {
15910260731fSTejun Heo 	struct ata_port *ap = link->ap;
15920260731fSTejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
15930260731fSTejun Heo 	struct ata_device *dev = link->device;
1594c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1595c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1596c6fd2807SJeff Garzik 	int tag, rc;
1597c6fd2807SJeff Garzik 
1598c6fd2807SJeff Garzik 	/* if frozen, we can't do much */
1599c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1600c6fd2807SJeff Garzik 		return;
1601c6fd2807SJeff Garzik 
1602c6fd2807SJeff Garzik 	/* is it NCQ device error? */
16030260731fSTejun Heo 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1604c6fd2807SJeff Garzik 		return;
1605c6fd2807SJeff Garzik 
1606c6fd2807SJeff Garzik 	/* has LLDD analyzed already? */
1607c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1608c6fd2807SJeff Garzik 		qc = __ata_qc_from_tag(ap, tag);
1609c6fd2807SJeff Garzik 
1610c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1611c6fd2807SJeff Garzik 			continue;
1612c6fd2807SJeff Garzik 
1613c6fd2807SJeff Garzik 		if (qc->err_mask)
1614c6fd2807SJeff Garzik 			return;
1615c6fd2807SJeff Garzik 	}
1616c6fd2807SJeff Garzik 
1617c6fd2807SJeff Garzik 	/* okay, this error is ours */
1618c6fd2807SJeff Garzik 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1619c6fd2807SJeff Garzik 	if (rc) {
16200260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1621c6fd2807SJeff Garzik 				"(errno=%d)\n", rc);
1622c6fd2807SJeff Garzik 		return;
1623c6fd2807SJeff Garzik 	}
1624c6fd2807SJeff Garzik 
16250260731fSTejun Heo 	if (!(link->sactive & (1 << tag))) {
16260260731fSTejun Heo 		ata_link_printk(link, KERN_ERR, "log page 10h reported "
1627c6fd2807SJeff Garzik 				"inactive tag %d\n", tag);
1628c6fd2807SJeff Garzik 		return;
1629c6fd2807SJeff Garzik 	}
1630c6fd2807SJeff Garzik 
1631c6fd2807SJeff Garzik 	/* we've got the perpetrator, condemn it */
1632c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1633c6fd2807SJeff Garzik 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1634a6116c9eSMark Lord 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
16355335b729STejun Heo 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1636c6fd2807SJeff Garzik 	ehc->i.err_mask &= ~AC_ERR_DEV;
1637c6fd2807SJeff Garzik }
1638c6fd2807SJeff Garzik 
1639c6fd2807SJeff Garzik /**
1640c6fd2807SJeff Garzik  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1641c6fd2807SJeff Garzik  *	@qc: qc to analyze
1642c6fd2807SJeff Garzik  *	@tf: Taskfile registers to analyze
1643c6fd2807SJeff Garzik  *
1644c6fd2807SJeff Garzik  *	Analyze taskfile of @qc and further determine cause of
1645c6fd2807SJeff Garzik  *	failure. This function also requests ATAPI sense data if
1646c6fd2807SJeff Garzik  *	available.
1647c6fd2807SJeff Garzik  *
1648c6fd2807SJeff Garzik  *	LOCKING:
1649c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1650c6fd2807SJeff Garzik * 1651c6fd2807SJeff Garzik * RETURNS: 1652c6fd2807SJeff Garzik * Determined recovery action 1653c6fd2807SJeff Garzik */ 1654c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1655c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1656c6fd2807SJeff Garzik { 1657c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1658c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1659c6fd2807SJeff Garzik 1660c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1661c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1662cf480626STejun Heo return ATA_EH_RESET; 1663c6fd2807SJeff Garzik } 1664c6fd2807SJeff Garzik 1665a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1666a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1667a51d644aSTejun Heo else 1668c6fd2807SJeff Garzik return 0; 1669c6fd2807SJeff Garzik 1670c6fd2807SJeff Garzik switch (qc->dev->class) { 1671c6fd2807SJeff Garzik case ATA_DEV_ATA: 1672c6fd2807SJeff Garzik if (err & ATA_ICRC) 1673c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1674c6fd2807SJeff Garzik if (err & ATA_UNC) 1675c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1676c6fd2807SJeff Garzik if (err & ATA_IDNF) 1677c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1678c6fd2807SJeff Garzik break; 1679c6fd2807SJeff Garzik 1680c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1681a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 16823eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 16833eabddb8STejun Heo qc->scsicmd->sense_buffer, 16843eabddb8STejun Heo qc->result_tf.feature >> 4); 1685c6fd2807SJeff Garzik if (!tmp) { 1686a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1687a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1688a569a30dSTejun Heo * data is already valid. 1689c6fd2807SJeff Garzik * 1690c6fd2807SJeff Garzik * TODO: interpret sense data and set 1691c6fd2807SJeff Garzik * appropriate err_mask. 
1692c6fd2807SJeff Garzik */ 1693c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1694c6fd2807SJeff Garzik } else 1695c6fd2807SJeff Garzik qc->err_mask |= tmp; 1696c6fd2807SJeff Garzik } 1697a569a30dSTejun Heo } 1698c6fd2807SJeff Garzik 1699c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1700cf480626STejun Heo action |= ATA_EH_RESET; 1701c6fd2807SJeff Garzik 1702c6fd2807SJeff Garzik return action; 1703c6fd2807SJeff Garzik } 1704c6fd2807SJeff Garzik 170576326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 170676326ac1STejun Heo int *xfer_ok) 1707c6fd2807SJeff Garzik { 170876326ac1STejun Heo int base = 0; 170976326ac1STejun Heo 171076326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 171176326ac1STejun Heo *xfer_ok = 1; 171276326ac1STejun Heo 171376326ac1STejun Heo if (!*xfer_ok) 171475f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 171576326ac1STejun Heo 17167d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 171776326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1718c6fd2807SJeff Garzik 17197d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 172076326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17217d47e8d4STejun Heo 17223884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 17237d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 172476326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17257d47e8d4STejun Heo if ((err_mask & 17267d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 172776326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1728c6fd2807SJeff Garzik } 1729c6fd2807SJeff Garzik 1730c6fd2807SJeff Garzik return 0; 1731c6fd2807SJeff Garzik } 1732c6fd2807SJeff Garzik 17337d47e8d4STejun Heo struct speed_down_verdict_arg { 1734c6fd2807SJeff Garzik u64 since; 173576326ac1STejun Heo int xfer_ok; 17363884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1737c6fd2807SJeff Garzik }; 1738c6fd2807SJeff Garzik 17397d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1740c6fd2807SJeff Garzik { 17417d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 174276326ac1STejun Heo int cat; 1743c6fd2807SJeff Garzik 1744c6fd2807SJeff Garzik if (ent->timestamp < arg->since) 1745c6fd2807SJeff Garzik return -1; 1746c6fd2807SJeff Garzik 174776326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 174876326ac1STejun Heo &arg->xfer_ok); 17497d47e8d4STejun Heo arg->nr_errors[cat]++; 175076326ac1STejun Heo 1751c6fd2807SJeff Garzik return 0; 1752c6fd2807SJeff Garzik } 1753c6fd2807SJeff Garzik 1754c6fd2807SJeff Garzik /** 17557d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1756c6fd2807SJeff Garzik * @dev: Device of interest 1757c6fd2807SJeff Garzik * 1758c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 17597d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 17607d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 1761c6fd2807SJeff Garzik * 17623884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1763c6fd2807SJeff Garzik * 17643884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 17653884f7b0STejun Heo * IO commands 17667d47e8d4STejun Heo * 17673884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1768c6fd2807SJeff Garzik * 176976326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 177076326ac1STejun Heo * data transfer hasn't been verified. 
177176326ac1STejun Heo  *
17723884f7b0STejun Heo  *	Verdicts are
17737d47e8d4STejun Heo  *
17743884f7b0STejun Heo  *	NCQ_OFF : Turn off NCQ.
17757d47e8d4STejun Heo  *
17763884f7b0STejun Heo  *	SPEED_DOWN : Speed down transfer speed but don't fall back
17773884f7b0STejun Heo  *	to PIO.
17783884f7b0STejun Heo  *
17793884f7b0STejun Heo  *	FALLBACK_TO_PIO : Fall back to PIO.
17803884f7b0STejun Heo  *
17813884f7b0STejun Heo  *	Even if multiple verdicts are returned, only one action is
178276326ac1STejun Heo  *	taken per error. An action triggered by non-DUBIOUS errors
178376326ac1STejun Heo  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
178476326ac1STejun Heo  *	This is to expedite speed down decisions right after the device
178576326ac1STejun Heo  *	is initially configured.
17863884f7b0STejun Heo  *
178776326ac1STejun Heo  *	The following are the speed down rules. #1 and #2 deal with
178876326ac1STejun Heo  *	DUBIOUS errors.
178976326ac1STejun Heo  *
179076326ac1STejun Heo  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
179176326ac1STejun Heo  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
179276326ac1STejun Heo  *
179376326ac1STejun Heo  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
179476326ac1STejun Heo  *	   occurred during last 5 mins, NCQ_OFF.
179576326ac1STejun Heo  *
179676326ac1STejun Heo  *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
17973884f7b0STejun Heo  *	   occurred during last 5 mins, FALLBACK_TO_PIO.
17983884f7b0STejun Heo  *
179976326ac1STejun Heo  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
18003884f7b0STejun Heo  *	   during last 10 mins, NCQ_OFF.
18013884f7b0STejun Heo  *
180276326ac1STejun Heo  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
18033884f7b0STejun Heo  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
18047d47e8d4STejun Heo  *
1805c6fd2807SJeff Garzik  *	LOCKING:
1806c6fd2807SJeff Garzik  *	Inherited from caller.
1807c6fd2807SJeff Garzik  *
1808c6fd2807SJeff Garzik  *	RETURNS:
18097d47e8d4STejun Heo  *	OR of ATA_EH_SPDN_* flags.
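 *
 *	Worked example (illustrative): two DUBIOUS_TOUT_HSM errors within
 *	the last 5 mins match rules #1 and #2 above, so the verdict is
 *	SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF | KEEP_ERRORS; KEEP_ERRORS
 *	means the error ring is preserved, and the caller still applies
 *	only one of those actions for this error.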
1810c6fd2807SJeff Garzik */ 18117d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1812c6fd2807SJeff Garzik { 18137d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 18147d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 18157d47e8d4STejun Heo struct speed_down_verdict_arg arg; 18167d47e8d4STejun Heo unsigned int verdict = 0; 1817c6fd2807SJeff Garzik 18183884f7b0STejun Heo /* scan past 5 mins of error history */ 18193884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 18203884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 18213884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 18223884f7b0STejun Heo 182376326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 182476326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 182576326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 182676326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 182776326ac1STejun Heo 182876326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 182976326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 183076326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 183176326ac1STejun Heo 18323884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18333884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1834663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18353884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 18363884f7b0STejun Heo 18377d47e8d4STejun Heo /* scan past 10 mins of error history */ 1838c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 18397d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 18407d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1841c6fd2807SJeff Garzik 18423884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 18433884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 18447d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 18453884f7b0STejun Heo 18463884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18473884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1848663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18497d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1850c6fd2807SJeff Garzik 18517d47e8d4STejun Heo return verdict; 1852c6fd2807SJeff Garzik } 1853c6fd2807SJeff Garzik 1854c6fd2807SJeff Garzik /** 1855c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1856c6fd2807SJeff Garzik * @dev: Failed device 18573884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1858c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1859c6fd2807SJeff Garzik * 1860c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1861c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1862c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1863c6fd2807SJeff Garzik * necessary. 1864c6fd2807SJeff Garzik * 1865c6fd2807SJeff Garzik * LOCKING: 1866c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1867c6fd2807SJeff Garzik * 1868c6fd2807SJeff Garzik * RETURNS: 18697d47e8d4STejun Heo * Determined recovery action. 
1870c6fd2807SJeff Garzik */ 18713884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 18723884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1873c6fd2807SJeff Garzik { 1874b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 187576326ac1STejun Heo int xfer_ok = 0; 18767d47e8d4STejun Heo unsigned int verdict; 18777d47e8d4STejun Heo unsigned int action = 0; 18787d47e8d4STejun Heo 18797d47e8d4STejun Heo /* don't bother if Cat-0 error */ 188076326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1881c6fd2807SJeff Garzik return 0; 1882c6fd2807SJeff Garzik 1883c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 18843884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 18857d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1886c6fd2807SJeff Garzik 18877d47e8d4STejun Heo /* turn off NCQ? */ 18887d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 18897d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 18907d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 18917d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 18927d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 18937d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 18947d47e8d4STejun Heo goto done; 18957d47e8d4STejun Heo } 1896c6fd2807SJeff Garzik 18977d47e8d4STejun Heo /* speed down? */ 18987d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1899c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1900a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1901cf480626STejun Heo action |= ATA_EH_RESET; 19027d47e8d4STejun Heo goto done; 19037d47e8d4STejun Heo } 1904c6fd2807SJeff Garzik 1905c6fd2807SJeff Garzik /* lower transfer mode */ 19067d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 19077d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 19087d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 19097d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 19107d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 19117d47e8d4STejun Heo int sel; 1912c6fd2807SJeff Garzik 19137d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 19147d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 19157d47e8d4STejun Heo else 19167d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 19177d47e8d4STejun Heo 19187d47e8d4STejun Heo dev->spdn_cnt++; 19197d47e8d4STejun Heo 19207d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1921cf480626STejun Heo action |= ATA_EH_RESET; 19227d47e8d4STejun Heo goto done; 19237d47e8d4STejun Heo } 19247d47e8d4STejun Heo } 19257d47e8d4STejun Heo } 19267d47e8d4STejun Heo 19277d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1928663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
19297d47e8d4STejun Heo */ 19307d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1931663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 19327d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 19337d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 19347d47e8d4STejun Heo dev->spdn_cnt = 0; 1935cf480626STejun Heo action |= ATA_EH_RESET; 19367d47e8d4STejun Heo goto done; 19377d47e8d4STejun Heo } 19387d47e8d4STejun Heo } 19397d47e8d4STejun Heo 1940c6fd2807SJeff Garzik return 0; 19417d47e8d4STejun Heo done: 19427d47e8d4STejun Heo /* device has been slowed down, blow error history */ 194376326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 19447d47e8d4STejun Heo ata_ering_clear(&dev->ering); 19457d47e8d4STejun Heo return action; 1946c6fd2807SJeff Garzik } 1947c6fd2807SJeff Garzik 1948c6fd2807SJeff Garzik /** 19499b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19509b1e2658STejun Heo * @link: host link to perform autopsy on 1951c6fd2807SJeff Garzik * 19520260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19530260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19540260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1955c6fd2807SJeff Garzik * 1956c6fd2807SJeff Garzik * LOCKING: 1957c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1958c6fd2807SJeff Garzik */ 19599b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1960c6fd2807SJeff Garzik { 19610260731fSTejun Heo struct ata_port *ap = link->ap; 1962936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1963dfcc173dSTejun Heo struct ata_device *dev; 19643884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 19653884f7b0STejun Heo int tag; 1966c6fd2807SJeff Garzik u32 serror; 1967c6fd2807SJeff Garzik int rc; 1968c6fd2807SJeff Garzik 1969c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1970c6fd2807SJeff Garzik 1971c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1972c6fd2807SJeff Garzik return; 1973c6fd2807SJeff Garzik 1974c6fd2807SJeff Garzik /* obtain and analyze SError */ 1975936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1976c6fd2807SJeff Garzik if (rc == 0) { 1977c6fd2807SJeff Garzik ehc->i.serror |= serror; 19780260731fSTejun Heo ata_eh_analyze_serror(link); 19794e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 1980cf480626STejun Heo /* SError read failed, force reset and probing */ 1981b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 1982cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 19834e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 19844e57c517STejun Heo } 1985c6fd2807SJeff Garzik 1986c6fd2807SJeff Garzik /* analyze NCQ failure */ 19870260731fSTejun Heo ata_eh_analyze_ncq_error(link); 1988c6fd2807SJeff Garzik 1989c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1990c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1991c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1992c6fd2807SJeff Garzik 1993c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1994c6fd2807SJeff Garzik 1995c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1996c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1997c6fd2807SJeff Garzik 1998b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 1999b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2000c6fd2807SJeff Garzik continue; 
2001c6fd2807SJeff Garzik 2002c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2003c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2004c6fd2807SJeff Garzik 2005c6fd2807SJeff Garzik /* analyze TF */ 2006c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2007c6fd2807SJeff Garzik 2008c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2009c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2010c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2011c6fd2807SJeff Garzik AC_ERR_INVALID); 2012c6fd2807SJeff Garzik 2013c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2014c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2015c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2016c6fd2807SJeff Garzik 2017c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2018f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2019c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2020c6fd2807SJeff Garzik 202103faab78STejun Heo /* determine whether the command is worth retrying */ 202203faab78STejun Heo if (!(qc->err_mask & AC_ERR_INVALID) && 202303faab78STejun Heo ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) 202403faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 202503faab78STejun Heo 2026c6fd2807SJeff Garzik /* accumulate error info */ 2027c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2028c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2029c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 20303884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2031c6fd2807SJeff Garzik } 2032c6fd2807SJeff Garzik 2033c6fd2807SJeff Garzik /* enforce default EH actions */ 2034c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2035c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2036cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20373884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20383884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2039c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2040c6fd2807SJeff Garzik 2041dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2042dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
2043dfcc173dSTejun Heo */ 2044c6fd2807SJeff Garzik if (ehc->i.dev) { 2045c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2046c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2047c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2048c6fd2807SJeff Garzik } 2049c6fd2807SJeff Garzik 20502695e366STejun Heo /* propagate timeout to host link */ 20512695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 20522695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 20532695e366STejun Heo 20542695e366STejun Heo /* record error and consider speeding down */ 2055dfcc173dSTejun Heo dev = ehc->i.dev; 20562695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 20572695e366STejun Heo ata_dev_enabled(link->device)))) 2058dfcc173dSTejun Heo dev = link->device; 2059dfcc173dSTejun Heo 206076326ac1STejun Heo if (dev) { 206176326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 206276326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 20633884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 206476326ac1STejun Heo } 2065dfcc173dSTejun Heo 2066c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2067c6fd2807SJeff Garzik } 2068c6fd2807SJeff Garzik 2069c6fd2807SJeff Garzik /** 20709b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 20719b1e2658STejun Heo * @ap: host port to perform autopsy on 20729b1e2658STejun Heo * 20739b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 20749b1e2658STejun Heo * which recovery actions are needed. 20759b1e2658STejun Heo * 20769b1e2658STejun Heo * LOCKING: 20779b1e2658STejun Heo * Kernel thread context (may sleep). 20789b1e2658STejun Heo */ 2079fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 20809b1e2658STejun Heo { 20819b1e2658STejun Heo struct ata_link *link; 20829b1e2658STejun Heo 20831eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 20849b1e2658STejun Heo ata_eh_link_autopsy(link); 20852695e366STejun Heo 2086b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2087b1c72916STejun Heo * but actions and flags are transferred over to the master 2088b1c72916STejun Heo * link and handled from there. 2089b1c72916STejun Heo */ 2090b1c72916STejun Heo if (ap->slave_link) { 2091b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2092b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2093b1c72916STejun Heo 2094848e4c68STejun Heo /* transfer control flags from master to slave */ 2095848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2096848e4c68STejun Heo 2097848e4c68STejun Heo /* perform autopsy on the slave link */ 2098b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2099b1c72916STejun Heo 2100848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2101b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2102b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2103b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2104b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2105b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2106b1c72916STejun Heo } 2107b1c72916STejun Heo 21082695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 21092695e366STejun Heo * Perform host link autopsy last. 
21102695e366STejun Heo */ 2111071f44b1STejun Heo if (sata_pmp_attached(ap)) 21122695e366STejun Heo ata_eh_link_autopsy(&ap->link); 21139b1e2658STejun Heo } 21149b1e2658STejun Heo 21159b1e2658STejun Heo /** 2116*6521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 2117*6521148cSRobert Hancock * @command: ATA command code to get description for 2118*6521148cSRobert Hancock * 2119*6521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 2120*6521148cSRobert Hancock * command is not known. 2121*6521148cSRobert Hancock * 2122*6521148cSRobert Hancock * LOCKING: 2123*6521148cSRobert Hancock * None 2124*6521148cSRobert Hancock */ 2125*6521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 2126*6521148cSRobert Hancock { 2127*6521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 2128*6521148cSRobert Hancock static const struct 2129*6521148cSRobert Hancock { 2130*6521148cSRobert Hancock u8 command; 2131*6521148cSRobert Hancock const char *text; 2132*6521148cSRobert Hancock } cmd_descr[] = { 2133*6521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 2134*6521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 2135*6521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 2136*6521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 2137*6521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 2138*6521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 2139*6521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 2140*6521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 2141*6521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 2142*6521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 2143*6521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 2144*6521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 2145*6521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 2146*6521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 2147*6521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 2148*6521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 2149*6521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 2150*6521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 2151*6521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 2152*6521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 2153*6521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 2154*6521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 2155*6521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 2156*6521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 2157*6521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 2158*6521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 2159*6521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 2160*6521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 2161*6521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 2162*6521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 2163*6521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 2164*6521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 2165*6521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 2166*6521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 2167*6521148cSRobert 
Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 2168*6521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 2169*6521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 2170*6521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 2171*6521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 2172*6521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 2173*6521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 2174*6521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 2175*6521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 2176*6521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 2177*6521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 2178*6521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 2179*6521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 2180*6521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 2181*6521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 2182*6521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 2183*6521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 2184*6521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 2185*6521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 2186*6521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 2187*6521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 2188*6521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 2189*6521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 2190*6521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 2191*6521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 2192*6521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 2193*6521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 2194*6521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 2195*6521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 2196*6521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 2197*6521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 2198*6521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 2199*6521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 2200*6521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 2201*6521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2202*6521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 2203*6521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 2204*6521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 2205*6521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 2206*6521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 2207*6521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 2208*6521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 2209*6521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 2210*6521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 2211*6521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 2212*6521148cSRobert Hancock { ATA_CMD_RESTORE, 
"RECALIBRATE" }, 2213*6521148cSRobert Hancock { 0, NULL } /* terminate list */ 2214*6521148cSRobert Hancock }; 2215*6521148cSRobert Hancock 2216*6521148cSRobert Hancock unsigned int i; 2217*6521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 2218*6521148cSRobert Hancock if (cmd_descr[i].command == command) 2219*6521148cSRobert Hancock return cmd_descr[i].text; 2220*6521148cSRobert Hancock #endif 2221*6521148cSRobert Hancock 2222*6521148cSRobert Hancock return NULL; 2223*6521148cSRobert Hancock } 2224*6521148cSRobert Hancock 2225*6521148cSRobert Hancock /** 22269b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22270260731fSTejun Heo * @link: ATA link EH is going on 2228c6fd2807SJeff Garzik * 2229c6fd2807SJeff Garzik * Report EH to user. 2230c6fd2807SJeff Garzik * 2231c6fd2807SJeff Garzik * LOCKING: 2232c6fd2807SJeff Garzik * None. 2233c6fd2807SJeff Garzik */ 22349b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2235c6fd2807SJeff Garzik { 22360260731fSTejun Heo struct ata_port *ap = link->ap; 22370260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2238c6fd2807SJeff Garzik const char *frozen, *desc; 2239a1e10f7eSTejun Heo char tries_buf[6]; 2240c6fd2807SJeff Garzik int tag, nr_failed = 0; 2241c6fd2807SJeff Garzik 224294ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 224394ff3d54STejun Heo return; 224494ff3d54STejun Heo 2245c6fd2807SJeff Garzik desc = NULL; 2246c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2247c6fd2807SJeff Garzik desc = ehc->i.desc; 2248c6fd2807SJeff Garzik 2249c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2250c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2251c6fd2807SJeff Garzik 2252b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2253b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2254e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2255e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2256c6fd2807SJeff Garzik continue; 2257c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2258c6fd2807SJeff Garzik continue; 2259c6fd2807SJeff Garzik 2260c6fd2807SJeff Garzik nr_failed++; 2261c6fd2807SJeff Garzik } 2262c6fd2807SJeff Garzik 2263c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2264c6fd2807SJeff Garzik return; 2265c6fd2807SJeff Garzik 2266c6fd2807SJeff Garzik frozen = ""; 2267c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2268c6fd2807SJeff Garzik frozen = " frozen"; 2269c6fd2807SJeff Garzik 2270a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2271a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2272a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2273a1e10f7eSTejun Heo ap->eh_tries); 2274a1e10f7eSTejun Heo 2275c6fd2807SJeff Garzik if (ehc->i.dev) { 2276c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2277a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2278a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2279a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2280c6fd2807SJeff Garzik if (desc) 2281b64bbc39STejun Heo ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2282c6fd2807SJeff Garzik } else { 22830260731fSTejun Heo ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2284a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2285a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2286a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2287c6fd2807SJeff Garzik 
if (desc) 22880260731fSTejun Heo ata_link_printk(link, KERN_ERR, "%s\n", desc); 2289c6fd2807SJeff Garzik } 2290c6fd2807SJeff Garzik 2291*6521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 22921333e194SRobert Hancock if (ehc->i.serror) 2293da0e21d3STejun Heo ata_link_printk(link, KERN_ERR, 22941333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 22951333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 22961333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 22971333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 22981333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 22991333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 23001333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 23011333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 23021333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 23031333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 23041333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 23051333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 23061333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 23071333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 23081333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 23091333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 23101333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 23111333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 2312*6521148cSRobert Hancock #endif 23131333e194SRobert Hancock 2314c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2315c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 23168a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2317abb6a889STejun Heo const u8 *cdb = qc->cdb; 2318abb6a889STejun Heo char data_buf[20] = ""; 2319abb6a889STejun Heo char cdb_buf[70] = ""; 2320c6fd2807SJeff Garzik 23210260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2322b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2323c6fd2807SJeff Garzik continue; 2324c6fd2807SJeff Garzik 2325abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2326abb6a889STejun Heo static const char *dma_str[] = { 2327abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2328abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2329abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2330abb6a889STejun Heo }; 2331abb6a889STejun Heo static const char *prot_str[] = { 2332abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2333abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2334abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 23350dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 23360dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2337abb6a889STejun Heo }; 2338abb6a889STejun Heo 2339abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2340abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2341abb6a889STejun Heo dma_str[qc->dma_dir]); 2342abb6a889STejun Heo } 2343abb6a889STejun Heo 2344*6521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2345*6521148cSRobert Hancock if (qc->scsicmd) 2346*6521148cSRobert Hancock scsi_print_command(qc->scsicmd); 2347*6521148cSRobert Hancock else 2348abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2349abb6a889STejun Heo "cdb %02x %02x %02x %02x 
%02x %02x %02x %02x " 2350abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2351abb6a889STejun Heo cdb[0], cdb[1], cdb[2], cdb[3], 2352abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2353abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2354abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 2355*6521148cSRobert Hancock } else { 2356*6521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 2357*6521148cSRobert Hancock if (descr) 2358*6521148cSRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 2359*6521148cSRobert Hancock "failed command: %s\n", descr); 2360*6521148cSRobert Hancock } 2361abb6a889STejun Heo 23628a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 23638a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2364abb6a889STejun Heo "tag %d%s\n %s" 23658a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 23665335b729STejun Heo "Emask 0x%x (%s)%s\n", 23678a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 23688a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 23698a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 23708a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2371abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 23728a937581STejun Heo res->command, res->feature, res->nsect, 23738a937581STejun Heo res->lbal, res->lbam, res->lbah, 23748a937581STejun Heo res->hob_feature, res->hob_nsect, 23758a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 23765335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 23775335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 23781333e194SRobert Hancock 2379*6521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23801333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 23811333e194SRobert Hancock ATA_ERR)) { 23821333e194SRobert Hancock if (res->command & ATA_BUSY) 23831333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23841333e194SRobert Hancock "status: { Busy }\n"); 23851333e194SRobert Hancock else 23861333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23871333e194SRobert Hancock "status: { %s%s%s%s}\n", 23881333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 23891333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 23901333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 23911333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 23921333e194SRobert Hancock } 23931333e194SRobert Hancock 23941333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 23951333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 23961333e194SRobert Hancock ATA_ABORTED))) 23971333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23981333e194SRobert Hancock "error: { %s%s%s%s}\n", 23991333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 24001333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 24011333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 24021333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 2403*6521148cSRobert Hancock #endif 2404c6fd2807SJeff Garzik } 2405c6fd2807SJeff Garzik } 2406c6fd2807SJeff Garzik 24079b1e2658STejun Heo /** 24089b1e2658STejun Heo * ata_eh_report - report error handling to user 24099b1e2658STejun Heo * @ap: ATA port to report EH about 24109b1e2658STejun Heo * 24119b1e2658STejun Heo * Report EH to user. 24129b1e2658STejun Heo * 24139b1e2658STejun Heo * LOCKING: 24149b1e2658STejun Heo * None. 
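/*
 * Illustrative sketch, not part of libata: the report code above renders the
 * result status byte as "status: { DRDY DF DRQ ERR }" by chaining
 * "bit ? name : empty-string" printf arguments.  The same technique as a
 * standalone helper; the bit positions below follow the conventional ATA
 * status register layout and are spelled out here only for the sketch.
 */
#include <stdio.h>

#define DEMO_ST_BUSY	0x80
#define DEMO_ST_DRDY	0x40
#define DEMO_ST_DF	0x20
#define DEMO_ST_DRQ	0x08
#define DEMO_ST_ERR	0x01

static void demo_print_status(unsigned char st)
{
	if (st & DEMO_ST_BUSY) {
		printf("status: { Busy }\n");	/* BSY makes the other bits meaningless */
		return;
	}
	printf("status: { %s%s%s%s}\n",
	       st & DEMO_ST_DRDY ? "DRDY " : "",
	       st & DEMO_ST_DF ? "DF " : "",
	       st & DEMO_ST_DRQ ? "DRQ " : "",
	       st & DEMO_ST_ERR ? "ERR " : "");
}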
24159b1e2658STejun Heo */ 2416fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24179b1e2658STejun Heo { 24189b1e2658STejun Heo struct ata_link *link; 24199b1e2658STejun Heo 24201eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 24219b1e2658STejun Heo ata_eh_link_report(link); 24229b1e2658STejun Heo } 24239b1e2658STejun Heo 2424cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2425b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2426b1c72916STejun Heo bool clear_classes) 2427c6fd2807SJeff Garzik { 2428f58229f8STejun Heo struct ata_device *dev; 2429c6fd2807SJeff Garzik 2430b1c72916STejun Heo if (clear_classes) 24311eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2432f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2433c6fd2807SJeff Garzik 2434f046519fSTejun Heo return reset(link, classes, deadline); 2435c6fd2807SJeff Garzik } 2436c6fd2807SJeff Garzik 2437ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link, 24385dbfc9cbSTejun Heo int rc, const unsigned int *classes) 2439c6fd2807SJeff Garzik { 244045db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2441ae791c05STejun Heo return 0; 24425dbfc9cbSTejun Heo if (rc == -EAGAIN) 2443c6fd2807SJeff Garzik return 1; 2444071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 24453495de73STejun Heo return 1; 2446c6fd2807SJeff Garzik return 0; 2447c6fd2807SJeff Garzik } 2448c6fd2807SJeff Garzik 2449fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2450c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2451c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2452c6fd2807SJeff Garzik { 2453afaa5c37STejun Heo struct ata_port *ap = link->ap; 2454b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2455936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2456705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2457c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2458416dc9edSTejun Heo unsigned int lflags = link->flags; 2459c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2460d8af0eb6STejun Heo int max_tries = 0, try = 0; 2461b1c72916STejun Heo struct ata_link *failed_link; 2462f58229f8STejun Heo struct ata_device *dev; 2463416dc9edSTejun Heo unsigned long deadline, now; 2464c6fd2807SJeff Garzik ata_reset_fn_t reset; 2465afaa5c37STejun Heo unsigned long flags; 2466416dc9edSTejun Heo u32 sstatus; 2467b1c72916STejun Heo int nr_unknown, rc; 2468c6fd2807SJeff Garzik 2469932648b0STejun Heo /* 2470932648b0STejun Heo * Prepare to reset 2471932648b0STejun Heo */ 2472d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2473d8af0eb6STejun Heo max_tries++; 247405944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 247505944bdfSTejun Heo hardreset = NULL; 247605944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 247705944bdfSTejun Heo softreset = NULL; 2478d8af0eb6STejun Heo 247919b72321STejun Heo /* make sure each reset attempt is at least COOL_DOWN apart */ 248019b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 24810a2c0f56STejun Heo now = jiffies; 248219b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 248319b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 248419b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 24850a2c0f56STejun Heo if (time_before(now, deadline)) 24860a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 248719b72321STejun Heo } 24880a2c0f56STejun Heo 2489afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2490afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2491afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2492afaa5c37STejun Heo 2493cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2494c6fd2807SJeff Garzik 24951eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2496cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2497cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2498cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2499cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2500cdeab114STejun Heo * suitable controller mode we should not touch the 2501cdeab114STejun Heo * bus as we may be talking too fast. 2502cdeab114STejun Heo */ 2503cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2504cdeab114STejun Heo 2505cdeab114STejun Heo /* If the controller has a pio mode setup function 2506cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2507cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2508cdeab114STejun Heo * configuring devices.
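/*
 * Illustrative sketch, not part of libata: the preparation above sizes the
 * retry budget by walking ata_eh_reset_timeouts[] up to its ULONG_MAX
 * sentinel, and each later retry picks the next (larger) timeout from the
 * table.  The pattern in a freestanding form; do_reset() is an invented
 * callback standing in for the actual reset machinery.
 */
#include <limits.h>

static const unsigned long demo_timeouts_ms[] = { 10000, 10000, 35000, 5000, ULONG_MAX };

static int demo_reset_with_escalation(int (*do_reset)(unsigned long timeout_ms))
{
	int try = 0;

	/* the sentinel both terminates the table and caps the attempt count */
	while (demo_timeouts_ms[try] != ULONG_MAX) {
		if (do_reset(demo_timeouts_ms[try]) == 0)
			return 0;		/* reset succeeded */
		try++;				/* retry with a longer budget */
	}
	return -1;				/* table exhausted, give up */
}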
2509cdeab114STejun Heo */ 2510cdeab114STejun Heo if (ap->ops->set_piomode) 2511cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2512cdeab114STejun Heo } 2513cdeab114STejun Heo 2514cf480626STejun Heo /* prefer hardreset */ 2515932648b0STejun Heo reset = NULL; 2516cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2517cf480626STejun Heo if (hardreset) { 2518cf480626STejun Heo reset = hardreset; 2519a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 25204f7faa3fSTejun Heo } else if (softreset) { 2521cf480626STejun Heo reset = softreset; 2522a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2523cf480626STejun Heo } 2524c6fd2807SJeff Garzik 2525c6fd2807SJeff Garzik if (prereset) { 2526b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2527b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2528b1c72916STejun Heo 2529b1c72916STejun Heo if (slave) { 2530b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2531b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2532b1c72916STejun Heo } 2533b1c72916STejun Heo 2534b1c72916STejun Heo rc = prereset(link, deadline); 2535b1c72916STejun Heo 2536b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2537b1c72916STejun Heo * is skipped iff both master and slave links report 2538b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2539b1c72916STejun Heo */ 2540b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2541b1c72916STejun Heo int tmp; 2542b1c72916STejun Heo 2543b1c72916STejun Heo tmp = prereset(slave, deadline); 2544b1c72916STejun Heo if (tmp != -ENOENT) 2545b1c72916STejun Heo rc = tmp; 2546b1c72916STejun Heo 2547b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2548b1c72916STejun Heo } 2549b1c72916STejun Heo 2550c6fd2807SJeff Garzik if (rc) { 2551c961922bSAlan Cox if (rc == -ENOENT) { 2552cc0680a5STejun Heo ata_link_printk(link, KERN_DEBUG, 25534aa9ab67STejun Heo "port disabled. ignoring.\n"); 2554cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 25554aa9ab67STejun Heo 25561eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2557f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 25584aa9ab67STejun Heo 25594aa9ab67STejun Heo rc = 0; 2560c961922bSAlan Cox } else 2561cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2562c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 2563fccb6ea5STejun Heo goto out; 2564c6fd2807SJeff Garzik } 2565c6fd2807SJeff Garzik 2566932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2567d6515e6fSTejun Heo * bang classes, thaw and return. 
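/*
 * Illustrative sketch, not part of libata: the selection above prefers
 * hardreset and only falls back to softreset, honoring the per-link
 * NO_HRST/NO_SRST restrictions.  The decision isolated into a helper; the
 * typedef and flag parameters are invented for the sketch.
 */
typedef int (*demo_reset_fn_t)(void *link, unsigned long deadline);

static demo_reset_fn_t demo_pick_reset(demo_reset_fn_t hardreset,
				       demo_reset_fn_t softreset,
				       int no_hrst, int no_srst)
{
	if (no_hrst)
		hardreset = NULL;
	if (no_srst)
		softreset = NULL;
	/* a NULL result is legal: it means "skip the reset entirely" */
	return hardreset ? hardreset : softreset;
}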
2568932648b0STejun Heo */ 2569932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 25701eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2571f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2572d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2573d6515e6fSTejun Heo ata_is_host_link(link)) 2574d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2575fccb6ea5STejun Heo rc = 0; 2576fccb6ea5STejun Heo goto out; 2577c6fd2807SJeff Garzik } 2578932648b0STejun Heo } 2579c6fd2807SJeff Garzik 2580c6fd2807SJeff Garzik retry: 2581932648b0STejun Heo /* 2582932648b0STejun Heo * Perform reset 2583932648b0STejun Heo */ 2584dc98c32cSTejun Heo if (ata_is_host_link(link)) 2585dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2586dc98c32cSTejun Heo 2587341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 258831daabdaSTejun Heo 2589932648b0STejun Heo if (reset) { 2590c6fd2807SJeff Garzik if (verbose) 2591cc0680a5STejun Heo ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2592c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard"); 2593c6fd2807SJeff Garzik 2594c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 259519b72321STejun Heo ehc->last_reset = jiffies; 25960d64a233STejun Heo if (reset == hardreset) 25970d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 25980d64a233STejun Heo else 25990d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2600c6fd2807SJeff Garzik 2601b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2602b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2603b1c72916STejun Heo failed_link = link; 26045dbfc9cbSTejun Heo goto fail; 2605b1c72916STejun Heo } 2606c6fd2807SJeff Garzik 2607b1c72916STejun Heo /* hardreset slave link if existent */ 2608b1c72916STejun Heo if (slave && reset == hardreset) { 2609b1c72916STejun Heo int tmp; 2610b1c72916STejun Heo 2611b1c72916STejun Heo if (verbose) 2612b1c72916STejun Heo ata_link_printk(slave, KERN_INFO, 2613b1c72916STejun Heo "hard resetting link\n"); 2614b1c72916STejun Heo 2615b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2616b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2617b1c72916STejun Heo false); 2618b1c72916STejun Heo switch (tmp) { 2619b1c72916STejun Heo case -EAGAIN: 2620b1c72916STejun Heo rc = -EAGAIN; 2621b1c72916STejun Heo case 0: 2622b1c72916STejun Heo break; 2623b1c72916STejun Heo default: 2624b1c72916STejun Heo failed_link = slave; 2625b1c72916STejun Heo rc = tmp; 2626b1c72916STejun Heo goto fail; 2627b1c72916STejun Heo } 2628b1c72916STejun Heo } 2629b1c72916STejun Heo 2630b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2631c6fd2807SJeff Garzik if (reset == hardreset && 26325dbfc9cbSTejun Heo ata_eh_followup_srst_needed(link, rc, classes)) { 2633c6fd2807SJeff Garzik reset = softreset; 2634c6fd2807SJeff Garzik 2635c6fd2807SJeff Garzik if (!reset) { 2636cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2637c6fd2807SJeff Garzik "follow-up softreset required " 2638c6fd2807SJeff Garzik "but no softreset available\n"); 2639b1c72916STejun Heo failed_link = link; 2640fccb6ea5STejun Heo rc = -EINVAL; 264108cf69d0STejun Heo goto fail; 2642c6fd2807SJeff Garzik } 2643c6fd2807SJeff Garzik 2644cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2645b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2646fe2c4d01STejun Heo if (rc) { 2647fe2c4d01STejun Heo failed_link = link; 2648fe2c4d01STejun Heo goto fail; 2649fe2c4d01STejun Heo } 2650c6fd2807SJeff
Garzik } 2651932648b0STejun Heo } else { 2652932648b0STejun Heo if (verbose) 2653932648b0STejun Heo ata_link_printk(link, KERN_INFO, "no reset method " 2654932648b0STejun Heo "available, skipping reset\n"); 2655932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2656932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2657932648b0STejun Heo } 2658008a7896STejun Heo 2659932648b0STejun Heo /* 2660932648b0STejun Heo * Post-reset processing 2661932648b0STejun Heo */ 26621eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2663416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2664416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2665416dc9edSTejun Heo * drives from sleeping mode. 2666c6fd2807SJeff Garzik */ 2667f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2668054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2669c6fd2807SJeff Garzik 2670816ab897STejun Heo if (!ata_phys_link_offline(ata_dev_phys_link(dev))) { 26714ccd3329STejun Heo /* apply class override */ 2672416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2673ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2674416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2675816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2676816ab897STejun Heo } else 2677816ab897STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2678ae791c05STejun Heo } 2679ae791c05STejun Heo 2680008a7896STejun Heo /* record current link speed */ 2681936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2682936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2683b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2684b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2685008a7896STejun Heo 2686dc98c32cSTejun Heo /* thaw the port */ 2687dc98c32cSTejun Heo if (ata_is_host_link(link)) 2688dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2689dc98c32cSTejun Heo 2690f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2691f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2692f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2693f046519fSTejun Heo * This introduces a race condition where hotplug occurs between 2694f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2695f046519fSTejun Heo * link onlineness and classification result later. 2696f046519fSTejun Heo */ 2697b1c72916STejun Heo if (postreset) { 2698cc0680a5STejun Heo postreset(link, classes); 2699b1c72916STejun Heo if (slave) 2700b1c72916STejun Heo postreset(slave, classes); 2701b1c72916STejun Heo } 2702c6fd2807SJeff Garzik 27031e641060STejun Heo /* 27041e641060STejun Heo * Some controllers can't be frozen very well and may set 27051e641060STejun Heo * spurious error conditions during reset. Clear accumulated 27061e641060STejun Heo * error information. As reset is the final recovery action, 27071e641060STejun Heo * nothing is lost by doing this. 27081e641060STejun Heo */ 2709f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 27101e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2711b1c72916STejun Heo if (slave) 27121e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 27131e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2714f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2715f046519fSTejun Heo 2716f046519fSTejun Heo /* Make sure onlineness and classification result correspond.
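/*
 * Illustrative sketch, not part of libata: after a reset the code above
 * caches the negotiated link speed from SStatus, whose SPD field sits in
 * bits 7:4 of the register (per the SATA register layout).  The extraction
 * on its own:
 */
#include <stdint.h>

static unsigned int demo_sstatus_to_spd(uint32_t sstatus)
{
	return (sstatus >> 4) & 0xf;	/* 1 = 1.5 Gbps, 2 = 3.0 Gbps, 0 = no device */
}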
2717f046519fSTejun Heo * Hotplug could have happened during reset and some 2718f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2719f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 2720f046519fSTejun Heo * link onlineness and classification result, those conditions 2721f046519fSTejun Heo * can be reliably detected and retried. 2722f046519fSTejun Heo */ 2723b1c72916STejun Heo nr_unknown = 0; 27241eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2725f046519fSTejun Heo /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2726b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2727f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2728b1c72916STejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) 2729b1c72916STejun Heo nr_unknown++; 2730b1c72916STejun Heo } 2731f046519fSTejun Heo } 2732f046519fSTejun Heo 2733b1c72916STejun Heo if (classify && nr_unknown) { 2734f046519fSTejun Heo if (try < max_tries) { 2735f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, "link online but " 2736f046519fSTejun Heo "device misclassified, retrying\n"); 2737b1c72916STejun Heo failed_link = link; 2738f046519fSTejun Heo rc = -EAGAIN; 2739f046519fSTejun Heo goto fail; 2740f046519fSTejun Heo } 2741f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, 2742f046519fSTejun Heo "link online but device misclassified, " 2743f046519fSTejun Heo "device detection might fail\n"); 2744f046519fSTejun Heo } 2745f046519fSTejun Heo 2746c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2747cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2748b1c72916STejun Heo if (slave) 2749b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 275019b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2751c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2752416dc9edSTejun Heo 2753416dc9edSTejun Heo rc = 0; 2754fccb6ea5STejun Heo out: 2755fccb6ea5STejun Heo /* clear hotplug flag */ 2756fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2757b1c72916STejun Heo if (slave) 2758b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2759afaa5c37STejun Heo 2760afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2761afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2762afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2763afaa5c37STejun Heo 2764c6fd2807SJeff Garzik return rc; 2765416dc9edSTejun Heo 2766416dc9edSTejun Heo fail: 27675958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 27685958e302STejun Heo if (!ata_is_host_link(link) && 27695958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 27705958e302STejun Heo rc = -ERESTART; 27715958e302STejun Heo 2772416dc9edSTejun Heo if (rc == -ERESTART || try >= max_tries) 2773416dc9edSTejun Heo goto out; 2774416dc9edSTejun Heo 2775416dc9edSTejun Heo now = jiffies; 2776416dc9edSTejun Heo if (time_before(now, deadline)) { 2777416dc9edSTejun Heo unsigned long delta = deadline - now; 2778416dc9edSTejun Heo 2779b1c72916STejun Heo ata_link_printk(failed_link, KERN_WARNING, 27800a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 27810a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2782416dc9edSTejun Heo 2783416dc9edSTejun Heo while (delta) 2784416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2785416dc9edSTejun Heo } 2786416dc9edSTejun Heo 2787b1c72916STejun Heo if (try == max_tries - 1) { 2788a07d499bSTejun Heo sata_down_spd_limit(link, 0); 
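/*
 * Illustrative sketch, not part of libata: the failure path above waits out
 * whatever is left of the per-try deadline before retrying, looping because
 * the sleep primitive may return early.  A userspace analogue using
 * nanosleep(), which hands back the unslept remainder on interruption:
 */
#include <errno.h>
#include <time.h>

static void demo_sleep_remaining(struct timespec delta)
{
	/* keep going until the full delta has elapsed */
	while (nanosleep(&delta, &delta) == -1 && errno == EINTR)
		;	/* interrupted early: delta now holds the remaining time */
}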
2789b1c72916STejun Heo if (slave) 2790a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2791b1c72916STejun Heo } else if (rc == -EPIPE) 2792a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2793b1c72916STejun Heo 2794416dc9edSTejun Heo if (hardreset) 2795416dc9edSTejun Heo reset = hardreset; 2796416dc9edSTejun Heo goto retry; 2797c6fd2807SJeff Garzik } 2798c6fd2807SJeff Garzik 279945fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 280045fabbb7SElias Oltmanns { 280145fabbb7SElias Oltmanns struct ata_link *link; 280245fabbb7SElias Oltmanns struct ata_device *dev; 280345fabbb7SElias Oltmanns unsigned long flags; 280445fabbb7SElias Oltmanns 280545fabbb7SElias Oltmanns /* 280645fabbb7SElias Oltmanns * This function can be thought of as an extended version of 280745fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 280845fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 280945fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 281045fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 281145fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 281245fabbb7SElias Oltmanns * up park requests to other devices on the same port or 281345fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 281445fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 281545fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 281645fabbb7SElias Oltmanns * 281745fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 281845fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 281945fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 282045fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 282145fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for 282245fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 282345fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 282445fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 282545fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 282645fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 282745fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 282845fabbb7SElias Oltmanns * ata_eh_recover() again. 
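/*
 * Illustrative sketch, not part of libata: the park handling described in
 * the comment above pulls pending ATA_EH_PARK bits from the shared eh_info
 * into the EH-private context while holding the lock (the code follows
 * below), so requests posted concurrently are never lost.  The same
 * claim-and-clear pattern with a plain mutex; the struct and names are
 * invented for the sketch.
 */
#include <pthread.h>

struct demo_pending {
	pthread_mutex_t lock;
	unsigned int requested;		/* set by requesters at any time */
	unsigned int claimed;		/* owned by the worker thread */
};

static void demo_pull_requests(struct demo_pending *p, unsigned int mask)
{
	pthread_mutex_lock(&p->lock);
	p->claimed |= p->requested & mask;	/* snapshot what we will handle */
	p->requested &= ~mask;			/* and clear it at the source */
	pthread_mutex_unlock(&p->lock);
}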
282945fabbb7SElias Oltmanns */ 283045fabbb7SElias Oltmanns 283145fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 283245fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 28331eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 28341eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 283545fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 283645fabbb7SElias Oltmanns 283745fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 283845fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 283945fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 284045fabbb7SElias Oltmanns } 284145fabbb7SElias Oltmanns } 284245fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 284345fabbb7SElias Oltmanns } 284445fabbb7SElias Oltmanns 284545fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 284645fabbb7SElias Oltmanns { 284745fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 284845fabbb7SElias Oltmanns struct ata_taskfile tf; 284945fabbb7SElias Oltmanns unsigned int err_mask; 285045fabbb7SElias Oltmanns 285145fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 285245fabbb7SElias Oltmanns if (park) { 285345fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 285445fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 285545fabbb7SElias Oltmanns tf.feature = 0x44; 285645fabbb7SElias Oltmanns tf.lbal = 0x4c; 285745fabbb7SElias Oltmanns tf.lbam = 0x4e; 285845fabbb7SElias Oltmanns tf.lbah = 0x55; 285945fabbb7SElias Oltmanns } else { 286045fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 286145fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 286245fabbb7SElias Oltmanns } 286345fabbb7SElias Oltmanns 286445fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 286545fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 286645fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 286745fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 286845fabbb7SElias Oltmanns ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 286945fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 287045fabbb7SElias Oltmanns } 287145fabbb7SElias Oltmanns } 287245fabbb7SElias Oltmanns 28730260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2874c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2875c6fd2807SJeff Garzik { 28760260731fSTejun Heo struct ata_port *ap = link->ap; 28770260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2878c6fd2807SJeff Garzik struct ata_device *dev; 28798c3c52a8STejun Heo unsigned int new_mask = 0; 2880c6fd2807SJeff Garzik unsigned long flags; 2881f58229f8STejun Heo int rc = 0; 2882c6fd2807SJeff Garzik 2883c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2884c6fd2807SJeff Garzik 28858c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 28868c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 28878c3c52a8STejun Heo * device before the master device is identified. 
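/*
 * Illustrative sketch, not part of libata: head parking above is an
 * IDLE IMMEDIATE taskfile carrying the unload feature and the signature LBA
 * bytes used in ata_eh_park_issue_cmd(); the drive acknowledges by returning
 * 0xc4 in LBA low.  Modeled with a local struct; the opcodes are the
 * standard ATA values (IDLE IMMEDIATE 0xe1, CHECK POWER MODE 0xe5).
 */
struct demo_taskfile {
	unsigned char command, feature, lbal, lbam, lbah;
};

static void demo_fill_park_tf(struct demo_taskfile *tf, int park)
{
	*tf = (struct demo_taskfile){ 0 };
	if (park) {
		tf->command = 0xe1;	/* IDLE IMMEDIATE */
		tf->feature = 0x44;	/* unload, as in the code above */
		tf->lbal = 0x4c;
		tf->lbam = 0x4e;
		tf->lbah = 0x55;
	} else {
		tf->command = 0xe5;	/* CHECK POWER MODE, used to "unpark" */
	}
}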
28888c3c52a8STejun Heo */ 28891eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2890f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2891f58229f8STejun Heo unsigned int readid_flags = 0; 2892c6fd2807SJeff Garzik 2893bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2894bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2895bff04647STejun Heo 28969666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2897633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2898633273a3STejun Heo 2899b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2900c6fd2807SJeff Garzik rc = -EIO; 29018c3c52a8STejun Heo goto err; 2902c6fd2807SJeff Garzik } 2903c6fd2807SJeff Garzik 29040260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2905422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2906422c9daaSTejun Heo readid_flags); 2907c6fd2807SJeff Garzik if (rc) 29088c3c52a8STejun Heo goto err; 2909c6fd2807SJeff Garzik 29100260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2911c6fd2807SJeff Garzik 2912baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2913baa1e78aSTejun Heo * transfer mode. 2914baa1e78aSTejun Heo */ 2915baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2916baa1e78aSTejun Heo 2917c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2918c6fd2807SJeff Garzik queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 2919c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2920c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2921c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2922842faa6cSTejun Heo /* Temporarily set dev->class, it will be 2923842faa6cSTejun Heo * permanently set once all configurations are 2924842faa6cSTejun Heo * complete. This is necessary because new 2925842faa6cSTejun Heo * device configuration is done in two 2926842faa6cSTejun Heo * separate loops. 2927842faa6cSTejun Heo */ 2928c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2929c6fd2807SJeff Garzik 2930633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2931633273a3STejun Heo rc = sata_pmp_attach(dev); 2932633273a3STejun Heo else 2933633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2934633273a3STejun Heo readid_flags, dev->id); 2935842faa6cSTejun Heo 2936842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 2937842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 2938842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 2939842faa6cSTejun Heo 29408c3c52a8STejun Heo switch (rc) { 29418c3c52a8STejun Heo case 0: 294299cf610aSTejun Heo /* clear error info accumulated during probe */ 294399cf610aSTejun Heo ata_ering_clear(&dev->ering); 2944f58229f8STejun Heo new_mask |= 1 << dev->devno; 29458c3c52a8STejun Heo break; 29468c3c52a8STejun Heo case -ENOENT: 294755a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 294855a8e2c8STejun Heo * device. No need to reset. Just 2949842faa6cSTejun Heo * thaw and ignore the device. 
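/*
 * Illustrative sketch, not part of libata: the attach logic here notes each
 * slot that produced a fresh IDENTIFY in new_mask while walking devices in
 * reverse above (so PDIAG- is released by the slave first), then configures
 * only those slots in forward order below so messages read naturally.  The
 * two-pass shape, reduced to its core with invented callbacks:
 */
static void demo_identify_then_configure(int ndev,
					 int (*identify)(int devno),
					 void (*configure)(int devno))
{
	unsigned int new_mask = 0;
	int devno;

	for (devno = ndev - 1; devno >= 0; devno--)	/* reverse: slave first */
		if (identify(devno) == 0)
			new_mask |= 1u << devno;

	for (devno = 0; devno < ndev; devno++)		/* forward: master first */
		if (new_mask & (1u << devno))
			configure(devno);
}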
295055a8e2c8STejun Heo */ 295155a8e2c8STejun Heo ata_eh_thaw_port(ap); 2952c6fd2807SJeff Garzik break; 29538c3c52a8STejun Heo default: 29548c3c52a8STejun Heo goto err; 29558c3c52a8STejun Heo } 29568c3c52a8STejun Heo } 2957c6fd2807SJeff Garzik } 2958c6fd2807SJeff Garzik 2959c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 296033267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 296133267325STejun Heo if (ap->ops->cable_detect) 2962c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 296333267325STejun Heo ata_force_cbl(ap); 296433267325STejun Heo } 2965c1c4e8d5STejun Heo 29668c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 29678c3c52a8STejun Heo * device detection messages backwards. 29688c3c52a8STejun Heo */ 29691eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2970633273a3STejun Heo if (!(new_mask & (1 << dev->devno)) || 2971633273a3STejun Heo dev->class == ATA_DEV_PMP) 29728c3c52a8STejun Heo continue; 29738c3c52a8STejun Heo 2974842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 2975842faa6cSTejun Heo 29768c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 29778c3c52a8STejun Heo rc = ata_dev_configure(dev); 29788c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 2979842faa6cSTejun Heo if (rc) { 2980842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 29818c3c52a8STejun Heo goto err; 2982842faa6cSTejun Heo } 29838c3c52a8STejun Heo 2984c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2985c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 2986c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2987baa1e78aSTejun Heo 298855a8e2c8STejun Heo /* new device discovered, configure xfermode */ 2989baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2990c6fd2807SJeff Garzik } 2991c6fd2807SJeff Garzik 29928c3c52a8STejun Heo return 0; 29938c3c52a8STejun Heo 29948c3c52a8STejun Heo err: 2995c6fd2807SJeff Garzik *r_failed_dev = dev; 29968c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 2997c6fd2807SJeff Garzik return rc; 2998c6fd2807SJeff Garzik } 2999c6fd2807SJeff Garzik 30006f1d1e3aSTejun Heo /** 30016f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 30026f1d1e3aSTejun Heo * @link: link on which timings will be programmed 300398a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 30046f1d1e3aSTejun Heo * 30056f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 30066f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 30076f1d1e3aSTejun Heo * returned in @r_failed_dev. 30086f1d1e3aSTejun Heo * 30096f1d1e3aSTejun Heo * LOCKING: 30106f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
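/*
 * Illustrative sketch, not part of libata: ata_set_mode() below marks a
 * device's transfers as dubious whenever the negotiated transfer mode or
 * the NCQ on/off state differs from what was saved before EH, so the next
 * transfers are watched more carefully.  The comparison by itself:
 */
static int demo_xfer_changed(unsigned char saved_mode, unsigned char cur_mode,
			     int saved_ncq, int cur_ncq)
{
	/* any difference means the link has not proven itself at the new settings */
	return saved_mode != cur_mode || !!saved_ncq != !!cur_ncq;
}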
30116f1d1e3aSTejun Heo * 30126f1d1e3aSTejun Heo * RETURNS: 30136f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 30146f1d1e3aSTejun Heo */ 30156f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 30166f1d1e3aSTejun Heo { 30176f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 301800115e0fSTejun Heo struct ata_device *dev; 301900115e0fSTejun Heo int rc; 30206f1d1e3aSTejun Heo 302176326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 30221eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 302376326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 302476326ac1STejun Heo struct ata_ering_entry *ent; 302576326ac1STejun Heo 302676326ac1STejun Heo ent = ata_ering_top(&dev->ering); 302776326ac1STejun Heo if (ent) 302876326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 302976326ac1STejun Heo } 303076326ac1STejun Heo } 303176326ac1STejun Heo 30326f1d1e3aSTejun Heo /* has private set_mode? */ 30336f1d1e3aSTejun Heo if (ap->ops->set_mode) 303400115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 303500115e0fSTejun Heo else 303600115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 303700115e0fSTejun Heo 303800115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 30391eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 304000115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 304100115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 304200115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 304300115e0fSTejun Heo 304400115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 304500115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 304600115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 304700115e0fSTejun Heo } 304800115e0fSTejun Heo 304900115e0fSTejun Heo return rc; 30506f1d1e3aSTejun Heo } 30516f1d1e3aSTejun Heo 305211fc33daSTejun Heo /** 305311fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 305411fc33daSTejun Heo * @dev: ATAPI device to clear UA for 305511fc33daSTejun Heo * 305611fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 305711fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 305811fc33daSTejun Heo * function clears UA. 305911fc33daSTejun Heo * 306011fc33daSTejun Heo * LOCKING: 306111fc33daSTejun Heo * EH context (may sleep). 306211fc33daSTejun Heo * 306311fc33daSTejun Heo * RETURNS: 306411fc33daSTejun Heo * 0 on success, -errno on failure. 
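/*
 * Illustrative sketch, not part of libata: the UA-clearing loop below in
 * outline - poll with TEST UNIT READY and, while the device keeps reporting
 * UNIT ATTENTION, swallow it with REQUEST SENSE, bounded by a fixed number
 * of tries.  The callbacks are invented; 0x06 is the standard SCSI sense key
 * for UNIT ATTENTION.
 */
#define DEMO_UA_TRIES		5
#define DEMO_SK_UNIT_ATTENTION	0x06

static int demo_clear_ua(int (*tur)(unsigned char *sense_key),
			 int (*request_sense)(void))
{
	int i;

	for (i = 0; i < DEMO_UA_TRIES; i++) {
		unsigned char sk = 0;

		if (tur(&sk) == 0 || sk != DEMO_SK_UNIT_ATTENTION)
			return 0;	/* ready, or failing for some other reason */
		if (request_sense())
			return -1;	/* could not fetch/clear the sense data */
	}
	return 0;	/* UA persisted; report it but don't fail EH, as the code below does */
}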
306511fc33daSTejun Heo */ 306611fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 306711fc33daSTejun Heo { 306811fc33daSTejun Heo int i; 306911fc33daSTejun Heo 307011fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3071b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 307211fc33daSTejun Heo u8 sense_key = 0; 307311fc33daSTejun Heo unsigned int err_mask; 307411fc33daSTejun Heo 307511fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 307611fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 307711fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 307811fc33daSTejun Heo "failed (err_mask=0x%x)\n", err_mask); 307911fc33daSTejun Heo return -EIO; 308011fc33daSTejun Heo } 308111fc33daSTejun Heo 308211fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 308311fc33daSTejun Heo return 0; 308411fc33daSTejun Heo 308511fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 308611fc33daSTejun Heo if (err_mask) { 308711fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to clear " 308811fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 308911fc33daSTejun Heo return -EIO; 309011fc33daSTejun Heo } 309111fc33daSTejun Heo } 309211fc33daSTejun Heo 309311fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, 309411fc33daSTejun Heo "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 309511fc33daSTejun Heo 309611fc33daSTejun Heo return 0; 309711fc33daSTejun Heo } 309811fc33daSTejun Heo 30990260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link) 3100c6fd2807SJeff Garzik { 3101f58229f8STejun Heo struct ata_device *dev; 3102f58229f8STejun Heo int cnt = 0; 3103c6fd2807SJeff Garzik 31041eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3105c6fd2807SJeff Garzik cnt++; 3106c6fd2807SJeff Garzik return cnt; 3107c6fd2807SJeff Garzik } 3108c6fd2807SJeff Garzik 31090260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3110c6fd2807SJeff Garzik { 3111f58229f8STejun Heo struct ata_device *dev; 3112f58229f8STejun Heo int cnt = 0; 3113c6fd2807SJeff Garzik 31141eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3115f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3116c6fd2807SJeff Garzik cnt++; 3117c6fd2807SJeff Garzik return cnt; 3118c6fd2807SJeff Garzik } 3119c6fd2807SJeff Garzik 31200260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3121c6fd2807SJeff Garzik { 3122672b2d65STejun Heo struct ata_port *ap = link->ap; 31230260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3124f58229f8STejun Heo struct ata_device *dev; 3125c6fd2807SJeff Garzik 3126f9df58cbSTejun Heo /* skip disabled links */ 3127f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3128f9df58cbSTejun Heo return 1; 3129f9df58cbSTejun Heo 3130672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3131672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3132672b2d65STejun Heo return 0; 3133672b2d65STejun Heo 3134672b2d65STejun Heo /* reset at least once if reset is requested */ 3135672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3136672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3137c6fd2807SJeff Garzik return 0; 3138c6fd2807SJeff Garzik 3139c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 31401eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3141c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3142c6fd2807SJeff Garzik 
ehc->classes[dev->devno] != ATA_DEV_NONE) 3143c6fd2807SJeff Garzik return 0; 3144c6fd2807SJeff Garzik } 3145c6fd2807SJeff Garzik 3146c6fd2807SJeff Garzik return 1; 3147c6fd2807SJeff Garzik } 3148c6fd2807SJeff Garzik 3149c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3150c2c7a89cSTejun Heo { 3151c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3152c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3153c2c7a89cSTejun Heo int *trials = void_arg; 3154c2c7a89cSTejun Heo 3155c2c7a89cSTejun Heo if (ent->timestamp < now - min(now, interval)) 3156c2c7a89cSTejun Heo return -1; 3157c2c7a89cSTejun Heo 3158c2c7a89cSTejun Heo (*trials)++; 3159c2c7a89cSTejun Heo return 0; 3160c2c7a89cSTejun Heo } 3161c2c7a89cSTejun Heo 316202c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 316302c05a27STejun Heo { 316402c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3165c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3166c2c7a89cSTejun Heo int trials = 0; 316702c05a27STejun Heo 316802c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 316902c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 317002c05a27STejun Heo return 0; 317102c05a27STejun Heo 317202c05a27STejun Heo ata_eh_detach_dev(dev); 317302c05a27STejun Heo ata_dev_init(dev); 317402c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3175cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 317600115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 317700115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 317802c05a27STejun Heo 3179c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3180c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3181c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3182c2c7a89cSTejun Heo * there are consecutive failed probes. 3183c2c7a89cSTejun Heo * 3184c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3185c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3186c2c7a89cSTejun Heo * forced to 1.5Gbps. 3187c2c7a89cSTejun Heo * 3188c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3189c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3190c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3191c2c7a89cSTejun Heo */ 3192c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3193c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3194c2c7a89cSTejun Heo 3195c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3196c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3197c2c7a89cSTejun Heo 319802c05a27STejun Heo return 1; 319902c05a27STejun Heo } 320002c05a27STejun Heo 32019b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3202fee7ca72STejun Heo { 32039af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3204fee7ca72STejun Heo 3205cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3206cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 
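/*
 * Illustrative sketch, not part of libata: probe trials above are counted by
 * walking the error ring newest-first and stopping at the first entry older
 * than the trial interval; too many recent trials clamps the link to
 * 1.5Gbps.  The windowed count over a plain newest-first timestamp array:
 */
static int demo_count_recent(const unsigned long *stamps, int n,
			     unsigned long now, unsigned long interval)
{
	/* guard the subtraction the same way min(now, interval) does above */
	unsigned long cutoff = now - (interval < now ? interval : now);
	int i, trials = 0;

	for (i = 0; i < n; i++) {
		if (stamps[i] < cutoff)
			break;		/* everything after this entry is older still */
		trials++;
	}
	return trials;
}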
3207cf9a590aSTejun Heo */ 3208cf9a590aSTejun Heo if (err != -EAGAIN) 3209fee7ca72STejun Heo ehc->tries[dev->devno]--; 3210fee7ca72STejun Heo 3211fee7ca72STejun Heo switch (err) { 3212fee7ca72STejun Heo case -ENODEV: 3213fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3214fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3215fee7ca72STejun Heo case -EINVAL: 3216fee7ca72STejun Heo /* give it just one more chance */ 3217fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3218fee7ca72STejun Heo case -EIO: 3219d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3220fee7ca72STejun Heo /* This is the last chance, better to slow 3221fee7ca72STejun Heo * down than lose it. 3222fee7ca72STejun Heo */ 3223a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3224d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3225fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3226fee7ca72STejun Heo } 3227fee7ca72STejun Heo } 3228fee7ca72STejun Heo 3229fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3230fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3231fee7ca72STejun Heo ata_dev_disable(dev); 3232fee7ca72STejun Heo 3233fee7ca72STejun Heo /* detach if offline */ 3234b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3235fee7ca72STejun Heo ata_eh_detach_dev(dev); 3236fee7ca72STejun Heo 323702c05a27STejun Heo /* schedule probe if necessary */ 323887fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3239fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 324087fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 324187fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 324287fbc5a0STejun Heo } 32439b1e2658STejun Heo 32449b1e2658STejun Heo return 1; 3245fee7ca72STejun Heo } else { 3246cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 32479b1e2658STejun Heo return 0; 3248fee7ca72STejun Heo } 3249fee7ca72STejun Heo } 3250fee7ca72STejun Heo 3251c6fd2807SJeff Garzik /** 3252c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3253c6fd2807SJeff Garzik * @ap: host port to recover 3254c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3255c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3256c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3257c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 32589b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3259c6fd2807SJeff Garzik * 3260c6fd2807SJeff Garzik * This is the alpha and omega, yin and yang, heart and soul of 3261c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 32629b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 32639b1e2658STejun Heo * link's eh_context. This function executes all the operations 32649b1e2658STejun Heo * with appropriate retries and fallbacks to resurrect failed 3265c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3266c6fd2807SJeff Garzik * 3267c6fd2807SJeff Garzik * LOCKING: 3268c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3269c6fd2807SJeff Garzik * 3270c6fd2807SJeff Garzik * RETURNS: 3271c6fd2807SJeff Garzik * 0 on success, -errno on failure.
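/*
 * Illustrative sketch, not part of libata: ata_eh_handle_dev_fail() above
 * relies on deliberate switch fall-through, so a harsher errno also receives
 * every milder errno's treatment (-ENODEV additionally gets the -EINVAL and
 * -EIO handling).  The shape of that ladder with invented out-parameters:
 */
#include <errno.h>

static void demo_grade_failure(int err, int *tries, int *want_probe, int *want_slowdown)
{
	switch (err) {
	case -ENODEV:
		*want_probe = 1;		/* missing or wrong device: reprobe */
		/* fall through */
	case -EINVAL:
		if (*tries > 1)
			*tries = 1;		/* only one more chance */
		/* fall through */
	case -EIO:
		if (*tries == 1)
			*want_slowdown = 1;	/* last chance: slow down rather than lose it */
	}
}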
3272c6fd2807SJeff Garzik */ 3273fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3274c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 32759b1e2658STejun Heo ata_postreset_fn_t postreset, 32769b1e2658STejun Heo struct ata_link **r_failed_link) 3277c6fd2807SJeff Garzik { 32789b1e2658STejun Heo struct ata_link *link; 3279c6fd2807SJeff Garzik struct ata_device *dev; 32800a2c0f56STejun Heo int nr_failed_devs; 3281dc98c32cSTejun Heo int rc; 328245fabbb7SElias Oltmanns unsigned long flags, deadline; 3283c6fd2807SJeff Garzik 3284c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3285c6fd2807SJeff Garzik 3286c6fd2807SJeff Garzik /* prep for recovery */ 32871eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 32889b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 32899b1e2658STejun Heo 3290f9df58cbSTejun Heo /* re-enable link? */ 3291f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3292f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3293f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3294f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3295f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3296f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3297f9df58cbSTejun Heo } 3298f9df58cbSTejun Heo 32991eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3300fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3301fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3302fd995f70STejun Heo else 3303c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3304c6fd2807SJeff Garzik 330579a55b72STejun Heo /* collect port action mask recorded in dev actions */ 33069b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 33079b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3308f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 330979a55b72STejun Heo 3310c6fd2807SJeff Garzik /* process hotplug request */ 3311c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3312c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3313c6fd2807SJeff Garzik 331402c05a27STejun Heo /* schedule probe if necessary */ 331502c05a27STejun Heo if (!ata_dev_enabled(dev)) 331602c05a27STejun Heo ata_eh_schedule_probe(dev); 3317c6fd2807SJeff Garzik } 33189b1e2658STejun Heo } 3319c6fd2807SJeff Garzik 3320c6fd2807SJeff Garzik retry: 3321c6fd2807SJeff Garzik rc = 0; 33229b1e2658STejun Heo nr_failed_devs = 0; 3323c6fd2807SJeff Garzik 3324c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3325c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3326c6fd2807SJeff Garzik goto out; 3327c6fd2807SJeff Garzik 33289b1e2658STejun Heo /* prep for EH */ 33291eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 33309b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 33319b1e2658STejun Heo 3332c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 33330260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3334c6fd2807SJeff Garzik ehc->i.action = 0; 3335c6fd2807SJeff Garzik 33361eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3337f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 33389b1e2658STejun Heo } 3339c6fd2807SJeff Garzik 3340c6fd2807SJeff Garzik /* reset */ 33411eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 33429b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 33439b1e2658STejun Heo 3344cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 33459b1e2658STejun Heo continue; 33469b1e2658STejun Heo 33479b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3348dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3349c6fd2807SJeff Garzik if (rc) { 33500260731fSTejun Heo ata_link_printk(link, KERN_ERR, 3351c6fd2807SJeff Garzik "reset failed, giving up\n"); 3352c6fd2807SJeff Garzik goto out; 3353c6fd2807SJeff Garzik } 33549b1e2658STejun Heo } 3355c6fd2807SJeff Garzik 335645fabbb7SElias Oltmanns do { 335745fabbb7SElias Oltmanns unsigned long now; 335845fabbb7SElias Oltmanns 335945fabbb7SElias Oltmanns /* 336045fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 336145fabbb7SElias Oltmanns * ap->park_req_pending 336245fabbb7SElias Oltmanns */ 336345fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 336445fabbb7SElias Oltmanns 336545fabbb7SElias Oltmanns deadline = jiffies; 33661eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 33671eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 336845fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 336945fabbb7SElias Oltmanns unsigned long tmp; 337045fabbb7SElias Oltmanns 337145fabbb7SElias Oltmanns if (dev->class != ATA_DEV_ATA) 337245fabbb7SElias Oltmanns continue; 337345fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 337445fabbb7SElias Oltmanns ATA_EH_PARK)) 337545fabbb7SElias Oltmanns continue; 337645fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 337745fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 337845fabbb7SElias Oltmanns deadline = tmp; 337945fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 338045fabbb7SElias Oltmanns continue; 338145fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 338245fabbb7SElias Oltmanns continue; 338345fabbb7SElias Oltmanns 338445fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 338545fabbb7SElias Oltmanns } 338645fabbb7SElias Oltmanns } 338745fabbb7SElias Oltmanns 338845fabbb7SElias Oltmanns now = jiffies; 338945fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 339045fabbb7SElias Oltmanns break; 339145fabbb7SElias Oltmanns 339245fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 339345fabbb7SElias Oltmanns deadline - now); 339445fabbb7SElias Oltmanns } while (deadline); 33951eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 33961eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 339745fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 339845fabbb7SElias Oltmanns (1 << dev->devno))) 339945fabbb7SElias Oltmanns continue; 340045fabbb7SElias Oltmanns 340145fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 340245fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 340345fabbb7SElias Oltmanns } 340445fabbb7SElias Oltmanns } 340545fabbb7SElias Oltmanns 34069b1e2658STejun Heo /* the rest */ 34071eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 34089b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 34099b1e2658STejun Heo 
3410c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 34110260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3412c6fd2807SJeff Garzik if (rc) 3413c6fd2807SJeff Garzik goto dev_fail; 3414c6fd2807SJeff Garzik 3415633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3416633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3417633273a3STejun Heo ehc->i.action = 0; 3418633273a3STejun Heo return 0; 3419633273a3STejun Heo } 3420633273a3STejun Heo 3421baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3422baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 34230260731fSTejun Heo rc = ata_set_mode(link, &dev); 34244ae72a1eSTejun Heo if (rc) 3425c6fd2807SJeff Garzik goto dev_fail; 3426baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3427c6fd2807SJeff Garzik } 3428c6fd2807SJeff Garzik 342911fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 343011fc33daSTejun Heo * disrupting the current users of the device. 343111fc33daSTejun Heo */ 343211fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 34331eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 343411fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 343511fc33daSTejun Heo continue; 343611fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 343711fc33daSTejun Heo if (rc) 343811fc33daSTejun Heo goto dev_fail; 343911fc33daSTejun Heo } 344011fc33daSTejun Heo } 344111fc33daSTejun Heo 344211fc33daSTejun Heo /* configure link power saving */ 34433ec25ebdSTejun Heo if (ehc->i.action & ATA_EH_LPM) 34441eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3445ca77329fSKristen Carlson Accardi ata_dev_enable_pm(dev, ap->pm_policy); 3446ca77329fSKristen Carlson Accardi 34479b1e2658STejun Heo /* this link is okay now */ 34489b1e2658STejun Heo ehc->i.flags = 0; 34499b1e2658STejun Heo continue; 3450c6fd2807SJeff Garzik 3451c6fd2807SJeff Garzik dev_fail: 34529b1e2658STejun Heo nr_failed_devs++; 34530a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3454c6fd2807SJeff Garzik 3455b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3456b06ce3e5STejun Heo /* PMP reset requires working host port. 3457b06ce3e5STejun Heo * Can't retry if it's frozen. 3458b06ce3e5STejun Heo */ 3459071f44b1STejun Heo if (sata_pmp_attached(ap)) 3460b06ce3e5STejun Heo goto out; 34619b1e2658STejun Heo break; 34629b1e2658STejun Heo } 3463b06ce3e5STejun Heo } 34649b1e2658STejun Heo 34650a2c0f56STejun Heo if (nr_failed_devs) 3466c6fd2807SJeff Garzik goto retry; 3467c6fd2807SJeff Garzik 3468c6fd2807SJeff Garzik out: 34699b1e2658STejun Heo if (rc && r_failed_link) 34709b1e2658STejun Heo *r_failed_link = link; 3471c6fd2807SJeff Garzik 3472c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3473c6fd2807SJeff Garzik return rc; 3474c6fd2807SJeff Garzik } 3475c6fd2807SJeff Garzik 3476c6fd2807SJeff Garzik /** 3477c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3478c6fd2807SJeff Garzik * @ap: host port to finish EH for 3479c6fd2807SJeff Garzik * 3480c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3481c6fd2807SJeff Garzik * failed qcs. 3482c6fd2807SJeff Garzik * 3483c6fd2807SJeff Garzik * LOCKING: 3484c6fd2807SJeff Garzik * None. 
3485c6fd2807SJeff Garzik */ 3486fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3487c6fd2807SJeff Garzik { 3488c6fd2807SJeff Garzik int tag; 3489c6fd2807SJeff Garzik 3490c6fd2807SJeff Garzik /* retry or finish qcs */ 3491c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3492c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3493c6fd2807SJeff Garzik 3494c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3495c6fd2807SJeff Garzik continue; 3496c6fd2807SJeff Garzik 3497c6fd2807SJeff Garzik if (qc->err_mask) { 3498c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3499c6fd2807SJeff Garzik * generate sense data in this function, 3500c6fd2807SJeff Garzik * considering both err_mask and tf. 3501c6fd2807SJeff Garzik */ 350203faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3503c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 350403faab78STejun Heo else 350503faab78STejun Heo ata_eh_qc_complete(qc); 3506c6fd2807SJeff Garzik } else { 3507c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3508c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3509c6fd2807SJeff Garzik } else { 3510c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3511c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3512c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3513c6fd2807SJeff Garzik } 3514c6fd2807SJeff Garzik } 3515c6fd2807SJeff Garzik } 3516da917d69STejun Heo 3517da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3518da917d69STejun Heo WARN_ON(ap->nr_active_links); 3519da917d69STejun Heo ap->nr_active_links = 0; 3520c6fd2807SJeff Garzik } 3521c6fd2807SJeff Garzik 3522c6fd2807SJeff Garzik /** 3523c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3524c6fd2807SJeff Garzik * @ap: host port to handle error for 3525a1efdabaSTejun Heo * 3526c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3527c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3528c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3529c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3530c6fd2807SJeff Garzik * 3531c6fd2807SJeff Garzik * Perform standard error handling sequence. 3532c6fd2807SJeff Garzik * 3533c6fd2807SJeff Garzik * LOCKING: 3534c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
3535c6fd2807SJeff Garzik  */
3536c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3537c6fd2807SJeff Garzik                ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3538c6fd2807SJeff Garzik                ata_postreset_fn_t postreset)
3539c6fd2807SJeff Garzik {
35409b1e2658STejun Heo         struct ata_device *dev;
35419b1e2658STejun Heo         int rc;
35429b1e2658STejun Heo 
35439b1e2658STejun Heo         ata_eh_autopsy(ap);
35449b1e2658STejun Heo         ata_eh_report(ap);
35459b1e2658STejun Heo 
35469b1e2658STejun Heo         rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
35479b1e2658STejun Heo                             NULL);
35489b1e2658STejun Heo         if (rc) {
35491eca4365STejun Heo                 ata_for_each_dev(dev, &ap->link, ALL)
35509b1e2658STejun Heo                         ata_dev_disable(dev);
35519b1e2658STejun Heo         }
35529b1e2658STejun Heo 
3553c6fd2807SJeff Garzik         ata_eh_finish(ap);
3554c6fd2807SJeff Garzik }
3555c6fd2807SJeff Garzik 
3556a1efdabaSTejun Heo /**
3557a1efdabaSTejun Heo  * ata_std_error_handler - standard error handler
3558a1efdabaSTejun Heo  * @ap: host port to handle error for
3559a1efdabaSTejun Heo  *
3560a1efdabaSTejun Heo  * Standard error handler
3561a1efdabaSTejun Heo  *
3562a1efdabaSTejun Heo  * LOCKING:
3563a1efdabaSTejun Heo  * Kernel thread context (may sleep).
3564a1efdabaSTejun Heo  */
3565a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap)
3566a1efdabaSTejun Heo {
3567a1efdabaSTejun Heo         struct ata_port_operations *ops = ap->ops;
3568a1efdabaSTejun Heo         ata_reset_fn_t hardreset = ops->hardreset;
3569a1efdabaSTejun Heo 
357057c9efdfSTejun Heo         /* ignore built-in hardreset if SCR access is not available */
357157c9efdfSTejun Heo         if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
3572a1efdabaSTejun Heo                 hardreset = NULL;
3573a1efdabaSTejun Heo 
3574a1efdabaSTejun Heo         ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3575a1efdabaSTejun Heo }
3576a1efdabaSTejun Heo 
35776ffa01d8STejun Heo #ifdef CONFIG_PM
3578c6fd2807SJeff Garzik /**
3579c6fd2807SJeff Garzik  * ata_eh_handle_port_suspend - perform port suspend operation
3580c6fd2807SJeff Garzik  * @ap: port to suspend
3581c6fd2807SJeff Garzik  *
3582c6fd2807SJeff Garzik  * Suspend @ap.
3583c6fd2807SJeff Garzik  *
3584c6fd2807SJeff Garzik  * LOCKING:
3585c6fd2807SJeff Garzik  * Kernel thread context (may sleep).
3586c6fd2807SJeff Garzik  */
3587c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
3588c6fd2807SJeff Garzik {
3589c6fd2807SJeff Garzik         unsigned long flags;
3590c6fd2807SJeff Garzik         int rc = 0;
3591c6fd2807SJeff Garzik 
3592c6fd2807SJeff Garzik         /* are we suspending? */
3593c6fd2807SJeff Garzik         spin_lock_irqsave(ap->lock, flags);
3594c6fd2807SJeff Garzik         if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3595c6fd2807SJeff Garzik             ap->pm_mesg.event == PM_EVENT_ON) {
3596c6fd2807SJeff Garzik                 spin_unlock_irqrestore(ap->lock, flags);
3597c6fd2807SJeff Garzik                 return;
3598c6fd2807SJeff Garzik         }
3599c6fd2807SJeff Garzik         spin_unlock_irqrestore(ap->lock, flags);
3600c6fd2807SJeff Garzik 
3601c6fd2807SJeff Garzik         WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3602c6fd2807SJeff Garzik 
360364578a3dSTejun Heo         /* tell ACPI we're suspending */
360464578a3dSTejun Heo         rc = ata_acpi_on_suspend(ap);
360564578a3dSTejun Heo         if (rc)
360664578a3dSTejun Heo                 goto out;
360764578a3dSTejun Heo 
3608c6fd2807SJeff Garzik         /* suspend */
3609c6fd2807SJeff Garzik         ata_eh_freeze_port(ap);
3610c6fd2807SJeff Garzik 
3611c6fd2807SJeff Garzik         if (ap->ops->port_suspend)
3612c6fd2807SJeff Garzik                 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3613c6fd2807SJeff Garzik 
3614bd3adca5SShaohua Li         ata_acpi_set_state(ap, PMSG_SUSPEND);
361564578a3dSTejun Heo  out:
3616c6fd2807SJeff Garzik         /* report result */
3617c6fd2807SJeff Garzik         spin_lock_irqsave(ap->lock, flags);
3618c6fd2807SJeff Garzik 
3619c6fd2807SJeff Garzik         ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3620c6fd2807SJeff Garzik         if (rc == 0)
3621c6fd2807SJeff Garzik                 ap->pflags |= ATA_PFLAG_SUSPENDED;
362264578a3dSTejun Heo         else if (ap->pflags & ATA_PFLAG_FROZEN)
3623c6fd2807SJeff Garzik                 ata_port_schedule_eh(ap);
3624c6fd2807SJeff Garzik 
3625c6fd2807SJeff Garzik         if (ap->pm_result) {
3626c6fd2807SJeff Garzik                 *ap->pm_result = rc;
3627c6fd2807SJeff Garzik                 ap->pm_result = NULL;
3628c6fd2807SJeff Garzik         }
3629c6fd2807SJeff Garzik 
3630c6fd2807SJeff Garzik         spin_unlock_irqrestore(ap->lock, flags);
3631c6fd2807SJeff Garzik 
3632c6fd2807SJeff Garzik         return;
3633c6fd2807SJeff Garzik }
3634c6fd2807SJeff Garzik 
3635c6fd2807SJeff Garzik /**
3636c6fd2807SJeff Garzik  * ata_eh_handle_port_resume - perform port resume operation
3637c6fd2807SJeff Garzik  * @ap: port to resume
3638c6fd2807SJeff Garzik  *
3639c6fd2807SJeff Garzik  * Resume @ap.
3640c6fd2807SJeff Garzik  *
3641c6fd2807SJeff Garzik  * LOCKING:
3642c6fd2807SJeff Garzik  * Kernel thread context (may sleep).
3643c6fd2807SJeff Garzik  */
3644c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
3645c6fd2807SJeff Garzik {
36466f9c1ea2STejun Heo         struct ata_link *link;
36476f9c1ea2STejun Heo         struct ata_device *dev;
3648c6fd2807SJeff Garzik         unsigned long flags;
36499666f400STejun Heo         int rc = 0;
3650c6fd2807SJeff Garzik 
3651c6fd2807SJeff Garzik         /* are we resuming? */
3652c6fd2807SJeff Garzik         spin_lock_irqsave(ap->lock, flags);
3653c6fd2807SJeff Garzik         if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3654c6fd2807SJeff Garzik             ap->pm_mesg.event != PM_EVENT_ON) {
3655c6fd2807SJeff Garzik                 spin_unlock_irqrestore(ap->lock, flags);
3656c6fd2807SJeff Garzik                 return;
3657c6fd2807SJeff Garzik         }
3658c6fd2807SJeff Garzik         spin_unlock_irqrestore(ap->lock, flags);
3659c6fd2807SJeff Garzik 
36609666f400STejun Heo         WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3661c6fd2807SJeff Garzik 
36626f9c1ea2STejun Heo         /*
36636f9c1ea2STejun Heo          * Error timestamps are in jiffies which doesn't run while
36646f9c1ea2STejun Heo          * suspended and PHY events during resume aren't too uncommon.
36656f9c1ea2STejun Heo          * When the two are combined, it can lead to unnecessary speed
36666f9c1ea2STejun Heo          * downs if the machine is suspended and resumed repeatedly.
36676f9c1ea2STejun Heo          * Clear error history.
36686f9c1ea2STejun Heo          */
36696f9c1ea2STejun Heo         ata_for_each_link(link, ap, HOST_FIRST)
36706f9c1ea2STejun Heo                 ata_for_each_dev(dev, link, ALL)
36716f9c1ea2STejun Heo                         ata_ering_clear(&dev->ering);
36726f9c1ea2STejun Heo 
3673bd3adca5SShaohua Li         ata_acpi_set_state(ap, PMSG_ON);
3674bd3adca5SShaohua Li 
3675c6fd2807SJeff Garzik         if (ap->ops->port_resume)
3676c6fd2807SJeff Garzik                 rc = ap->ops->port_resume(ap);
3677c6fd2807SJeff Garzik 
36786746544cSTejun Heo         /* tell ACPI that we're resuming */
36796746544cSTejun Heo         ata_acpi_on_resume(ap);
36806746544cSTejun Heo 
36819666f400STejun Heo         /* report result */
3682c6fd2807SJeff Garzik         spin_lock_irqsave(ap->lock, flags);
3683c6fd2807SJeff Garzik         ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3684c6fd2807SJeff Garzik         if (ap->pm_result) {
3685c6fd2807SJeff Garzik                 *ap->pm_result = rc;
3686c6fd2807SJeff Garzik                 ap->pm_result = NULL;
3687c6fd2807SJeff Garzik         }
3688c6fd2807SJeff Garzik         spin_unlock_irqrestore(ap->lock, flags);
3689c6fd2807SJeff Garzik }
36906ffa01d8STejun Heo #endif /* CONFIG_PM */
3691
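/*
 * Illustrative sketch (not part of libata-eh.c): how a SATA low-level
 * driver typically hooks into the EH entry points above.  The driver
 * points ->error_handler at ata_std_error_handler(), which ignores the
 * built-in hardreset when SCR access is unavailable and then calls
 * ata_do_eh() to run autopsy, reporting, recovery and ata_eh_finish().
 * The "foo_" names below are hypothetical; a real driver would supply
 * its own controller-specific reset logic where needed.
 */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
                         unsigned long deadline)
{
        /* controller-specific PHY handling would go here; this sketch
         * simply defers to the generic SATA hardreset
         */
        return sata_std_hardreset(link, class, deadline);
}

static struct ata_port_operations foo_port_ops = {
        .inherits       = &sata_port_ops,
        .error_handler  = ata_std_error_handler,
        .hardreset      = foo_hardreset,
};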