/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
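 *
 * For example, IDENTIFY (ATA_CMD_ID_ATA/ATA_CMD_ID_ATAPI) starts with
 * the 5s entry of ata_eh_identify_timeouts; if that try times out, the
 * retry for the same device uses the 10s entry, then the 30s one.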
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only the name and the offset address are appended.
 *
 *	LOCKING:
 *	None.
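 *
 *	For example, a negative @offset appends something like
 *	"<name> m2048@0xfebf1000" (type, size and start), while a zero
 *	or positive @offset appends just "<name> 0xfebf1100" (values
 *	here are illustrative only; see the two formats used below).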
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
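 *
 *	The per-device, per-class timeout index is advanced so that the
 *	next retry of a command in the same class uses the next (longer)
 *	entry; the index never advances past the final ULONG_MAX entry.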
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
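		/* clear the matching bits from every device's per-dev
		 * action as well
		 */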
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the SCSI command finish (BLK_EH_HANDLED).  Otherwise, the qc
 *	has timed out and EH should be invoked.  Prevent
 *	ata_qc_complete() from finishing it by setting EH_SCHEDULED
 *	and return BLK_EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		 * a polled recovery to race the real interrupt handler.
		 *
		 * The lost_interrupt handler checks for any completed but
		 * non-notified command and completes much like an IRQ handler.
		 *
		 * We then fall into the error recovery code which will treat
		 * this as if normal completion won the race.
		 */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If the timeout raced normal completion and there is nothing
	 * to recover (nr_timedout == 0), why exactly are we doing error
	 * recovery?
	 */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if an exception occurs after this point but
		 * before EH completion, the SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
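 *
 *	This waits both for libata EH (ATA_PFLAG_EH_PENDING and
 *	ATA_PFLAG_EH_IN_PROGRESS) to clear and then for SCSI EH on the
 *	associated Scsi_Host to finish.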
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when an HSM violation or some other
 *	condition disrupts normal operation of the port.  A frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	The ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	nr_aborted = ata_port_abort(ap);
	__ata_port_freeze(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
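 *
 *	Unlike __ata_port_freeze(), this helper grabs the host lock
 *	itself, so it may be called without the lock held.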
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
1231c6fd2807SJeff Garzik */ 1232fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1233c6fd2807SJeff Garzik { 1234f58229f8STejun Heo struct ata_link *link = dev->link; 1235f58229f8STejun Heo struct ata_port *ap = link->ap; 123690484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1237c6fd2807SJeff Garzik unsigned long flags; 1238c6fd2807SJeff Garzik 1239c6fd2807SJeff Garzik ata_dev_disable(dev); 1240c6fd2807SJeff Garzik 1241c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1242c6fd2807SJeff Garzik 1243c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1244c6fd2807SJeff Garzik 1245c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1246c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1247c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1248c6fd2807SJeff Garzik } 1249c6fd2807SJeff Garzik 125090484ebfSTejun Heo /* clear per-dev EH info */ 1251f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1252f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 125390484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 125490484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1255c6fd2807SJeff Garzik 1256c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1257c6fd2807SJeff Garzik } 1258c6fd2807SJeff Garzik 1259c6fd2807SJeff Garzik /** 1260c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1261955e57dfSTejun Heo * @link: target ATA link 1262c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1263c6fd2807SJeff Garzik * @action: action about to be performed 1264c6fd2807SJeff Garzik * 1265c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1266955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1267955e57dfSTejun Heo * repeated. 1268c6fd2807SJeff Garzik * 1269c6fd2807SJeff Garzik * LOCKING: 1270c6fd2807SJeff Garzik * None. 1271c6fd2807SJeff Garzik */ 1272fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1273c6fd2807SJeff Garzik unsigned int action) 1274c6fd2807SJeff Garzik { 1275955e57dfSTejun Heo struct ata_port *ap = link->ap; 1276955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1277955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1278c6fd2807SJeff Garzik unsigned long flags; 1279c6fd2807SJeff Garzik 1280c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1281c6fd2807SJeff Garzik 1282955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1283c6fd2807SJeff Garzik 1284a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1285a568d1d2STejun Heo * slave links as master will do them again. 1286a568d1d2STejun Heo */ 1287a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1288c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1289c6fd2807SJeff Garzik 1290c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1291c6fd2807SJeff Garzik } 1292c6fd2807SJeff Garzik 1293c6fd2807SJeff Garzik /** 1294c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1295c6fd2807SJeff Garzik * @ap: target ATA port 1296c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1297c6fd2807SJeff Garzik * @action: action just completed 1298c6fd2807SJeff Garzik * 1299c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1300955e57dfSTejun Heo * in @link->eh_context. 
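 *
 * Together with ata_eh_about_to_do() this brackets an EH action, for
 * example (illustrative sketch of a reset pass):
 *
 *	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 *	.. perform the reset ..
 *	ata_eh_done(link, NULL, ATA_EH_RESET);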
1301c6fd2807SJeff Garzik * 1302c6fd2807SJeff Garzik * LOCKING: 1303c6fd2807SJeff Garzik * None. 1304c6fd2807SJeff Garzik */ 1305fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1306c6fd2807SJeff Garzik unsigned int action) 1307c6fd2807SJeff Garzik { 1308955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13099af5c9c9STejun Heo 1310955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1311c6fd2807SJeff Garzik } 1312c6fd2807SJeff Garzik 1313c6fd2807SJeff Garzik /** 1314c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1315c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1316c6fd2807SJeff Garzik * 1317c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1318c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1319c6fd2807SJeff Garzik * error is reported. 1320c6fd2807SJeff Garzik * 1321c6fd2807SJeff Garzik * LOCKING: 1322c6fd2807SJeff Garzik * None. 1323c6fd2807SJeff Garzik * 1324c6fd2807SJeff Garzik * RETURNS: 1325c6fd2807SJeff Garzik * Descriptive string for @err_mask 1326c6fd2807SJeff Garzik */ 1327c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1328c6fd2807SJeff Garzik { 1329c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1330c6fd2807SJeff Garzik return "host bus error"; 1331c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1332c6fd2807SJeff Garzik return "ATA bus error"; 1333c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1334c6fd2807SJeff Garzik return "timeout"; 1335c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1336c6fd2807SJeff Garzik return "HSM violation"; 1337c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1338c6fd2807SJeff Garzik return "internal error"; 1339c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1340c6fd2807SJeff Garzik return "media error"; 1341c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1342c6fd2807SJeff Garzik return "invalid argument"; 1343c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1344c6fd2807SJeff Garzik return "device error"; 1345c6fd2807SJeff Garzik return "unknown error"; 1346c6fd2807SJeff Garzik } 1347c6fd2807SJeff Garzik 1348c6fd2807SJeff Garzik /** 1349c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1350c6fd2807SJeff Garzik * @dev: target device 1351c6fd2807SJeff Garzik * @page: page to read 1352c6fd2807SJeff Garzik * @buf: buffer to store read page 1353c6fd2807SJeff Garzik * @sectors: number of sectors to read 1354c6fd2807SJeff Garzik * 1355c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1356c6fd2807SJeff Garzik * 1357c6fd2807SJeff Garzik * LOCKING: 1358c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1359c6fd2807SJeff Garzik * 1360c6fd2807SJeff Garzik * RETURNS: 1361c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
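 *
 * For example, the NCQ error analysis below pulls one sector of log
 * page 10h into the port-wide scratch buffer (sketch mirroring
 * ata_eh_read_log_10h()):
 *
 *	u8 *buf = dev->link->ap->sector_buf;
 *	unsigned int err_mask;
 *
 *	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
 *	if (err_mask)
 *		return -EIO;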
1362c6fd2807SJeff Garzik */ 1363c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1364c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1365c6fd2807SJeff Garzik { 1366c6fd2807SJeff Garzik struct ata_taskfile tf; 1367c6fd2807SJeff Garzik unsigned int err_mask; 1368c6fd2807SJeff Garzik 1369c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1370c6fd2807SJeff Garzik 1371c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1372c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1373c6fd2807SJeff Garzik tf.lbal = page; 1374c6fd2807SJeff Garzik tf.nsect = sectors; 1375c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1376c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1377c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1378c6fd2807SJeff Garzik 1379c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 13802b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1381c6fd2807SJeff Garzik 1382c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1383c6fd2807SJeff Garzik return err_mask; 1384c6fd2807SJeff Garzik } 1385c6fd2807SJeff Garzik 1386c6fd2807SJeff Garzik /** 1387c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1388c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1389c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1390c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1391c6fd2807SJeff Garzik * 1392c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1393c6fd2807SJeff Garzik * condition. 1394c6fd2807SJeff Garzik * 1395c6fd2807SJeff Garzik * LOCKING: 1396c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1397c6fd2807SJeff Garzik * 1398c6fd2807SJeff Garzik * RETURNS: 1399c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
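 *
 * On success the caller learns which tag failed and receives the
 * device's status for it, e.g. (sketch mirroring
 * ata_eh_analyze_ncq_error() below):
 *
 *	rc = ata_eh_read_log_10h(dev, &tag, &tf);
 *	if (rc == 0) {
 *		qc = __ata_qc_from_tag(ap, tag);
 *		memcpy(&qc->result_tf, &tf, sizeof(tf));
 *	}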
1400c6fd2807SJeff Garzik */ 1401c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1402c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1403c6fd2807SJeff Garzik { 14049af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1405c6fd2807SJeff Garzik unsigned int err_mask; 1406c6fd2807SJeff Garzik u8 csum; 1407c6fd2807SJeff Garzik int i; 1408c6fd2807SJeff Garzik 1409c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1410c6fd2807SJeff Garzik if (err_mask) 1411c6fd2807SJeff Garzik return -EIO; 1412c6fd2807SJeff Garzik 1413c6fd2807SJeff Garzik csum = 0; 1414c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1415c6fd2807SJeff Garzik csum += buf[i]; 1416c6fd2807SJeff Garzik if (csum) 1417c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1418c6fd2807SJeff Garzik "invalid checksum 0x%x on log page 10h\n", csum); 1419c6fd2807SJeff Garzik 1420c6fd2807SJeff Garzik if (buf[0] & 0x80) 1421c6fd2807SJeff Garzik return -ENOENT; 1422c6fd2807SJeff Garzik 1423c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1424c6fd2807SJeff Garzik 1425c6fd2807SJeff Garzik tf->command = buf[2]; 1426c6fd2807SJeff Garzik tf->feature = buf[3]; 1427c6fd2807SJeff Garzik tf->lbal = buf[4]; 1428c6fd2807SJeff Garzik tf->lbam = buf[5]; 1429c6fd2807SJeff Garzik tf->lbah = buf[6]; 1430c6fd2807SJeff Garzik tf->device = buf[7]; 1431c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1432c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1433c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1434c6fd2807SJeff Garzik tf->nsect = buf[12]; 1435c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1436c6fd2807SJeff Garzik 1437c6fd2807SJeff Garzik return 0; 1438c6fd2807SJeff Garzik } 1439c6fd2807SJeff Garzik 1440c6fd2807SJeff Garzik /** 144111fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 144211fc33daSTejun Heo * @dev: target ATAPI device 144311fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 144411fc33daSTejun Heo * 144511fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 144611fc33daSTejun Heo * 144711fc33daSTejun Heo * LOCKING: 144811fc33daSTejun Heo * EH context (may sleep). 144911fc33daSTejun Heo * 145011fc33daSTejun Heo * RETURNS: 145111fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
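 *
 * Unit-attention handling can use it roughly as follows (illustrative
 * sketch; UNIT_ATTENTION is the sense key constant from
 * <scsi/scsi.h>):
 *
 *	u8 sense_key = 0;
 *	unsigned int err_mask;
 *
 *	err_mask = atapi_eh_tur(dev, &sense_key);
 *	if (err_mask == AC_ERR_DEV && sense_key == UNIT_ATTENTION)
 *		.. a unit attention is pending, clear it and retry ..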
145211fc33daSTejun Heo */ 145311fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 145411fc33daSTejun Heo { 145511fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 145611fc33daSTejun Heo struct ata_taskfile tf; 145711fc33daSTejun Heo unsigned int err_mask; 145811fc33daSTejun Heo 145911fc33daSTejun Heo ata_tf_init(dev, &tf); 146011fc33daSTejun Heo 146111fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 146211fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 146311fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 146411fc33daSTejun Heo 146511fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 146611fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 146711fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 146811fc33daSTejun Heo return err_mask; 146911fc33daSTejun Heo } 147011fc33daSTejun Heo 147111fc33daSTejun Heo /** 1472c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1473c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1474c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14753eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1476c6fd2807SJeff Garzik * 1477c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1478c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1479c6fd2807SJeff Garzik * 1480c6fd2807SJeff Garzik * LOCKING: 1481c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1482c6fd2807SJeff Garzik * 1483c6fd2807SJeff Garzik * RETURNS: 1484c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1485c6fd2807SJeff Garzik */ 14863eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 14873eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1488c6fd2807SJeff Garzik { 14893eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 14903eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 14919af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1492c6fd2807SJeff Garzik struct ata_taskfile tf; 1493c6fd2807SJeff Garzik 1494c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1495c6fd2807SJeff Garzik 1496c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1497c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1498c6fd2807SJeff Garzik 149956287768SAlbert Lee /* initialize sense_buf with the error register, 150056287768SAlbert Lee * for the case where they are -not- overwritten 150156287768SAlbert Lee */ 1502c6fd2807SJeff Garzik sense_buf[0] = 0x70; 15033eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 150456287768SAlbert Lee 150556287768SAlbert Lee /* some devices time out if garbage left in tf */ 150656287768SAlbert Lee ata_tf_init(dev, &tf); 1507c6fd2807SJeff Garzik 1508c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1509c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1510c6fd2807SJeff Garzik 1511c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1512c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 15130dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1514c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1515c6fd2807SJeff Garzik } else { 15160dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1517f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1518f2dfc1a1STejun Heo tf.lbah = 0; 1519c6fd2807SJeff Garzik } 1520c6fd2807SJeff Garzik 1521c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 15222b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1523c6fd2807SJeff Garzik } 1524c6fd2807SJeff Garzik 1525c6fd2807SJeff Garzik /** 1526c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 15270260731fSTejun Heo * @link: ATA link to analyze SError for 1528c6fd2807SJeff Garzik * 1529c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1530c6fd2807SJeff Garzik * failure. 1531c6fd2807SJeff Garzik * 1532c6fd2807SJeff Garzik * LOCKING: 1533c6fd2807SJeff Garzik * None. 1534c6fd2807SJeff Garzik */ 15350260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1536c6fd2807SJeff Garzik { 15370260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1538c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1539c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1540f9df58cbSTejun Heo u32 hotplug_mask; 1541c6fd2807SJeff Garzik 1542e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1543c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1544cf480626STejun Heo action |= ATA_EH_RESET; 1545c6fd2807SJeff Garzik } 1546c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1547c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1548cf480626STejun Heo action |= ATA_EH_RESET; 1549c6fd2807SJeff Garzik } 1550c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1551c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1552cf480626STejun Heo action |= ATA_EH_RESET; 1553c6fd2807SJeff Garzik } 1554f9df58cbSTejun Heo 1555f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1556f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1557f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1558f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1559f9df58cbSTejun Heo */ 1560f9df58cbSTejun Heo hotplug_mask = 0; 1561f9df58cbSTejun Heo 1562f9df58cbSTejun Heo if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1563f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1564f9df58cbSTejun Heo else 1565f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1566f9df58cbSTejun Heo 1567f9df58cbSTejun Heo if (serror & hotplug_mask) 1568c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1569c6fd2807SJeff Garzik 1570c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1571c6fd2807SJeff Garzik ehc->i.action |= action; 1572c6fd2807SJeff Garzik } 1573c6fd2807SJeff Garzik 1574c6fd2807SJeff Garzik /** 1575c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 15760260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1577c6fd2807SJeff Garzik * 1578c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1579c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1580c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1581c6fd2807SJeff Garzik * care of the rest. 
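 *
 * In practice an LLD interrupt handler that spots a device error on
 * an NCQ command can do no more than (illustrative sketch; how EH is
 * actually kicked, e.g. via ata_port_abort(), is up to the driver):
 *
 *	ehi->err_mask |= AC_ERR_DEV;
 *	ata_port_abort(ap);
 *
 * and leave reading log page 10h and blaming the right tag to this
 * function.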
1582c6fd2807SJeff Garzik * 1583c6fd2807SJeff Garzik * LOCKING: 1584c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1585c6fd2807SJeff Garzik */ 158610acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1587c6fd2807SJeff Garzik { 15880260731fSTejun Heo struct ata_port *ap = link->ap; 15890260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 15900260731fSTejun Heo struct ata_device *dev = link->device; 1591c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1592c6fd2807SJeff Garzik struct ata_taskfile tf; 1593c6fd2807SJeff Garzik int tag, rc; 1594c6fd2807SJeff Garzik 1595c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1596c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1597c6fd2807SJeff Garzik return; 1598c6fd2807SJeff Garzik 1599c6fd2807SJeff Garzik /* is it NCQ device error? */ 16000260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1601c6fd2807SJeff Garzik return; 1602c6fd2807SJeff Garzik 1603c6fd2807SJeff Garzik /* has LLDD analyzed already? */ 1604c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1605c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1606c6fd2807SJeff Garzik 1607c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1608c6fd2807SJeff Garzik continue; 1609c6fd2807SJeff Garzik 1610c6fd2807SJeff Garzik if (qc->err_mask) 1611c6fd2807SJeff Garzik return; 1612c6fd2807SJeff Garzik } 1613c6fd2807SJeff Garzik 1614c6fd2807SJeff Garzik /* okay, this error is ours */ 1615c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1616c6fd2807SJeff Garzik if (rc) { 16170260731fSTejun Heo ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1618c6fd2807SJeff Garzik "(errno=%d)\n", rc); 1619c6fd2807SJeff Garzik return; 1620c6fd2807SJeff Garzik } 1621c6fd2807SJeff Garzik 16220260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 16230260731fSTejun Heo ata_link_printk(link, KERN_ERR, "log page 10h reported " 1624c6fd2807SJeff Garzik "inactive tag %d\n", tag); 1625c6fd2807SJeff Garzik return; 1626c6fd2807SJeff Garzik } 1627c6fd2807SJeff Garzik 1628c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1629c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1630c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1631a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 16325335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1633c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1634c6fd2807SJeff Garzik } 1635c6fd2807SJeff Garzik 1636c6fd2807SJeff Garzik /** 1637c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1638c6fd2807SJeff Garzik * @qc: qc to analyze 1639c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1640c6fd2807SJeff Garzik * 1641c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1642c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 1643c6fd2807SJeff Garzik * avaliable. 1644c6fd2807SJeff Garzik * 1645c6fd2807SJeff Garzik * LOCKING: 1646c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1647c6fd2807SJeff Garzik * 1648c6fd2807SJeff Garzik * RETURNS: 1649c6fd2807SJeff Garzik * Determined recovery action 1650c6fd2807SJeff Garzik */ 1651c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1652c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1653c6fd2807SJeff Garzik { 1654c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1655c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1656c6fd2807SJeff Garzik 1657c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1658c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1659cf480626STejun Heo return ATA_EH_RESET; 1660c6fd2807SJeff Garzik } 1661c6fd2807SJeff Garzik 1662a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1663a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1664a51d644aSTejun Heo else 1665c6fd2807SJeff Garzik return 0; 1666c6fd2807SJeff Garzik 1667c6fd2807SJeff Garzik switch (qc->dev->class) { 1668c6fd2807SJeff Garzik case ATA_DEV_ATA: 1669c6fd2807SJeff Garzik if (err & ATA_ICRC) 1670c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1671c6fd2807SJeff Garzik if (err & ATA_UNC) 1672c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1673c6fd2807SJeff Garzik if (err & ATA_IDNF) 1674c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1675c6fd2807SJeff Garzik break; 1676c6fd2807SJeff Garzik 1677c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1678a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 16793eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 16803eabddb8STejun Heo qc->scsicmd->sense_buffer, 16813eabddb8STejun Heo qc->result_tf.feature >> 4); 1682c6fd2807SJeff Garzik if (!tmp) { 1683a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1684a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1685a569a30dSTejun Heo * data is already valid. 1686c6fd2807SJeff Garzik * 1687c6fd2807SJeff Garzik * TODO: interpret sense data and set 1688c6fd2807SJeff Garzik * appropriate err_mask. 
1689c6fd2807SJeff Garzik */ 1690c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1691c6fd2807SJeff Garzik } else 1692c6fd2807SJeff Garzik qc->err_mask |= tmp; 1693c6fd2807SJeff Garzik } 1694a569a30dSTejun Heo } 1695c6fd2807SJeff Garzik 1696c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1697cf480626STejun Heo action |= ATA_EH_RESET; 1698c6fd2807SJeff Garzik 1699c6fd2807SJeff Garzik return action; 1700c6fd2807SJeff Garzik } 1701c6fd2807SJeff Garzik 170276326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 170376326ac1STejun Heo int *xfer_ok) 1704c6fd2807SJeff Garzik { 170576326ac1STejun Heo int base = 0; 170676326ac1STejun Heo 170776326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 170876326ac1STejun Heo *xfer_ok = 1; 170976326ac1STejun Heo 171076326ac1STejun Heo if (!*xfer_ok) 171175f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 171276326ac1STejun Heo 17137d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 171476326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1715c6fd2807SJeff Garzik 17167d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 171776326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17187d47e8d4STejun Heo 17193884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 17207d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 172176326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17227d47e8d4STejun Heo if ((err_mask & 17237d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 172476326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1725c6fd2807SJeff Garzik } 1726c6fd2807SJeff Garzik 1727c6fd2807SJeff Garzik return 0; 1728c6fd2807SJeff Garzik } 1729c6fd2807SJeff Garzik 17307d47e8d4STejun Heo struct speed_down_verdict_arg { 1731c6fd2807SJeff Garzik u64 since; 173276326ac1STejun Heo int xfer_ok; 17333884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1734c6fd2807SJeff Garzik }; 1735c6fd2807SJeff Garzik 17367d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1737c6fd2807SJeff Garzik { 17387d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 173976326ac1STejun Heo int cat; 1740c6fd2807SJeff Garzik 1741c6fd2807SJeff Garzik if (ent->timestamp < arg->since) 1742c6fd2807SJeff Garzik return -1; 1743c6fd2807SJeff Garzik 174476326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 174576326ac1STejun Heo &arg->xfer_ok); 17467d47e8d4STejun Heo arg->nr_errors[cat]++; 174776326ac1STejun Heo 1748c6fd2807SJeff Garzik return 0; 1749c6fd2807SJeff Garzik } 1750c6fd2807SJeff Garzik 1751c6fd2807SJeff Garzik /** 17527d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1753c6fd2807SJeff Garzik * @dev: Device of interest 1754c6fd2807SJeff Garzik * 1755c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 17567d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 17577d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 1758c6fd2807SJeff Garzik * 17593884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1760c6fd2807SJeff Garzik * 17613884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 17623884f7b0STejun Heo * IO commands 17637d47e8d4STejun Heo * 17643884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1765c6fd2807SJeff Garzik * 176676326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 176776326ac1STejun Heo * data transfer hasn't been verified. 
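 *
 * For instance (illustrative): a timeout recorded while the transfer
 * mode is still unverified (ATA_EFLAG_DUBIOUS_XFER set) is counted as
 * ECAT_DUBIOUS_TOUT_HSM, while the same timeout after a verified
 * transfer is plain ECAT_TOUT_HSM.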
176876326ac1STejun Heo * 17693884f7b0STejun Heo * Verdicts are 17707d47e8d4STejun Heo * 17713884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 17727d47e8d4STejun Heo * 17733884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 17743884f7b0STejun Heo * to PIO. 17753884f7b0STejun Heo * 17763884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 17773884f7b0STejun Heo * 17783884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 177976326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 178076326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 178176326ac1STejun Heo * This is to expedite speed down decisions right after device is 178276326ac1STejun Heo * initially configured. 17833884f7b0STejun Heo * 178476326ac1STejun Heo * The following are the speed down rules. #1 and #2 deal with 178576326ac1STejun Heo * DUBIOUS errors. 178676326ac1STejun Heo * 178776326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 178876326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 178976326ac1STejun Heo * 179076326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 179176326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 179276326ac1STejun Heo * 179376326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 17943884f7b0STejun Heo * occurred during last 5 mins, FALLBACK_TO_PIO. 17953884f7b0STejun Heo * 179676326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 17973884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 17983884f7b0STejun Heo * 179976326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 18003884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 18017d47e8d4STejun Heo * 1802c6fd2807SJeff Garzik * LOCKING: 1803c6fd2807SJeff Garzik * Inherited from caller. 1804c6fd2807SJeff Garzik * 1805c6fd2807SJeff Garzik * RETURNS: 18067d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
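 *
 * Worked example (illustrative): three TOUT_HSM plus two UNK_DEV
 * errors in the last 10 mins trip rule #4 (3 + 2 > 3) and yield
 * NCQ_OFF, while rule #5 stays quiet (0 + 3 is not more than 3 and
 * 2 is not more than 6), so the transfer speed is left alone.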
1807c6fd2807SJeff Garzik */ 18087d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1809c6fd2807SJeff Garzik { 18107d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 18117d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 18127d47e8d4STejun Heo struct speed_down_verdict_arg arg; 18137d47e8d4STejun Heo unsigned int verdict = 0; 1814c6fd2807SJeff Garzik 18153884f7b0STejun Heo /* scan past 5 mins of error history */ 18163884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 18173884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 18183884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 18193884f7b0STejun Heo 182076326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 182176326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 182276326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 182376326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 182476326ac1STejun Heo 182576326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 182676326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 182776326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 182876326ac1STejun Heo 18293884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18303884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1831663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18323884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 18333884f7b0STejun Heo 18347d47e8d4STejun Heo /* scan past 10 mins of error history */ 1835c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 18367d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 18377d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1838c6fd2807SJeff Garzik 18393884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 18403884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 18417d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 18423884f7b0STejun Heo 18433884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18443884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1845663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18467d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1847c6fd2807SJeff Garzik 18487d47e8d4STejun Heo return verdict; 1849c6fd2807SJeff Garzik } 1850c6fd2807SJeff Garzik 1851c6fd2807SJeff Garzik /** 1852c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1853c6fd2807SJeff Garzik * @dev: Failed device 18543884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1855c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1856c6fd2807SJeff Garzik * 1857c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1858c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1859c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1860c6fd2807SJeff Garzik * necessary. 1861c6fd2807SJeff Garzik * 1862c6fd2807SJeff Garzik * LOCKING: 1863c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1864c6fd2807SJeff Garzik * 1865c6fd2807SJeff Garzik * RETURNS: 18667d47e8d4STejun Heo * Determined recovery action. 
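 *
 * The link autopsy feeds it the flags and error mask accumulated over
 * the failed commands, e.g. (sketch of the call in
 * ata_eh_link_autopsy() below):
 *
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);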
1867c6fd2807SJeff Garzik */ 18683884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 18693884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1870c6fd2807SJeff Garzik { 1871b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 187276326ac1STejun Heo int xfer_ok = 0; 18737d47e8d4STejun Heo unsigned int verdict; 18747d47e8d4STejun Heo unsigned int action = 0; 18757d47e8d4STejun Heo 18767d47e8d4STejun Heo /* don't bother if Cat-0 error */ 187776326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1878c6fd2807SJeff Garzik return 0; 1879c6fd2807SJeff Garzik 1880c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 18813884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 18827d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1883c6fd2807SJeff Garzik 18847d47e8d4STejun Heo /* turn off NCQ? */ 18857d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 18867d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 18877d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 18887d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 18897d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 18907d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 18917d47e8d4STejun Heo goto done; 18927d47e8d4STejun Heo } 1893c6fd2807SJeff Garzik 18947d47e8d4STejun Heo /* speed down? */ 18957d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1896c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1897a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1898cf480626STejun Heo action |= ATA_EH_RESET; 18997d47e8d4STejun Heo goto done; 19007d47e8d4STejun Heo } 1901c6fd2807SJeff Garzik 1902c6fd2807SJeff Garzik /* lower transfer mode */ 19037d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 19047d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 19057d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 19067d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 19077d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 19087d47e8d4STejun Heo int sel; 1909c6fd2807SJeff Garzik 19107d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 19117d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 19127d47e8d4STejun Heo else 19137d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 19147d47e8d4STejun Heo 19157d47e8d4STejun Heo dev->spdn_cnt++; 19167d47e8d4STejun Heo 19177d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1918cf480626STejun Heo action |= ATA_EH_RESET; 19197d47e8d4STejun Heo goto done; 19207d47e8d4STejun Heo } 19217d47e8d4STejun Heo } 19227d47e8d4STejun Heo } 19237d47e8d4STejun Heo 19247d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1925663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
19267d47e8d4STejun Heo */ 19277d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1928663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 19297d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 19307d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 19317d47e8d4STejun Heo dev->spdn_cnt = 0; 1932cf480626STejun Heo action |= ATA_EH_RESET; 19337d47e8d4STejun Heo goto done; 19347d47e8d4STejun Heo } 19357d47e8d4STejun Heo } 19367d47e8d4STejun Heo 1937c6fd2807SJeff Garzik return 0; 19387d47e8d4STejun Heo done: 19397d47e8d4STejun Heo /* device has been slowed down, blow error history */ 194076326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 19417d47e8d4STejun Heo ata_ering_clear(&dev->ering); 19427d47e8d4STejun Heo return action; 1943c6fd2807SJeff Garzik } 1944c6fd2807SJeff Garzik 1945c6fd2807SJeff Garzik /** 19469b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19479b1e2658STejun Heo * @link: host link to perform autopsy on 1948c6fd2807SJeff Garzik * 19490260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19500260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19510260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1952c6fd2807SJeff Garzik * 1953c6fd2807SJeff Garzik * LOCKING: 1954c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1955c6fd2807SJeff Garzik */ 19569b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1957c6fd2807SJeff Garzik { 19580260731fSTejun Heo struct ata_port *ap = link->ap; 1959936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1960dfcc173dSTejun Heo struct ata_device *dev; 19613884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 19623884f7b0STejun Heo int tag; 1963c6fd2807SJeff Garzik u32 serror; 1964c6fd2807SJeff Garzik int rc; 1965c6fd2807SJeff Garzik 1966c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1967c6fd2807SJeff Garzik 1968c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1969c6fd2807SJeff Garzik return; 1970c6fd2807SJeff Garzik 1971c6fd2807SJeff Garzik /* obtain and analyze SError */ 1972936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1973c6fd2807SJeff Garzik if (rc == 0) { 1974c6fd2807SJeff Garzik ehc->i.serror |= serror; 19750260731fSTejun Heo ata_eh_analyze_serror(link); 19764e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 1977cf480626STejun Heo /* SError read failed, force reset and probing */ 1978b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 1979cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 19804e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 19814e57c517STejun Heo } 1982c6fd2807SJeff Garzik 1983c6fd2807SJeff Garzik /* analyze NCQ failure */ 19840260731fSTejun Heo ata_eh_analyze_ncq_error(link); 1985c6fd2807SJeff Garzik 1986c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1987c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1988c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1989c6fd2807SJeff Garzik 1990c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1991c6fd2807SJeff Garzik 1992c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1993c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1994c6fd2807SJeff Garzik 1995b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 1996b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 1997c6fd2807SJeff Garzik continue; 
1998c6fd2807SJeff Garzik 1999c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2000c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2001c6fd2807SJeff Garzik 2002c6fd2807SJeff Garzik /* analyze TF */ 2003c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2004c6fd2807SJeff Garzik 2005c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2006c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2007c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2008c6fd2807SJeff Garzik AC_ERR_INVALID); 2009c6fd2807SJeff Garzik 2010c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2011c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2012c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2013c6fd2807SJeff Garzik 2014c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2015f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2016c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2017c6fd2807SJeff Garzik 201803faab78STejun Heo /* determine whether the command is worth retrying */ 201903faab78STejun Heo if (!(qc->err_mask & AC_ERR_INVALID) && 202003faab78STejun Heo ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) 202103faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 202203faab78STejun Heo 2023c6fd2807SJeff Garzik /* accumulate error info */ 2024c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2025c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2026c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 20273884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2028c6fd2807SJeff Garzik } 2029c6fd2807SJeff Garzik 2030c6fd2807SJeff Garzik /* enforce default EH actions */ 2031c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2032c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2033cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20343884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20353884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2036c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2037c6fd2807SJeff Garzik 2038dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2039dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
2040dfcc173dSTejun Heo */ 2041c6fd2807SJeff Garzik if (ehc->i.dev) { 2042c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2043c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2044c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2045c6fd2807SJeff Garzik } 2046c6fd2807SJeff Garzik 20472695e366STejun Heo /* propagate timeout to host link */ 20482695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 20492695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 20502695e366STejun Heo 20512695e366STejun Heo /* record error and consider speeding down */ 2052dfcc173dSTejun Heo dev = ehc->i.dev; 20532695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 20542695e366STejun Heo ata_dev_enabled(link->device)))) 2055dfcc173dSTejun Heo dev = link->device; 2056dfcc173dSTejun Heo 205776326ac1STejun Heo if (dev) { 205876326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 205976326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 20603884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 206176326ac1STejun Heo } 2062dfcc173dSTejun Heo 2063c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2064c6fd2807SJeff Garzik } 2065c6fd2807SJeff Garzik 2066c6fd2807SJeff Garzik /** 20679b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 20689b1e2658STejun Heo * @ap: host port to perform autopsy on 20699b1e2658STejun Heo * 20709b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 20719b1e2658STejun Heo * which recovery actions are needed. 20729b1e2658STejun Heo * 20739b1e2658STejun Heo * LOCKING: 20749b1e2658STejun Heo * Kernel thread context (may sleep). 20759b1e2658STejun Heo */ 2076fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 20779b1e2658STejun Heo { 20789b1e2658STejun Heo struct ata_link *link; 20799b1e2658STejun Heo 20801eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 20819b1e2658STejun Heo ata_eh_link_autopsy(link); 20822695e366STejun Heo 2083b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2084b1c72916STejun Heo * but actions and flags are transferred over to the master 2085b1c72916STejun Heo * link and handled from there. 2086b1c72916STejun Heo */ 2087b1c72916STejun Heo if (ap->slave_link) { 2088b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2089b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2090b1c72916STejun Heo 2091848e4c68STejun Heo /* transfer control flags from master to slave */ 2092848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2093848e4c68STejun Heo 2094848e4c68STejun Heo /* perform autopsy on the slave link */ 2095b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2096b1c72916STejun Heo 2097848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2098b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2099b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2100b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2101b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2102b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2103b1c72916STejun Heo } 2104b1c72916STejun Heo 21052695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 21062695e366STejun Heo * Perform host link autopsy last. 
21072695e366STejun Heo */ 2108071f44b1STejun Heo if (sata_pmp_attached(ap)) 21092695e366STejun Heo ata_eh_link_autopsy(&ap->link); 21109b1e2658STejun Heo } 21119b1e2658STejun Heo 21129b1e2658STejun Heo /** 21139b1e2658STejun Heo * ata_eh_link_report - report error handling to user 21140260731fSTejun Heo * @link: ATA link EH is going on 2115c6fd2807SJeff Garzik * 2116c6fd2807SJeff Garzik * Report EH to user. 2117c6fd2807SJeff Garzik * 2118c6fd2807SJeff Garzik * LOCKING: 2119c6fd2807SJeff Garzik * None. 2120c6fd2807SJeff Garzik */ 21219b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2122c6fd2807SJeff Garzik { 21230260731fSTejun Heo struct ata_port *ap = link->ap; 21240260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2125c6fd2807SJeff Garzik const char *frozen, *desc; 2126a1e10f7eSTejun Heo char tries_buf[6]; 2127c6fd2807SJeff Garzik int tag, nr_failed = 0; 2128c6fd2807SJeff Garzik 212994ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 213094ff3d54STejun Heo return; 213194ff3d54STejun Heo 2132c6fd2807SJeff Garzik desc = NULL; 2133c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2134c6fd2807SJeff Garzik desc = ehc->i.desc; 2135c6fd2807SJeff Garzik 2136c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2137c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2138c6fd2807SJeff Garzik 2139b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2140b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2141e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2142e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2143c6fd2807SJeff Garzik continue; 2144c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2145c6fd2807SJeff Garzik continue; 2146c6fd2807SJeff Garzik 2147c6fd2807SJeff Garzik nr_failed++; 2148c6fd2807SJeff Garzik } 2149c6fd2807SJeff Garzik 2150c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2151c6fd2807SJeff Garzik return; 2152c6fd2807SJeff Garzik 2153c6fd2807SJeff Garzik frozen = ""; 2154c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2155c6fd2807SJeff Garzik frozen = " frozen"; 2156c6fd2807SJeff Garzik 2157a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2158a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2159a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2160a1e10f7eSTejun Heo ap->eh_tries); 2161a1e10f7eSTejun Heo 2162c6fd2807SJeff Garzik if (ehc->i.dev) { 2163c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2164a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2165a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2166a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2167c6fd2807SJeff Garzik if (desc) 2168b64bbc39STejun Heo ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2169c6fd2807SJeff Garzik } else { 21700260731fSTejun Heo ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2171a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2172a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2173a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2174c6fd2807SJeff Garzik if (desc) 21750260731fSTejun Heo ata_link_printk(link, KERN_ERR, "%s\n", desc); 2176c6fd2807SJeff Garzik } 2177c6fd2807SJeff Garzik 21781333e194SRobert Hancock if (ehc->i.serror) 2179da0e21d3STejun Heo ata_link_printk(link, KERN_ERR, 21801333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 21811333e194SRobert Hancock ehc->i.serror & 
SERR_DATA_RECOVERED ? "RecovData " : "", 21821333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 21831333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 21841333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 21851333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 21861333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 21871333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 21881333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 21891333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 21901333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 21911333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 21921333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 21931333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 21941333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 21951333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 21961333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 21971333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 21981333e194SRobert Hancock 2199c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2200c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 22018a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2202abb6a889STejun Heo const u8 *cdb = qc->cdb; 2203abb6a889STejun Heo char data_buf[20] = ""; 2204abb6a889STejun Heo char cdb_buf[70] = ""; 2205c6fd2807SJeff Garzik 22060260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2207b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2208c6fd2807SJeff Garzik continue; 2209c6fd2807SJeff Garzik 2210abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2211abb6a889STejun Heo static const char *dma_str[] = { 2212abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2213abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2214abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2215abb6a889STejun Heo }; 2216abb6a889STejun Heo static const char *prot_str[] = { 2217abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2218abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2219abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 22200dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 22210dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2222abb6a889STejun Heo }; 2223abb6a889STejun Heo 2224abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2225abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2226abb6a889STejun Heo dma_str[qc->dma_dir]); 2227abb6a889STejun Heo } 2228abb6a889STejun Heo 2229e39eec13SJeff Garzik if (ata_is_atapi(qc->tf.protocol)) 2230abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2231abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2232abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2233abb6a889STejun Heo cdb[0], cdb[1], cdb[2], cdb[3], 2234abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2235abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2236abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 2237abb6a889STejun Heo 22388a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 22398a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2240abb6a889STejun Heo "tag %d%s\n %s" 22418a937581STejun Heo "res 
%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 22425335b729STejun Heo "Emask 0x%x (%s)%s\n", 22438a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 22448a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 22458a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 22468a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2247abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 22488a937581STejun Heo res->command, res->feature, res->nsect, 22498a937581STejun Heo res->lbal, res->lbam, res->lbah, 22508a937581STejun Heo res->hob_feature, res->hob_nsect, 22518a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 22525335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 22535335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 22541333e194SRobert Hancock 22551333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 22561333e194SRobert Hancock ATA_ERR)) { 22571333e194SRobert Hancock if (res->command & ATA_BUSY) 22581333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 22591333e194SRobert Hancock "status: { Busy }\n"); 22601333e194SRobert Hancock else 22611333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 22621333e194SRobert Hancock "status: { %s%s%s%s}\n", 22631333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 22641333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 22651333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 22661333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 22671333e194SRobert Hancock } 22681333e194SRobert Hancock 22691333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 22701333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 22711333e194SRobert Hancock ATA_ABORTED))) 22721333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 22731333e194SRobert Hancock "error: { %s%s%s%s}\n", 22741333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 22751333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 22761333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 22771333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 2278c6fd2807SJeff Garzik } 2279c6fd2807SJeff Garzik } 2280c6fd2807SJeff Garzik 22819b1e2658STejun Heo /** 22829b1e2658STejun Heo * ata_eh_report - report error handling to user 22839b1e2658STejun Heo * @ap: ATA port to report EH about 22849b1e2658STejun Heo * 22859b1e2658STejun Heo * Report EH to user. 22869b1e2658STejun Heo * 22879b1e2658STejun Heo * LOCKING: 22889b1e2658STejun Heo * None. 
22899b1e2658STejun Heo */ 2290fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 22919b1e2658STejun Heo { 22929b1e2658STejun Heo struct ata_link *link; 22939b1e2658STejun Heo 22941eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 22959b1e2658STejun Heo ata_eh_link_report(link); 22969b1e2658STejun Heo } 22979b1e2658STejun Heo 2298cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2299b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2300b1c72916STejun Heo bool clear_classes) 2301c6fd2807SJeff Garzik { 2302f58229f8STejun Heo struct ata_device *dev; 2303c6fd2807SJeff Garzik 2304b1c72916STejun Heo if (clear_classes) 23051eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2306f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2307c6fd2807SJeff Garzik 2308f046519fSTejun Heo return reset(link, classes, deadline); 2309c6fd2807SJeff Garzik } 2310c6fd2807SJeff Garzik 2311ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link, 23125dbfc9cbSTejun Heo int rc, const unsigned int *classes) 2313c6fd2807SJeff Garzik { 231445db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2315ae791c05STejun Heo return 0; 23165dbfc9cbSTejun Heo if (rc == -EAGAIN) 2317c6fd2807SJeff Garzik return 1; 2318071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 23193495de73STejun Heo return 1; 2320c6fd2807SJeff Garzik return 0; 2321c6fd2807SJeff Garzik } 2322c6fd2807SJeff Garzik 2323fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2324c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2325c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2326c6fd2807SJeff Garzik { 2327afaa5c37STejun Heo struct ata_port *ap = link->ap; 2328b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2329936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2330b1c72916STejun Heo struct ata_eh_context *sehc = &slave->eh_context; 2331c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2332416dc9edSTejun Heo unsigned int lflags = link->flags; 2333c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2334d8af0eb6STejun Heo int max_tries = 0, try = 0; 2335b1c72916STejun Heo struct ata_link *failed_link; 2336f58229f8STejun Heo struct ata_device *dev; 2337416dc9edSTejun Heo unsigned long deadline, now; 2338c6fd2807SJeff Garzik ata_reset_fn_t reset; 2339afaa5c37STejun Heo unsigned long flags; 2340416dc9edSTejun Heo u32 sstatus; 2341b1c72916STejun Heo int nr_unknown, rc; 2342c6fd2807SJeff Garzik 2343932648b0STejun Heo /* 2344932648b0STejun Heo * Prepare to reset 2345932648b0STejun Heo */ 2346d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2347d8af0eb6STejun Heo max_tries++; 234805944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 234905944bdfSTejun Heo hardreset = NULL; 235005944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 235105944bdfSTejun Heo softreset = NULL; 2352d8af0eb6STejun Heo 235319b72321STejun Heo /* make sure each reset attemp is at least COOL_DOWN apart */ 235419b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 23550a2c0f56STejun Heo now = jiffies; 235619b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 235719b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 235819b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 23590a2c0f56STejun Heo if (time_before(now, deadline)) 23600a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - 
now); 236119b72321STejun Heo } 23620a2c0f56STejun Heo 2363afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2364afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2365afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2366afaa5c37STejun Heo 2367cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2368c6fd2807SJeff Garzik 23691eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2370cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2371cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2372cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2373cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2374cdeab114STejun Heo * suitable controller mode we should not touch the 2375cdeab114STejun Heo * bus as we may be talking too fast. 2376cdeab114STejun Heo */ 2377cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2378cdeab114STejun Heo 2379cdeab114STejun Heo /* If the controller has a pio mode setup function 2380cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2381cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2382cdeab114STejun Heo * configuring devices. 2383cdeab114STejun Heo */ 2384cdeab114STejun Heo if (ap->ops->set_piomode) 2385cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2386cdeab114STejun Heo } 2387cdeab114STejun Heo 2388cf480626STejun Heo /* prefer hardreset */ 2389932648b0STejun Heo reset = NULL; 2390cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2391cf480626STejun Heo if (hardreset) { 2392cf480626STejun Heo reset = hardreset; 2393a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 23944f7faa3fSTejun Heo } else if (softreset) { 2395cf480626STejun Heo reset = softreset; 2396a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2397cf480626STejun Heo } 2398c6fd2807SJeff Garzik 2399c6fd2807SJeff Garzik if (prereset) { 2400b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2401b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2402b1c72916STejun Heo 2403b1c72916STejun Heo if (slave) { 2404b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2405b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2406b1c72916STejun Heo } 2407b1c72916STejun Heo 2408b1c72916STejun Heo rc = prereset(link, deadline); 2409b1c72916STejun Heo 2410b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2411b1c72916STejun Heo * is skipped iff both master and slave links report 2412b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2413b1c72916STejun Heo */ 2414b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2415b1c72916STejun Heo int tmp; 2416b1c72916STejun Heo 2417b1c72916STejun Heo tmp = prereset(slave, deadline); 2418b1c72916STejun Heo if (tmp != -ENOENT) 2419b1c72916STejun Heo rc = tmp; 2420b1c72916STejun Heo 2421b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2422b1c72916STejun Heo } 2423b1c72916STejun Heo 2424c6fd2807SJeff Garzik if (rc) { 2425c961922bSAlan Cox if (rc == -ENOENT) { 2426cc0680a5STejun Heo ata_link_printk(link, KERN_DEBUG, 24274aa9ab67STejun Heo "port disabled. 
ignoring.\n"); 2428cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 24294aa9ab67STejun Heo 24301eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2431f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 24324aa9ab67STejun Heo 24334aa9ab67STejun Heo rc = 0; 2434c961922bSAlan Cox } else 2435cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2436c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 2437fccb6ea5STejun Heo goto out; 2438c6fd2807SJeff Garzik } 2439c6fd2807SJeff Garzik 2440932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2441d6515e6fSTejun Heo * bang classes, thaw and return. 2442932648b0STejun Heo */ 2443932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 24441eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2445f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2446d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2447d6515e6fSTejun Heo ata_is_host_link(link)) 2448d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2449fccb6ea5STejun Heo rc = 0; 2450fccb6ea5STejun Heo goto out; 2451c6fd2807SJeff Garzik } 2452932648b0STejun Heo } 2453c6fd2807SJeff Garzik 2454c6fd2807SJeff Garzik retry: 2455932648b0STejun Heo /* 2456932648b0STejun Heo * Perform reset 2457932648b0STejun Heo */ 2458dc98c32cSTejun Heo if (ata_is_host_link(link)) 2459dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2460dc98c32cSTejun Heo 2461341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 246231daabdaSTejun Heo 2463932648b0STejun Heo if (reset) { 2464c6fd2807SJeff Garzik if (verbose) 2465cc0680a5STejun Heo ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2466c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard"); 2467c6fd2807SJeff Garzik 2468c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 246919b72321STejun Heo ehc->last_reset = jiffies; 24700d64a233STejun Heo if (reset == hardreset) 24710d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 24720d64a233STejun Heo else 24730d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2474c6fd2807SJeff Garzik 2475b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2476b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2477b1c72916STejun Heo failed_link = link; 24785dbfc9cbSTejun Heo goto fail; 2479b1c72916STejun Heo } 2480c6fd2807SJeff Garzik 2481b1c72916STejun Heo /* hardreset slave link if existent */ 2482b1c72916STejun Heo if (slave && reset == hardreset) { 2483b1c72916STejun Heo int tmp; 2484b1c72916STejun Heo 2485b1c72916STejun Heo if (verbose) 2486b1c72916STejun Heo ata_link_printk(slave, KERN_INFO, 2487b1c72916STejun Heo "hard resetting link\n"); 2488b1c72916STejun Heo 2489b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2490b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2491b1c72916STejun Heo false); 2492b1c72916STejun Heo switch (tmp) { 2493b1c72916STejun Heo case -EAGAIN: 2494b1c72916STejun Heo rc = -EAGAIN; 2495b1c72916STejun Heo case 0: 2496b1c72916STejun Heo break; 2497b1c72916STejun Heo default: 2498b1c72916STejun Heo failed_link = slave; 2499b1c72916STejun Heo rc = tmp; 2500b1c72916STejun Heo goto fail; 2501b1c72916STejun Heo } 2502b1c72916STejun Heo } 2503b1c72916STejun Heo 2504b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2505c6fd2807SJeff Garzik if (reset == hardreset && 25065dbfc9cbSTejun Heo ata_eh_followup_srst_needed(link, rc, classes)) { 2507c6fd2807SJeff Garzik reset = softreset; 2508c6fd2807SJeff Garzik 2509c6fd2807SJeff Garzik if (!reset) { 
2510cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2511c6fd2807SJeff Garzik "follow-up softreset required " 2512c6fd2807SJeff Garzik "but no softreset available\n"); 2513b1c72916STejun Heo failed_link = link; 2514fccb6ea5STejun Heo rc = -EINVAL; 251508cf69d0STejun Heo goto fail; 2516c6fd2807SJeff Garzik } 2517c6fd2807SJeff Garzik 2518cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2519b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2520c6fd2807SJeff Garzik } 2521932648b0STejun Heo } else { 2522932648b0STejun Heo if (verbose) 2523932648b0STejun Heo ata_link_printk(link, KERN_INFO, "no reset method " 2524932648b0STejun Heo "available, skipping reset\n"); 2525932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2526932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2527932648b0STejun Heo } 2528008a7896STejun Heo 2529932648b0STejun Heo /* 2530932648b0STejun Heo * Post-reset processing 2531932648b0STejun Heo */ 25321eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2533416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2534416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2535416dc9edSTejun Heo * drives from sleeping mode. 2536c6fd2807SJeff Garzik */ 2537f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2538054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2539c6fd2807SJeff Garzik 2540816ab897STejun Heo if (!ata_phys_link_offline(ata_dev_phys_link(dev))) { 25414ccd3329STejun Heo /* apply class override */ 2542416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2543ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2544416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2545816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2546816ab897STejun Heo } else 2547816ab897STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2548ae791c05STejun Heo } 2549ae791c05STejun Heo 2550008a7896STejun Heo /* record current link speed */ 2551936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2552936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2553b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2554b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2555008a7896STejun Heo 2556dc98c32cSTejun Heo /* thaw the port */ 2557dc98c32cSTejun Heo if (ata_is_host_link(link)) 2558dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2559dc98c32cSTejun Heo 2560f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2561f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2562f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2563f046519fSTejun Heo * This introduces a race condition where hotplug occurs between 2564f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2565f046519fSTejun Heo * link onlineness and classification result later.
2566f046519fSTejun Heo */ 2567b1c72916STejun Heo if (postreset) { 2568cc0680a5STejun Heo postreset(link, classes); 2569b1c72916STejun Heo if (slave) 2570b1c72916STejun Heo postreset(slave, classes); 2571b1c72916STejun Heo } 2572c6fd2807SJeff Garzik 2573f046519fSTejun Heo /* clear cached SError */ 2574f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 2575f046519fSTejun Heo link->eh_info.serror = 0; 2576b1c72916STejun Heo if (slave) 2577b1c72916STejun Heo slave->eh_info.serror = 0; 2578f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2579f046519fSTejun Heo 2580f046519fSTejun Heo /* Make sure onlineness and classification result correspond. 2581f046519fSTejun Heo * Hotplug could have happened during reset and some 2582f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2583f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 2584f046519fSTejun Heo * link onlineness and classification result, those conditions 2585f046519fSTejun Heo * can be reliably detected and retried. 2586f046519fSTejun Heo */ 2587b1c72916STejun Heo nr_unknown = 0; 25881eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2589f046519fSTejun Heo /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2590b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2591f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2592b1c72916STejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) 2593b1c72916STejun Heo nr_unknown++; 2594b1c72916STejun Heo } 2595f046519fSTejun Heo } 2596f046519fSTejun Heo 2597b1c72916STejun Heo if (classify && nr_unknown) { 2598f046519fSTejun Heo if (try < max_tries) { 2599f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, "link online but " 2600f046519fSTejun Heo "device misclassified, retrying\n"); 2601b1c72916STejun Heo failed_link = link; 2602f046519fSTejun Heo rc = -EAGAIN; 2603f046519fSTejun Heo goto fail; 2604f046519fSTejun Heo } 2605f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, 2606f046519fSTejun Heo "link online but device misclassified, " 2607f046519fSTejun Heo "device detection might fail\n"); 2608f046519fSTejun Heo } 2609f046519fSTejun Heo 2610c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2611cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2612b1c72916STejun Heo if (slave) 2613b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 261419b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2615c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2616416dc9edSTejun Heo 2617416dc9edSTejun Heo rc = 0; 2618fccb6ea5STejun Heo out: 2619fccb6ea5STejun Heo /* clear hotplug flag */ 2620fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2621b1c72916STejun Heo if (slave) 2622b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2623afaa5c37STejun Heo 2624afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2625afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2626afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2627afaa5c37STejun Heo 2628c6fd2807SJeff Garzik return rc; 2629416dc9edSTejun Heo 2630416dc9edSTejun Heo fail: 26315958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 26325958e302STejun Heo if (!ata_is_host_link(link) && 26335958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 26345958e302STejun Heo rc = -ERESTART; 26355958e302STejun Heo 2636416dc9edSTejun Heo if (rc == -ERESTART || try >= max_tries) 2637416dc9edSTejun Heo goto out; 2638416dc9edSTejun Heo 
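/* reset failed but tries remain: sleep out what is left of this try's deadline, step the SATA link speed down before the final attempt or on -EPIPE, then retry, preferring hardreset if one is available */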
2639416dc9edSTejun Heo now = jiffies; 2640416dc9edSTejun Heo if (time_before(now, deadline)) { 2641416dc9edSTejun Heo unsigned long delta = deadline - now; 2642416dc9edSTejun Heo 2643b1c72916STejun Heo ata_link_printk(failed_link, KERN_WARNING, 26440a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 26450a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2646416dc9edSTejun Heo 2647416dc9edSTejun Heo while (delta) 2648416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2649416dc9edSTejun Heo } 2650416dc9edSTejun Heo 2651b1c72916STejun Heo if (try == max_tries - 1) { 2652a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2653b1c72916STejun Heo if (slave) 2654a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2655b1c72916STejun Heo } else if (rc == -EPIPE) 2656a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2657b1c72916STejun Heo 2658416dc9edSTejun Heo if (hardreset) 2659416dc9edSTejun Heo reset = hardreset; 2660416dc9edSTejun Heo goto retry; 2661c6fd2807SJeff Garzik } 2662c6fd2807SJeff Garzik 266345fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 266445fabbb7SElias Oltmanns { 266545fabbb7SElias Oltmanns struct ata_link *link; 266645fabbb7SElias Oltmanns struct ata_device *dev; 266745fabbb7SElias Oltmanns unsigned long flags; 266845fabbb7SElias Oltmanns 266945fabbb7SElias Oltmanns /* 267045fabbb7SElias Oltmanns * This function can be thought of as an extended version of 267145fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 267245fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 267345fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 267445fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 267545fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 267645fabbb7SElias Oltmanns * up park requests to other devices on the same port or 267745fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 267845fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 267945fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 268045fabbb7SElias Oltmanns * 268145fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 268245fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 268345fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 268445fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 268545fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for 268645fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 268745fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 268845fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 268945fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 269045fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 269145fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 269245fabbb7SElias Oltmanns * ata_eh_recover() again. 
269345fabbb7SElias Oltmanns */ 269445fabbb7SElias Oltmanns 269545fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 269645fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 26971eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 26981eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 269945fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 270045fabbb7SElias Oltmanns 270145fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 270245fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 270345fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 270445fabbb7SElias Oltmanns } 270545fabbb7SElias Oltmanns } 270645fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 270745fabbb7SElias Oltmanns } 270845fabbb7SElias Oltmanns 270945fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 271045fabbb7SElias Oltmanns { 271145fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 271245fabbb7SElias Oltmanns struct ata_taskfile tf; 271345fabbb7SElias Oltmanns unsigned int err_mask; 271445fabbb7SElias Oltmanns 271545fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 271645fabbb7SElias Oltmanns if (park) { 271745fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 271845fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 271945fabbb7SElias Oltmanns tf.feature = 0x44; 272045fabbb7SElias Oltmanns tf.lbal = 0x4c; 272145fabbb7SElias Oltmanns tf.lbam = 0x4e; 272245fabbb7SElias Oltmanns tf.lbah = 0x55; 272345fabbb7SElias Oltmanns } else { 272445fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 272545fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 272645fabbb7SElias Oltmanns } 272745fabbb7SElias Oltmanns 272845fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 272945fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 273045fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 273145fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 273245fabbb7SElias Oltmanns ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 273345fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 273445fabbb7SElias Oltmanns } 273545fabbb7SElias Oltmanns } 273645fabbb7SElias Oltmanns 27370260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2738c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2739c6fd2807SJeff Garzik { 27400260731fSTejun Heo struct ata_port *ap = link->ap; 27410260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2742c6fd2807SJeff Garzik struct ata_device *dev; 27438c3c52a8STejun Heo unsigned int new_mask = 0; 2744c6fd2807SJeff Garzik unsigned long flags; 2745f58229f8STejun Heo int rc = 0; 2746c6fd2807SJeff Garzik 2747c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2748c6fd2807SJeff Garzik 27498c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 27508c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 27518c3c52a8STejun Heo * device before the master device is identified. 
27528c3c52a8STejun Heo */ 27531eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2754f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2755f58229f8STejun Heo unsigned int readid_flags = 0; 2756c6fd2807SJeff Garzik 2757bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2758bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2759bff04647STejun Heo 27609666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2761633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2762633273a3STejun Heo 2763b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2764c6fd2807SJeff Garzik rc = -EIO; 27658c3c52a8STejun Heo goto err; 2766c6fd2807SJeff Garzik } 2767c6fd2807SJeff Garzik 27680260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2769422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2770422c9daaSTejun Heo readid_flags); 2771c6fd2807SJeff Garzik if (rc) 27728c3c52a8STejun Heo goto err; 2773c6fd2807SJeff Garzik 27740260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2775c6fd2807SJeff Garzik 2776baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2777baa1e78aSTejun Heo * transfer mode. 2778baa1e78aSTejun Heo */ 2779baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2780baa1e78aSTejun Heo 2781c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2782c6fd2807SJeff Garzik queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 2783c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2784c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2785c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2786c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2787c6fd2807SJeff Garzik 2788633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2789633273a3STejun Heo rc = sata_pmp_attach(dev); 2790633273a3STejun Heo else 2791633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2792633273a3STejun Heo readid_flags, dev->id); 27938c3c52a8STejun Heo switch (rc) { 27948c3c52a8STejun Heo case 0: 279599cf610aSTejun Heo /* clear error info accumulated during probe */ 279699cf610aSTejun Heo ata_ering_clear(&dev->ering); 2797f58229f8STejun Heo new_mask |= 1 << dev->devno; 27988c3c52a8STejun Heo break; 27998c3c52a8STejun Heo case -ENOENT: 280055a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 280155a8e2c8STejun Heo * device. No need to reset. Just 280255a8e2c8STejun Heo * thaw and kill the device. 280355a8e2c8STejun Heo */ 280455a8e2c8STejun Heo ata_eh_thaw_port(ap); 280555a8e2c8STejun Heo dev->class = ATA_DEV_UNKNOWN; 2806c6fd2807SJeff Garzik break; 28078c3c52a8STejun Heo default: 28088c3c52a8STejun Heo dev->class = ATA_DEV_UNKNOWN; 28098c3c52a8STejun Heo goto err; 28108c3c52a8STejun Heo } 28118c3c52a8STejun Heo } 2812c6fd2807SJeff Garzik } 2813c6fd2807SJeff Garzik 2814c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 281533267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 281633267325STejun Heo if (ap->ops->cable_detect) 2817c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 281833267325STejun Heo ata_force_cbl(ap); 281933267325STejun Heo } 2820c1c4e8d5STejun Heo 28218c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 28228c3c52a8STejun Heo * device detection messages backwards. 
28238c3c52a8STejun Heo */ 28241eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2825633273a3STejun Heo if (!(new_mask & (1 << dev->devno)) || 2826633273a3STejun Heo dev->class == ATA_DEV_PMP) 28278c3c52a8STejun Heo continue; 28288c3c52a8STejun Heo 28298c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 28308c3c52a8STejun Heo rc = ata_dev_configure(dev); 28318c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 28328c3c52a8STejun Heo if (rc) 28338c3c52a8STejun Heo goto err; 28348c3c52a8STejun Heo 2835c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2836c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 2837c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2838baa1e78aSTejun Heo 283955a8e2c8STejun Heo /* new device discovered, configure xfermode */ 2840baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2841c6fd2807SJeff Garzik } 2842c6fd2807SJeff Garzik 28438c3c52a8STejun Heo return 0; 28448c3c52a8STejun Heo 28458c3c52a8STejun Heo err: 2846c6fd2807SJeff Garzik *r_failed_dev = dev; 28478c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 2848c6fd2807SJeff Garzik return rc; 2849c6fd2807SJeff Garzik } 2850c6fd2807SJeff Garzik 28516f1d1e3aSTejun Heo /** 28526f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 28536f1d1e3aSTejun Heo * @link: link on which timings will be programmed 28546f1d1e3aSTejun Heo * @r_failed_dev: out parameter for failed device 28556f1d1e3aSTejun Heo * 28566f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 28576f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 28586f1d1e3aSTejun Heo * returned in @r_failed_dev. 28596f1d1e3aSTejun Heo * 28606f1d1e3aSTejun Heo * LOCKING: 28616f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 28626f1d1e3aSTejun Heo * 28636f1d1e3aSTejun Heo * RETURNS: 28646f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 28656f1d1e3aSTejun Heo */ 28666f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 28676f1d1e3aSTejun Heo { 28686f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 286900115e0fSTejun Heo struct ata_device *dev; 287000115e0fSTejun Heo int rc; 28716f1d1e3aSTejun Heo 287276326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 28731eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 287476326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 287576326ac1STejun Heo struct ata_ering_entry *ent; 287676326ac1STejun Heo 287776326ac1STejun Heo ent = ata_ering_top(&dev->ering); 287876326ac1STejun Heo if (ent) 287976326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 288076326ac1STejun Heo } 288176326ac1STejun Heo } 288276326ac1STejun Heo 28836f1d1e3aSTejun Heo /* has private set_mode?
*/ 28846f1d1e3aSTejun Heo if (ap->ops->set_mode) 288500115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 288600115e0fSTejun Heo else 288700115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 288800115e0fSTejun Heo 288900115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 28901eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 289100115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 289200115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 289300115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 289400115e0fSTejun Heo 289500115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 289600115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 289700115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 289800115e0fSTejun Heo } 289900115e0fSTejun Heo 290000115e0fSTejun Heo return rc; 29016f1d1e3aSTejun Heo } 29026f1d1e3aSTejun Heo 290311fc33daSTejun Heo /** 290411fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 290511fc33daSTejun Heo * @dev: ATAPI device to clear UA for 290611fc33daSTejun Heo * 290711fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 290811fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 290911fc33daSTejun Heo * function clears UA. 291011fc33daSTejun Heo * 291111fc33daSTejun Heo * LOCKING: 291211fc33daSTejun Heo * EH context (may sleep). 291311fc33daSTejun Heo * 291411fc33daSTejun Heo * RETURNS: 291511fc33daSTejun Heo * 0 on success, -errno on failure. 291611fc33daSTejun Heo */ 291711fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 291811fc33daSTejun Heo { 291911fc33daSTejun Heo int i; 292011fc33daSTejun Heo 292111fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 2922b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 292311fc33daSTejun Heo u8 sense_key = 0; 292411fc33daSTejun Heo unsigned int err_mask; 292511fc33daSTejun Heo 292611fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 292711fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 292811fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 292911fc33daSTejun Heo "failed (err_mask=0x%x)\n", err_mask); 293011fc33daSTejun Heo return -EIO; 293111fc33daSTejun Heo } 293211fc33daSTejun Heo 293311fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 293411fc33daSTejun Heo return 0; 293511fc33daSTejun Heo 293611fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 293711fc33daSTejun Heo if (err_mask) { 293811fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to clear " 293911fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 294011fc33daSTejun Heo return -EIO; 294111fc33daSTejun Heo } 294211fc33daSTejun Heo } 294311fc33daSTejun Heo 294411fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, 294511fc33daSTejun Heo "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 294611fc33daSTejun Heo 294711fc33daSTejun Heo return 0; 294811fc33daSTejun Heo } 294911fc33daSTejun Heo 29500260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link) 2951c6fd2807SJeff Garzik { 2952f58229f8STejun Heo struct ata_device *dev; 2953f58229f8STejun Heo int cnt = 0; 2954c6fd2807SJeff Garzik 29551eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 2956c6fd2807SJeff Garzik cnt++; 2957c6fd2807SJeff Garzik return cnt; 2958c6fd2807SJeff Garzik } 2959c6fd2807SJeff Garzik 29600260731fSTejun Heo static int 
ata_link_nr_vacant(struct ata_link *link) 2961c6fd2807SJeff Garzik { 2962f58229f8STejun Heo struct ata_device *dev; 2963f58229f8STejun Heo int cnt = 0; 2964c6fd2807SJeff Garzik 29651eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2966f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 2967c6fd2807SJeff Garzik cnt++; 2968c6fd2807SJeff Garzik return cnt; 2969c6fd2807SJeff Garzik } 2970c6fd2807SJeff Garzik 29710260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 2972c6fd2807SJeff Garzik { 2973672b2d65STejun Heo struct ata_port *ap = link->ap; 29740260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2975f58229f8STejun Heo struct ata_device *dev; 2976c6fd2807SJeff Garzik 2977f9df58cbSTejun Heo /* skip disabled links */ 2978f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 2979f9df58cbSTejun Heo return 1; 2980f9df58cbSTejun Heo 2981672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 2982672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 2983672b2d65STejun Heo return 0; 2984672b2d65STejun Heo 2985672b2d65STejun Heo /* reset at least once if reset is requested */ 2986672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 2987672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 2988c6fd2807SJeff Garzik return 0; 2989c6fd2807SJeff Garzik 2990c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 29911eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2992c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 2993c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 2994c6fd2807SJeff Garzik return 0; 2995c6fd2807SJeff Garzik } 2996c6fd2807SJeff Garzik 2997c6fd2807SJeff Garzik return 1; 2998c6fd2807SJeff Garzik } 2999c6fd2807SJeff Garzik 3000c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3001c2c7a89cSTejun Heo { 3002c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3003c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3004c2c7a89cSTejun Heo int *trials = void_arg; 3005c2c7a89cSTejun Heo 3006c2c7a89cSTejun Heo if (ent->timestamp < now - min(now, interval)) 3007c2c7a89cSTejun Heo return -1; 3008c2c7a89cSTejun Heo 3009c2c7a89cSTejun Heo (*trials)++; 3010c2c7a89cSTejun Heo return 0; 3011c2c7a89cSTejun Heo } 3012c2c7a89cSTejun Heo 301302c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 301402c05a27STejun Heo { 301502c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3016c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3017c2c7a89cSTejun Heo int trials = 0; 301802c05a27STejun Heo 301902c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 302002c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 302102c05a27STejun Heo return 0; 302202c05a27STejun Heo 302302c05a27STejun Heo ata_eh_detach_dev(dev); 302402c05a27STejun Heo ata_dev_init(dev); 302502c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3026cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 302700115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 302800115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 302902c05a27STejun Heo 3030c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3031c2c7a89cSTejun Heo * error mask used is irrelevant. 
Because a successful device 3032c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3033c2c7a89cSTejun Heo * there are consecutive failed probes. 3034c2c7a89cSTejun Heo * 3035c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3036c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3037c2c7a89cSTejun Heo * forced to 1.5Gbps. 3038c2c7a89cSTejun Heo * 3039c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3040c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3041c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3042c2c7a89cSTejun Heo */ 3043c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3044c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3045c2c7a89cSTejun Heo 3046c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3047c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3048c2c7a89cSTejun Heo 304902c05a27STejun Heo return 1; 305002c05a27STejun Heo } 305102c05a27STejun Heo 30529b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3053fee7ca72STejun Heo { 30549af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3055fee7ca72STejun Heo 3056cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3057cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3058cf9a590aSTejun Heo */ 3059cf9a590aSTejun Heo if (err != -EAGAIN) 3060fee7ca72STejun Heo ehc->tries[dev->devno]--; 3061fee7ca72STejun Heo 3062fee7ca72STejun Heo switch (err) { 3063fee7ca72STejun Heo case -ENODEV: 3064fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3065fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3066fee7ca72STejun Heo case -EINVAL: 3067fee7ca72STejun Heo /* give it just one more chance */ 3068fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3069fee7ca72STejun Heo case -EIO: 3070d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3071fee7ca72STejun Heo /* This is the last chance, better to slow 3072fee7ca72STejun Heo * down than lose it. 
3073fee7ca72STejun Heo */ 3074a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3075d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3076fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3077fee7ca72STejun Heo } 3078fee7ca72STejun Heo } 3079fee7ca72STejun Heo 3080fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3081fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3082fee7ca72STejun Heo ata_dev_disable(dev); 3083fee7ca72STejun Heo 3084fee7ca72STejun Heo /* detach if offline */ 3085b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3086fee7ca72STejun Heo ata_eh_detach_dev(dev); 3087fee7ca72STejun Heo 308802c05a27STejun Heo /* schedule probe if necessary */ 308987fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3090fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 309187fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 309287fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 309387fbc5a0STejun Heo } 30949b1e2658STejun Heo 30959b1e2658STejun Heo return 1; 3096fee7ca72STejun Heo } else { 3097cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 30989b1e2658STejun Heo return 0; 3099fee7ca72STejun Heo } 3100fee7ca72STejun Heo } 3101fee7ca72STejun Heo 3102c6fd2807SJeff Garzik /** 3103c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3104c6fd2807SJeff Garzik * @ap: host port to recover 3105c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3106c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3107c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3108c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 31099b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3110c6fd2807SJeff Garzik * 3111c6fd2807SJeff Garzik * This is the alpha and omega, yin and yang, heart and soul of 3112c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 31139b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 31149b1e2658STejun Heo * link's eh_context. This function executes all the operations 31159b1e2658STejun Heo * with appropriate retries and fallbacks to resurrect failed 3116c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3117c6fd2807SJeff Garzik * 3118c6fd2807SJeff Garzik * LOCKING: 3119c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3120c6fd2807SJeff Garzik * 3121c6fd2807SJeff Garzik * RETURNS: 3122c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3123c6fd2807SJeff Garzik */ 3124fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3125c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 31269b1e2658STejun Heo ata_postreset_fn_t postreset, 31279b1e2658STejun Heo struct ata_link **r_failed_link) 3128c6fd2807SJeff Garzik { 31299b1e2658STejun Heo struct ata_link *link; 3130c6fd2807SJeff Garzik struct ata_device *dev; 31310a2c0f56STejun Heo int nr_failed_devs; 3132dc98c32cSTejun Heo int rc; 313345fabbb7SElias Oltmanns unsigned long flags, deadline; 3134c6fd2807SJeff Garzik 3135c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3136c6fd2807SJeff Garzik 3137c6fd2807SJeff Garzik /* prep for recovery */ 31381eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 31399b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 31409b1e2658STejun Heo 3141f9df58cbSTejun Heo /* re-enable link?
*/ 3142f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3143f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3144f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3145f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3146f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3147f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3148f9df58cbSTejun Heo } 3149f9df58cbSTejun Heo 31501eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3151fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3152fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3153fd995f70STejun Heo else 3154c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3155c6fd2807SJeff Garzik 315679a55b72STejun Heo /* collect port action mask recorded in dev actions */ 31579b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 31589b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3159f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 316079a55b72STejun Heo 3161c6fd2807SJeff Garzik /* process hotplug request */ 3162c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3163c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3164c6fd2807SJeff Garzik 316502c05a27STejun Heo /* schedule probe if necessary */ 316602c05a27STejun Heo if (!ata_dev_enabled(dev)) 316702c05a27STejun Heo ata_eh_schedule_probe(dev); 3168c6fd2807SJeff Garzik } 31699b1e2658STejun Heo } 3170c6fd2807SJeff Garzik 3171c6fd2807SJeff Garzik retry: 3172c6fd2807SJeff Garzik rc = 0; 31739b1e2658STejun Heo nr_failed_devs = 0; 3174c6fd2807SJeff Garzik 3175c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3176c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3177c6fd2807SJeff Garzik goto out; 3178c6fd2807SJeff Garzik 31799b1e2658STejun Heo /* prep for EH */ 31801eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 31819b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 31829b1e2658STejun Heo 3183c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 31840260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3185c6fd2807SJeff Garzik ehc->i.action = 0; 3186c6fd2807SJeff Garzik 31871eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3188f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 31899b1e2658STejun Heo } 3190c6fd2807SJeff Garzik 3191c6fd2807SJeff Garzik /* reset */ 31921eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 31939b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 31949b1e2658STejun Heo 3195cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 31969b1e2658STejun Heo continue; 31979b1e2658STejun Heo 31989b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3199dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3200c6fd2807SJeff Garzik if (rc) { 32010260731fSTejun Heo ata_link_printk(link, KERN_ERR, 3202c6fd2807SJeff Garzik "reset failed, giving up\n"); 3203c6fd2807SJeff Garzik goto out; 3204c6fd2807SJeff Garzik } 32059b1e2658STejun Heo } 3206c6fd2807SJeff Garzik 320745fabbb7SElias Oltmanns do { 320845fabbb7SElias Oltmanns unsigned long now; 320945fabbb7SElias Oltmanns 321045fabbb7SElias Oltmanns /* 321145fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 321245fabbb7SElias Oltmanns * ap->park_req_pending 321345fabbb7SElias Oltmanns */ 321445fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 321545fabbb7SElias Oltmanns 321645fabbb7SElias Oltmanns deadline = jiffies; 32171eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 32181eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 321945fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 322045fabbb7SElias Oltmanns unsigned long tmp; 322145fabbb7SElias Oltmanns 322245fabbb7SElias Oltmanns if (dev->class != ATA_DEV_ATA) 322345fabbb7SElias Oltmanns continue; 322445fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 322545fabbb7SElias Oltmanns ATA_EH_PARK)) 322645fabbb7SElias Oltmanns continue; 322745fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 322845fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 322945fabbb7SElias Oltmanns deadline = tmp; 323045fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 323145fabbb7SElias Oltmanns continue; 323245fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 323345fabbb7SElias Oltmanns continue; 323445fabbb7SElias Oltmanns 323545fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 323645fabbb7SElias Oltmanns } 323745fabbb7SElias Oltmanns } 323845fabbb7SElias Oltmanns 323945fabbb7SElias Oltmanns now = jiffies; 324045fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 324145fabbb7SElias Oltmanns break; 324245fabbb7SElias Oltmanns 324345fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 324445fabbb7SElias Oltmanns deadline - now); 324545fabbb7SElias Oltmanns } while (deadline); 32461eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 32471eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 324845fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 324945fabbb7SElias Oltmanns (1 << dev->devno))) 325045fabbb7SElias Oltmanns continue; 325145fabbb7SElias Oltmanns 325245fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 325345fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 325445fabbb7SElias Oltmanns } 325545fabbb7SElias Oltmanns } 325645fabbb7SElias Oltmanns 32579b1e2658STejun Heo /* the rest */ 32581eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 32599b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 32609b1e2658STejun Heo 
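/* per-link recovery proper: revalidate/attach devices, hand off to PMP EH if a port multiplier was found, reprogram transfer modes, clear ATAPI UNIT ATTENTION after a reset and apply the link PM policy */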
3261c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 32620260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3263c6fd2807SJeff Garzik if (rc) 3264c6fd2807SJeff Garzik goto dev_fail; 3265c6fd2807SJeff Garzik 3266633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3267633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3268633273a3STejun Heo ehc->i.action = 0; 3269633273a3STejun Heo return 0; 3270633273a3STejun Heo } 3271633273a3STejun Heo 3272baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3273baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 32740260731fSTejun Heo rc = ata_set_mode(link, &dev); 32754ae72a1eSTejun Heo if (rc) 3276c6fd2807SJeff Garzik goto dev_fail; 3277baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3278c6fd2807SJeff Garzik } 3279c6fd2807SJeff Garzik 328011fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 328111fc33daSTejun Heo * disrupting the current users of the device. 328211fc33daSTejun Heo */ 328311fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 32841eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 328511fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 328611fc33daSTejun Heo continue; 328711fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 328811fc33daSTejun Heo if (rc) 328911fc33daSTejun Heo goto dev_fail; 329011fc33daSTejun Heo } 329111fc33daSTejun Heo } 329211fc33daSTejun Heo 329311fc33daSTejun Heo /* configure link power saving */ 32943ec25ebdSTejun Heo if (ehc->i.action & ATA_EH_LPM) 32951eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3296ca77329fSKristen Carlson Accardi ata_dev_enable_pm(dev, ap->pm_policy); 3297ca77329fSKristen Carlson Accardi 32989b1e2658STejun Heo /* this link is okay now */ 32999b1e2658STejun Heo ehc->i.flags = 0; 33009b1e2658STejun Heo continue; 3301c6fd2807SJeff Garzik 3302c6fd2807SJeff Garzik dev_fail: 33039b1e2658STejun Heo nr_failed_devs++; 33040a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3305c6fd2807SJeff Garzik 3306b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3307b06ce3e5STejun Heo /* PMP reset requires working host port. 3308b06ce3e5STejun Heo * Can't retry if it's frozen. 3309b06ce3e5STejun Heo */ 3310071f44b1STejun Heo if (sata_pmp_attached(ap)) 3311b06ce3e5STejun Heo goto out; 33129b1e2658STejun Heo break; 33139b1e2658STejun Heo } 3314b06ce3e5STejun Heo } 33159b1e2658STejun Heo 33160a2c0f56STejun Heo if (nr_failed_devs) 3317c6fd2807SJeff Garzik goto retry; 3318c6fd2807SJeff Garzik 3319c6fd2807SJeff Garzik out: 33209b1e2658STejun Heo if (rc && r_failed_link) 33219b1e2658STejun Heo *r_failed_link = link; 3322c6fd2807SJeff Garzik 3323c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3324c6fd2807SJeff Garzik return rc; 3325c6fd2807SJeff Garzik } 3326c6fd2807SJeff Garzik 3327c6fd2807SJeff Garzik /** 3328c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3329c6fd2807SJeff Garzik * @ap: host port to finish EH for 3330c6fd2807SJeff Garzik * 3331c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3332c6fd2807SJeff Garzik * failed qcs. 3333c6fd2807SJeff Garzik * 3334c6fd2807SJeff Garzik * LOCKING: 3335c6fd2807SJeff Garzik * None. 
3336c6fd2807SJeff Garzik */ 3337fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3338c6fd2807SJeff Garzik { 3339c6fd2807SJeff Garzik int tag; 3340c6fd2807SJeff Garzik 3341c6fd2807SJeff Garzik /* retry or finish qcs */ 3342c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3343c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3344c6fd2807SJeff Garzik 3345c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3346c6fd2807SJeff Garzik continue; 3347c6fd2807SJeff Garzik 3348c6fd2807SJeff Garzik if (qc->err_mask) { 3349c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3350c6fd2807SJeff Garzik * generate sense data in this function, 3351c6fd2807SJeff Garzik * considering both err_mask and tf. 3352c6fd2807SJeff Garzik */ 335303faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3354c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 335503faab78STejun Heo else 335603faab78STejun Heo ata_eh_qc_complete(qc); 3357c6fd2807SJeff Garzik } else { 3358c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3359c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3360c6fd2807SJeff Garzik } else { 3361c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3362c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3363c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3364c6fd2807SJeff Garzik } 3365c6fd2807SJeff Garzik } 3366c6fd2807SJeff Garzik } 3367da917d69STejun Heo 3368da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3369da917d69STejun Heo WARN_ON(ap->nr_active_links); 3370da917d69STejun Heo ap->nr_active_links = 0; 3371c6fd2807SJeff Garzik } 3372c6fd2807SJeff Garzik 3373c6fd2807SJeff Garzik /** 3374c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3375c6fd2807SJeff Garzik * @ap: host port to handle error for 3376a1efdabaSTejun Heo * 3377c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3378c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3379c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3380c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3381c6fd2807SJeff Garzik * 3382c6fd2807SJeff Garzik * Perform standard error handling sequence. 3383c6fd2807SJeff Garzik * 3384c6fd2807SJeff Garzik * LOCKING: 3385c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
3386c6fd2807SJeff Garzik */ 3387c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3388c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3389c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3390c6fd2807SJeff Garzik { 33919b1e2658STejun Heo struct ata_device *dev; 33929b1e2658STejun Heo int rc; 33939b1e2658STejun Heo 33949b1e2658STejun Heo ata_eh_autopsy(ap); 33959b1e2658STejun Heo ata_eh_report(ap); 33969b1e2658STejun Heo 33979b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 33989b1e2658STejun Heo NULL); 33999b1e2658STejun Heo if (rc) { 34001eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 34019b1e2658STejun Heo ata_dev_disable(dev); 34029b1e2658STejun Heo } 34039b1e2658STejun Heo 3404c6fd2807SJeff Garzik ata_eh_finish(ap); 3405c6fd2807SJeff Garzik } 3406c6fd2807SJeff Garzik 3407a1efdabaSTejun Heo /** 3408a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3409a1efdabaSTejun Heo * @ap: host port to handle error for 3410a1efdabaSTejun Heo * 3411a1efdabaSTejun Heo * Standard error handler 3412a1efdabaSTejun Heo * 3413a1efdabaSTejun Heo * LOCKING: 3414a1efdabaSTejun Heo * Kernel thread context (may sleep). 3415a1efdabaSTejun Heo */ 3416a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3417a1efdabaSTejun Heo { 3418a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3419a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3420a1efdabaSTejun Heo 342157c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 342257c9efdfSTejun Heo if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 3423a1efdabaSTejun Heo hardreset = NULL; 3424a1efdabaSTejun Heo 3425a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3426a1efdabaSTejun Heo } 3427a1efdabaSTejun Heo 34286ffa01d8STejun Heo #ifdef CONFIG_PM 3429c6fd2807SJeff Garzik /** 3430c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 3431c6fd2807SJeff Garzik * @ap: port to suspend 3432c6fd2807SJeff Garzik * 3433c6fd2807SJeff Garzik * Suspend @ap. 3434c6fd2807SJeff Garzik * 3435c6fd2807SJeff Garzik * LOCKING: 3436c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3437c6fd2807SJeff Garzik */ 3438c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 3439c6fd2807SJeff Garzik { 3440c6fd2807SJeff Garzik unsigned long flags; 3441c6fd2807SJeff Garzik int rc = 0; 3442c6fd2807SJeff Garzik 3443c6fd2807SJeff Garzik /* are we suspending? 
*/ 3444c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3445c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3446c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) { 3447c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3448c6fd2807SJeff Garzik return; 3449c6fd2807SJeff Garzik } 3450c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3451c6fd2807SJeff Garzik 3452c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3453c6fd2807SJeff Garzik 345464578a3dSTejun Heo /* tell ACPI we're suspending */ 345564578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 345664578a3dSTejun Heo if (rc) 345764578a3dSTejun Heo goto out; 345864578a3dSTejun Heo 3459c6fd2807SJeff Garzik /* suspend */ 3460c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 3461c6fd2807SJeff Garzik 3462c6fd2807SJeff Garzik if (ap->ops->port_suspend) 3463c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 3464c6fd2807SJeff Garzik 3465bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_SUSPEND); 346664578a3dSTejun Heo out: 3467c6fd2807SJeff Garzik /* report result */ 3468c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3469c6fd2807SJeff Garzik 3470c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 3471c6fd2807SJeff Garzik if (rc == 0) 3472c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 347364578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 3474c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 3475c6fd2807SJeff Garzik 3476c6fd2807SJeff Garzik if (ap->pm_result) { 3477c6fd2807SJeff Garzik *ap->pm_result = rc; 3478c6fd2807SJeff Garzik ap->pm_result = NULL; 3479c6fd2807SJeff Garzik } 3480c6fd2807SJeff Garzik 3481c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3482c6fd2807SJeff Garzik 3483c6fd2807SJeff Garzik return; 3484c6fd2807SJeff Garzik } 3485c6fd2807SJeff Garzik 3486c6fd2807SJeff Garzik /** 3487c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 3488c6fd2807SJeff Garzik * @ap: port to resume 3489c6fd2807SJeff Garzik * 3490c6fd2807SJeff Garzik * Resume @ap. 3491c6fd2807SJeff Garzik * 3492c6fd2807SJeff Garzik * LOCKING: 3493c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3494c6fd2807SJeff Garzik */ 3495c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 3496c6fd2807SJeff Garzik { 3497c6fd2807SJeff Garzik unsigned long flags; 34989666f400STejun Heo int rc = 0; 3499c6fd2807SJeff Garzik 3500c6fd2807SJeff Garzik /* are we resuming? 
*/ 3501c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3502c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3503c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) { 3504c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3505c6fd2807SJeff Garzik return; 3506c6fd2807SJeff Garzik } 3507c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3508c6fd2807SJeff Garzik 35099666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 3510c6fd2807SJeff Garzik 3511bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_ON); 3512bd3adca5SShaohua Li 3513c6fd2807SJeff Garzik if (ap->ops->port_resume) 3514c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 3515c6fd2807SJeff Garzik 35166746544cSTejun Heo /* tell ACPI that we're resuming */ 35176746544cSTejun Heo ata_acpi_on_resume(ap); 35186746544cSTejun Heo 35199666f400STejun Heo /* report result */ 3520c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3521c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3522c6fd2807SJeff Garzik if (ap->pm_result) { 3523c6fd2807SJeff Garzik *ap->pm_result = rc; 3524c6fd2807SJeff Garzik ap->pm_result = NULL; 3525c6fd2807SJeff Garzik } 3526c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3527c6fd2807SJeff Garzik } 35286ffa01d8STejun Heo #endif /* CONFIG_PM */ 3529
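/*
 * Usage sketch (illustrative only, not part of libata-eh.c): a low-level
 * driver normally gets this EH core either by inheriting ops that already
 * provide a standard ->error_handler (sata_port_ops and friends), or by
 * calling ata_do_eh() from its own handler with the reset methods it
 * supports.  The names "example_error_handler" and "example_port_ops"
 * below are made up for illustration; the libata symbols are real.
 *
 *	static void example_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, NULL,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.error_handler	= example_error_handler,
 *	};
 */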