1c6fd2807SJeff Garzik /* 2c6fd2807SJeff Garzik * libata-eh.c - libata error handling 3c6fd2807SJeff Garzik * 4c6fd2807SJeff Garzik * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5c6fd2807SJeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org 6c6fd2807SJeff Garzik * on emails. 7c6fd2807SJeff Garzik * 8c6fd2807SJeff Garzik * Copyright 2006 Tejun Heo <htejun@gmail.com> 9c6fd2807SJeff Garzik * 10c6fd2807SJeff Garzik * 11c6fd2807SJeff Garzik * This program is free software; you can redistribute it and/or 12c6fd2807SJeff Garzik * modify it under the terms of the GNU General Public License as 13c6fd2807SJeff Garzik * published by the Free Software Foundation; either version 2, or 14c6fd2807SJeff Garzik * (at your option) any later version. 15c6fd2807SJeff Garzik * 16c6fd2807SJeff Garzik * This program is distributed in the hope that it will be useful, 17c6fd2807SJeff Garzik * but WITHOUT ANY WARRANTY; without even the implied warranty of 18c6fd2807SJeff Garzik * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19c6fd2807SJeff Garzik * General Public License for more details. 20c6fd2807SJeff Garzik * 21c6fd2807SJeff Garzik * You should have received a copy of the GNU General Public License 22c6fd2807SJeff Garzik * along with this program; see the file COPYING. If not, write to 23c6fd2807SJeff Garzik * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 24c6fd2807SJeff Garzik * USA. 25c6fd2807SJeff Garzik * 26c6fd2807SJeff Garzik * 27c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 28c6fd2807SJeff Garzik * as Documentation/DocBook/libata.* 29c6fd2807SJeff Garzik * 30c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 31c6fd2807SJeff Garzik * http://www.sata-io.org/ 32c6fd2807SJeff Garzik * 33c6fd2807SJeff Garzik */ 34c6fd2807SJeff Garzik 35c6fd2807SJeff Garzik #include <linux/kernel.h> 36242f9dcbSJens Axboe #include <linux/blkdev.h> 372855568bSJeff Garzik #include <linux/pci.h> 38c6fd2807SJeff Garzik #include <scsi/scsi.h> 39c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 40c6fd2807SJeff Garzik #include <scsi/scsi_eh.h> 41c6fd2807SJeff Garzik #include <scsi/scsi_device.h> 42c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 436521148cSRobert Hancock #include <scsi/scsi_dbg.h> 44c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h" 45c6fd2807SJeff Garzik 46c6fd2807SJeff Garzik #include <linux/libata.h> 47c6fd2807SJeff Garzik 48c6fd2807SJeff Garzik #include "libata.h" 49c6fd2807SJeff Garzik 507d47e8d4STejun Heo enum { 513884f7b0STejun Heo /* speed down verdicts */ 527d47e8d4STejun Heo ATA_EH_SPDN_NCQ_OFF = (1 << 0), 537d47e8d4STejun Heo ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 547d47e8d4STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 5576326ac1STejun Heo ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 563884f7b0STejun Heo 573884f7b0STejun Heo /* error flags */ 583884f7b0STejun Heo ATA_EFLAG_IS_IO = (1 << 0), 5976326ac1STejun Heo ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 603884f7b0STejun Heo 613884f7b0STejun Heo /* error categories */ 623884f7b0STejun Heo ATA_ECAT_NONE = 0, 633884f7b0STejun Heo ATA_ECAT_ATA_BUS = 1, 643884f7b0STejun Heo ATA_ECAT_TOUT_HSM = 2, 653884f7b0STejun Heo ATA_ECAT_UNK_DEV = 3, 6675f9cafcSTejun Heo ATA_ECAT_DUBIOUS_NONE = 4, 6775f9cafcSTejun Heo ATA_ECAT_DUBIOUS_ATA_BUS = 5, 6875f9cafcSTejun Heo ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 6975f9cafcSTejun Heo ATA_ECAT_DUBIOUS_UNK_DEV = 7, 7075f9cafcSTejun Heo ATA_ECAT_NR = 8, 717d47e8d4STejun Heo 7287fbc5a0STejun Heo ATA_EH_CMD_DFL_TIMEOUT = 5000, 
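	/* ATA_EH_CMD_DFL_TIMEOUT above is the fallback, in milliseconds, that
	 * ata_internal_cmd_timeout() returns for internal commands which have
	 * no entry in ata_eh_cmd_timeout_table[].
	 */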
7387fbc5a0STejun Heo 740a2c0f56STejun Heo /* always put at least this amount of time between resets */ 750a2c0f56STejun Heo ATA_EH_RESET_COOL_DOWN = 5000, 760a2c0f56STejun Heo 77341c2c95STejun Heo /* Waiting in ->prereset can never be reliable. It's 78341c2c95STejun Heo * sometimes nice to wait there but it can't be depended upon; 79341c2c95STejun Heo * otherwise, we wouldn't be resetting. Just give it enough 80341c2c95STejun Heo * time for most drives to spin up. 8131daabdaSTejun Heo */ 82341c2c95STejun Heo ATA_EH_PRERESET_TIMEOUT = 10000, 83341c2c95STejun Heo ATA_EH_FASTDRAIN_INTERVAL = 3000, 8411fc33daSTejun Heo 8511fc33daSTejun Heo ATA_EH_UA_TRIES = 5, 86c2c7a89cSTejun Heo 87c2c7a89cSTejun Heo /* probe speed down parameters, see ata_eh_schedule_probe() */ 88c2c7a89cSTejun Heo ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 89c2c7a89cSTejun Heo ATA_EH_PROBE_TRIALS = 2, 9031daabdaSTejun Heo }; 9131daabdaSTejun Heo 9231daabdaSTejun Heo /* The following table determines how we sequence resets. Each entry 9331daabdaSTejun Heo * represents timeout for that try. The first try can be soft or 9431daabdaSTejun Heo * hardreset. All others are hardreset if available. In most cases 9531daabdaSTejun Heo * the first reset w/ 10sec timeout should succeed. Following entries 9631daabdaSTejun Heo * are mostly for error handling, hotplug and retarded devices. 9731daabdaSTejun Heo */ 9831daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = { 99341c2c95STejun Heo 10000, /* most drives spin up by 10sec */ 100341c2c95STejun Heo 10000, /* > 99% working drives spin up before 20sec */ 101341c2c95STejun Heo 35000, /* give > 30 secs of idleness for retarded devices */ 102341c2c95STejun Heo 5000, /* and sweet one last chance */ 103d8af0eb6STejun Heo ULONG_MAX, /* > 1 min has elapsed, give up */ 10431daabdaSTejun Heo }; 10531daabdaSTejun Heo 10687fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = { 10787fbc5a0STejun Heo 5000, /* covers > 99% of successes and not too boring on failures */ 10887fbc5a0STejun Heo 10000, /* combined time till here is enough even for media access */ 10987fbc5a0STejun Heo 30000, /* for true idiots */ 11087fbc5a0STejun Heo ULONG_MAX, 11187fbc5a0STejun Heo }; 11287fbc5a0STejun Heo 113*6013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = { 114*6013efd8STejun Heo 15000, /* be generous with flush */ 115*6013efd8STejun Heo 15000, /* ditto */ 116*6013efd8STejun Heo 30000, /* and even more generous */ 117*6013efd8STejun Heo ULONG_MAX, 118*6013efd8STejun Heo }; 119*6013efd8STejun Heo 12087fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = { 12187fbc5a0STejun Heo 5000, /* same rationale as identify timeout */ 12287fbc5a0STejun Heo 10000, /* ditto */ 12387fbc5a0STejun Heo /* but no merciful 30sec for other commands, it just isn't worth it */ 12487fbc5a0STejun Heo ULONG_MAX, 12587fbc5a0STejun Heo }; 12687fbc5a0STejun Heo 12787fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent { 12887fbc5a0STejun Heo const u8 *commands; 12987fbc5a0STejun Heo const unsigned long *timeouts; 13087fbc5a0STejun Heo }; 13187fbc5a0STejun Heo 13287fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal 13387fbc5a0STejun Heo * commands. Each table entry is a command class and matches the 13487fbc5a0STejun Heo * commands the entry applies to and the timeout table to use. 13587fbc5a0STejun Heo * 13687fbc5a0STejun Heo * On the retry after a command timed out, the next timeout value from 13787fbc5a0STejun Heo * the table is used. 
If the table doesn't contain further entries, 13887fbc5a0STejun Heo * the last value is used. 13987fbc5a0STejun Heo * 14087fbc5a0STejun Heo * ehc->cmd_timeout_idx keeps track of which timeout to use per 14187fbc5a0STejun Heo * command class, so if SET_FEATURES times out on the first try, the 14287fbc5a0STejun Heo * next try will use the second timeout value only for that class. 14387fbc5a0STejun Heo */ 14487fbc5a0STejun Heo #define CMDS(cmds...) (const u8 []){ cmds, 0 } 14587fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent 14687fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 14787fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 14887fbc5a0STejun Heo .timeouts = ata_eh_identify_timeouts, }, 14987fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 15087fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15187fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 15287fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15387fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_FEATURES), 15487fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15587fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 15687fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 157*6013efd8STejun Heo { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 158*6013efd8STejun Heo .timeouts = ata_eh_flush_timeouts }, 15987fbc5a0STejun Heo }; 16087fbc5a0STejun Heo #undef CMDS 16187fbc5a0STejun Heo 162c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap); 1636ffa01d8STejun Heo #ifdef CONFIG_PM 164c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap); 165c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap); 1666ffa01d8STejun Heo #else /* CONFIG_PM */ 1676ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap) 1686ffa01d8STejun Heo { } 1696ffa01d8STejun Heo 1706ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap) 1716ffa01d8STejun Heo { } 1726ffa01d8STejun Heo #endif /* CONFIG_PM */ 173c6fd2807SJeff Garzik 174b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 175b64bbc39STejun Heo va_list args) 176b64bbc39STejun Heo { 177b64bbc39STejun Heo ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 178b64bbc39STejun Heo ATA_EH_DESC_LEN - ehi->desc_len, 179b64bbc39STejun Heo fmt, args); 180b64bbc39STejun Heo } 181b64bbc39STejun Heo 182b64bbc39STejun Heo /** 183b64bbc39STejun Heo * __ata_ehi_push_desc - push error description without adding separator 184b64bbc39STejun Heo * @ehi: target EHI 185b64bbc39STejun Heo * @fmt: printf format string 186b64bbc39STejun Heo * 187b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 188b64bbc39STejun Heo * 189b64bbc39STejun Heo * LOCKING: 190b64bbc39STejun Heo * spin_lock_irqsave(host lock) 191b64bbc39STejun Heo */ 192b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 
193b64bbc39STejun Heo { 194b64bbc39STejun Heo va_list args; 195b64bbc39STejun Heo 196b64bbc39STejun Heo va_start(args, fmt); 197b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 198b64bbc39STejun Heo va_end(args); 199b64bbc39STejun Heo } 200b64bbc39STejun Heo 201b64bbc39STejun Heo /** 202b64bbc39STejun Heo * ata_ehi_push_desc - push error description with separator 203b64bbc39STejun Heo * @ehi: target EHI 204b64bbc39STejun Heo * @fmt: printf format string 205b64bbc39STejun Heo * 206b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 207b64bbc39STejun Heo * If @ehi->desc is not empty, ", " is added in-between. 208b64bbc39STejun Heo * 209b64bbc39STejun Heo * LOCKING: 210b64bbc39STejun Heo * spin_lock_irqsave(host lock) 211b64bbc39STejun Heo */ 212b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 213b64bbc39STejun Heo { 214b64bbc39STejun Heo va_list args; 215b64bbc39STejun Heo 216b64bbc39STejun Heo if (ehi->desc_len) 217b64bbc39STejun Heo __ata_ehi_push_desc(ehi, ", "); 218b64bbc39STejun Heo 219b64bbc39STejun Heo va_start(args, fmt); 220b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 221b64bbc39STejun Heo va_end(args); 222b64bbc39STejun Heo } 223b64bbc39STejun Heo 224b64bbc39STejun Heo /** 225b64bbc39STejun Heo * ata_ehi_clear_desc - clean error description 226b64bbc39STejun Heo * @ehi: target EHI 227b64bbc39STejun Heo * 228b64bbc39STejun Heo * Clear @ehi->desc. 229b64bbc39STejun Heo * 230b64bbc39STejun Heo * LOCKING: 231b64bbc39STejun Heo * spin_lock_irqsave(host lock) 232b64bbc39STejun Heo */ 233b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi) 234b64bbc39STejun Heo { 235b64bbc39STejun Heo ehi->desc[0] = '\0'; 236b64bbc39STejun Heo ehi->desc_len = 0; 237b64bbc39STejun Heo } 238b64bbc39STejun Heo 239cbcdd875STejun Heo /** 240cbcdd875STejun Heo * ata_port_desc - append port description 241cbcdd875STejun Heo * @ap: target ATA port 242cbcdd875STejun Heo * @fmt: printf format string 243cbcdd875STejun Heo * 244cbcdd875STejun Heo * Format string according to @fmt and append it to port 245cbcdd875STejun Heo * description. If port description is not empty, " " is added 246cbcdd875STejun Heo * in-between. This function is to be used while initializing 247cbcdd875STejun Heo * ata_host. The description is printed on host registration. 248cbcdd875STejun Heo * 249cbcdd875STejun Heo * LOCKING: 250cbcdd875STejun Heo * None. 251cbcdd875STejun Heo */ 252cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 
253cbcdd875STejun Heo { 254cbcdd875STejun Heo va_list args; 255cbcdd875STejun Heo 256cbcdd875STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 257cbcdd875STejun Heo 258cbcdd875STejun Heo if (ap->link.eh_info.desc_len) 259cbcdd875STejun Heo __ata_ehi_push_desc(&ap->link.eh_info, " "); 260cbcdd875STejun Heo 261cbcdd875STejun Heo va_start(args, fmt); 262cbcdd875STejun Heo __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 263cbcdd875STejun Heo va_end(args); 264cbcdd875STejun Heo } 265cbcdd875STejun Heo 266cbcdd875STejun Heo #ifdef CONFIG_PCI 267cbcdd875STejun Heo 268cbcdd875STejun Heo /** 269cbcdd875STejun Heo * ata_port_pbar_desc - append PCI BAR description 270cbcdd875STejun Heo * @ap: target ATA port 271cbcdd875STejun Heo * @bar: target PCI BAR 272cbcdd875STejun Heo * @offset: offset into PCI BAR 273cbcdd875STejun Heo * @name: name of the area 274cbcdd875STejun Heo * 275cbcdd875STejun Heo * If @offset is negative, this function formats a string which 276cbcdd875STejun Heo * contains the name, address, size and type of the BAR and 277cbcdd875STejun Heo * appends it to the port description. If @offset is zero or 278cbcdd875STejun Heo * positive, only name and offsetted address is appended. 279cbcdd875STejun Heo * 280cbcdd875STejun Heo * LOCKING: 281cbcdd875STejun Heo * None. 282cbcdd875STejun Heo */ 283cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 284cbcdd875STejun Heo const char *name) 285cbcdd875STejun Heo { 286cbcdd875STejun Heo struct pci_dev *pdev = to_pci_dev(ap->host->dev); 287cbcdd875STejun Heo char *type = ""; 288cbcdd875STejun Heo unsigned long long start, len; 289cbcdd875STejun Heo 290cbcdd875STejun Heo if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 291cbcdd875STejun Heo type = "m"; 292cbcdd875STejun Heo else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 293cbcdd875STejun Heo type = "i"; 294cbcdd875STejun Heo 295cbcdd875STejun Heo start = (unsigned long long)pci_resource_start(pdev, bar); 296cbcdd875STejun Heo len = (unsigned long long)pci_resource_len(pdev, bar); 297cbcdd875STejun Heo 298cbcdd875STejun Heo if (offset < 0) 299cbcdd875STejun Heo ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 300cbcdd875STejun Heo else 301e6a73ab1SAndrew Morton ata_port_desc(ap, "%s 0x%llx", name, 302e6a73ab1SAndrew Morton start + (unsigned long long)offset); 303cbcdd875STejun Heo } 304cbcdd875STejun Heo 305cbcdd875STejun Heo #endif /* CONFIG_PCI */ 306cbcdd875STejun Heo 30787fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd) 30887fbc5a0STejun Heo { 30987fbc5a0STejun Heo int i; 31087fbc5a0STejun Heo 31187fbc5a0STejun Heo for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 31287fbc5a0STejun Heo const u8 *cur; 31387fbc5a0STejun Heo 31487fbc5a0STejun Heo for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 31587fbc5a0STejun Heo if (*cur == cmd) 31687fbc5a0STejun Heo return i; 31787fbc5a0STejun Heo } 31887fbc5a0STejun Heo 31987fbc5a0STejun Heo return -1; 32087fbc5a0STejun Heo } 32187fbc5a0STejun Heo 32287fbc5a0STejun Heo /** 32387fbc5a0STejun Heo * ata_internal_cmd_timeout - determine timeout for an internal command 32487fbc5a0STejun Heo * @dev: target device 32587fbc5a0STejun Heo * @cmd: internal command to be issued 32687fbc5a0STejun Heo * 32787fbc5a0STejun Heo * Determine timeout for internal command @cmd for @dev. 32887fbc5a0STejun Heo * 32987fbc5a0STejun Heo * LOCKING: 33087fbc5a0STejun Heo * EH context. 
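 *
 *	For example, an EH-issued IDENTIFY gets 5 seconds on the first try;
 *	if that times out, ata_internal_cmd_timed_out() advances the
 *	per-class index so the retry gets 10 and then 30 seconds (see
 *	ata_eh_identify_timeouts[] above).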
33187fbc5a0STejun Heo * 33287fbc5a0STejun Heo * RETURNS: 33387fbc5a0STejun Heo * Determined timeout. 33487fbc5a0STejun Heo */ 33587fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 33687fbc5a0STejun Heo { 33787fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 33887fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 33987fbc5a0STejun Heo int idx; 34087fbc5a0STejun Heo 34187fbc5a0STejun Heo if (ent < 0) 34287fbc5a0STejun Heo return ATA_EH_CMD_DFL_TIMEOUT; 34387fbc5a0STejun Heo 34487fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 34587fbc5a0STejun Heo return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 34687fbc5a0STejun Heo } 34787fbc5a0STejun Heo 34887fbc5a0STejun Heo /** 34987fbc5a0STejun Heo * ata_internal_cmd_timed_out - notification for internal command timeout 35087fbc5a0STejun Heo * @dev: target device 35187fbc5a0STejun Heo * @cmd: internal command which timed out 35287fbc5a0STejun Heo * 35387fbc5a0STejun Heo * Notify EH that internal command @cmd for @dev timed out. This 35487fbc5a0STejun Heo * function should be called only for commands whose timeouts are 35587fbc5a0STejun Heo * determined using ata_internal_cmd_timeout(). 35687fbc5a0STejun Heo * 35787fbc5a0STejun Heo * LOCKING: 35887fbc5a0STejun Heo * EH context. 35987fbc5a0STejun Heo */ 36087fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 36187fbc5a0STejun Heo { 36287fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 36387fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 36487fbc5a0STejun Heo int idx; 36587fbc5a0STejun Heo 36687fbc5a0STejun Heo if (ent < 0) 36787fbc5a0STejun Heo return; 36887fbc5a0STejun Heo 36987fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 37087fbc5a0STejun Heo if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) 37187fbc5a0STejun Heo ehc->cmd_timeout_idx[dev->devno][ent]++; 37287fbc5a0STejun Heo } 37387fbc5a0STejun Heo 3743884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 375c6fd2807SJeff Garzik unsigned int err_mask) 376c6fd2807SJeff Garzik { 377c6fd2807SJeff Garzik struct ata_ering_entry *ent; 378c6fd2807SJeff Garzik 379c6fd2807SJeff Garzik WARN_ON(!err_mask); 380c6fd2807SJeff Garzik 381c6fd2807SJeff Garzik ering->cursor++; 382c6fd2807SJeff Garzik ering->cursor %= ATA_ERING_SIZE; 383c6fd2807SJeff Garzik 384c6fd2807SJeff Garzik ent = &ering->ring[ering->cursor]; 3853884f7b0STejun Heo ent->eflags = eflags; 386c6fd2807SJeff Garzik ent->err_mask = err_mask; 387c6fd2807SJeff Garzik ent->timestamp = get_jiffies_64(); 388c6fd2807SJeff Garzik } 389c6fd2807SJeff Garzik 39076326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 39176326ac1STejun Heo { 39276326ac1STejun Heo struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 39376326ac1STejun Heo 39476326ac1STejun Heo if (ent->err_mask) 39576326ac1STejun Heo return ent; 39676326ac1STejun Heo return NULL; 39776326ac1STejun Heo } 39876326ac1STejun Heo 3997d47e8d4STejun Heo static void ata_ering_clear(struct ata_ering *ering) 400c6fd2807SJeff Garzik { 4017d47e8d4STejun Heo memset(ering, 0, sizeof(*ering)); 402c6fd2807SJeff Garzik } 403c6fd2807SJeff Garzik 404c6fd2807SJeff Garzik static int ata_ering_map(struct ata_ering *ering, 405c6fd2807SJeff Garzik int (*map_fn)(struct ata_ering_entry *, void *), 406c6fd2807SJeff Garzik void *arg) 407c6fd2807SJeff Garzik { 408c6fd2807SJeff Garzik int idx, rc = 0; 409c6fd2807SJeff Garzik 
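	/* Walk the ring from the most recent entry backwards; stop at the
	 * first unused slot or as soon as map_fn() returns non-zero.
	 */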
struct ata_ering_entry *ent; 410c6fd2807SJeff Garzik 411c6fd2807SJeff Garzik idx = ering->cursor; 412c6fd2807SJeff Garzik do { 413c6fd2807SJeff Garzik ent = &ering->ring[idx]; 414c6fd2807SJeff Garzik if (!ent->err_mask) 415c6fd2807SJeff Garzik break; 416c6fd2807SJeff Garzik rc = map_fn(ent, arg); 417c6fd2807SJeff Garzik if (rc) 418c6fd2807SJeff Garzik break; 419c6fd2807SJeff Garzik idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 420c6fd2807SJeff Garzik } while (idx != ering->cursor); 421c6fd2807SJeff Garzik 422c6fd2807SJeff Garzik return rc; 423c6fd2807SJeff Garzik } 424c6fd2807SJeff Garzik 425c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev) 426c6fd2807SJeff Garzik { 4279af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 428c6fd2807SJeff Garzik 429c6fd2807SJeff Garzik return ehc->i.action | ehc->i.dev_action[dev->devno]; 430c6fd2807SJeff Garzik } 431c6fd2807SJeff Garzik 432f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 433c6fd2807SJeff Garzik struct ata_eh_info *ehi, unsigned int action) 434c6fd2807SJeff Garzik { 435f58229f8STejun Heo struct ata_device *tdev; 436c6fd2807SJeff Garzik 437c6fd2807SJeff Garzik if (!dev) { 438c6fd2807SJeff Garzik ehi->action &= ~action; 4391eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 440f58229f8STejun Heo ehi->dev_action[tdev->devno] &= ~action; 441c6fd2807SJeff Garzik } else { 442c6fd2807SJeff Garzik /* doesn't make sense for port-wide EH actions */ 443c6fd2807SJeff Garzik WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 444c6fd2807SJeff Garzik 445c6fd2807SJeff Garzik /* break ehi->action into ehi->dev_action */ 446c6fd2807SJeff Garzik if (ehi->action & action) { 4471eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 448f58229f8STejun Heo ehi->dev_action[tdev->devno] |= 449f58229f8STejun Heo ehi->action & action; 450c6fd2807SJeff Garzik ehi->action &= ~action; 451c6fd2807SJeff Garzik } 452c6fd2807SJeff Garzik 453c6fd2807SJeff Garzik /* turn off the specified per-dev action */ 454c6fd2807SJeff Garzik ehi->dev_action[dev->devno] &= ~action; 455c6fd2807SJeff Garzik } 456c6fd2807SJeff Garzik } 457c6fd2807SJeff Garzik 458c6fd2807SJeff Garzik /** 459c6fd2807SJeff Garzik * ata_scsi_timed_out - SCSI layer time out callback 460c6fd2807SJeff Garzik * @cmd: timed out SCSI command 461c6fd2807SJeff Garzik * 462c6fd2807SJeff Garzik * Handles SCSI layer timeout. We race with normal completion of 463c6fd2807SJeff Garzik * the qc for @cmd. If the qc is already gone, we lose and let 464c6fd2807SJeff Garzik * the scsi command finish (EH_HANDLED). Otherwise, the qc has 465c6fd2807SJeff Garzik * timed out and EH should be invoked. Prevent ata_qc_complete() 466c6fd2807SJeff Garzik * from finishing it by setting EH_SCHEDULED and return 467c6fd2807SJeff Garzik * EH_NOT_HANDLED. 468c6fd2807SJeff Garzik * 469c6fd2807SJeff Garzik * TODO: kill this function once old EH is gone. 
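 *
 *	For drivers using the new EH (->error_handler set), this simply
 *	returns BLK_EH_NOT_HANDLED so the timed out command is escalated to
 *	the SCSI EH thread and handled by ata_scsi_error().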
470c6fd2807SJeff Garzik * 471c6fd2807SJeff Garzik * LOCKING: 472c6fd2807SJeff Garzik * Called from timer context 473c6fd2807SJeff Garzik * 474c6fd2807SJeff Garzik * RETURNS: 475c6fd2807SJeff Garzik * EH_HANDLED or EH_NOT_HANDLED 476c6fd2807SJeff Garzik */ 477242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 478c6fd2807SJeff Garzik { 479c6fd2807SJeff Garzik struct Scsi_Host *host = cmd->device->host; 480c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 481c6fd2807SJeff Garzik unsigned long flags; 482c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 483242f9dcbSJens Axboe enum blk_eh_timer_return ret; 484c6fd2807SJeff Garzik 485c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 486c6fd2807SJeff Garzik 487c6fd2807SJeff Garzik if (ap->ops->error_handler) { 488242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 489c6fd2807SJeff Garzik goto out; 490c6fd2807SJeff Garzik } 491c6fd2807SJeff Garzik 492242f9dcbSJens Axboe ret = BLK_EH_HANDLED; 493c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4949af5c9c9STejun Heo qc = ata_qc_from_tag(ap, ap->link.active_tag); 495c6fd2807SJeff Garzik if (qc) { 496c6fd2807SJeff Garzik WARN_ON(qc->scsicmd != cmd); 497c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 498c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 499242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 500c6fd2807SJeff Garzik } 501c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 502c6fd2807SJeff Garzik 503c6fd2807SJeff Garzik out: 504c6fd2807SJeff Garzik DPRINTK("EXIT, ret=%d\n", ret); 505c6fd2807SJeff Garzik return ret; 506c6fd2807SJeff Garzik } 507c6fd2807SJeff Garzik 508ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap) 509ece180d1STejun Heo { 510ece180d1STejun Heo struct ata_link *link; 511ece180d1STejun Heo struct ata_device *dev; 512ece180d1STejun Heo unsigned long flags; 513ece180d1STejun Heo 514ece180d1STejun Heo /* Restore SControl IPM and SPD for the next driver and 515ece180d1STejun Heo * disable attached devices. 516ece180d1STejun Heo */ 517ece180d1STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 518ece180d1STejun Heo sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); 519ece180d1STejun Heo ata_for_each_dev(dev, link, ALL) 520ece180d1STejun Heo ata_dev_disable(dev); 521ece180d1STejun Heo } 522ece180d1STejun Heo 523ece180d1STejun Heo /* freeze and set UNLOADED */ 524ece180d1STejun Heo spin_lock_irqsave(ap->lock, flags); 525ece180d1STejun Heo 526ece180d1STejun Heo ata_port_freeze(ap); /* won't be thawed */ 527ece180d1STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 528ece180d1STejun Heo ap->pflags |= ATA_PFLAG_UNLOADED; 529ece180d1STejun Heo 530ece180d1STejun Heo spin_unlock_irqrestore(ap->lock, flags); 531ece180d1STejun Heo } 532ece180d1STejun Heo 533c6fd2807SJeff Garzik /** 534c6fd2807SJeff Garzik * ata_scsi_error - SCSI layer error handler callback 535c6fd2807SJeff Garzik * @host: SCSI host on which error occurred 536c6fd2807SJeff Garzik * 537c6fd2807SJeff Garzik * Handles SCSI-layer-thrown error events. 538c6fd2807SJeff Garzik * 539c6fd2807SJeff Garzik * LOCKING: 540c6fd2807SJeff Garzik * Inherited from SCSI layer (none, can sleep) 541c6fd2807SJeff Garzik * 542c6fd2807SJeff Garzik * RETURNS: 543c6fd2807SJeff Garzik * Zero. 
544c6fd2807SJeff Garzik */ 545c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host) 546c6fd2807SJeff Garzik { 547c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 548a1e10f7eSTejun Heo int i; 549c6fd2807SJeff Garzik unsigned long flags; 550c6fd2807SJeff Garzik 551c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 552c6fd2807SJeff Garzik 553c6fd2807SJeff Garzik /* synchronize with port task */ 554c6fd2807SJeff Garzik ata_port_flush_task(ap); 555c6fd2807SJeff Garzik 556cca3974eSJeff Garzik /* synchronize with host lock and sort out timeouts */ 557c6fd2807SJeff Garzik 558c6fd2807SJeff Garzik /* For new EH, all qcs are finished in one of three ways - 559c6fd2807SJeff Garzik * normal completion, error completion, and SCSI timeout. 560c96f1732SAlan Cox * Both completions can race against SCSI timeout. When normal 561c6fd2807SJeff Garzik * completion wins, the qc never reaches EH. When error 562c6fd2807SJeff Garzik * completion wins, the qc has ATA_QCFLAG_FAILED set. 563c6fd2807SJeff Garzik * 564c6fd2807SJeff Garzik * When SCSI timeout wins, things are a bit more complex. 565c6fd2807SJeff Garzik * Normal or error completion can occur after the timeout but 566c6fd2807SJeff Garzik * before this point. In such cases, both types of 567c6fd2807SJeff Garzik * completions are honored. A scmd is determined to have 568c6fd2807SJeff Garzik * timed out iff its associated qc is active and not failed. 569c6fd2807SJeff Garzik */ 570c6fd2807SJeff Garzik if (ap->ops->error_handler) { 571c6fd2807SJeff Garzik struct scsi_cmnd *scmd, *tmp; 572c6fd2807SJeff Garzik int nr_timedout = 0; 573c6fd2807SJeff Garzik 574c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 575c6fd2807SJeff Garzik 576c96f1732SAlan Cox /* This must occur under the ap->lock as we don't want 577c96f1732SAlan Cox a polled recovery to race the real interrupt handler 578c96f1732SAlan Cox 579c96f1732SAlan Cox The lost_interrupt handler checks for any completed but 580c96f1732SAlan Cox non-notified command and completes much like an IRQ handler. 581c96f1732SAlan Cox 582c96f1732SAlan Cox We then fall into the error recovery code which will treat 583c96f1732SAlan Cox this as if normal completion won the race */ 584c96f1732SAlan Cox 585c96f1732SAlan Cox if (ap->ops->lost_interrupt) 586c96f1732SAlan Cox ap->ops->lost_interrupt(ap); 587c96f1732SAlan Cox 588c6fd2807SJeff Garzik list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 589c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 590c6fd2807SJeff Garzik 591c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE; i++) { 592c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, i); 593c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE && 594c6fd2807SJeff Garzik qc->scsicmd == scmd) 595c6fd2807SJeff Garzik break; 596c6fd2807SJeff Garzik } 597c6fd2807SJeff Garzik 598c6fd2807SJeff Garzik if (i < ATA_MAX_QUEUE) { 599c6fd2807SJeff Garzik /* the scmd has an associated qc */ 600c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) { 601c6fd2807SJeff Garzik /* which hasn't failed yet, timeout */ 602c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 603c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 604c6fd2807SJeff Garzik nr_timedout++; 605c6fd2807SJeff Garzik } 606c6fd2807SJeff Garzik } else { 607c6fd2807SJeff Garzik /* Normal completion occurred after 608c6fd2807SJeff Garzik * SCSI timeout but before this point. 609c6fd2807SJeff Garzik * Successfully complete it. 
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point on but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we have merely raced with normal completion and there is nothing
	   to recover (nr_timedout == 0), why exactly are we doing error recovery? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
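 *
 *	A minimal usage sketch (hypothetical caller): kick EH for the port
 *	and then wait for it to finish:
 *
 *		spin_lock_irqsave(ap->lock, flags);
 *		ata_port_schedule_eh(ap);
 *		spin_unlock_irqrestore(ap->lock, flags);
 *		ata_port_wait_eh(ap);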
754c6fd2807SJeff Garzik */ 755c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap) 756c6fd2807SJeff Garzik { 757c6fd2807SJeff Garzik unsigned long flags; 758c6fd2807SJeff Garzik DEFINE_WAIT(wait); 759c6fd2807SJeff Garzik 760c6fd2807SJeff Garzik retry: 761c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 762c6fd2807SJeff Garzik 763c6fd2807SJeff Garzik while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 764c6fd2807SJeff Garzik prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 765c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 766c6fd2807SJeff Garzik schedule(); 767c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 768c6fd2807SJeff Garzik } 769c6fd2807SJeff Garzik finish_wait(&ap->eh_wait_q, &wait); 770c6fd2807SJeff Garzik 771c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 772c6fd2807SJeff Garzik 773c6fd2807SJeff Garzik /* make sure SCSI EH is complete */ 774cca3974eSJeff Garzik if (scsi_host_in_recovery(ap->scsi_host)) { 775c6fd2807SJeff Garzik msleep(10); 776c6fd2807SJeff Garzik goto retry; 777c6fd2807SJeff Garzik } 778c6fd2807SJeff Garzik } 779c6fd2807SJeff Garzik 7805ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap) 7815ddf24c5STejun Heo { 7825ddf24c5STejun Heo unsigned int tag; 7835ddf24c5STejun Heo int nr = 0; 7845ddf24c5STejun Heo 7855ddf24c5STejun Heo /* count only non-internal commands */ 7865ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 7875ddf24c5STejun Heo if (ata_qc_from_tag(ap, tag)) 7885ddf24c5STejun Heo nr++; 7895ddf24c5STejun Heo 7905ddf24c5STejun Heo return nr; 7915ddf24c5STejun Heo } 7925ddf24c5STejun Heo 7935ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg) 7945ddf24c5STejun Heo { 7955ddf24c5STejun Heo struct ata_port *ap = (void *)arg; 7965ddf24c5STejun Heo unsigned long flags; 7975ddf24c5STejun Heo int cnt; 7985ddf24c5STejun Heo 7995ddf24c5STejun Heo spin_lock_irqsave(ap->lock, flags); 8005ddf24c5STejun Heo 8015ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8025ddf24c5STejun Heo 8035ddf24c5STejun Heo /* are we done? */ 8045ddf24c5STejun Heo if (!cnt) 8055ddf24c5STejun Heo goto out_unlock; 8065ddf24c5STejun Heo 8075ddf24c5STejun Heo if (cnt == ap->fastdrain_cnt) { 8085ddf24c5STejun Heo unsigned int tag; 8095ddf24c5STejun Heo 8105ddf24c5STejun Heo /* No progress during the last interval, tag all 8115ddf24c5STejun Heo * in-flight qcs as timed out and freeze the port. 
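		 * Freezing also aborts them (ata_port_freeze() below), so EH
		 * takes over right away instead of waiting for each command
		 * to time out individually.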
8125ddf24c5STejun Heo */ 8135ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { 8145ddf24c5STejun Heo struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 8155ddf24c5STejun Heo if (qc) 8165ddf24c5STejun Heo qc->err_mask |= AC_ERR_TIMEOUT; 8175ddf24c5STejun Heo } 8185ddf24c5STejun Heo 8195ddf24c5STejun Heo ata_port_freeze(ap); 8205ddf24c5STejun Heo } else { 8215ddf24c5STejun Heo /* some qcs have finished, give it another chance */ 8225ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 8235ddf24c5STejun Heo ap->fastdrain_timer.expires = 824341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8255ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8265ddf24c5STejun Heo } 8275ddf24c5STejun Heo 8285ddf24c5STejun Heo out_unlock: 8295ddf24c5STejun Heo spin_unlock_irqrestore(ap->lock, flags); 8305ddf24c5STejun Heo } 8315ddf24c5STejun Heo 8325ddf24c5STejun Heo /** 8335ddf24c5STejun Heo * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 8345ddf24c5STejun Heo * @ap: target ATA port 8355ddf24c5STejun Heo * @fastdrain: activate fast drain 8365ddf24c5STejun Heo * 8375ddf24c5STejun Heo * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 8385ddf24c5STejun Heo * is non-zero and EH wasn't pending before. Fast drain ensures 8395ddf24c5STejun Heo * that EH kicks in in timely manner. 8405ddf24c5STejun Heo * 8415ddf24c5STejun Heo * LOCKING: 8425ddf24c5STejun Heo * spin_lock_irqsave(host lock) 8435ddf24c5STejun Heo */ 8445ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 8455ddf24c5STejun Heo { 8465ddf24c5STejun Heo int cnt; 8475ddf24c5STejun Heo 8485ddf24c5STejun Heo /* already scheduled? */ 8495ddf24c5STejun Heo if (ap->pflags & ATA_PFLAG_EH_PENDING) 8505ddf24c5STejun Heo return; 8515ddf24c5STejun Heo 8525ddf24c5STejun Heo ap->pflags |= ATA_PFLAG_EH_PENDING; 8535ddf24c5STejun Heo 8545ddf24c5STejun Heo if (!fastdrain) 8555ddf24c5STejun Heo return; 8565ddf24c5STejun Heo 8575ddf24c5STejun Heo /* do we have in-flight qcs? */ 8585ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8595ddf24c5STejun Heo if (!cnt) 8605ddf24c5STejun Heo return; 8615ddf24c5STejun Heo 8625ddf24c5STejun Heo /* activate fast drain */ 8635ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 864341c2c95STejun Heo ap->fastdrain_timer.expires = 865341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8665ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8675ddf24c5STejun Heo } 8685ddf24c5STejun Heo 869c6fd2807SJeff Garzik /** 870c6fd2807SJeff Garzik * ata_qc_schedule_eh - schedule qc for error handling 871c6fd2807SJeff Garzik * @qc: command to schedule error handling for 872c6fd2807SJeff Garzik * 873c6fd2807SJeff Garzik * Schedule error handling for @qc. EH will kick in as soon as 874c6fd2807SJeff Garzik * other commands are drained. 875c6fd2807SJeff Garzik * 876c6fd2807SJeff Garzik * LOCKING: 877cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 878c6fd2807SJeff Garzik */ 879c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 880c6fd2807SJeff Garzik { 881c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 882c6fd2807SJeff Garzik 883c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 884c6fd2807SJeff Garzik 885c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 8865ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 887c6fd2807SJeff Garzik 888c6fd2807SJeff Garzik /* The following will fail if timeout has already expired. 889c6fd2807SJeff Garzik * ata_scsi_error() takes care of such scmds on EH entry. 
890c6fd2807SJeff Garzik * Note that ATA_QCFLAG_FAILED is unconditionally set after 891c6fd2807SJeff Garzik * this function completes. 892c6fd2807SJeff Garzik */ 893242f9dcbSJens Axboe blk_abort_request(qc->scsicmd->request); 894c6fd2807SJeff Garzik } 895c6fd2807SJeff Garzik 896c6fd2807SJeff Garzik /** 897c6fd2807SJeff Garzik * ata_port_schedule_eh - schedule error handling without a qc 898c6fd2807SJeff Garzik * @ap: ATA port to schedule EH for 899c6fd2807SJeff Garzik * 900c6fd2807SJeff Garzik * Schedule error handling for @ap. EH will kick in as soon as 901c6fd2807SJeff Garzik * all commands are drained. 902c6fd2807SJeff Garzik * 903c6fd2807SJeff Garzik * LOCKING: 904cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 905c6fd2807SJeff Garzik */ 906c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap) 907c6fd2807SJeff Garzik { 908c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 909c6fd2807SJeff Garzik 910f4d6d004STejun Heo if (ap->pflags & ATA_PFLAG_INITIALIZING) 911f4d6d004STejun Heo return; 912f4d6d004STejun Heo 9135ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 914cca3974eSJeff Garzik scsi_schedule_eh(ap->scsi_host); 915c6fd2807SJeff Garzik 916c6fd2807SJeff Garzik DPRINTK("port EH scheduled\n"); 917c6fd2807SJeff Garzik } 918c6fd2807SJeff Garzik 919dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 920c6fd2807SJeff Garzik { 921c6fd2807SJeff Garzik int tag, nr_aborted = 0; 922c6fd2807SJeff Garzik 923c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 924c6fd2807SJeff Garzik 9255ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */ 9265ddf24c5STejun Heo ata_eh_set_pending(ap, 0); 9275ddf24c5STejun Heo 928c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 929c6fd2807SJeff Garzik struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 930c6fd2807SJeff Garzik 931dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) { 932c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 933c6fd2807SJeff Garzik ata_qc_complete(qc); 934c6fd2807SJeff Garzik nr_aborted++; 935c6fd2807SJeff Garzik } 936c6fd2807SJeff Garzik } 937c6fd2807SJeff Garzik 938c6fd2807SJeff Garzik if (!nr_aborted) 939c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 940c6fd2807SJeff Garzik 941c6fd2807SJeff Garzik return nr_aborted; 942c6fd2807SJeff Garzik } 943c6fd2807SJeff Garzik 944c6fd2807SJeff Garzik /** 945dbd82616STejun Heo * ata_link_abort - abort all qc's on the link 946dbd82616STejun Heo * @link: ATA link to abort qc's for 947dbd82616STejun Heo * 948dbd82616STejun Heo * Abort all active qc's active on @link and schedule EH. 949dbd82616STejun Heo * 950dbd82616STejun Heo * LOCKING: 951dbd82616STejun Heo * spin_lock_irqsave(host lock) 952dbd82616STejun Heo * 953dbd82616STejun Heo * RETURNS: 954dbd82616STejun Heo * Number of aborted qc's. 955dbd82616STejun Heo */ 956dbd82616STejun Heo int ata_link_abort(struct ata_link *link) 957dbd82616STejun Heo { 958dbd82616STejun Heo return ata_do_link_abort(link->ap, link); 959dbd82616STejun Heo } 960dbd82616STejun Heo 961dbd82616STejun Heo /** 962dbd82616STejun Heo * ata_port_abort - abort all qc's on the port 963dbd82616STejun Heo * @ap: ATA port to abort qc's for 964dbd82616STejun Heo * 965dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH. 966dbd82616STejun Heo * 967dbd82616STejun Heo * LOCKING: 968dbd82616STejun Heo * spin_lock_irqsave(host_set lock) 969dbd82616STejun Heo * 970dbd82616STejun Heo * RETURNS: 971dbd82616STejun Heo * Number of aborted qc's. 
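 *
 *	Typically called with the host lock held from an LLD interrupt or
 *	error path once a controller-level error makes every in-flight
 *	command suspect; all active qcs are failed and EH is scheduled to
 *	sort things out.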
972dbd82616STejun Heo */ 973dbd82616STejun Heo int ata_port_abort(struct ata_port *ap) 974dbd82616STejun Heo { 975dbd82616STejun Heo return ata_do_link_abort(ap, NULL); 976dbd82616STejun Heo } 977dbd82616STejun Heo 978dbd82616STejun Heo /** 979c6fd2807SJeff Garzik * __ata_port_freeze - freeze port 980c6fd2807SJeff Garzik * @ap: ATA port to freeze 981c6fd2807SJeff Garzik * 982c6fd2807SJeff Garzik * This function is called when HSM violation or some other 983c6fd2807SJeff Garzik * condition disrupts normal operation of the port. Frozen port 984c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is 985c6fd2807SJeff Garzik * thawed, which usually follows a successful reset. 986c6fd2807SJeff Garzik * 987c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port 988c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 989c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler 990c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port 991c6fd2807SJeff Garzik * is frozen. 992c6fd2807SJeff Garzik * 993c6fd2807SJeff Garzik * LOCKING: 994cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 995c6fd2807SJeff Garzik */ 996c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap) 997c6fd2807SJeff Garzik { 998c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 999c6fd2807SJeff Garzik 1000c6fd2807SJeff Garzik if (ap->ops->freeze) 1001c6fd2807SJeff Garzik ap->ops->freeze(ap); 1002c6fd2807SJeff Garzik 1003c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN; 1004c6fd2807SJeff Garzik 100544877b4eSTejun Heo DPRINTK("ata%u port frozen\n", ap->print_id); 1006c6fd2807SJeff Garzik } 1007c6fd2807SJeff Garzik 1008c6fd2807SJeff Garzik /** 1009c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port 1010c6fd2807SJeff Garzik * @ap: ATA port to freeze 1011c6fd2807SJeff Garzik * 101254c38444SJeff Garzik * Abort and freeze @ap. The freeze operation must be called 101354c38444SJeff Garzik * first, because some hardware requires special operations 101454c38444SJeff Garzik * before the taskfile registers are accessible. 1015c6fd2807SJeff Garzik * 1016c6fd2807SJeff Garzik * LOCKING: 1017cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1018c6fd2807SJeff Garzik * 1019c6fd2807SJeff Garzik * RETURNS: 1020c6fd2807SJeff Garzik * Number of aborted commands. 1021c6fd2807SJeff Garzik */ 1022c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap) 1023c6fd2807SJeff Garzik { 1024c6fd2807SJeff Garzik int nr_aborted; 1025c6fd2807SJeff Garzik 1026c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1027c6fd2807SJeff Garzik 1028c6fd2807SJeff Garzik __ata_port_freeze(ap); 102954c38444SJeff Garzik nr_aborted = ata_port_abort(ap); 1030c6fd2807SJeff Garzik 1031c6fd2807SJeff Garzik return nr_aborted; 1032c6fd2807SJeff Garzik } 1033c6fd2807SJeff Garzik 1034c6fd2807SJeff Garzik /** 10357d77b247STejun Heo * sata_async_notification - SATA async notification handler 10367d77b247STejun Heo * @ap: ATA port where async notification is received 10377d77b247STejun Heo * 10387d77b247STejun Heo * Handler to be called when async notification via SDB FIS is 10397d77b247STejun Heo * received. This function schedules EH if necessary. 10407d77b247STejun Heo * 10417d77b247STejun Heo * LOCKING: 10427d77b247STejun Heo * spin_lock_irqsave(host lock) 10437d77b247STejun Heo * 10447d77b247STejun Heo * RETURNS: 10457d77b247STejun Heo * 1 if EH is scheduled, 0 otherwise. 
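 *
 *	LLDs are expected to call this from their interrupt handler, with
 *	the host lock held, whenever an SDB FIS with the notification bit
 *	set is received; depending on configuration this notifies ATAPI
 *	media change or schedules EH for PMP PHY status changes, as
 *	described above.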
10467d77b247STejun Heo */ 10477d77b247STejun Heo int sata_async_notification(struct ata_port *ap) 10487d77b247STejun Heo { 10497d77b247STejun Heo u32 sntf; 10507d77b247STejun Heo int rc; 10517d77b247STejun Heo 10527d77b247STejun Heo if (!(ap->flags & ATA_FLAG_AN)) 10537d77b247STejun Heo return 0; 10547d77b247STejun Heo 10557d77b247STejun Heo rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 10567d77b247STejun Heo if (rc == 0) 10577d77b247STejun Heo sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 10587d77b247STejun Heo 1059071f44b1STejun Heo if (!sata_pmp_attached(ap) || rc) { 10607d77b247STejun Heo /* PMP is not attached or SNTF is not available */ 1061071f44b1STejun Heo if (!sata_pmp_attached(ap)) { 10627d77b247STejun Heo /* PMP is not attached. Check whether ATAPI 10637d77b247STejun Heo * AN is configured. If so, notify media 10647d77b247STejun Heo * change. 10657d77b247STejun Heo */ 10667d77b247STejun Heo struct ata_device *dev = ap->link.device; 10677d77b247STejun Heo 10687d77b247STejun Heo if ((dev->class == ATA_DEV_ATAPI) && 10697d77b247STejun Heo (dev->flags & ATA_DFLAG_AN)) 10707d77b247STejun Heo ata_scsi_media_change_notify(dev); 10717d77b247STejun Heo return 0; 10727d77b247STejun Heo } else { 10737d77b247STejun Heo /* PMP is attached but SNTF is not available. 10747d77b247STejun Heo * ATAPI async media change notification is 10757d77b247STejun Heo * not used. The PMP must be reporting PHY 10767d77b247STejun Heo * status change, schedule EH. 10777d77b247STejun Heo */ 10787d77b247STejun Heo ata_port_schedule_eh(ap); 10797d77b247STejun Heo return 1; 10807d77b247STejun Heo } 10817d77b247STejun Heo } else { 10827d77b247STejun Heo /* PMP is attached and SNTF is available */ 10837d77b247STejun Heo struct ata_link *link; 10847d77b247STejun Heo 10857d77b247STejun Heo /* check and notify ATAPI AN */ 10861eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 10877d77b247STejun Heo if (!(sntf & (1 << link->pmp))) 10887d77b247STejun Heo continue; 10897d77b247STejun Heo 10907d77b247STejun Heo if ((link->device->class == ATA_DEV_ATAPI) && 10917d77b247STejun Heo (link->device->flags & ATA_DFLAG_AN)) 10927d77b247STejun Heo ata_scsi_media_change_notify(link->device); 10937d77b247STejun Heo } 10947d77b247STejun Heo 10957d77b247STejun Heo /* If PMP is reporting that PHY status of some 10967d77b247STejun Heo * downstream ports has changed, schedule EH. 10977d77b247STejun Heo */ 10987d77b247STejun Heo if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 10997d77b247STejun Heo ata_port_schedule_eh(ap); 11007d77b247STejun Heo return 1; 11017d77b247STejun Heo } 11027d77b247STejun Heo 11037d77b247STejun Heo return 0; 11047d77b247STejun Heo } 11057d77b247STejun Heo } 11067d77b247STejun Heo 11077d77b247STejun Heo /** 1108c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port 1109c6fd2807SJeff Garzik * @ap: ATA port to freeze 1110c6fd2807SJeff Garzik * 1111c6fd2807SJeff Garzik * Freeze @ap. 1112c6fd2807SJeff Garzik * 1113c6fd2807SJeff Garzik * LOCKING: 1114c6fd2807SJeff Garzik * None. 
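 *
 *	Unlike __ata_port_freeze(), this helper acquires and releases the
 *	host lock itself, so it may be called without the lock held.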
1115c6fd2807SJeff Garzik */ 1116c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1117c6fd2807SJeff Garzik { 1118c6fd2807SJeff Garzik unsigned long flags; 1119c6fd2807SJeff Garzik 1120c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1121c6fd2807SJeff Garzik return; 1122c6fd2807SJeff Garzik 1123c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1124c6fd2807SJeff Garzik __ata_port_freeze(ap); 1125c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1126c6fd2807SJeff Garzik } 1127c6fd2807SJeff Garzik 1128c6fd2807SJeff Garzik /** 1129c6fd2807SJeff Garzik * ata_port_thaw_port - EH helper to thaw port 1130c6fd2807SJeff Garzik * @ap: ATA port to thaw 1131c6fd2807SJeff Garzik * 1132c6fd2807SJeff Garzik * Thaw frozen port @ap. 1133c6fd2807SJeff Garzik * 1134c6fd2807SJeff Garzik * LOCKING: 1135c6fd2807SJeff Garzik * None. 1136c6fd2807SJeff Garzik */ 1137c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1138c6fd2807SJeff Garzik { 1139c6fd2807SJeff Garzik unsigned long flags; 1140c6fd2807SJeff Garzik 1141c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1142c6fd2807SJeff Garzik return; 1143c6fd2807SJeff Garzik 1144c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1145c6fd2807SJeff Garzik 1146c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1147c6fd2807SJeff Garzik 1148c6fd2807SJeff Garzik if (ap->ops->thaw) 1149c6fd2807SJeff Garzik ap->ops->thaw(ap); 1150c6fd2807SJeff Garzik 1151c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1152c6fd2807SJeff Garzik 115344877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id); 1154c6fd2807SJeff Garzik } 1155c6fd2807SJeff Garzik 1156c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1157c6fd2807SJeff Garzik { 1158c6fd2807SJeff Garzik /* nada */ 1159c6fd2807SJeff Garzik } 1160c6fd2807SJeff Garzik 1161c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1162c6fd2807SJeff Garzik { 1163c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1164c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1165c6fd2807SJeff Garzik unsigned long flags; 1166c6fd2807SJeff Garzik 1167c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1168c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1169c6fd2807SJeff Garzik __ata_qc_complete(qc); 1170c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1171c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1172c6fd2807SJeff Garzik 1173c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1174c6fd2807SJeff Garzik } 1175c6fd2807SJeff Garzik 1176c6fd2807SJeff Garzik /** 1177c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1178c6fd2807SJeff Garzik * @qc: Command to complete 1179c6fd2807SJeff Garzik * 1180c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1181c6fd2807SJeff Garzik * completed. To be used from EH. 
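 *
 *	Together with ata_eh_qc_retry() below, this is how EH disposes of
 *	commands it owns: a qc which actually failed is completed here,
 *	while one that was only taken down by an unrelated failure
 *	(qc->err_mask == 0) is handed back for another attempt.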
1182c6fd2807SJeff Garzik */ 1183c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1184c6fd2807SJeff Garzik { 1185c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1186c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1187c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1188c6fd2807SJeff Garzik } 1189c6fd2807SJeff Garzik 1190c6fd2807SJeff Garzik /** 1191c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1192c6fd2807SJeff Garzik * @qc: Command to retry 1193c6fd2807SJeff Garzik * 1194c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1195c6fd2807SJeff Garzik * should be retried. To be used from EH. 1196c6fd2807SJeff Garzik * 1197c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1198c6fd2807SJeff Garzik * scmd->retries is decremented for commands which get retried 1199c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1200c6fd2807SJeff Garzik */ 1201c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1202c6fd2807SJeff Garzik { 1203c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1204c6fd2807SJeff Garzik if (!qc->err_mask && scmd->retries) 1205c6fd2807SJeff Garzik scmd->retries--; 1206c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1207c6fd2807SJeff Garzik } 1208c6fd2807SJeff Garzik 1209c6fd2807SJeff Garzik /** 1210678afac6STejun Heo * ata_dev_disable - disable ATA device 1211678afac6STejun Heo * @dev: ATA device to disable 1212678afac6STejun Heo * 1213678afac6STejun Heo * Disable @dev. 1214678afac6STejun Heo * 1215678afac6STejun Heo * Locking: 1216678afac6STejun Heo * EH context. 1217678afac6STejun Heo */ 1218678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1219678afac6STejun Heo { 1220678afac6STejun Heo if (!ata_dev_enabled(dev)) 1221678afac6STejun Heo return; 1222678afac6STejun Heo 1223678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1224678afac6STejun Heo ata_dev_printk(dev, KERN_WARNING, "disabled\n"); 1225678afac6STejun Heo ata_acpi_on_disable(dev); 1226678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1227678afac6STejun Heo dev->class++; 122899cf610aSTejun Heo 122999cf610aSTejun Heo /* From now till the next successful probe, ering is used to 123099cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 123199cf610aSTejun Heo */ 123299cf610aSTejun Heo ata_ering_clear(&dev->ering); 1233678afac6STejun Heo } 1234678afac6STejun Heo 1235678afac6STejun Heo /** 1236c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1237c6fd2807SJeff Garzik * @dev: ATA device to detach 1238c6fd2807SJeff Garzik * 1239c6fd2807SJeff Garzik * Detach @dev. 1240c6fd2807SJeff Garzik * 1241c6fd2807SJeff Garzik * LOCKING: 1242c6fd2807SJeff Garzik * None. 
1243c6fd2807SJeff Garzik */ 1244fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1245c6fd2807SJeff Garzik { 1246f58229f8STejun Heo struct ata_link *link = dev->link; 1247f58229f8STejun Heo struct ata_port *ap = link->ap; 124890484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1249c6fd2807SJeff Garzik unsigned long flags; 1250c6fd2807SJeff Garzik 1251c6fd2807SJeff Garzik ata_dev_disable(dev); 1252c6fd2807SJeff Garzik 1253c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1254c6fd2807SJeff Garzik 1255c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1256c6fd2807SJeff Garzik 1257c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1258c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1259c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1260c6fd2807SJeff Garzik } 1261c6fd2807SJeff Garzik 126290484ebfSTejun Heo /* clear per-dev EH info */ 1263f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1264f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 126590484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 126690484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1267c6fd2807SJeff Garzik 1268c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1269c6fd2807SJeff Garzik } 1270c6fd2807SJeff Garzik 1271c6fd2807SJeff Garzik /** 1272c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1273955e57dfSTejun Heo * @link: target ATA link 1274c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1275c6fd2807SJeff Garzik * @action: action about to be performed 1276c6fd2807SJeff Garzik * 1277c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1278955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1279955e57dfSTejun Heo * repeated. 1280c6fd2807SJeff Garzik * 1281c6fd2807SJeff Garzik * LOCKING: 1282c6fd2807SJeff Garzik * None. 1283c6fd2807SJeff Garzik */ 1284fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1285c6fd2807SJeff Garzik unsigned int action) 1286c6fd2807SJeff Garzik { 1287955e57dfSTejun Heo struct ata_port *ap = link->ap; 1288955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1289955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1290c6fd2807SJeff Garzik unsigned long flags; 1291c6fd2807SJeff Garzik 1292c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1293c6fd2807SJeff Garzik 1294955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1295c6fd2807SJeff Garzik 1296a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1297a568d1d2STejun Heo * slave links as master will do them again. 1298a568d1d2STejun Heo */ 1299a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1300c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1301c6fd2807SJeff Garzik 1302c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1303c6fd2807SJeff Garzik } 1304c6fd2807SJeff Garzik 1305c6fd2807SJeff Garzik /** 1306c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1307c6fd2807SJeff Garzik * @link: target ATA link 1308c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1309c6fd2807SJeff Garzik * @action: action just completed 1310c6fd2807SJeff Garzik * 1311c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1312955e57dfSTejun Heo * in @link->eh_context.
1313c6fd2807SJeff Garzik * 1314c6fd2807SJeff Garzik * LOCKING: 1315c6fd2807SJeff Garzik * None. 1316c6fd2807SJeff Garzik */ 1317fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1318c6fd2807SJeff Garzik unsigned int action) 1319c6fd2807SJeff Garzik { 1320955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13219af5c9c9STejun Heo 1322955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1323c6fd2807SJeff Garzik } 1324c6fd2807SJeff Garzik 1325c6fd2807SJeff Garzik /** 1326c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1327c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1328c6fd2807SJeff Garzik * 1329c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1330c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1331c6fd2807SJeff Garzik * error is reported. 1332c6fd2807SJeff Garzik * 1333c6fd2807SJeff Garzik * LOCKING: 1334c6fd2807SJeff Garzik * None. 1335c6fd2807SJeff Garzik * 1336c6fd2807SJeff Garzik * RETURNS: 1337c6fd2807SJeff Garzik * Descriptive string for @err_mask 1338c6fd2807SJeff Garzik */ 1339c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1340c6fd2807SJeff Garzik { 1341c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1342c6fd2807SJeff Garzik return "host bus error"; 1343c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1344c6fd2807SJeff Garzik return "ATA bus error"; 1345c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1346c6fd2807SJeff Garzik return "timeout"; 1347c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1348c6fd2807SJeff Garzik return "HSM violation"; 1349c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1350c6fd2807SJeff Garzik return "internal error"; 1351c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1352c6fd2807SJeff Garzik return "media error"; 1353c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1354c6fd2807SJeff Garzik return "invalid argument"; 1355c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1356c6fd2807SJeff Garzik return "device error"; 1357c6fd2807SJeff Garzik return "unknown error"; 1358c6fd2807SJeff Garzik } 1359c6fd2807SJeff Garzik 1360c6fd2807SJeff Garzik /** 1361c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1362c6fd2807SJeff Garzik * @dev: target device 1363c6fd2807SJeff Garzik * @page: page to read 1364c6fd2807SJeff Garzik * @buf: buffer to store read page 1365c6fd2807SJeff Garzik * @sectors: number of sectors to read 1366c6fd2807SJeff Garzik * 1367c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1368c6fd2807SJeff Garzik * 1369c6fd2807SJeff Garzik * LOCKING: 1370c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1371c6fd2807SJeff Garzik * 1372c6fd2807SJeff Garzik * RETURNS: 1373c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
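 *
 * An illustrative call, mirroring the use in ata_eh_read_log_10h()
 * below (which reads one sector of the NCQ error log into the
 * port's scratch buffer):
 *
 *	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ,
 *				     dev->link->ap->sector_buf, 1);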
1374c6fd2807SJeff Garzik */ 1375c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1376c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1377c6fd2807SJeff Garzik { 1378c6fd2807SJeff Garzik struct ata_taskfile tf; 1379c6fd2807SJeff Garzik unsigned int err_mask; 1380c6fd2807SJeff Garzik 1381c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1382c6fd2807SJeff Garzik 1383c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1384c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1385c6fd2807SJeff Garzik tf.lbal = page; 1386c6fd2807SJeff Garzik tf.nsect = sectors; 1387c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1388c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1389c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1390c6fd2807SJeff Garzik 1391c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 13922b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1393c6fd2807SJeff Garzik 1394c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1395c6fd2807SJeff Garzik return err_mask; 1396c6fd2807SJeff Garzik } 1397c6fd2807SJeff Garzik 1398c6fd2807SJeff Garzik /** 1399c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1400c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1401c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1402c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1403c6fd2807SJeff Garzik * 1404c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1405c6fd2807SJeff Garzik * condition. 1406c6fd2807SJeff Garzik * 1407c6fd2807SJeff Garzik * LOCKING: 1408c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1409c6fd2807SJeff Garzik * 1410c6fd2807SJeff Garzik * RETURNS: 1411c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
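 *
 * For reference, the parser below pulls the following bytes out of
 * the 512-byte page (layout per the SATA NCQ command error log):
 *
 *	buf[0]	bit 7 = NQ (error was for a non-queued command,
 *		reported as -ENOENT), bits 4:0 = tag of the failed
 *		command
 *	buf[2]..buf[7]	status, error, LBA 7:0/15:8/23:16, device
 *	buf[8]..buf[10]	LBA 31:24/39:32/47:40
 *	buf[12], buf[13]	count 7:0, count 15:8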
1412c6fd2807SJeff Garzik */ 1413c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1414c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1415c6fd2807SJeff Garzik { 14169af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1417c6fd2807SJeff Garzik unsigned int err_mask; 1418c6fd2807SJeff Garzik u8 csum; 1419c6fd2807SJeff Garzik int i; 1420c6fd2807SJeff Garzik 1421c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1422c6fd2807SJeff Garzik if (err_mask) 1423c6fd2807SJeff Garzik return -EIO; 1424c6fd2807SJeff Garzik 1425c6fd2807SJeff Garzik csum = 0; 1426c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1427c6fd2807SJeff Garzik csum += buf[i]; 1428c6fd2807SJeff Garzik if (csum) 1429c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1430c6fd2807SJeff Garzik "invalid checksum 0x%x on log page 10h\n", csum); 1431c6fd2807SJeff Garzik 1432c6fd2807SJeff Garzik if (buf[0] & 0x80) 1433c6fd2807SJeff Garzik return -ENOENT; 1434c6fd2807SJeff Garzik 1435c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1436c6fd2807SJeff Garzik 1437c6fd2807SJeff Garzik tf->command = buf[2]; 1438c6fd2807SJeff Garzik tf->feature = buf[3]; 1439c6fd2807SJeff Garzik tf->lbal = buf[4]; 1440c6fd2807SJeff Garzik tf->lbam = buf[5]; 1441c6fd2807SJeff Garzik tf->lbah = buf[6]; 1442c6fd2807SJeff Garzik tf->device = buf[7]; 1443c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1444c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1445c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1446c6fd2807SJeff Garzik tf->nsect = buf[12]; 1447c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1448c6fd2807SJeff Garzik 1449c6fd2807SJeff Garzik return 0; 1450c6fd2807SJeff Garzik } 1451c6fd2807SJeff Garzik 1452c6fd2807SJeff Garzik /** 145311fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 145411fc33daSTejun Heo * @dev: target ATAPI device 145511fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 145611fc33daSTejun Heo * 145711fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 145811fc33daSTejun Heo * 145911fc33daSTejun Heo * LOCKING: 146011fc33daSTejun Heo * EH context (may sleep). 146111fc33daSTejun Heo * 146211fc33daSTejun Heo * RETURNS: 146311fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
146411fc33daSTejun Heo */ 146511fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 146611fc33daSTejun Heo { 146711fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 146811fc33daSTejun Heo struct ata_taskfile tf; 146911fc33daSTejun Heo unsigned int err_mask; 147011fc33daSTejun Heo 147111fc33daSTejun Heo ata_tf_init(dev, &tf); 147211fc33daSTejun Heo 147311fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 147411fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 147511fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 147611fc33daSTejun Heo 147711fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 147811fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 147911fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 148011fc33daSTejun Heo return err_mask; 148111fc33daSTejun Heo } 148211fc33daSTejun Heo 148311fc33daSTejun Heo /** 1484c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1485c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1486c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14873eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1488c6fd2807SJeff Garzik * 1489c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1490c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1491c6fd2807SJeff Garzik * 1492c6fd2807SJeff Garzik * LOCKING: 1493c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1494c6fd2807SJeff Garzik * 1495c6fd2807SJeff Garzik * RETURNS: 1496c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1497c6fd2807SJeff Garzik */ 14983eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 14993eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1500c6fd2807SJeff Garzik { 15013eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 15023eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 15039af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1504c6fd2807SJeff Garzik struct ata_taskfile tf; 1505c6fd2807SJeff Garzik 1506c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1507c6fd2807SJeff Garzik 1508c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1509c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1510c6fd2807SJeff Garzik 151156287768SAlbert Lee /* initialize sense_buf with the error register, 151256287768SAlbert Lee * for the case where they are -not- overwritten 151356287768SAlbert Lee */ 1514c6fd2807SJeff Garzik sense_buf[0] = 0x70; 15153eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 151656287768SAlbert Lee 151756287768SAlbert Lee /* some devices time out if garbage left in tf */ 151856287768SAlbert Lee ata_tf_init(dev, &tf); 1519c6fd2807SJeff Garzik 1520c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1521c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1522c6fd2807SJeff Garzik 1523c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1524c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 15250dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1526c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1527c6fd2807SJeff Garzik } else { 15280dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1529f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1530f2dfc1a1STejun Heo tf.lbah = 0; 1531c6fd2807SJeff Garzik } 1532c6fd2807SJeff Garzik 1533c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 15342b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1535c6fd2807SJeff Garzik } 1536c6fd2807SJeff Garzik 1537c6fd2807SJeff Garzik /** 1538c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 15390260731fSTejun Heo * @link: ATA link to analyze SError for 1540c6fd2807SJeff Garzik * 1541c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1542c6fd2807SJeff Garzik * failure. 1543c6fd2807SJeff Garzik * 1544c6fd2807SJeff Garzik * LOCKING: 1545c6fd2807SJeff Garzik * None. 1546c6fd2807SJeff Garzik */ 15470260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1548c6fd2807SJeff Garzik { 15490260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1550c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1551c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1552f9df58cbSTejun Heo u32 hotplug_mask; 1553c6fd2807SJeff Garzik 1554e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1555c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1556cf480626STejun Heo action |= ATA_EH_RESET; 1557c6fd2807SJeff Garzik } 1558c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1559c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1560cf480626STejun Heo action |= ATA_EH_RESET; 1561c6fd2807SJeff Garzik } 1562c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1563c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1564cf480626STejun Heo action |= ATA_EH_RESET; 1565c6fd2807SJeff Garzik } 1566f9df58cbSTejun Heo 1567f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1568f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1569f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1570f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1571f9df58cbSTejun Heo */ 1572f9df58cbSTejun Heo hotplug_mask = 0; 1573f9df58cbSTejun Heo 1574f9df58cbSTejun Heo if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1575f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1576f9df58cbSTejun Heo else 1577f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1578f9df58cbSTejun Heo 1579f9df58cbSTejun Heo if (serror & hotplug_mask) 1580c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1581c6fd2807SJeff Garzik 1582c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1583c6fd2807SJeff Garzik ehc->i.action |= action; 1584c6fd2807SJeff Garzik } 1585c6fd2807SJeff Garzik 1586c6fd2807SJeff Garzik /** 1587c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 15880260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1589c6fd2807SJeff Garzik * 1590c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1591c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1592c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1593c6fd2807SJeff Garzik * care of the rest. 
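 *
 * For context, a minimal LLDD error-interrupt sketch (illustrative
 * only, not lifted from any particular driver) that routes an NCQ
 * device error here is simply:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ehi->err_mask |= AC_ERR_DEV;
 *	ata_port_abort(ap);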
1594c6fd2807SJeff Garzik * 1595c6fd2807SJeff Garzik * LOCKING: 1596c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1597c6fd2807SJeff Garzik */ 159810acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1599c6fd2807SJeff Garzik { 16000260731fSTejun Heo struct ata_port *ap = link->ap; 16010260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 16020260731fSTejun Heo struct ata_device *dev = link->device; 1603c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1604c6fd2807SJeff Garzik struct ata_taskfile tf; 1605c6fd2807SJeff Garzik int tag, rc; 1606c6fd2807SJeff Garzik 1607c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1608c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1609c6fd2807SJeff Garzik return; 1610c6fd2807SJeff Garzik 1611c6fd2807SJeff Garzik /* is it NCQ device error? */ 16120260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1613c6fd2807SJeff Garzik return; 1614c6fd2807SJeff Garzik 1615c6fd2807SJeff Garzik /* has LLDD analyzed already? */ 1616c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1617c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1618c6fd2807SJeff Garzik 1619c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1620c6fd2807SJeff Garzik continue; 1621c6fd2807SJeff Garzik 1622c6fd2807SJeff Garzik if (qc->err_mask) 1623c6fd2807SJeff Garzik return; 1624c6fd2807SJeff Garzik } 1625c6fd2807SJeff Garzik 1626c6fd2807SJeff Garzik /* okay, this error is ours */ 1627c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1628c6fd2807SJeff Garzik if (rc) { 16290260731fSTejun Heo ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1630c6fd2807SJeff Garzik "(errno=%d)\n", rc); 1631c6fd2807SJeff Garzik return; 1632c6fd2807SJeff Garzik } 1633c6fd2807SJeff Garzik 16340260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 16350260731fSTejun Heo ata_link_printk(link, KERN_ERR, "log page 10h reported " 1636c6fd2807SJeff Garzik "inactive tag %d\n", tag); 1637c6fd2807SJeff Garzik return; 1638c6fd2807SJeff Garzik } 1639c6fd2807SJeff Garzik 1640c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1641c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1642c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1643a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 16445335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1645c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1646c6fd2807SJeff Garzik } 1647c6fd2807SJeff Garzik 1648c6fd2807SJeff Garzik /** 1649c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1650c6fd2807SJeff Garzik * @qc: qc to analyze 1651c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1652c6fd2807SJeff Garzik * 1653c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1654c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 1655c6fd2807SJeff Garzik * available. 1656c6fd2807SJeff Garzik * 1657c6fd2807SJeff Garzik * LOCKING: 1658c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1659c6fd2807SJeff Garzik * 1660c6fd2807SJeff Garzik * RETURNS: 1661c6fd2807SJeff Garzik * Determined recovery action 1662c6fd2807SJeff Garzik */ 1663c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1664c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1665c6fd2807SJeff Garzik { 1666c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1667c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1668c6fd2807SJeff Garzik 1669c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1670c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1671cf480626STejun Heo return ATA_EH_RESET; 1672c6fd2807SJeff Garzik } 1673c6fd2807SJeff Garzik 1674a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1675a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1676a51d644aSTejun Heo else 1677c6fd2807SJeff Garzik return 0; 1678c6fd2807SJeff Garzik 1679c6fd2807SJeff Garzik switch (qc->dev->class) { 1680c6fd2807SJeff Garzik case ATA_DEV_ATA: 1681c6fd2807SJeff Garzik if (err & ATA_ICRC) 1682c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1683c6fd2807SJeff Garzik if (err & ATA_UNC) 1684c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1685c6fd2807SJeff Garzik if (err & ATA_IDNF) 1686c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1687c6fd2807SJeff Garzik break; 1688c6fd2807SJeff Garzik 1689c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1690a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 16913eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 16923eabddb8STejun Heo qc->scsicmd->sense_buffer, 16933eabddb8STejun Heo qc->result_tf.feature >> 4); 1694c6fd2807SJeff Garzik if (!tmp) { 1695a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1696a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1697a569a30dSTejun Heo * data is already valid. 1698c6fd2807SJeff Garzik * 1699c6fd2807SJeff Garzik * TODO: interpret sense data and set 1700c6fd2807SJeff Garzik * appropriate err_mask. 
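 *
 * One conceivable mapping, sketched purely as an
 * illustration of what such interpretation could set
 * (sense keys as defined in <scsi/scsi.h>):
 *
 *	switch (qc->scsicmd->sense_buffer[2] & 0x0f) {
 *	case MEDIUM_ERROR:
 *		qc->err_mask |= AC_ERR_MEDIA;
 *		break;
 *	case ILLEGAL_REQUEST:
 *		qc->err_mask |= AC_ERR_INVALID;
 *		break;
 *	}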
1701c6fd2807SJeff Garzik */ 1702c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1703c6fd2807SJeff Garzik } else 1704c6fd2807SJeff Garzik qc->err_mask |= tmp; 1705c6fd2807SJeff Garzik } 1706a569a30dSTejun Heo } 1707c6fd2807SJeff Garzik 1708c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1709cf480626STejun Heo action |= ATA_EH_RESET; 1710c6fd2807SJeff Garzik 1711c6fd2807SJeff Garzik return action; 1712c6fd2807SJeff Garzik } 1713c6fd2807SJeff Garzik 171476326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 171576326ac1STejun Heo int *xfer_ok) 1716c6fd2807SJeff Garzik { 171776326ac1STejun Heo int base = 0; 171876326ac1STejun Heo 171976326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 172076326ac1STejun Heo *xfer_ok = 1; 172176326ac1STejun Heo 172276326ac1STejun Heo if (!*xfer_ok) 172375f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 172476326ac1STejun Heo 17257d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 172676326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1727c6fd2807SJeff Garzik 17287d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 172976326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17307d47e8d4STejun Heo 17313884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 17327d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 173376326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 17347d47e8d4STejun Heo if ((err_mask & 17357d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 173676326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1737c6fd2807SJeff Garzik } 1738c6fd2807SJeff Garzik 1739c6fd2807SJeff Garzik return 0; 1740c6fd2807SJeff Garzik } 1741c6fd2807SJeff Garzik 17427d47e8d4STejun Heo struct speed_down_verdict_arg { 1743c6fd2807SJeff Garzik u64 since; 174476326ac1STejun Heo int xfer_ok; 17453884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1746c6fd2807SJeff Garzik }; 1747c6fd2807SJeff Garzik 17487d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1749c6fd2807SJeff Garzik { 17507d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 175176326ac1STejun Heo int cat; 1752c6fd2807SJeff Garzik 1753c6fd2807SJeff Garzik if (ent->timestamp < arg->since) 1754c6fd2807SJeff Garzik return -1; 1755c6fd2807SJeff Garzik 175676326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 175776326ac1STejun Heo &arg->xfer_ok); 17587d47e8d4STejun Heo arg->nr_errors[cat]++; 175976326ac1STejun Heo 1760c6fd2807SJeff Garzik return 0; 1761c6fd2807SJeff Garzik } 1762c6fd2807SJeff Garzik 1763c6fd2807SJeff Garzik /** 17647d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1765c6fd2807SJeff Garzik * @dev: Device of interest 1766c6fd2807SJeff Garzik * 1767c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 17687d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 17697d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 1770c6fd2807SJeff Garzik * 17713884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1772c6fd2807SJeff Garzik * 17733884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 17743884f7b0STejun Heo * IO commands 17757d47e8d4STejun Heo * 17763884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1777c6fd2807SJeff Garzik * 177876326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 177976326ac1STejun Heo * data transfer hasn't been verified. 
178076326ac1STejun Heo * 17813884f7b0STejun Heo * Verdicts are 17827d47e8d4STejun Heo * 17833884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 17847d47e8d4STejun Heo * 17853884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 17863884f7b0STejun Heo * to PIO. 17873884f7b0STejun Heo * 17883884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 17893884f7b0STejun Heo * 17903884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 179176326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 179276326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 179376326ac1STejun Heo * This is to expedite speed down decisions right after device is 179476326ac1STejun Heo * initially configured. 17953884f7b0STejun Heo * 179676326ac1STejun Heo * The following are the speed down rules. #1 and #2 deal with 179776326ac1STejun Heo * DUBIOUS errors. 179876326ac1STejun Heo * 179976326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 180076326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 180176326ac1STejun Heo * 180276326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 180376326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 180476326ac1STejun Heo * 180576326ac1STejun Heo * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors 18063884f7b0STejun Heo * occurred during last 5 mins, FALLBACK_TO_PIO. 18073884f7b0STejun Heo * 180876326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 18093884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 18103884f7b0STejun Heo * 181176326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 18123884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 18137d47e8d4STejun Heo * 1814c6fd2807SJeff Garzik * LOCKING: 1815c6fd2807SJeff Garzik * Inherited from caller. 1816c6fd2807SJeff Garzik * 1817c6fd2807SJeff Garzik * RETURNS: 18187d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
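 *
 * Worked example: a drive that logged two DUBIOUS_TOUT_HSM errors
 * in the last five minutes trips rule #1 (SPEED_DOWN and
 * FALLBACK_TO_PIO) as well as rule #2 (NCQ_OFF), and because the
 * errors are DUBIOUS every verdict also carries
 * ATA_EH_SPDN_KEEP_ERRORS, so the error ring is kept for later,
 * verified-transfer decisions.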
1819c6fd2807SJeff Garzik */ 18207d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1821c6fd2807SJeff Garzik { 18227d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 18237d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 18247d47e8d4STejun Heo struct speed_down_verdict_arg arg; 18257d47e8d4STejun Heo unsigned int verdict = 0; 1826c6fd2807SJeff Garzik 18273884f7b0STejun Heo /* scan past 5 mins of error history */ 18283884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 18293884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 18303884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 18313884f7b0STejun Heo 183276326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 183376326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 183476326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 183576326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 183676326ac1STejun Heo 183776326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 183876326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 183976326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 184076326ac1STejun Heo 18413884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18423884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1843663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18443884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 18453884f7b0STejun Heo 18467d47e8d4STejun Heo /* scan past 10 mins of error history */ 1847c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 18487d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 18497d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1850c6fd2807SJeff Garzik 18513884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 18523884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 18537d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 18543884f7b0STejun Heo 18553884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 18563884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1857663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 18587d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1859c6fd2807SJeff Garzik 18607d47e8d4STejun Heo return verdict; 1861c6fd2807SJeff Garzik } 1862c6fd2807SJeff Garzik 1863c6fd2807SJeff Garzik /** 1864c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1865c6fd2807SJeff Garzik * @dev: Failed device 18663884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1867c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1868c6fd2807SJeff Garzik * 1869c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1870c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1871c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1872c6fd2807SJeff Garzik * necessary. 1873c6fd2807SJeff Garzik * 1874c6fd2807SJeff Garzik * LOCKING: 1875c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1876c6fd2807SJeff Garzik * 1877c6fd2807SJeff Garzik * RETURNS: 18787d47e8d4STejun Heo * Determined recovery action. 
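 *
 * The caller in this file, ata_eh_link_autopsy(), simply ORs the
 * result into the pending EH actions:
 *
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);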
1879c6fd2807SJeff Garzik */ 18803884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 18813884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1882c6fd2807SJeff Garzik { 1883b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 188476326ac1STejun Heo int xfer_ok = 0; 18857d47e8d4STejun Heo unsigned int verdict; 18867d47e8d4STejun Heo unsigned int action = 0; 18877d47e8d4STejun Heo 18887d47e8d4STejun Heo /* don't bother if Cat-0 error */ 188976326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1890c6fd2807SJeff Garzik return 0; 1891c6fd2807SJeff Garzik 1892c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 18933884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 18947d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1895c6fd2807SJeff Garzik 18967d47e8d4STejun Heo /* turn off NCQ? */ 18977d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 18987d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 18997d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 19007d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 19017d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 19027d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 19037d47e8d4STejun Heo goto done; 19047d47e8d4STejun Heo } 1905c6fd2807SJeff Garzik 19067d47e8d4STejun Heo /* speed down? */ 19077d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1908c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1909a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1910cf480626STejun Heo action |= ATA_EH_RESET; 19117d47e8d4STejun Heo goto done; 19127d47e8d4STejun Heo } 1913c6fd2807SJeff Garzik 1914c6fd2807SJeff Garzik /* lower transfer mode */ 19157d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 19167d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 19177d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 19187d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 19197d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 19207d47e8d4STejun Heo int sel; 1921c6fd2807SJeff Garzik 19227d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 19237d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 19247d47e8d4STejun Heo else 19257d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 19267d47e8d4STejun Heo 19277d47e8d4STejun Heo dev->spdn_cnt++; 19287d47e8d4STejun Heo 19297d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1930cf480626STejun Heo action |= ATA_EH_RESET; 19317d47e8d4STejun Heo goto done; 19327d47e8d4STejun Heo } 19337d47e8d4STejun Heo } 19347d47e8d4STejun Heo } 19357d47e8d4STejun Heo 19367d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1937663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
19387d47e8d4STejun Heo */ 19397d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1940663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 19417d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 19427d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 19437d47e8d4STejun Heo dev->spdn_cnt = 0; 1944cf480626STejun Heo action |= ATA_EH_RESET; 19457d47e8d4STejun Heo goto done; 19467d47e8d4STejun Heo } 19477d47e8d4STejun Heo } 19487d47e8d4STejun Heo 1949c6fd2807SJeff Garzik return 0; 19507d47e8d4STejun Heo done: 19517d47e8d4STejun Heo /* device has been slowed down, blow error history */ 195276326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 19537d47e8d4STejun Heo ata_ering_clear(&dev->ering); 19547d47e8d4STejun Heo return action; 1955c6fd2807SJeff Garzik } 1956c6fd2807SJeff Garzik 1957c6fd2807SJeff Garzik /** 19589b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19599b1e2658STejun Heo * @link: host link to perform autopsy on 1960c6fd2807SJeff Garzik * 19610260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19620260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19630260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1964c6fd2807SJeff Garzik * 1965c6fd2807SJeff Garzik * LOCKING: 1966c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1967c6fd2807SJeff Garzik */ 19689b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1969c6fd2807SJeff Garzik { 19700260731fSTejun Heo struct ata_port *ap = link->ap; 1971936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1972dfcc173dSTejun Heo struct ata_device *dev; 19733884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 19743884f7b0STejun Heo int tag; 1975c6fd2807SJeff Garzik u32 serror; 1976c6fd2807SJeff Garzik int rc; 1977c6fd2807SJeff Garzik 1978c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1979c6fd2807SJeff Garzik 1980c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1981c6fd2807SJeff Garzik return; 1982c6fd2807SJeff Garzik 1983c6fd2807SJeff Garzik /* obtain and analyze SError */ 1984936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1985c6fd2807SJeff Garzik if (rc == 0) { 1986c6fd2807SJeff Garzik ehc->i.serror |= serror; 19870260731fSTejun Heo ata_eh_analyze_serror(link); 19884e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 1989cf480626STejun Heo /* SError read failed, force reset and probing */ 1990b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 1991cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 19924e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 19934e57c517STejun Heo } 1994c6fd2807SJeff Garzik 1995c6fd2807SJeff Garzik /* analyze NCQ failure */ 19960260731fSTejun Heo ata_eh_analyze_ncq_error(link); 1997c6fd2807SJeff Garzik 1998c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1999c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2000c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2001c6fd2807SJeff Garzik 2002c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2003c6fd2807SJeff Garzik 2004c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2005c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2006c6fd2807SJeff Garzik 2007b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2008b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2009c6fd2807SJeff Garzik continue; 
2010c6fd2807SJeff Garzik 2011c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2012c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2013c6fd2807SJeff Garzik 2014c6fd2807SJeff Garzik /* analyze TF */ 2015c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2016c6fd2807SJeff Garzik 2017c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2018c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2019c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2020c6fd2807SJeff Garzik AC_ERR_INVALID); 2021c6fd2807SJeff Garzik 2022c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2023c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2024c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2025c6fd2807SJeff Garzik 2026c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2027f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2028c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2029c6fd2807SJeff Garzik 203003faab78STejun Heo /* determine whether the command is worth retrying */ 203103faab78STejun Heo if (!(qc->err_mask & AC_ERR_INVALID) && 203203faab78STejun Heo ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) 203303faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 203403faab78STejun Heo 2035c6fd2807SJeff Garzik /* accumulate error info */ 2036c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2037c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2038c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 20393884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2040c6fd2807SJeff Garzik } 2041c6fd2807SJeff Garzik 2042c6fd2807SJeff Garzik /* enforce default EH actions */ 2043c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2044c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2045cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20463884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20473884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2048c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2049c6fd2807SJeff Garzik 2050dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2051dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
2052dfcc173dSTejun Heo */ 2053c6fd2807SJeff Garzik if (ehc->i.dev) { 2054c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2055c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2056c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2057c6fd2807SJeff Garzik } 2058c6fd2807SJeff Garzik 20592695e366STejun Heo /* propagate timeout to host link */ 20602695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 20612695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 20622695e366STejun Heo 20632695e366STejun Heo /* record error and consider speeding down */ 2064dfcc173dSTejun Heo dev = ehc->i.dev; 20652695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 20662695e366STejun Heo ata_dev_enabled(link->device)))) 2067dfcc173dSTejun Heo dev = link->device; 2068dfcc173dSTejun Heo 206976326ac1STejun Heo if (dev) { 207076326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 207176326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 20723884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 207376326ac1STejun Heo } 2074dfcc173dSTejun Heo 2075c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2076c6fd2807SJeff Garzik } 2077c6fd2807SJeff Garzik 2078c6fd2807SJeff Garzik /** 20799b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 20809b1e2658STejun Heo * @ap: host port to perform autopsy on 20819b1e2658STejun Heo * 20829b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 20839b1e2658STejun Heo * which recovery actions are needed. 20849b1e2658STejun Heo * 20859b1e2658STejun Heo * LOCKING: 20869b1e2658STejun Heo * Kernel thread context (may sleep). 20879b1e2658STejun Heo */ 2088fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 20899b1e2658STejun Heo { 20909b1e2658STejun Heo struct ata_link *link; 20919b1e2658STejun Heo 20921eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 20939b1e2658STejun Heo ata_eh_link_autopsy(link); 20942695e366STejun Heo 2095b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2096b1c72916STejun Heo * but actions and flags are transferred over to the master 2097b1c72916STejun Heo * link and handled from there. 2098b1c72916STejun Heo */ 2099b1c72916STejun Heo if (ap->slave_link) { 2100b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2101b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2102b1c72916STejun Heo 2103848e4c68STejun Heo /* transfer control flags from master to slave */ 2104848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2105848e4c68STejun Heo 2106848e4c68STejun Heo /* perform autopsy on the slave link */ 2107b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2108b1c72916STejun Heo 2109848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2110b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2111b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2112b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2113b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2114b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2115b1c72916STejun Heo } 2116b1c72916STejun Heo 21172695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 21182695e366STejun Heo * Perform host link autopsy last. 
21192695e366STejun Heo */ 2120071f44b1STejun Heo if (sata_pmp_attached(ap)) 21212695e366STejun Heo ata_eh_link_autopsy(&ap->link); 21229b1e2658STejun Heo } 21239b1e2658STejun Heo 21249b1e2658STejun Heo /** 21256521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 21266521148cSRobert Hancock * @command: ATA command code to get description for 21276521148cSRobert Hancock * 21286521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 21296521148cSRobert Hancock * command is not known. 21306521148cSRobert Hancock * 21316521148cSRobert Hancock * LOCKING: 21326521148cSRobert Hancock * None 21336521148cSRobert Hancock */ 21346521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 21356521148cSRobert Hancock { 21366521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 21376521148cSRobert Hancock static const struct 21386521148cSRobert Hancock { 21396521148cSRobert Hancock u8 command; 21406521148cSRobert Hancock const char *text; 21416521148cSRobert Hancock } cmd_descr[] = { 21426521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 21436521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 21446521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 21456521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 21466521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 21476521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 21486521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 21496521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 21506521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 21516521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 21526521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 21536521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 21546521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 21556521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 21566521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 21576521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 21586521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 21596521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 21606521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 21616521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 21626521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 21636521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 21646521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 21656521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 21666521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 21676521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 21686521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 21696521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 21706521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 21716521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 21726521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 21736521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 21746521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 21756521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 21766521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA 
EXT" }, 21776521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 21786521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 21796521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 21806521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 21816521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 21826521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 21836521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 21846521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 21856521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 21866521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 21876521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 21886521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 21896521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 21906521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 21916521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 21926521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 21936521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 21946521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 21956521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 21966521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 21976521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 21986521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 21996521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 22006521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 22016521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 22026521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 22036521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 22046521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 22056521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 22066521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 22076521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 22086521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 22096521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 22106521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 22116521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 22126521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 22136521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 22146521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 22156521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 22166521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 22176521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 22186521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 22196521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 22206521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 22216521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 22226521148cSRobert Hancock { 0, NULL } /* terminate list */ 22236521148cSRobert Hancock 
}; 22246521148cSRobert Hancock 22256521148cSRobert Hancock unsigned int i; 22266521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 22276521148cSRobert Hancock if (cmd_descr[i].command == command) 22286521148cSRobert Hancock return cmd_descr[i].text; 22296521148cSRobert Hancock #endif 22306521148cSRobert Hancock 22316521148cSRobert Hancock return NULL; 22326521148cSRobert Hancock } 22336521148cSRobert Hancock 22346521148cSRobert Hancock /** 22359b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22360260731fSTejun Heo * @link: ATA link EH is going on 2237c6fd2807SJeff Garzik * 2238c6fd2807SJeff Garzik * Report EH to user. 2239c6fd2807SJeff Garzik * 2240c6fd2807SJeff Garzik * LOCKING: 2241c6fd2807SJeff Garzik * None. 2242c6fd2807SJeff Garzik */ 22439b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2244c6fd2807SJeff Garzik { 22450260731fSTejun Heo struct ata_port *ap = link->ap; 22460260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2247c6fd2807SJeff Garzik const char *frozen, *desc; 2248a1e10f7eSTejun Heo char tries_buf[6]; 2249c6fd2807SJeff Garzik int tag, nr_failed = 0; 2250c6fd2807SJeff Garzik 225194ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 225294ff3d54STejun Heo return; 225394ff3d54STejun Heo 2254c6fd2807SJeff Garzik desc = NULL; 2255c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2256c6fd2807SJeff Garzik desc = ehc->i.desc; 2257c6fd2807SJeff Garzik 2258c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2259c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2260c6fd2807SJeff Garzik 2261b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2262b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2263e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2264e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2265c6fd2807SJeff Garzik continue; 2266c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2267c6fd2807SJeff Garzik continue; 2268c6fd2807SJeff Garzik 2269c6fd2807SJeff Garzik nr_failed++; 2270c6fd2807SJeff Garzik } 2271c6fd2807SJeff Garzik 2272c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2273c6fd2807SJeff Garzik return; 2274c6fd2807SJeff Garzik 2275c6fd2807SJeff Garzik frozen = ""; 2276c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2277c6fd2807SJeff Garzik frozen = " frozen"; 2278c6fd2807SJeff Garzik 2279a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2280a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2281a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2282a1e10f7eSTejun Heo ap->eh_tries); 2283a1e10f7eSTejun Heo 2284c6fd2807SJeff Garzik if (ehc->i.dev) { 2285c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2286a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2287a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2288a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2289c6fd2807SJeff Garzik if (desc) 2290b64bbc39STejun Heo ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2291c6fd2807SJeff Garzik } else { 22920260731fSTejun Heo ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2293a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2294a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2295a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2296c6fd2807SJeff Garzik if (desc) 22970260731fSTejun Heo ata_link_printk(link, KERN_ERR, "%s\n", desc); 2298c6fd2807SJeff Garzik } 
2299c6fd2807SJeff Garzik 23006521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23011333e194SRobert Hancock if (ehc->i.serror) 2302da0e21d3STejun Heo ata_link_printk(link, KERN_ERR, 23031333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 23041333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 23051333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 23061333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 23071333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 23081333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 23091333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 23101333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 23111333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 23121333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 23131333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 23141333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 23151333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 23161333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 23171333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 23181333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 23191333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 23201333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 23216521148cSRobert Hancock #endif 23221333e194SRobert Hancock 2323c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2324c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 23258a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2326abb6a889STejun Heo const u8 *cdb = qc->cdb; 2327abb6a889STejun Heo char data_buf[20] = ""; 2328abb6a889STejun Heo char cdb_buf[70] = ""; 2329c6fd2807SJeff Garzik 23300260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2331b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2332c6fd2807SJeff Garzik continue; 2333c6fd2807SJeff Garzik 2334abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2335abb6a889STejun Heo static const char *dma_str[] = { 2336abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2337abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2338abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2339abb6a889STejun Heo }; 2340abb6a889STejun Heo static const char *prot_str[] = { 2341abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2342abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2343abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 23440dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 23450dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2346abb6a889STejun Heo }; 2347abb6a889STejun Heo 2348abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2349abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2350abb6a889STejun Heo dma_str[qc->dma_dir]); 2351abb6a889STejun Heo } 2352abb6a889STejun Heo 23536521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 23546521148cSRobert Hancock if (qc->scsicmd) 23556521148cSRobert Hancock scsi_print_command(qc->scsicmd); 23566521148cSRobert Hancock else 2357abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2358abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2359abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2360abb6a889STejun Heo 
cdb[0], cdb[1], cdb[2], cdb[3], 2361abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2362abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2363abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 23646521148cSRobert Hancock } else { 23656521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 23666521148cSRobert Hancock if (descr) 23676521148cSRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23686521148cSRobert Hancock "failed command: %s\n", descr); 23696521148cSRobert Hancock } 2370abb6a889STejun Heo 23718a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 23728a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2373abb6a889STejun Heo "tag %d%s\n %s" 23748a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 23755335b729STejun Heo "Emask 0x%x (%s)%s\n", 23768a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 23778a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 23788a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 23798a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2380abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 23818a937581STejun Heo res->command, res->feature, res->nsect, 23828a937581STejun Heo res->lbal, res->lbam, res->lbah, 23838a937581STejun Heo res->hob_feature, res->hob_nsect, 23848a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 23855335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 23865335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 23871333e194SRobert Hancock 23886521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23891333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 23901333e194SRobert Hancock ATA_ERR)) { 23911333e194SRobert Hancock if (res->command & ATA_BUSY) 23921333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23931333e194SRobert Hancock "status: { Busy }\n"); 23941333e194SRobert Hancock else 23951333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 23961333e194SRobert Hancock "status: { %s%s%s%s}\n", 23971333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 23981333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 23991333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 24001333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 24011333e194SRobert Hancock } 24021333e194SRobert Hancock 24031333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 24041333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 24051333e194SRobert Hancock ATA_ABORTED))) 24061333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 24071333e194SRobert Hancock "error: { %s%s%s%s}\n", 24081333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 24091333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 24101333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 24111333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 24126521148cSRobert Hancock #endif 2413c6fd2807SJeff Garzik } 2414c6fd2807SJeff Garzik } 2415c6fd2807SJeff Garzik 24169b1e2658STejun Heo /** 24179b1e2658STejun Heo * ata_eh_report - report error handling to user 24189b1e2658STejun Heo * @ap: ATA port to report EH about 24199b1e2658STejun Heo * 24209b1e2658STejun Heo * Report EH to user. 24219b1e2658STejun Heo * 24229b1e2658STejun Heo * LOCKING: 24239b1e2658STejun Heo * None. 
24249b1e2658STejun Heo */ 2425fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24269b1e2658STejun Heo { 24279b1e2658STejun Heo struct ata_link *link; 24289b1e2658STejun Heo 24291eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 24309b1e2658STejun Heo ata_eh_link_report(link); 24319b1e2658STejun Heo } 24329b1e2658STejun Heo 2433cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2434b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2435b1c72916STejun Heo bool clear_classes) 2436c6fd2807SJeff Garzik { 2437f58229f8STejun Heo struct ata_device *dev; 2438c6fd2807SJeff Garzik 2439b1c72916STejun Heo if (clear_classes) 24401eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2441f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2442c6fd2807SJeff Garzik 2443f046519fSTejun Heo return reset(link, classes, deadline); 2444c6fd2807SJeff Garzik } 2445c6fd2807SJeff Garzik 2446ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link, 24475dbfc9cbSTejun Heo int rc, const unsigned int *classes) 2448c6fd2807SJeff Garzik { 244945db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2450ae791c05STejun Heo return 0; 24515dbfc9cbSTejun Heo if (rc == -EAGAIN) 2452c6fd2807SJeff Garzik return 1; 2453071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 24543495de73STejun Heo return 1; 2455c6fd2807SJeff Garzik return 0; 2456c6fd2807SJeff Garzik } 2457c6fd2807SJeff Garzik 2458fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2459c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2460c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2461c6fd2807SJeff Garzik { 2462afaa5c37STejun Heo struct ata_port *ap = link->ap; 2463b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2464936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2465705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2466c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2467416dc9edSTejun Heo unsigned int lflags = link->flags; 2468c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2469d8af0eb6STejun Heo int max_tries = 0, try = 0; 2470b1c72916STejun Heo struct ata_link *failed_link; 2471f58229f8STejun Heo struct ata_device *dev; 2472416dc9edSTejun Heo unsigned long deadline, now; 2473c6fd2807SJeff Garzik ata_reset_fn_t reset; 2474afaa5c37STejun Heo unsigned long flags; 2475416dc9edSTejun Heo u32 sstatus; 2476b1c72916STejun Heo int nr_unknown, rc; 2477c6fd2807SJeff Garzik 2478932648b0STejun Heo /* 2479932648b0STejun Heo * Prepare to reset 2480932648b0STejun Heo */ 2481d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2482d8af0eb6STejun Heo max_tries++; 248305944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 248405944bdfSTejun Heo hardreset = NULL; 248505944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 248605944bdfSTejun Heo softreset = NULL; 2487d8af0eb6STejun Heo 248819b72321STejun Heo /* make sure each reset attempt is at least COOL_DOWN apart */ 248919b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 24900a2c0f56STejun Heo now = jiffies; 249119b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 249219b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 249319b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 24940a2c0f56STejun Heo if (time_before(now, deadline)) 24950a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 249619b72321STejun Heo } 24970a2c0f56STejun Heo 2498afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2499afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2500afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2501afaa5c37STejun Heo 2502cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2503c6fd2807SJeff Garzik 25041eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2505cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2506cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2507cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2508cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2509cdeab114STejun Heo * suitable controller mode we should not touch the 2510cdeab114STejun Heo * bus as we may be talking too fast. 2511cdeab114STejun Heo */ 2512cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2513cdeab114STejun Heo 2514cdeab114STejun Heo /* If the controller has a pio mode setup function 2515cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2516cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2517cdeab114STejun Heo * configuring devices.
2518cdeab114STejun Heo */ 2519cdeab114STejun Heo if (ap->ops->set_piomode) 2520cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2521cdeab114STejun Heo } 2522cdeab114STejun Heo 2523cf480626STejun Heo /* prefer hardreset */ 2524932648b0STejun Heo reset = NULL; 2525cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2526cf480626STejun Heo if (hardreset) { 2527cf480626STejun Heo reset = hardreset; 2528a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 25294f7faa3fSTejun Heo } else if (softreset) { 2530cf480626STejun Heo reset = softreset; 2531a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2532cf480626STejun Heo } 2533c6fd2807SJeff Garzik 2534c6fd2807SJeff Garzik if (prereset) { 2535b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2536b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2537b1c72916STejun Heo 2538b1c72916STejun Heo if (slave) { 2539b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2540b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2541b1c72916STejun Heo } 2542b1c72916STejun Heo 2543b1c72916STejun Heo rc = prereset(link, deadline); 2544b1c72916STejun Heo 2545b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2546b1c72916STejun Heo * is skipped iff both master and slave links report 2547b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2548b1c72916STejun Heo */ 2549b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2550b1c72916STejun Heo int tmp; 2551b1c72916STejun Heo 2552b1c72916STejun Heo tmp = prereset(slave, deadline); 2553b1c72916STejun Heo if (tmp != -ENOENT) 2554b1c72916STejun Heo rc = tmp; 2555b1c72916STejun Heo 2556b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2557b1c72916STejun Heo } 2558b1c72916STejun Heo 2559c6fd2807SJeff Garzik if (rc) { 2560c961922bSAlan Cox if (rc == -ENOENT) { 2561cc0680a5STejun Heo ata_link_printk(link, KERN_DEBUG, 25624aa9ab67STejun Heo "port disabled. ignoring.\n"); 2563cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 25644aa9ab67STejun Heo 25651eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2566f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 25674aa9ab67STejun Heo 25684aa9ab67STejun Heo rc = 0; 2569c961922bSAlan Cox } else 2570cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2571c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 2572fccb6ea5STejun Heo goto out; 2573c6fd2807SJeff Garzik } 2574c6fd2807SJeff Garzik 2575932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2576d6515e6fSTejun Heo * bang classes, thaw and return. 
2577932648b0STejun Heo */ 2578932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 25791eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2580f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2581d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2582d6515e6fSTejun Heo ata_is_host_link(link)) 2583d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2584fccb6ea5STejun Heo rc = 0; 2585fccb6ea5STejun Heo goto out; 2586c6fd2807SJeff Garzik } 2587932648b0STejun Heo } 2588c6fd2807SJeff Garzik 2589c6fd2807SJeff Garzik retry: 2590932648b0STejun Heo /* 2591932648b0STejun Heo * Perform reset 2592932648b0STejun Heo */ 2593dc98c32cSTejun Heo if (ata_is_host_link(link)) 2594dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2595dc98c32cSTejun Heo 2596341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 259731daabdaSTejun Heo 2598932648b0STejun Heo if (reset) { 2599c6fd2807SJeff Garzik if (verbose) 2600cc0680a5STejun Heo ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2601c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard"); 2602c6fd2807SJeff Garzik 2603c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 260419b72321STejun Heo ehc->last_reset = jiffies; 26050d64a233STejun Heo if (reset == hardreset) 26060d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 26070d64a233STejun Heo else 26080d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2609c6fd2807SJeff Garzik 2610b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2611b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2612b1c72916STejun Heo failed_link = link; 26135dbfc9cbSTejun Heo goto fail; 2614b1c72916STejun Heo } 2615c6fd2807SJeff Garzik 2616b1c72916STejun Heo /* hardreset slave link if existent */ 2617b1c72916STejun Heo if (slave && reset == hardreset) { 2618b1c72916STejun Heo int tmp; 2619b1c72916STejun Heo 2620b1c72916STejun Heo if (verbose) 2621b1c72916STejun Heo ata_link_printk(slave, KERN_INFO, 2622b1c72916STejun Heo "hard resetting link\n"); 2623b1c72916STejun Heo 2624b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2625b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2626b1c72916STejun Heo false); 2627b1c72916STejun Heo switch (tmp) { 2628b1c72916STejun Heo case -EAGAIN: 2629b1c72916STejun Heo rc = -EAGAIN; 2630b1c72916STejun Heo case 0: 2631b1c72916STejun Heo break; 2632b1c72916STejun Heo default: 2633b1c72916STejun Heo failed_link = slave; 2634b1c72916STejun Heo rc = tmp; 2635b1c72916STejun Heo goto fail; 2636b1c72916STejun Heo } 2637b1c72916STejun Heo } 2638b1c72916STejun Heo 2639b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2640c6fd2807SJeff Garzik if (reset == hardreset && 26415dbfc9cbSTejun Heo ata_eh_followup_srst_needed(link, rc, classes)) { 2642c6fd2807SJeff Garzik reset = softreset; 2643c6fd2807SJeff Garzik 2644c6fd2807SJeff Garzik if (!reset) { 2645cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2646c6fd2807SJeff Garzik "follow-up softreset required " 2647c6fd2807SJeff Garzik "but no softreset available\n"); 2648b1c72916STejun Heo failed_link = link; 2649fccb6ea5STejun Heo rc = -EINVAL; 265008cf69d0STejun Heo goto fail; 2651c6fd2807SJeff Garzik } 2652c6fd2807SJeff Garzik 2653cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2654b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2655fe2c4d01STejun Heo if (rc) { 2656fe2c4d01STejun Heo failed_link = link; 2657fe2c4d01STejun Heo goto fail; 2658fe2c4d01STejun Heo } 2659c6fd2807SJeff
Garzik } 2660932648b0STejun Heo } else { 2661932648b0STejun Heo if (verbose) 2662932648b0STejun Heo ata_link_printk(link, KERN_INFO, "no reset method " 2663932648b0STejun Heo "available, skipping reset\n"); 2664932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2665932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2666932648b0STejun Heo } 2667008a7896STejun Heo 2668932648b0STejun Heo /* 2669932648b0STejun Heo * Post-reset processing 2670932648b0STejun Heo */ 26711eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2672416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2673416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2674416dc9edSTejun Heo * drives from sleeping mode. 2675c6fd2807SJeff Garzik */ 2676f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2677054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2678c6fd2807SJeff Garzik 26793b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 26803b761d3dSTejun Heo continue; 26813b761d3dSTejun Heo 26824ccd3329STejun Heo /* apply class override */ 2683416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2684ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2685416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2686816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2687ae791c05STejun Heo } 2688ae791c05STejun Heo 2689008a7896STejun Heo /* record current link speed */ 2690936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2691936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2692b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2693b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2694008a7896STejun Heo 2695dc98c32cSTejun Heo /* thaw the port */ 2696dc98c32cSTejun Heo if (ata_is_host_link(link)) 2697dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2698dc98c32cSTejun Heo 2699f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2700f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2701f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2702f046519fSTejun Heo * This introduces a race condition where hotplug occurs between 2703f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2704f046519fSTejun Heo * link onlineness and classification result later. 2705f046519fSTejun Heo */ 2706b1c72916STejun Heo if (postreset) { 2707cc0680a5STejun Heo postreset(link, classes); 2708b1c72916STejun Heo if (slave) 2709b1c72916STejun Heo postreset(slave, classes); 2710b1c72916STejun Heo } 2711c6fd2807SJeff Garzik 27121e641060STejun Heo /* 27131e641060STejun Heo * Some controllers can't be frozen very well and may set 27141e641060STejun Heo * spurious error conditions during reset. Clear accumulated 27151e641060STejun Heo * error information. As reset is the final recovery action, 27161e641060STejun Heo * nothing is lost by doing this. 27171e641060STejun Heo */ 2718f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 27191e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2720b1c72916STejun Heo if (slave) 27211e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 27221e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2723f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2724f046519fSTejun Heo 27253b761d3dSTejun Heo /* 27263b761d3dSTejun Heo * Make sure onlineness and classification result correspond.
2727f046519fSTejun Heo * Hotplug could have happened during reset and some 2728f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2729f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 27303b761d3dSTejun Heo * link on/offlineness and classification result, those 27313b761d3dSTejun Heo * conditions can be reliably detected and retried. 2732f046519fSTejun Heo */ 2733b1c72916STejun Heo nr_unknown = 0; 27341eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 27353b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2736b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 27373b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link online " 27383b761d3dSTejun Heo "but device misclassified\n"); 2739f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2740b1c72916STejun Heo nr_unknown++; 2741b1c72916STejun Heo } 27423b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 27433b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 27443b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link offline, " 27453b761d3dSTejun Heo "clearing class %d to NONE\n", 27463b761d3dSTejun Heo classes[dev->devno]); 27473b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27483b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 27493b761d3dSTejun Heo ata_dev_printk(dev, KERN_DEBUG, "link status unknown, " 27503b761d3dSTejun Heo "clearing UNKNOWN to NONE\n"); 27513b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27523b761d3dSTejun Heo } 2753f046519fSTejun Heo } 2754f046519fSTejun Heo 2755b1c72916STejun Heo if (classify && nr_unknown) { 2756f046519fSTejun Heo if (try < max_tries) { 2757f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, "link online but " 27583b761d3dSTejun Heo "%d devices misclassified, retrying\n", 27593b761d3dSTejun Heo nr_unknown); 2760b1c72916STejun Heo failed_link = link; 2761f046519fSTejun Heo rc = -EAGAIN; 2762f046519fSTejun Heo goto fail; 2763f046519fSTejun Heo } 2764f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, 27653b761d3dSTejun Heo "link online but %d devices misclassified, " 27663b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2767f046519fSTejun Heo } 2768f046519fSTejun Heo 2769c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2770cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2771b1c72916STejun Heo if (slave) 2772b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 277319b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2774c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2775416dc9edSTejun Heo 2776416dc9edSTejun Heo rc = 0; 2777fccb6ea5STejun Heo out: 2778fccb6ea5STejun Heo /* clear hotplug flag */ 2779fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2780b1c72916STejun Heo if (slave) 2781b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2782afaa5c37STejun Heo 2783afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2784afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2785afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2786afaa5c37STejun Heo 2787c6fd2807SJeff Garzik return rc; 2788416dc9edSTejun Heo 2789416dc9edSTejun Heo fail: 27905958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 27915958e302STejun Heo if (!ata_is_host_link(link) && 27925958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 27935958e302STejun Heo rc = -ERESTART; 27945958e302STejun Heo 2795416dc9edSTejun
Heo if (rc == -ERESTART || try >= max_tries) 2796416dc9edSTejun Heo goto out; 2797416dc9edSTejun Heo 2798416dc9edSTejun Heo now = jiffies; 2799416dc9edSTejun Heo if (time_before(now, deadline)) { 2800416dc9edSTejun Heo unsigned long delta = deadline - now; 2801416dc9edSTejun Heo 2802b1c72916STejun Heo ata_link_printk(failed_link, KERN_WARNING, 28030a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 28040a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2805416dc9edSTejun Heo 2806416dc9edSTejun Heo while (delta) 2807416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2808416dc9edSTejun Heo } 2809416dc9edSTejun Heo 2810b1c72916STejun Heo if (try == max_tries - 1) { 2811a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2812b1c72916STejun Heo if (slave) 2813a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2814b1c72916STejun Heo } else if (rc == -EPIPE) 2815a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2816b1c72916STejun Heo 2817416dc9edSTejun Heo if (hardreset) 2818416dc9edSTejun Heo reset = hardreset; 2819416dc9edSTejun Heo goto retry; 2820c6fd2807SJeff Garzik } 2821c6fd2807SJeff Garzik 282245fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 282345fabbb7SElias Oltmanns { 282445fabbb7SElias Oltmanns struct ata_link *link; 282545fabbb7SElias Oltmanns struct ata_device *dev; 282645fabbb7SElias Oltmanns unsigned long flags; 282745fabbb7SElias Oltmanns 282845fabbb7SElias Oltmanns /* 282945fabbb7SElias Oltmanns * This function can be thought of as an extended version of 283045fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 283145fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 283245fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 283345fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 283445fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 283545fabbb7SElias Oltmanns * up park requests to other devices on the same port or 283645fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 283745fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 283845fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 283945fabbb7SElias Oltmanns * 284045fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 284145fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 284245fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 284345fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 284445fabbb7SElias Oltmanns * exit from this function, i.e. when ATA_EH_PARK actions for 284545fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 284645fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 284745fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 284845fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 284945fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 285045fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 285145fabbb7SElias Oltmanns * ata_eh_recover() again. 
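 *
 * (For reference, a park request typically originates from userspace
 * writing a timeout in milliseconds to the disk's unload_heads sysfs
 * attribute; that write is handled by ata_scsi_park_store(), which
 * schedules ATA_EH_PARK and completes ap->park_req_pending.)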
285245fabbb7SElias Oltmanns */ 285345fabbb7SElias Oltmanns 285445fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 285545fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 28561eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 28571eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 285845fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 285945fabbb7SElias Oltmanns 286045fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 286145fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 286245fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 286345fabbb7SElias Oltmanns } 286445fabbb7SElias Oltmanns } 286545fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 286645fabbb7SElias Oltmanns } 286745fabbb7SElias Oltmanns 286845fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 286945fabbb7SElias Oltmanns { 287045fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 287145fabbb7SElias Oltmanns struct ata_taskfile tf; 287245fabbb7SElias Oltmanns unsigned int err_mask; 287345fabbb7SElias Oltmanns 287445fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 287545fabbb7SElias Oltmanns if (park) { 287645fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 287745fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 287845fabbb7SElias Oltmanns tf.feature = 0x44; 287945fabbb7SElias Oltmanns tf.lbal = 0x4c; 288045fabbb7SElias Oltmanns tf.lbam = 0x4e; 288145fabbb7SElias Oltmanns tf.lbah = 0x55; 288245fabbb7SElias Oltmanns } else { 288345fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 288445fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 288545fabbb7SElias Oltmanns } 288645fabbb7SElias Oltmanns 288745fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 288845fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 288945fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 289045fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 289145fabbb7SElias Oltmanns ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 289245fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 289345fabbb7SElias Oltmanns } 289445fabbb7SElias Oltmanns } 289545fabbb7SElias Oltmanns 28960260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2897c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2898c6fd2807SJeff Garzik { 28990260731fSTejun Heo struct ata_port *ap = link->ap; 29000260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2901c6fd2807SJeff Garzik struct ata_device *dev; 29028c3c52a8STejun Heo unsigned int new_mask = 0; 2903c6fd2807SJeff Garzik unsigned long flags; 2904f58229f8STejun Heo int rc = 0; 2905c6fd2807SJeff Garzik 2906c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2907c6fd2807SJeff Garzik 29088c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 29098c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 29108c3c52a8STejun Heo * device before the master device is identified. 
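 * (Hence the ALL_REVERSE iteration below, which visits device 1, the
 * slave, before device 0, the master.)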
29118c3c52a8STejun Heo */ 29121eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2913f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2914f58229f8STejun Heo unsigned int readid_flags = 0; 2915c6fd2807SJeff Garzik 2916bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2917bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2918bff04647STejun Heo 29199666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2920633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2921633273a3STejun Heo 2922b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2923c6fd2807SJeff Garzik rc = -EIO; 29248c3c52a8STejun Heo goto err; 2925c6fd2807SJeff Garzik } 2926c6fd2807SJeff Garzik 29270260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2928422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2929422c9daaSTejun Heo readid_flags); 2930c6fd2807SJeff Garzik if (rc) 29318c3c52a8STejun Heo goto err; 2932c6fd2807SJeff Garzik 29330260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2934c6fd2807SJeff Garzik 2935baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2936baa1e78aSTejun Heo * transfer mode. 2937baa1e78aSTejun Heo */ 2938baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2939baa1e78aSTejun Heo 2940c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2941c6fd2807SJeff Garzik queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 2942c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2943c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2944c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2945842faa6cSTejun Heo /* Temporarily set dev->class, it will be 2946842faa6cSTejun Heo * permanently set once all configurations are 2947842faa6cSTejun Heo * complete. This is necessary because new 2948842faa6cSTejun Heo * device configuration is done in two 2949842faa6cSTejun Heo * separate loops. 2950842faa6cSTejun Heo */ 2951c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2952c6fd2807SJeff Garzik 2953633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2954633273a3STejun Heo rc = sata_pmp_attach(dev); 2955633273a3STejun Heo else 2956633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2957633273a3STejun Heo readid_flags, dev->id); 2958842faa6cSTejun Heo 2959842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 2960842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 2961842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 2962842faa6cSTejun Heo 29638c3c52a8STejun Heo switch (rc) { 29648c3c52a8STejun Heo case 0: 296599cf610aSTejun Heo /* clear error info accumulated during probe */ 296699cf610aSTejun Heo ata_ering_clear(&dev->ering); 2967f58229f8STejun Heo new_mask |= 1 << dev->devno; 29688c3c52a8STejun Heo break; 29698c3c52a8STejun Heo case -ENOENT: 297055a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 297155a8e2c8STejun Heo * device. No need to reset. Just 2972842faa6cSTejun Heo * thaw and ignore the device. 
297355a8e2c8STejun Heo */ 297455a8e2c8STejun Heo ata_eh_thaw_port(ap); 2975c6fd2807SJeff Garzik break; 29768c3c52a8STejun Heo default: 29778c3c52a8STejun Heo goto err; 29788c3c52a8STejun Heo } 29798c3c52a8STejun Heo } 2980c6fd2807SJeff Garzik } 2981c6fd2807SJeff Garzik 2982c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 298333267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 298433267325STejun Heo if (ap->ops->cable_detect) 2985c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 298633267325STejun Heo ata_force_cbl(ap); 298733267325STejun Heo } 2988c1c4e8d5STejun Heo 29898c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 29908c3c52a8STejun Heo * device detection messages backwards. 29918c3c52a8STejun Heo */ 29921eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 29934f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 29948c3c52a8STejun Heo continue; 29958c3c52a8STejun Heo 2996842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 2997842faa6cSTejun Heo 29984f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 29994f7c2874STejun Heo continue; 30004f7c2874STejun Heo 30018c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 30028c3c52a8STejun Heo rc = ata_dev_configure(dev); 30038c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3004842faa6cSTejun Heo if (rc) { 3005842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 30068c3c52a8STejun Heo goto err; 3007842faa6cSTejun Heo } 30088c3c52a8STejun Heo 3009c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3010c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3011c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3012baa1e78aSTejun Heo 301355a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3014baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3015c6fd2807SJeff Garzik } 3016c6fd2807SJeff Garzik 30178c3c52a8STejun Heo return 0; 30188c3c52a8STejun Heo 30198c3c52a8STejun Heo err: 3020c6fd2807SJeff Garzik *r_failed_dev = dev; 30218c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3022c6fd2807SJeff Garzik return rc; 3023c6fd2807SJeff Garzik } 3024c6fd2807SJeff Garzik 30256f1d1e3aSTejun Heo /** 30266f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 30276f1d1e3aSTejun Heo * @link: link on which timings will be programmed 302898a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 30296f1d1e3aSTejun Heo * 30306f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 30316f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 30326f1d1e3aSTejun Heo * returned in @r_failed_dev. 30336f1d1e3aSTejun Heo * 30346f1d1e3aSTejun Heo * LOCKING: 30356f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
30366f1d1e3aSTejun Heo * 30376f1d1e3aSTejun Heo * RETURNS: 30386f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 30396f1d1e3aSTejun Heo */ 30406f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 30416f1d1e3aSTejun Heo { 30426f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 304300115e0fSTejun Heo struct ata_device *dev; 304400115e0fSTejun Heo int rc; 30456f1d1e3aSTejun Heo 304676326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 30471eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 304876326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 304976326ac1STejun Heo struct ata_ering_entry *ent; 305076326ac1STejun Heo 305176326ac1STejun Heo ent = ata_ering_top(&dev->ering); 305276326ac1STejun Heo if (ent) 305376326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 305476326ac1STejun Heo } 305576326ac1STejun Heo } 305676326ac1STejun Heo 30576f1d1e3aSTejun Heo /* has private set_mode? */ 30586f1d1e3aSTejun Heo if (ap->ops->set_mode) 305900115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 306000115e0fSTejun Heo else 306100115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 306200115e0fSTejun Heo 306300115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 30641eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 306500115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 306600115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 306700115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 306800115e0fSTejun Heo 306900115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 307000115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 307100115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 307200115e0fSTejun Heo } 307300115e0fSTejun Heo 307400115e0fSTejun Heo return rc; 30756f1d1e3aSTejun Heo } 30766f1d1e3aSTejun Heo 307711fc33daSTejun Heo /** 307811fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 307911fc33daSTejun Heo * @dev: ATAPI device to clear UA for 308011fc33daSTejun Heo * 308111fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 308211fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 308311fc33daSTejun Heo * function clears UA. 308411fc33daSTejun Heo * 308511fc33daSTejun Heo * LOCKING: 308611fc33daSTejun Heo * EH context (may sleep). 308711fc33daSTejun Heo * 308811fc33daSTejun Heo * RETURNS: 308911fc33daSTejun Heo * 0 on success, -errno on failure. 
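 *
 * Internally this loops up to ATA_EH_UA_TRIES times, issuing TEST UNIT
 * READY and, while the result indicates UNIT ATTENTION, REQUEST SENSE
 * to consume the pending condition.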
309011fc33daSTejun Heo */ 309111fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 309211fc33daSTejun Heo { 309311fc33daSTejun Heo int i; 309411fc33daSTejun Heo 309511fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3096b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 309711fc33daSTejun Heo u8 sense_key = 0; 309811fc33daSTejun Heo unsigned int err_mask; 309911fc33daSTejun Heo 310011fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 310111fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 310211fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 310311fc33daSTejun Heo "failed (err_mask=0x%x)\n", err_mask); 310411fc33daSTejun Heo return -EIO; 310511fc33daSTejun Heo } 310611fc33daSTejun Heo 310711fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 310811fc33daSTejun Heo return 0; 310911fc33daSTejun Heo 311011fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 311111fc33daSTejun Heo if (err_mask) { 311211fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, "failed to clear " 311311fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 311411fc33daSTejun Heo return -EIO; 311511fc33daSTejun Heo } 311611fc33daSTejun Heo } 311711fc33daSTejun Heo 311811fc33daSTejun Heo ata_dev_printk(dev, KERN_WARNING, 311911fc33daSTejun Heo "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 312011fc33daSTejun Heo 312111fc33daSTejun Heo return 0; 312211fc33daSTejun Heo } 312311fc33daSTejun Heo 3124*6013efd8STejun Heo /** 3125*6013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 3126*6013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 3127*6013efd8STejun Heo * 3128*6013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported to the upper layer 3129*6013efd8STejun Heo * immediately as it means that @dev failed to remap and already 3130*6013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 3131*6013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 3132*6013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 3133*6013efd8STejun Heo * to be retried. 3134*6013efd8STejun Heo * 3135*6013efd8STejun Heo * This function determines whether FLUSH failure retry is 3136*6013efd8STejun Heo * necessary and performs it if so. 3137*6013efd8STejun Heo * 3138*6013efd8STejun Heo * RETURNS: 3139*6013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 3140*6013efd8STejun Heo */ 3141*6013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 3142*6013efd8STejun Heo { 3143*6013efd8STejun Heo struct ata_link *link = dev->link; 3144*6013efd8STejun Heo struct ata_port *ap = link->ap; 3145*6013efd8STejun Heo struct ata_queued_cmd *qc; 3146*6013efd8STejun Heo struct ata_taskfile tf; 3147*6013efd8STejun Heo unsigned int err_mask; 3148*6013efd8STejun Heo int rc = 0; 3149*6013efd8STejun Heo 3150*6013efd8STejun Heo /* did flush fail for this device?
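 * FLUSH is issued as a plain non-NCQ command, so a failed flush should
 * be the qc sitting at link->active_tag checked below.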
*/ 3151*6013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 3152*6013efd8STejun Heo return 0; 3153*6013efd8STejun Heo 3154*6013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 3155*6013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 3156*6013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 3157*6013efd8STejun Heo return 0; 3158*6013efd8STejun Heo 3159*6013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 3160*6013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 3161*6013efd8STejun Heo return 0; 3162*6013efd8STejun Heo 3163*6013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 3164*6013efd8STejun Heo ata_tf_init(dev, &tf); 3165*6013efd8STejun Heo 3166*6013efd8STejun Heo tf.command = qc->tf.command; 3167*6013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 3168*6013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 3169*6013efd8STejun Heo 3170*6013efd8STejun Heo ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n", 3171*6013efd8STejun Heo tf.command, qc->err_mask); 3172*6013efd8STejun Heo 3173*6013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 3174*6013efd8STejun Heo if (!err_mask) { 3175*6013efd8STejun Heo /* 3176*6013efd8STejun Heo * FLUSH is complete but there's no way to 3177*6013efd8STejun Heo * successfully complete a failed command from EH. 3178*6013efd8STejun Heo * Making sure retry is allowed at least once and 3179*6013efd8STejun Heo * retrying it should do the trick - whatever was in 3180*6013efd8STejun Heo * the cache is already on the platter and this won't 3181*6013efd8STejun Heo * cause infinite loop. 3182*6013efd8STejun Heo */ 3183*6013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 3184*6013efd8STejun Heo } else { 3185*6013efd8STejun Heo ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", 3186*6013efd8STejun Heo err_mask); 3187*6013efd8STejun Heo rc = -EIO; 3188*6013efd8STejun Heo 3189*6013efd8STejun Heo /* if device failed it, report it to upper layers */ 3190*6013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 3191*6013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 3192*6013efd8STejun Heo qc->result_tf = tf; 3193*6013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 3194*6013efd8STejun Heo rc = 0; 3195*6013efd8STejun Heo } 3196*6013efd8STejun Heo } 3197*6013efd8STejun Heo return rc; 3198*6013efd8STejun Heo } 3199*6013efd8STejun Heo 32000260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link) 3201c6fd2807SJeff Garzik { 3202f58229f8STejun Heo struct ata_device *dev; 3203f58229f8STejun Heo int cnt = 0; 3204c6fd2807SJeff Garzik 32051eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3206c6fd2807SJeff Garzik cnt++; 3207c6fd2807SJeff Garzik return cnt; 3208c6fd2807SJeff Garzik } 3209c6fd2807SJeff Garzik 32100260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3211c6fd2807SJeff Garzik { 3212f58229f8STejun Heo struct ata_device *dev; 3213f58229f8STejun Heo int cnt = 0; 3214c6fd2807SJeff Garzik 32151eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3216f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3217c6fd2807SJeff Garzik cnt++; 3218c6fd2807SJeff Garzik return cnt; 3219c6fd2807SJeff Garzik } 3220c6fd2807SJeff Garzik 32210260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3222c6fd2807SJeff Garzik { 3223672b2d65STejun Heo struct ata_port *ap = link->ap; 32240260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3225f58229f8STejun Heo 
struct ata_device *dev; 3226c6fd2807SJeff Garzik 3227f9df58cbSTejun Heo /* skip disabled links */ 3228f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3229f9df58cbSTejun Heo return 1; 3230f9df58cbSTejun Heo 3231672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3232672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3233672b2d65STejun Heo return 0; 3234672b2d65STejun Heo 3235672b2d65STejun Heo /* reset at least once if reset is requested */ 3236672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3237672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3238c6fd2807SJeff Garzik return 0; 3239c6fd2807SJeff Garzik 3240c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 32411eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3242c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3243c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3244c6fd2807SJeff Garzik return 0; 3245c6fd2807SJeff Garzik } 3246c6fd2807SJeff Garzik 3247c6fd2807SJeff Garzik return 1; 3248c6fd2807SJeff Garzik } 3249c6fd2807SJeff Garzik 3250c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3251c2c7a89cSTejun Heo { 3252c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3253c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3254c2c7a89cSTejun Heo int *trials = void_arg; 3255c2c7a89cSTejun Heo 3256c2c7a89cSTejun Heo if (ent->timestamp < now - min(now, interval)) 3257c2c7a89cSTejun Heo return -1; 3258c2c7a89cSTejun Heo 3259c2c7a89cSTejun Heo (*trials)++; 3260c2c7a89cSTejun Heo return 0; 3261c2c7a89cSTejun Heo } 3262c2c7a89cSTejun Heo 326302c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 326402c05a27STejun Heo { 326502c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3266c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3267c2c7a89cSTejun Heo int trials = 0; 326802c05a27STejun Heo 326902c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 327002c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 327102c05a27STejun Heo return 0; 327202c05a27STejun Heo 327302c05a27STejun Heo ata_eh_detach_dev(dev); 327402c05a27STejun Heo ata_dev_init(dev); 327502c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3276cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 327700115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 327800115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 327902c05a27STejun Heo 3280c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3281c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3282c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3283c2c7a89cSTejun Heo * there are consecutive failed probes. 3284c2c7a89cSTejun Heo * 3285c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3286c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3287c2c7a89cSTejun Heo * forced to 1.5Gbps. 3288c2c7a89cSTejun Heo * 3289c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3290c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3291c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 
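 *
 * (The current failure is recorded in the ering before it is scanned,
 * so the probe that just failed is included in the count compared
 * against ATA_EH_PROBE_TRIALS below.)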
3292c2c7a89cSTejun Heo */ 3293c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3294c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3295c2c7a89cSTejun Heo 3296c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3297c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3298c2c7a89cSTejun Heo 329902c05a27STejun Heo return 1; 330002c05a27STejun Heo } 330102c05a27STejun Heo 33029b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3303fee7ca72STejun Heo { 33049af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3305fee7ca72STejun Heo 3306cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3307cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3308cf9a590aSTejun Heo */ 3309cf9a590aSTejun Heo if (err != -EAGAIN) 3310fee7ca72STejun Heo ehc->tries[dev->devno]--; 3311fee7ca72STejun Heo 3312fee7ca72STejun Heo switch (err) { 3313fee7ca72STejun Heo case -ENODEV: 3314fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3315fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3316fee7ca72STejun Heo case -EINVAL: 3317fee7ca72STejun Heo /* give it just one more chance */ 3318fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3319fee7ca72STejun Heo case -EIO: 3320d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3321fee7ca72STejun Heo /* This is the last chance, better to slow 3322fee7ca72STejun Heo * down than lose it. 3323fee7ca72STejun Heo */ 3324a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3325d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3326fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3327fee7ca72STejun Heo } 3328fee7ca72STejun Heo } 3329fee7ca72STejun Heo 3330fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3331fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3332fee7ca72STejun Heo ata_dev_disable(dev); 3333fee7ca72STejun Heo 3334fee7ca72STejun Heo /* detach if offline */ 3335b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3336fee7ca72STejun Heo ata_eh_detach_dev(dev); 3337fee7ca72STejun Heo 333802c05a27STejun Heo /* schedule probe if necessary */ 333987fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3340fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 334187fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 334287fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 334387fbc5a0STejun Heo } 33449b1e2658STejun Heo 33459b1e2658STejun Heo return 1; 3346fee7ca72STejun Heo } else { 3347cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 33489b1e2658STejun Heo return 0; 3349fee7ca72STejun Heo } 3350fee7ca72STejun Heo } 3351fee7ca72STejun Heo 3352c6fd2807SJeff Garzik /** 3353c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3354c6fd2807SJeff Garzik * @ap: host port to recover 3355c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3356c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3357c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3358c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 33599b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3360c6fd2807SJeff Garzik * 3361c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3362c6fd2807SJeff Garzik * libata exception handling. 
On entry, actions required to 33639b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 33649b1e2658STejun Heo * link's eh_context. This function executes all the operations 33659b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3366c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3367c6fd2807SJeff Garzik * 3368c6fd2807SJeff Garzik * LOCKING: 3369c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3370c6fd2807SJeff Garzik * 3371c6fd2807SJeff Garzik * RETURNS: 3372c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3373c6fd2807SJeff Garzik */ 3374fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3375c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 33769b1e2658STejun Heo ata_postreset_fn_t postreset, 33779b1e2658STejun Heo struct ata_link **r_failed_link) 3378c6fd2807SJeff Garzik { 33799b1e2658STejun Heo struct ata_link *link; 3380c6fd2807SJeff Garzik struct ata_device *dev; 33810a2c0f56STejun Heo int nr_failed_devs; 3382dc98c32cSTejun Heo int rc; 338345fabbb7SElias Oltmanns unsigned long flags, deadline; 3384c6fd2807SJeff Garzik 3385c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3386c6fd2807SJeff Garzik 3387c6fd2807SJeff Garzik /* prep for recovery */ 33881eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 33899b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 33909b1e2658STejun Heo 3391f9df58cbSTejun Heo /* re-enable link? */ 3392f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3393f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3394f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3395f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3396f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3397f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3398f9df58cbSTejun Heo } 3399f9df58cbSTejun Heo 34001eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3401fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3402fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3403fd995f70STejun Heo else 3404c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3405c6fd2807SJeff Garzik 340679a55b72STejun Heo /* collect port action mask recorded in dev actions */ 34079b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 34089b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3409f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 341079a55b72STejun Heo 3411c6fd2807SJeff Garzik /* process hotplug request */ 3412c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3413c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3414c6fd2807SJeff Garzik 341502c05a27STejun Heo /* schedule probe if necessary */ 341602c05a27STejun Heo if (!ata_dev_enabled(dev)) 341702c05a27STejun Heo ata_eh_schedule_probe(dev); 3418c6fd2807SJeff Garzik } 34199b1e2658STejun Heo } 3420c6fd2807SJeff Garzik 3421c6fd2807SJeff Garzik retry: 3422c6fd2807SJeff Garzik rc = 0; 34239b1e2658STejun Heo nr_failed_devs = 0; 3424c6fd2807SJeff Garzik 3425c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3426c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3427c6fd2807SJeff Garzik goto out; 3428c6fd2807SJeff Garzik 34299b1e2658STejun Heo /* prep for EH */ 34301eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 34319b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 34329b1e2658STejun Heo 3433c6fd2807SJeff Garzik /* skip EH if possible. 
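 * In short, disabled links, and links with nothing left to recover
 * and no reset still to perform, get their action mask cleared here.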
*/ 34340260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3435c6fd2807SJeff Garzik ehc->i.action = 0; 3436c6fd2807SJeff Garzik 34371eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3438f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 34399b1e2658STejun Heo } 3440c6fd2807SJeff Garzik 3441c6fd2807SJeff Garzik /* reset */ 34421eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 34439b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 34449b1e2658STejun Heo 3445cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 34469b1e2658STejun Heo continue; 34479b1e2658STejun Heo 34489b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3449dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3450c6fd2807SJeff Garzik if (rc) { 34510260731fSTejun Heo ata_link_printk(link, KERN_ERR, 3452c6fd2807SJeff Garzik "reset failed, giving up\n"); 3453c6fd2807SJeff Garzik goto out; 3454c6fd2807SJeff Garzik } 34559b1e2658STejun Heo } 3456c6fd2807SJeff Garzik 345745fabbb7SElias Oltmanns do { 345845fabbb7SElias Oltmanns unsigned long now; 345945fabbb7SElias Oltmanns 346045fabbb7SElias Oltmanns /* 346145fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 346245fabbb7SElias Oltmanns * ap->park_req_pending 346345fabbb7SElias Oltmanns */ 346445fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 346545fabbb7SElias Oltmanns 346645fabbb7SElias Oltmanns deadline = jiffies; 34671eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 34681eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 346945fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 347045fabbb7SElias Oltmanns unsigned long tmp; 347145fabbb7SElias Oltmanns 347245fabbb7SElias Oltmanns if (dev->class != ATA_DEV_ATA) 347345fabbb7SElias Oltmanns continue; 347445fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 347545fabbb7SElias Oltmanns ATA_EH_PARK)) 347645fabbb7SElias Oltmanns continue; 347745fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 347845fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 347945fabbb7SElias Oltmanns deadline = tmp; 348045fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 348145fabbb7SElias Oltmanns continue; 348245fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 348345fabbb7SElias Oltmanns continue; 348445fabbb7SElias Oltmanns 348545fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 348645fabbb7SElias Oltmanns } 348745fabbb7SElias Oltmanns } 348845fabbb7SElias Oltmanns 348945fabbb7SElias Oltmanns now = jiffies; 349045fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 349145fabbb7SElias Oltmanns break; 349245fabbb7SElias Oltmanns 349345fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 349445fabbb7SElias Oltmanns deadline - now); 349545fabbb7SElias Oltmanns } while (deadline); 34961eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 34971eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 349845fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 349945fabbb7SElias Oltmanns (1 << dev->devno))) 350045fabbb7SElias Oltmanns continue; 350145fabbb7SElias Oltmanns 350245fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 350345fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 350445fabbb7SElias Oltmanns } 350545fabbb7SElias Oltmanns } 350645fabbb7SElias Oltmanns 35079b1e2658STejun Heo /* the rest */ 35081eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35099b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35109b1e2658STejun Heo 
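/* Recovery proper: for each link, revalidate and attach devices,
 * hand off to PMP EH if a port multiplier was attached, reprogram
 * the transfer mode if required, clear ATAPI UNIT ATTENTION after a
 * reset, retry failed FLUSHes and configure link power management.
 */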
3511c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 35120260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3513c6fd2807SJeff Garzik if (rc) 3514c6fd2807SJeff Garzik goto dev_fail; 3515c6fd2807SJeff Garzik 3516633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3517633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3518633273a3STejun Heo ehc->i.action = 0; 3519633273a3STejun Heo return 0; 3520633273a3STejun Heo } 3521633273a3STejun Heo 3522baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3523baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 35240260731fSTejun Heo rc = ata_set_mode(link, &dev); 35254ae72a1eSTejun Heo if (rc) 3526c6fd2807SJeff Garzik goto dev_fail; 3527baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3528c6fd2807SJeff Garzik } 3529c6fd2807SJeff Garzik 353011fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 353111fc33daSTejun Heo * disrupting the current users of the device. 353211fc33daSTejun Heo */ 353311fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 35341eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 353511fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 353611fc33daSTejun Heo continue; 353711fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 353811fc33daSTejun Heo if (rc) 353911fc33daSTejun Heo goto dev_fail; 354011fc33daSTejun Heo } 354111fc33daSTejun Heo } 354211fc33daSTejun Heo 3543*6013efd8STejun Heo /* retry flush if necessary */ 3544*6013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 3545*6013efd8STejun Heo if (dev->class != ATA_DEV_ATA) 3546*6013efd8STejun Heo continue; 3547*6013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 3548*6013efd8STejun Heo if (rc) 3549*6013efd8STejun Heo goto dev_fail; 3550*6013efd8STejun Heo } 3551*6013efd8STejun Heo 355211fc33daSTejun Heo /* configure link power saving */ 35533ec25ebdSTejun Heo if (ehc->i.action & ATA_EH_LPM) 35541eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3555ca77329fSKristen Carlson Accardi ata_dev_enable_pm(dev, ap->pm_policy); 3556ca77329fSKristen Carlson Accardi 35579b1e2658STejun Heo /* this link is okay now */ 35589b1e2658STejun Heo ehc->i.flags = 0; 35599b1e2658STejun Heo continue; 3560c6fd2807SJeff Garzik 3561c6fd2807SJeff Garzik dev_fail: 35629b1e2658STejun Heo nr_failed_devs++; 35630a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3564c6fd2807SJeff Garzik 3565b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3566b06ce3e5STejun Heo /* PMP reset requires working host port. 3567b06ce3e5STejun Heo * Can't retry if it's frozen. 3568b06ce3e5STejun Heo */ 3569071f44b1STejun Heo if (sata_pmp_attached(ap)) 3570b06ce3e5STejun Heo goto out; 35719b1e2658STejun Heo break; 35729b1e2658STejun Heo } 3573b06ce3e5STejun Heo } 35749b1e2658STejun Heo 35750a2c0f56STejun Heo if (nr_failed_devs) 3576c6fd2807SJeff Garzik goto retry; 3577c6fd2807SJeff Garzik 3578c6fd2807SJeff Garzik out: 35799b1e2658STejun Heo if (rc && r_failed_link) 35809b1e2658STejun Heo *r_failed_link = link; 3581c6fd2807SJeff Garzik 3582c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3583c6fd2807SJeff Garzik return rc; 3584c6fd2807SJeff Garzik } 3585c6fd2807SJeff Garzik 3586c6fd2807SJeff Garzik /** 3587c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3588c6fd2807SJeff Garzik * @ap: host port to finish EH for 3589c6fd2807SJeff Garzik * 3590c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3591c6fd2807SJeff Garzik * failed qcs. 
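 * Failed qcs that carry no valid sense data have their result
 * taskfile zeroed before being retried so that sense generation
 * starts from a clean slate.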
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
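
/*
 * Illustrative sketch (disabled, not part of libata itself): a low-level
 * driver that cannot use ata_std_error_handler() as-is can still reuse
 * ata_do_eh() from its own ->error_handler, plugging in whichever reset
 * methods its controller supports.  The my_* names below are hypothetical.
 */
#if 0
static int my_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	/* controller-specific PHY handling would go here */
	return sata_std_hardreset(link, class, deadline);
}

static void my_error_handler(struct ata_port *ap)
{
	/* no softreset; EH uses hardreset for every reset attempt */
	ata_do_eh(ap, ata_std_prereset, NULL, my_hardreset, ata_std_postreset);
}
#endif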

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies, which doesn't run while
	 * suspended, and PHY events during resume aren't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
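
/*
 * Illustrative sketch (disabled, not part of libata itself): the optional
 * ->port_suspend and ->port_resume hooks invoked by the two handlers above
 * let a low-level driver quiesce and restore controller-specific state
 * around system sleep.  The my_* names are hypothetical.
 */
#if 0
static int my_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	/* e.g. save controller registers and gate clocks */
	return 0;
}

static int my_port_resume(struct ata_port *ap)
{
	/* e.g. restore controller registers before EH revalidates devices */
	return 0;
}
#endif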