1c6fd2807SJeff Garzik /* 2c6fd2807SJeff Garzik * libata-eh.c - libata error handling 3c6fd2807SJeff Garzik * 48c3d3d4bSTejun Heo * Maintained by: Tejun Heo <tj@kernel.org> 5c6fd2807SJeff Garzik * Please ALWAYS copy linux-ide@vger.kernel.org 6c6fd2807SJeff Garzik * on emails. 7c6fd2807SJeff Garzik * 8c6fd2807SJeff Garzik * Copyright 2006 Tejun Heo <htejun@gmail.com> 9c6fd2807SJeff Garzik * 10c6fd2807SJeff Garzik * 11c6fd2807SJeff Garzik * This program is free software; you can redistribute it and/or 12c6fd2807SJeff Garzik * modify it under the terms of the GNU General Public License as 13c6fd2807SJeff Garzik * published by the Free Software Foundation; either version 2, or 14c6fd2807SJeff Garzik * (at your option) any later version. 15c6fd2807SJeff Garzik * 16c6fd2807SJeff Garzik * This program is distributed in the hope that it will be useful, 17c6fd2807SJeff Garzik * but WITHOUT ANY WARRANTY; without even the implied warranty of 18c6fd2807SJeff Garzik * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19c6fd2807SJeff Garzik * General Public License for more details. 20c6fd2807SJeff Garzik * 21c6fd2807SJeff Garzik * You should have received a copy of the GNU General Public License 22c6fd2807SJeff Garzik * along with this program; see the file COPYING. If not, write to 23c6fd2807SJeff Garzik * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 24c6fd2807SJeff Garzik * USA. 25c6fd2807SJeff Garzik * 26c6fd2807SJeff Garzik * 27c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 289bb9a39cSMauro Carvalho Chehab * as Documentation/driver-api/libata.rst 29c6fd2807SJeff Garzik * 30c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 31c6fd2807SJeff Garzik * http://www.sata-io.org/ 32c6fd2807SJeff Garzik * 33c6fd2807SJeff Garzik */ 34c6fd2807SJeff Garzik 35c6fd2807SJeff Garzik #include <linux/kernel.h> 36242f9dcbSJens Axboe #include <linux/blkdev.h> 3738789fdaSPaul Gortmaker #include <linux/export.h> 382855568bSJeff Garzik #include <linux/pci.h> 39c6fd2807SJeff Garzik #include <scsi/scsi.h> 40c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 41c6fd2807SJeff Garzik #include <scsi/scsi_eh.h> 42c6fd2807SJeff Garzik #include <scsi/scsi_device.h> 43c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 446521148cSRobert Hancock #include <scsi/scsi_dbg.h> 45c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h" 46c6fd2807SJeff Garzik 47c6fd2807SJeff Garzik #include <linux/libata.h> 48c6fd2807SJeff Garzik 49255c03d1SHannes Reinecke #include <trace/events/libata.h> 50c6fd2807SJeff Garzik #include "libata.h" 51c6fd2807SJeff Garzik 527d47e8d4STejun Heo enum { 533884f7b0STejun Heo /* speed down verdicts */ 547d47e8d4STejun Heo ATA_EH_SPDN_NCQ_OFF = (1 << 0), 557d47e8d4STejun Heo ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 567d47e8d4STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 5776326ac1STejun Heo ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 583884f7b0STejun Heo 593884f7b0STejun Heo /* error flags */ 603884f7b0STejun Heo ATA_EFLAG_IS_IO = (1 << 0), 6176326ac1STejun Heo ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 62d9027470SGwendal Grignou ATA_EFLAG_OLD_ER = (1 << 31), 633884f7b0STejun Heo 643884f7b0STejun Heo /* error categories */ 653884f7b0STejun Heo ATA_ECAT_NONE = 0, 663884f7b0STejun Heo ATA_ECAT_ATA_BUS = 1, 673884f7b0STejun Heo ATA_ECAT_TOUT_HSM = 2, 683884f7b0STejun Heo ATA_ECAT_UNK_DEV = 3, 6975f9cafcSTejun Heo ATA_ECAT_DUBIOUS_NONE = 4, 7075f9cafcSTejun Heo ATA_ECAT_DUBIOUS_ATA_BUS = 5, 7175f9cafcSTejun Heo ATA_ECAT_DUBIOUS_TOUT_HSM 
= 6, 7275f9cafcSTejun Heo ATA_ECAT_DUBIOUS_UNK_DEV = 7, 7375f9cafcSTejun Heo ATA_ECAT_NR = 8, 747d47e8d4STejun Heo 7587fbc5a0STejun Heo ATA_EH_CMD_DFL_TIMEOUT = 5000, 7687fbc5a0STejun Heo 770a2c0f56STejun Heo /* always put at least this amount of time between resets */ 780a2c0f56STejun Heo ATA_EH_RESET_COOL_DOWN = 5000, 790a2c0f56STejun Heo 80341c2c95STejun Heo /* Waiting in ->prereset can never be reliable. It's 81341c2c95STejun Heo * sometimes nice to wait there but it can't be depended upon; 82341c2c95STejun Heo * otherwise, we wouldn't be resetting. Just give it enough 83341c2c95STejun Heo * time for most drives to spin up. 8431daabdaSTejun Heo */ 85341c2c95STejun Heo ATA_EH_PRERESET_TIMEOUT = 10000, 86341c2c95STejun Heo ATA_EH_FASTDRAIN_INTERVAL = 3000, 8711fc33daSTejun Heo 8811fc33daSTejun Heo ATA_EH_UA_TRIES = 5, 89c2c7a89cSTejun Heo 90c2c7a89cSTejun Heo /* probe speed down parameters, see ata_eh_schedule_probe() */ 91c2c7a89cSTejun Heo ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 92c2c7a89cSTejun Heo ATA_EH_PROBE_TRIALS = 2, 9331daabdaSTejun Heo }; 9431daabdaSTejun Heo 9531daabdaSTejun Heo /* The following table determines how we sequence resets. Each entry 9631daabdaSTejun Heo * represents timeout for that try. The first try can be soft or 9731daabdaSTejun Heo * hardreset. All others are hardreset if available. In most cases 9831daabdaSTejun Heo * the first reset w/ 10sec timeout should succeed. Following entries 9935bf8821SDan Williams * are mostly for error handling, hotplug and those outlier devices that 10035bf8821SDan Williams * take an exceptionally long time to recover from reset. 10131daabdaSTejun Heo */ 10231daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = { 103341c2c95STejun Heo 10000, /* most drives spin up by 10sec */ 104341c2c95STejun Heo 10000, /* > 99% working drives spin up before 20sec */ 10535bf8821SDan Williams 35000, /* give > 30 secs of idleness for outlier devices */ 106341c2c95STejun Heo 5000, /* and sweet one last chance */ 107d8af0eb6STejun Heo ULONG_MAX, /* > 1 min has elapsed, give up */ 10831daabdaSTejun Heo }; 10931daabdaSTejun Heo 11087fbc5a0STejun Heo static const unsigned long ata_eh_identify_timeouts[] = { 11187fbc5a0STejun Heo 5000, /* covers > 99% of successes and not too boring on failures */ 11287fbc5a0STejun Heo 10000, /* combined time till here is enough even for media access */ 11387fbc5a0STejun Heo 30000, /* for true idiots */ 11487fbc5a0STejun Heo ULONG_MAX, 11587fbc5a0STejun Heo }; 11687fbc5a0STejun Heo 1176013efd8STejun Heo static const unsigned long ata_eh_flush_timeouts[] = { 1186013efd8STejun Heo 15000, /* be generous with flush */ 1196013efd8STejun Heo 15000, /* ditto */ 1206013efd8STejun Heo 30000, /* and even more generous */ 1216013efd8STejun Heo ULONG_MAX, 1226013efd8STejun Heo }; 1236013efd8STejun Heo 12487fbc5a0STejun Heo static const unsigned long ata_eh_other_timeouts[] = { 12587fbc5a0STejun Heo 5000, /* same rationale as identify timeout */ 12687fbc5a0STejun Heo 10000, /* ditto */ 12787fbc5a0STejun Heo /* but no merciful 30sec for other commands, it just isn't worth it */ 12887fbc5a0STejun Heo ULONG_MAX, 12987fbc5a0STejun Heo }; 13087fbc5a0STejun Heo 13187fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent { 13287fbc5a0STejun Heo const u8 *commands; 13387fbc5a0STejun Heo const unsigned long *timeouts; 13487fbc5a0STejun Heo }; 13587fbc5a0STejun Heo 13687fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal 13787fbc5a0STejun Heo * commands. 
Each table entry is a command class and matches the 13887fbc5a0STejun Heo * commands the entry applies to and the timeout table to use. 13987fbc5a0STejun Heo * 14087fbc5a0STejun Heo * On the retry after a command timed out, the next timeout value from 14187fbc5a0STejun Heo * the table is used. If the table doesn't contain further entries, 14287fbc5a0STejun Heo * the last value is used. 14387fbc5a0STejun Heo * 14487fbc5a0STejun Heo * ehc->cmd_timeout_idx keeps track of which timeout to use per 14587fbc5a0STejun Heo * command class, so if SET_FEATURES times out on the first try, the 14687fbc5a0STejun Heo * next try will use the second timeout value only for that class. 14787fbc5a0STejun Heo */ 14887fbc5a0STejun Heo #define CMDS(cmds...) (const u8 []){ cmds, 0 } 14987fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent 15087fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 15187fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 15287fbc5a0STejun Heo .timeouts = ata_eh_identify_timeouts, }, 15387fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 15487fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15587fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 15687fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15787fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_FEATURES), 15887fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 15987fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 16087fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 1616013efd8STejun Heo { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 1626013efd8STejun Heo .timeouts = ata_eh_flush_timeouts }, 16387fbc5a0STejun Heo }; 16487fbc5a0STejun Heo #undef CMDS 16587fbc5a0STejun Heo 166c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap); 1676ffa01d8STejun Heo #ifdef CONFIG_PM 168c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap); 169c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap); 1706ffa01d8STejun Heo #else /* CONFIG_PM */ 1716ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap) 1726ffa01d8STejun Heo { } 1736ffa01d8STejun Heo 1746ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap) 1756ffa01d8STejun Heo { } 1766ffa01d8STejun Heo #endif /* CONFIG_PM */ 177c6fd2807SJeff Garzik 178b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 179b64bbc39STejun Heo va_list args) 180b64bbc39STejun Heo { 181b64bbc39STejun Heo ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 182b64bbc39STejun Heo ATA_EH_DESC_LEN - ehi->desc_len, 183b64bbc39STejun Heo fmt, args); 184b64bbc39STejun Heo } 185b64bbc39STejun Heo 186b64bbc39STejun Heo /** 187b64bbc39STejun Heo * __ata_ehi_push_desc - push error description without adding separator 188b64bbc39STejun Heo * @ehi: target EHI 189b64bbc39STejun Heo * @fmt: printf format string 190b64bbc39STejun Heo * 191b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 192b64bbc39STejun Heo * 193b64bbc39STejun Heo * LOCKING: 194b64bbc39STejun Heo * spin_lock_irqsave(host lock) 195b64bbc39STejun Heo */ 196b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 
197b64bbc39STejun Heo { 198b64bbc39STejun Heo va_list args; 199b64bbc39STejun Heo 200b64bbc39STejun Heo va_start(args, fmt); 201b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 202b64bbc39STejun Heo va_end(args); 203b64bbc39STejun Heo } 204b64bbc39STejun Heo 205b64bbc39STejun Heo /** 206b64bbc39STejun Heo * ata_ehi_push_desc - push error description with separator 207b64bbc39STejun Heo * @ehi: target EHI 208b64bbc39STejun Heo * @fmt: printf format string 209b64bbc39STejun Heo * 210b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 211b64bbc39STejun Heo * If @ehi->desc is not empty, ", " is added in-between. 212b64bbc39STejun Heo * 213b64bbc39STejun Heo * LOCKING: 214b64bbc39STejun Heo * spin_lock_irqsave(host lock) 215b64bbc39STejun Heo */ 216b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 217b64bbc39STejun Heo { 218b64bbc39STejun Heo va_list args; 219b64bbc39STejun Heo 220b64bbc39STejun Heo if (ehi->desc_len) 221b64bbc39STejun Heo __ata_ehi_push_desc(ehi, ", "); 222b64bbc39STejun Heo 223b64bbc39STejun Heo va_start(args, fmt); 224b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 225b64bbc39STejun Heo va_end(args); 226b64bbc39STejun Heo } 227b64bbc39STejun Heo 228b64bbc39STejun Heo /** 229b64bbc39STejun Heo * ata_ehi_clear_desc - clean error description 230b64bbc39STejun Heo * @ehi: target EHI 231b64bbc39STejun Heo * 232b64bbc39STejun Heo * Clear @ehi->desc. 233b64bbc39STejun Heo * 234b64bbc39STejun Heo * LOCKING: 235b64bbc39STejun Heo * spin_lock_irqsave(host lock) 236b64bbc39STejun Heo */ 237b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi) 238b64bbc39STejun Heo { 239b64bbc39STejun Heo ehi->desc[0] = '\0'; 240b64bbc39STejun Heo ehi->desc_len = 0; 241b64bbc39STejun Heo } 242b64bbc39STejun Heo 243cbcdd875STejun Heo /** 244cbcdd875STejun Heo * ata_port_desc - append port description 245cbcdd875STejun Heo * @ap: target ATA port 246cbcdd875STejun Heo * @fmt: printf format string 247cbcdd875STejun Heo * 248cbcdd875STejun Heo * Format string according to @fmt and append it to port 249cbcdd875STejun Heo * description. If port description is not empty, " " is added 250cbcdd875STejun Heo * in-between. This function is to be used while initializing 251cbcdd875STejun Heo * ata_host. The description is printed on host registration. 252cbcdd875STejun Heo * 253cbcdd875STejun Heo * LOCKING: 254cbcdd875STejun Heo * None. 255cbcdd875STejun Heo */ 256cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 
257cbcdd875STejun Heo { 258cbcdd875STejun Heo va_list args; 259cbcdd875STejun Heo 260cbcdd875STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 261cbcdd875STejun Heo 262cbcdd875STejun Heo if (ap->link.eh_info.desc_len) 263cbcdd875STejun Heo __ata_ehi_push_desc(&ap->link.eh_info, " "); 264cbcdd875STejun Heo 265cbcdd875STejun Heo va_start(args, fmt); 266cbcdd875STejun Heo __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 267cbcdd875STejun Heo va_end(args); 268cbcdd875STejun Heo } 269cbcdd875STejun Heo 270cbcdd875STejun Heo #ifdef CONFIG_PCI 271cbcdd875STejun Heo 272cbcdd875STejun Heo /** 273cbcdd875STejun Heo * ata_port_pbar_desc - append PCI BAR description 274cbcdd875STejun Heo * @ap: target ATA port 275cbcdd875STejun Heo * @bar: target PCI BAR 276cbcdd875STejun Heo * @offset: offset into PCI BAR 277cbcdd875STejun Heo * @name: name of the area 278cbcdd875STejun Heo * 279cbcdd875STejun Heo * If @offset is negative, this function formats a string which 280cbcdd875STejun Heo * contains the name, address, size and type of the BAR and 281cbcdd875STejun Heo * appends it to the port description. If @offset is zero or 282cbcdd875STejun Heo * positive, only name and offsetted address is appended. 283cbcdd875STejun Heo * 284cbcdd875STejun Heo * LOCKING: 285cbcdd875STejun Heo * None. 286cbcdd875STejun Heo */ 287cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 288cbcdd875STejun Heo const char *name) 289cbcdd875STejun Heo { 290cbcdd875STejun Heo struct pci_dev *pdev = to_pci_dev(ap->host->dev); 291cbcdd875STejun Heo char *type = ""; 292cbcdd875STejun Heo unsigned long long start, len; 293cbcdd875STejun Heo 294cbcdd875STejun Heo if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 295cbcdd875STejun Heo type = "m"; 296cbcdd875STejun Heo else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 297cbcdd875STejun Heo type = "i"; 298cbcdd875STejun Heo 299cbcdd875STejun Heo start = (unsigned long long)pci_resource_start(pdev, bar); 300cbcdd875STejun Heo len = (unsigned long long)pci_resource_len(pdev, bar); 301cbcdd875STejun Heo 302cbcdd875STejun Heo if (offset < 0) 303cbcdd875STejun Heo ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 304cbcdd875STejun Heo else 305e6a73ab1SAndrew Morton ata_port_desc(ap, "%s 0x%llx", name, 306e6a73ab1SAndrew Morton start + (unsigned long long)offset); 307cbcdd875STejun Heo } 308cbcdd875STejun Heo 309cbcdd875STejun Heo #endif /* CONFIG_PCI */ 310cbcdd875STejun Heo 31187fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd) 31287fbc5a0STejun Heo { 31387fbc5a0STejun Heo int i; 31487fbc5a0STejun Heo 31587fbc5a0STejun Heo for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 31687fbc5a0STejun Heo const u8 *cur; 31787fbc5a0STejun Heo 31887fbc5a0STejun Heo for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 31987fbc5a0STejun Heo if (*cur == cmd) 32087fbc5a0STejun Heo return i; 32187fbc5a0STejun Heo } 32287fbc5a0STejun Heo 32387fbc5a0STejun Heo return -1; 32487fbc5a0STejun Heo } 32587fbc5a0STejun Heo 32687fbc5a0STejun Heo /** 32787fbc5a0STejun Heo * ata_internal_cmd_timeout - determine timeout for an internal command 32887fbc5a0STejun Heo * @dev: target device 32987fbc5a0STejun Heo * @cmd: internal command to be issued 33087fbc5a0STejun Heo * 33187fbc5a0STejun Heo * Determine timeout for internal command @cmd for @dev. 33287fbc5a0STejun Heo * 33387fbc5a0STejun Heo * LOCKING: 33487fbc5a0STejun Heo * EH context. 
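 *
 * Illustrative example only, using the values from the timeout tables
 * above: the first internal IDENTIFY (ATA_CMD_ID_ATA) issued on a
 * device gets 5000 ms.  If it times out and ata_internal_cmd_timed_out()
 * is called, cmd_timeout_idx for that device/command class is advanced,
 * so the retry gets 10000 ms and a further retry 30000 ms.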
33587fbc5a0STejun Heo * 33687fbc5a0STejun Heo * RETURNS: 33787fbc5a0STejun Heo * Determined timeout. 33887fbc5a0STejun Heo */ 33987fbc5a0STejun Heo unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 34087fbc5a0STejun Heo { 34187fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 34287fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 34387fbc5a0STejun Heo int idx; 34487fbc5a0STejun Heo 34587fbc5a0STejun Heo if (ent < 0) 34687fbc5a0STejun Heo return ATA_EH_CMD_DFL_TIMEOUT; 34787fbc5a0STejun Heo 34887fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 34987fbc5a0STejun Heo return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 35087fbc5a0STejun Heo } 35187fbc5a0STejun Heo 35287fbc5a0STejun Heo /** 35387fbc5a0STejun Heo * ata_internal_cmd_timed_out - notification for internal command timeout 35487fbc5a0STejun Heo * @dev: target device 35587fbc5a0STejun Heo * @cmd: internal command which timed out 35687fbc5a0STejun Heo * 35787fbc5a0STejun Heo * Notify EH that internal command @cmd for @dev timed out. This 35887fbc5a0STejun Heo * function should be called only for commands whose timeouts are 35987fbc5a0STejun Heo * determined using ata_internal_cmd_timeout(). 36087fbc5a0STejun Heo * 36187fbc5a0STejun Heo * LOCKING: 36287fbc5a0STejun Heo * EH context. 36387fbc5a0STejun Heo */ 36487fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 36587fbc5a0STejun Heo { 36687fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 36787fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 36887fbc5a0STejun Heo int idx; 36987fbc5a0STejun Heo 37087fbc5a0STejun Heo if (ent < 0) 37187fbc5a0STejun Heo return; 37287fbc5a0STejun Heo 37387fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 37487fbc5a0STejun Heo if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) 37587fbc5a0STejun Heo ehc->cmd_timeout_idx[dev->devno][ent]++; 37687fbc5a0STejun Heo } 37787fbc5a0STejun Heo 3783884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 379c6fd2807SJeff Garzik unsigned int err_mask) 380c6fd2807SJeff Garzik { 381c6fd2807SJeff Garzik struct ata_ering_entry *ent; 382c6fd2807SJeff Garzik 383c6fd2807SJeff Garzik WARN_ON(!err_mask); 384c6fd2807SJeff Garzik 385c6fd2807SJeff Garzik ering->cursor++; 386c6fd2807SJeff Garzik ering->cursor %= ATA_ERING_SIZE; 387c6fd2807SJeff Garzik 388c6fd2807SJeff Garzik ent = &ering->ring[ering->cursor]; 3893884f7b0STejun Heo ent->eflags = eflags; 390c6fd2807SJeff Garzik ent->err_mask = err_mask; 391c6fd2807SJeff Garzik ent->timestamp = get_jiffies_64(); 392c6fd2807SJeff Garzik } 393c6fd2807SJeff Garzik 39476326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 39576326ac1STejun Heo { 39676326ac1STejun Heo struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 39776326ac1STejun Heo 39876326ac1STejun Heo if (ent->err_mask) 39976326ac1STejun Heo return ent; 40076326ac1STejun Heo return NULL; 40176326ac1STejun Heo } 40276326ac1STejun Heo 403d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering, 404c6fd2807SJeff Garzik int (*map_fn)(struct ata_ering_entry *, void *), 405c6fd2807SJeff Garzik void *arg) 406c6fd2807SJeff Garzik { 407c6fd2807SJeff Garzik int idx, rc = 0; 408c6fd2807SJeff Garzik struct ata_ering_entry *ent; 409c6fd2807SJeff Garzik 410c6fd2807SJeff Garzik idx = ering->cursor; 411c6fd2807SJeff Garzik do { 412c6fd2807SJeff Garzik ent = &ering->ring[idx]; 413c6fd2807SJeff Garzik if 
(!ent->err_mask) 414c6fd2807SJeff Garzik break; 415c6fd2807SJeff Garzik rc = map_fn(ent, arg); 416c6fd2807SJeff Garzik if (rc) 417c6fd2807SJeff Garzik break; 418c6fd2807SJeff Garzik idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 419c6fd2807SJeff Garzik } while (idx != ering->cursor); 420c6fd2807SJeff Garzik 421c6fd2807SJeff Garzik return rc; 422c6fd2807SJeff Garzik } 423c6fd2807SJeff Garzik 42460428407SH Hartley Sweeten static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) 425d9027470SGwendal Grignou { 426d9027470SGwendal Grignou ent->eflags |= ATA_EFLAG_OLD_ER; 427d9027470SGwendal Grignou return 0; 428d9027470SGwendal Grignou } 429d9027470SGwendal Grignou 430d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering) 431d9027470SGwendal Grignou { 432d9027470SGwendal Grignou ata_ering_map(ering, ata_ering_clear_cb, NULL); 433d9027470SGwendal Grignou } 434d9027470SGwendal Grignou 435c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev) 436c6fd2807SJeff Garzik { 4379af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 438c6fd2807SJeff Garzik 439c6fd2807SJeff Garzik return ehc->i.action | ehc->i.dev_action[dev->devno]; 440c6fd2807SJeff Garzik } 441c6fd2807SJeff Garzik 442f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 443c6fd2807SJeff Garzik struct ata_eh_info *ehi, unsigned int action) 444c6fd2807SJeff Garzik { 445f58229f8STejun Heo struct ata_device *tdev; 446c6fd2807SJeff Garzik 447c6fd2807SJeff Garzik if (!dev) { 448c6fd2807SJeff Garzik ehi->action &= ~action; 4491eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 450f58229f8STejun Heo ehi->dev_action[tdev->devno] &= ~action; 451c6fd2807SJeff Garzik } else { 452c6fd2807SJeff Garzik /* doesn't make sense for port-wide EH actions */ 453c6fd2807SJeff Garzik WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 454c6fd2807SJeff Garzik 455c6fd2807SJeff Garzik /* break ehi->action into ehi->dev_action */ 456c6fd2807SJeff Garzik if (ehi->action & action) { 4571eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 458f58229f8STejun Heo ehi->dev_action[tdev->devno] |= 459f58229f8STejun Heo ehi->action & action; 460c6fd2807SJeff Garzik ehi->action &= ~action; 461c6fd2807SJeff Garzik } 462c6fd2807SJeff Garzik 463c6fd2807SJeff Garzik /* turn off the specified per-dev action */ 464c6fd2807SJeff Garzik ehi->dev_action[dev->devno] &= ~action; 465c6fd2807SJeff Garzik } 466c6fd2807SJeff Garzik } 467c6fd2807SJeff Garzik 468c6fd2807SJeff Garzik /** 469c0c362b6STejun Heo * ata_eh_acquire - acquire EH ownership 470c0c362b6STejun Heo * @ap: ATA port to acquire EH ownership for 471c0c362b6STejun Heo * 472c0c362b6STejun Heo * Acquire EH ownership for @ap. This is the basic exclusion 473c0c362b6STejun Heo * mechanism for ports sharing a host. Only one port hanging off 474c0c362b6STejun Heo * the same host can claim the ownership of EH. 475c0c362b6STejun Heo * 476c0c362b6STejun Heo * LOCKING: 477c0c362b6STejun Heo * EH context. 
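 *
 * Illustrative sketch only (not a separate interface): the port EH path
 * in this file brackets recovery with the acquire/release pair, roughly
 *
 *	ata_eh_acquire(ap);
 *	ap->ops->error_handler(ap);
 *	ata_eh_release(ap);
 *
 * See ata_scsi_port_error_handler() below for the real sequence.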
478c0c362b6STejun Heo */ 479c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap) 480c0c362b6STejun Heo { 481c0c362b6STejun Heo mutex_lock(&ap->host->eh_mutex); 482c0c362b6STejun Heo WARN_ON_ONCE(ap->host->eh_owner); 483c0c362b6STejun Heo ap->host->eh_owner = current; 484c0c362b6STejun Heo } 485c0c362b6STejun Heo 486c0c362b6STejun Heo /** 487c0c362b6STejun Heo * ata_eh_release - release EH ownership 488c0c362b6STejun Heo * @ap: ATA port to release EH ownership for 489c0c362b6STejun Heo * 490c0c362b6STejun Heo * Release EH ownership for @ap. The caller must 491c0c362b6STejun Heo * have acquired EH ownership using ata_eh_acquire() previously. 492c0c362b6STejun Heo * 493c0c362b6STejun Heo * LOCKING: 494c0c362b6STejun Heo * EH context. 495c0c362b6STejun Heo */ 496c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap) 497c0c362b6STejun Heo { 498c0c362b6STejun Heo WARN_ON_ONCE(ap->host->eh_owner != current); 499c0c362b6STejun Heo ap->host->eh_owner = NULL; 500c0c362b6STejun Heo mutex_unlock(&ap->host->eh_mutex); 501c0c362b6STejun Heo } 502c0c362b6STejun Heo 503c0c362b6STejun Heo /** 504c6fd2807SJeff Garzik * ata_scsi_timed_out - SCSI layer time out callback 505c6fd2807SJeff Garzik * @cmd: timed out SCSI command 506c6fd2807SJeff Garzik * 507c6fd2807SJeff Garzik * Handles SCSI layer timeout. We race with normal completion of 508c6fd2807SJeff Garzik * the qc for @cmd. If the qc is already gone, we lose and let 509c6fd2807SJeff Garzik * the scsi command finish (EH_HANDLED). Otherwise, the qc has 510c6fd2807SJeff Garzik * timed out and EH should be invoked. Prevent ata_qc_complete() 511c6fd2807SJeff Garzik * from finishing it by setting EH_SCHEDULED and return 512c6fd2807SJeff Garzik * EH_NOT_HANDLED. 513c6fd2807SJeff Garzik * 514c6fd2807SJeff Garzik * TODO: kill this function once old EH is gone. 
515c6fd2807SJeff Garzik * 516c6fd2807SJeff Garzik * LOCKING: 517c6fd2807SJeff Garzik * Called from timer context 518c6fd2807SJeff Garzik * 519c6fd2807SJeff Garzik * RETURNS: 520c6fd2807SJeff Garzik * EH_HANDLED or EH_NOT_HANDLED 521c6fd2807SJeff Garzik */ 522242f9dcbSJens Axboe enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 523c6fd2807SJeff Garzik { 524c6fd2807SJeff Garzik struct Scsi_Host *host = cmd->device->host; 525c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 526c6fd2807SJeff Garzik unsigned long flags; 527c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 528242f9dcbSJens Axboe enum blk_eh_timer_return ret; 529c6fd2807SJeff Garzik 530c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 531c6fd2807SJeff Garzik 532c6fd2807SJeff Garzik if (ap->ops->error_handler) { 533242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 534c6fd2807SJeff Garzik goto out; 535c6fd2807SJeff Garzik } 536c6fd2807SJeff Garzik 537242f9dcbSJens Axboe ret = BLK_EH_HANDLED; 538c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 5399af5c9c9STejun Heo qc = ata_qc_from_tag(ap, ap->link.active_tag); 540c6fd2807SJeff Garzik if (qc) { 541c6fd2807SJeff Garzik WARN_ON(qc->scsicmd != cmd); 542c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 543c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 544242f9dcbSJens Axboe ret = BLK_EH_NOT_HANDLED; 545c6fd2807SJeff Garzik } 546c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 547c6fd2807SJeff Garzik 548c6fd2807SJeff Garzik out: 549c6fd2807SJeff Garzik DPRINTK("EXIT, ret=%d\n", ret); 550c6fd2807SJeff Garzik return ret; 551c6fd2807SJeff Garzik } 552b6a05c82SChristoph Hellwig EXPORT_SYMBOL(ata_scsi_timed_out); 553c6fd2807SJeff Garzik 554ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap) 555ece180d1STejun Heo { 556ece180d1STejun Heo struct ata_link *link; 557ece180d1STejun Heo struct ata_device *dev; 558ece180d1STejun Heo unsigned long flags; 559ece180d1STejun Heo 560ece180d1STejun Heo /* Restore SControl IPM and SPD for the next driver and 561ece180d1STejun Heo * disable attached devices. 562ece180d1STejun Heo */ 563ece180d1STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 564ece180d1STejun Heo sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); 565ece180d1STejun Heo ata_for_each_dev(dev, link, ALL) 566ece180d1STejun Heo ata_dev_disable(dev); 567ece180d1STejun Heo } 568ece180d1STejun Heo 569ece180d1STejun Heo /* freeze and set UNLOADED */ 570ece180d1STejun Heo spin_lock_irqsave(ap->lock, flags); 571ece180d1STejun Heo 572ece180d1STejun Heo ata_port_freeze(ap); /* won't be thawed */ 573ece180d1STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 574ece180d1STejun Heo ap->pflags |= ATA_PFLAG_UNLOADED; 575ece180d1STejun Heo 576ece180d1STejun Heo spin_unlock_irqrestore(ap->lock, flags); 577ece180d1STejun Heo } 578ece180d1STejun Heo 579c6fd2807SJeff Garzik /** 580c6fd2807SJeff Garzik * ata_scsi_error - SCSI layer error handler callback 581c6fd2807SJeff Garzik * @host: SCSI host on which error occurred 582c6fd2807SJeff Garzik * 583c6fd2807SJeff Garzik * Handles SCSI-layer-thrown error events. 584c6fd2807SJeff Garzik * 585c6fd2807SJeff Garzik * LOCKING: 586c6fd2807SJeff Garzik * Inherited from SCSI layer (none, can sleep) 587c6fd2807SJeff Garzik * 588c6fd2807SJeff Garzik * RETURNS: 589c6fd2807SJeff Garzik * Zero. 
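 *
 * As the body below shows, this callback merely splices the failed
 * commands off host->eh_cmd_q and delegates the two halves of error
 * handling:
 *
 *	list_splice_init(&host->eh_cmd_q, &eh_work_q);
 *	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
 *	ata_scsi_port_error_handler(host, ap);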
590c6fd2807SJeff Garzik */ 591c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host) 592c6fd2807SJeff Garzik { 593c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host); 594c6fd2807SJeff Garzik unsigned long flags; 595c34aeebcSJames Bottomley LIST_HEAD(eh_work_q); 596c6fd2807SJeff Garzik 597c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 598c6fd2807SJeff Garzik 599c34aeebcSJames Bottomley spin_lock_irqsave(host->host_lock, flags); 600c34aeebcSJames Bottomley list_splice_init(&host->eh_cmd_q, &eh_work_q); 601c34aeebcSJames Bottomley spin_unlock_irqrestore(host->host_lock, flags); 602c34aeebcSJames Bottomley 6030e0b494cSJames Bottomley ata_scsi_cmd_error_handler(host, ap, &eh_work_q); 6040e0b494cSJames Bottomley 6050e0b494cSJames Bottomley /* If we timed raced normal completion and there is nothing to 6060e0b494cSJames Bottomley recover nr_timedout == 0 why exactly are we doing error recovery ? */ 6070e0b494cSJames Bottomley ata_scsi_port_error_handler(host, ap); 6080e0b494cSJames Bottomley 6090e0b494cSJames Bottomley /* finish or retry handled scmd's and clean up */ 61072d8c36eSWei Fang WARN_ON(!list_empty(&eh_work_q)); 6110e0b494cSJames Bottomley 6120e0b494cSJames Bottomley DPRINTK("EXIT\n"); 6130e0b494cSJames Bottomley } 6140e0b494cSJames Bottomley 6150e0b494cSJames Bottomley /** 6160e0b494cSJames Bottomley * ata_scsi_cmd_error_handler - error callback for a list of commands 6170e0b494cSJames Bottomley * @host: scsi host containing the port 6180e0b494cSJames Bottomley * @ap: ATA port within the host 6190e0b494cSJames Bottomley * @eh_work_q: list of commands to process 6200e0b494cSJames Bottomley * 6210e0b494cSJames Bottomley * process the given list of commands and return those finished to the 6220e0b494cSJames Bottomley * ap->eh_done_q. This function is the first part of the libata error 6230e0b494cSJames Bottomley * handler which processes a given list of failed commands. 6240e0b494cSJames Bottomley */ 6250e0b494cSJames Bottomley void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, 6260e0b494cSJames Bottomley struct list_head *eh_work_q) 6270e0b494cSJames Bottomley { 6280e0b494cSJames Bottomley int i; 6290e0b494cSJames Bottomley unsigned long flags; 6300e0b494cSJames Bottomley 631c429137aSTejun Heo /* make sure sff pio task is not running */ 632c429137aSTejun Heo ata_sff_flush_pio_task(ap); 633c6fd2807SJeff Garzik 634cca3974eSJeff Garzik /* synchronize with host lock and sort out timeouts */ 635c6fd2807SJeff Garzik 636c6fd2807SJeff Garzik /* For new EH, all qcs are finished in one of three ways - 637c6fd2807SJeff Garzik * normal completion, error completion, and SCSI timeout. 638c96f1732SAlan Cox * Both completions can race against SCSI timeout. When normal 639c6fd2807SJeff Garzik * completion wins, the qc never reaches EH. When error 640c6fd2807SJeff Garzik * completion wins, the qc has ATA_QCFLAG_FAILED set. 641c6fd2807SJeff Garzik * 642c6fd2807SJeff Garzik * When SCSI timeout wins, things are a bit more complex. 643c6fd2807SJeff Garzik * Normal or error completion can occur after the timeout but 644c6fd2807SJeff Garzik * before this point. In such cases, both types of 645c6fd2807SJeff Garzik * completions are honored. A scmd is determined to have 646c6fd2807SJeff Garzik * timed out iff its associated qc is active and not failed. 647c6fd2807SJeff Garzik */ 648a4f08141SPaul E. 
McKenney spin_lock_irqsave(ap->lock, flags); 649c6fd2807SJeff Garzik if (ap->ops->error_handler) { 650c6fd2807SJeff Garzik struct scsi_cmnd *scmd, *tmp; 651c6fd2807SJeff Garzik int nr_timedout = 0; 652c6fd2807SJeff Garzik 653c96f1732SAlan Cox /* This must occur under the ap->lock as we don't want 654c96f1732SAlan Cox a polled recovery to race the real interrupt handler 655c96f1732SAlan Cox 656c96f1732SAlan Cox The lost_interrupt handler checks for any completed but 657c96f1732SAlan Cox non-notified command and completes much like an IRQ handler. 658c96f1732SAlan Cox 659c96f1732SAlan Cox We then fall into the error recovery code which will treat 660c96f1732SAlan Cox this as if normal completion won the race */ 661c96f1732SAlan Cox 662c96f1732SAlan Cox if (ap->ops->lost_interrupt) 663c96f1732SAlan Cox ap->ops->lost_interrupt(ap); 664c96f1732SAlan Cox 6650e0b494cSJames Bottomley list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { 666c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 667c6fd2807SJeff Garzik 668c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_QUEUE; i++) { 669c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, i); 670c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE && 671c6fd2807SJeff Garzik qc->scsicmd == scmd) 672c6fd2807SJeff Garzik break; 673c6fd2807SJeff Garzik } 674c6fd2807SJeff Garzik 675c6fd2807SJeff Garzik if (i < ATA_MAX_QUEUE) { 676c6fd2807SJeff Garzik /* the scmd has an associated qc */ 677c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) { 678c6fd2807SJeff Garzik /* which hasn't failed yet, timeout */ 679c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 680c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 681c6fd2807SJeff Garzik nr_timedout++; 682c6fd2807SJeff Garzik } 683c6fd2807SJeff Garzik } else { 684c6fd2807SJeff Garzik /* Normal completion occurred after 685c6fd2807SJeff Garzik * SCSI timeout but before this point. 686c6fd2807SJeff Garzik * Successfully complete it. 687c6fd2807SJeff Garzik */ 688c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 689c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 690c6fd2807SJeff Garzik } 691c6fd2807SJeff Garzik } 692c6fd2807SJeff Garzik 693c6fd2807SJeff Garzik /* If we have timed out qcs. They belong to EH from 694c6fd2807SJeff Garzik * this point but the state of the controller is 695c6fd2807SJeff Garzik * unknown. Freeze the port to make sure the IRQ 696c6fd2807SJeff Garzik * handler doesn't diddle with those qcs. This must 697c6fd2807SJeff Garzik * be done atomically w.r.t. setting QCFLAG_FAILED. 698c6fd2807SJeff Garzik */ 699c6fd2807SJeff Garzik if (nr_timedout) 700c6fd2807SJeff Garzik __ata_port_freeze(ap); 701c6fd2807SJeff Garzik 702a1e10f7eSTejun Heo 703a1e10f7eSTejun Heo /* initialize eh_tries */ 704a1e10f7eSTejun Heo ap->eh_tries = ATA_EH_MAX_TRIES; 705a4f08141SPaul E. McKenney } 706a4f08141SPaul E. McKenney spin_unlock_irqrestore(ap->lock, flags); 707c6fd2807SJeff Garzik 7080e0b494cSJames Bottomley } 7090e0b494cSJames Bottomley EXPORT_SYMBOL(ata_scsi_cmd_error_handler); 7100e0b494cSJames Bottomley 7110e0b494cSJames Bottomley /** 7120e0b494cSJames Bottomley * ata_scsi_port_error_handler - recover the port after the commands 7130e0b494cSJames Bottomley * @host: SCSI host containing the port 7140e0b494cSJames Bottomley * @ap: the ATA port 7150e0b494cSJames Bottomley * 7160e0b494cSJames Bottomley * Handle the recovery of the port @ap after all the commands 7170e0b494cSJames Bottomley * have been recovered. 
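 *
 * Roughly, the body below: acquires EH ownership, kills the fast drain
 * timer, handles a pending port resume, snapshots each link->eh_info
 * into link->eh_context, invokes ap->ops->error_handler() (or performs
 * unload/finish when the port is being unloaded or is suspended),
 * handles a pending port suspend, and repeats while
 * ATA_PFLAG_EH_PENDING is still set, up to ATA_EH_MAX_TRIES times.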
7180e0b494cSJames Bottomley */ 7190e0b494cSJames Bottomley void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) 7200e0b494cSJames Bottomley { 7210e0b494cSJames Bottomley unsigned long flags; 722c96f1732SAlan Cox 723c6fd2807SJeff Garzik /* invoke error handler */ 724c6fd2807SJeff Garzik if (ap->ops->error_handler) { 725cf1b86c8STejun Heo struct ata_link *link; 726cf1b86c8STejun Heo 727c0c362b6STejun Heo /* acquire EH ownership */ 728c0c362b6STejun Heo ata_eh_acquire(ap); 729c0c362b6STejun Heo repeat: 7305ddf24c5STejun Heo /* kill fast drain timer */ 7315ddf24c5STejun Heo del_timer_sync(&ap->fastdrain_timer); 7325ddf24c5STejun Heo 733c6fd2807SJeff Garzik /* process port resume request */ 734c6fd2807SJeff Garzik ata_eh_handle_port_resume(ap); 735c6fd2807SJeff Garzik 736c6fd2807SJeff Garzik /* fetch & clear EH info */ 737c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 738c6fd2807SJeff Garzik 7391eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) { 74000115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 74100115e0fSTejun Heo struct ata_device *dev; 74200115e0fSTejun Heo 743cf1b86c8STejun Heo memset(&link->eh_context, 0, sizeof(link->eh_context)); 744cf1b86c8STejun Heo link->eh_context.i = link->eh_info; 745cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 74600115e0fSTejun Heo 7471eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 74800115e0fSTejun Heo int devno = dev->devno; 74900115e0fSTejun Heo 75000115e0fSTejun Heo ehc->saved_xfer_mode[devno] = dev->xfer_mode; 75100115e0fSTejun Heo if (ata_ncq_enabled(dev)) 75200115e0fSTejun Heo ehc->saved_ncq_enabled |= 1 << devno; 75300115e0fSTejun Heo } 754cf1b86c8STejun Heo } 755c6fd2807SJeff Garzik 756c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 757c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_PENDING; 758da917d69STejun Heo ap->excl_link = NULL; /* don't maintain exclusion over EH */ 759c6fd2807SJeff Garzik 760c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 761c6fd2807SJeff Garzik 762c6fd2807SJeff Garzik /* invoke EH, skip if unloading or suspended */ 763c6fd2807SJeff Garzik if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 764c6fd2807SJeff Garzik ap->ops->error_handler(ap); 765ece180d1STejun Heo else { 766ece180d1STejun Heo /* if unloading, commence suicide */ 767ece180d1STejun Heo if ((ap->pflags & ATA_PFLAG_UNLOADING) && 768ece180d1STejun Heo !(ap->pflags & ATA_PFLAG_UNLOADED)) 769ece180d1STejun Heo ata_eh_unload(ap); 770c6fd2807SJeff Garzik ata_eh_finish(ap); 771ece180d1STejun Heo } 772c6fd2807SJeff Garzik 773c6fd2807SJeff Garzik /* process port suspend request */ 774c6fd2807SJeff Garzik ata_eh_handle_port_suspend(ap); 775c6fd2807SJeff Garzik 77625985edcSLucas De Marchi /* Exception might have happened after ->error_handler 777c6fd2807SJeff Garzik * recovered the port but before this point. Repeat 778c6fd2807SJeff Garzik * EH in such case. 
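	 * (For example, an ata_port_schedule_eh() call made from an
	 * interrupt handler in that window sets ATA_PFLAG_EH_PENDING
	 * again, which is what the check below looks for.)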
779c6fd2807SJeff Garzik */ 780c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 781c6fd2807SJeff Garzik 782c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_EH_PENDING) { 783a1e10f7eSTejun Heo if (--ap->eh_tries) { 784c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 785c6fd2807SJeff Garzik goto repeat; 786c6fd2807SJeff Garzik } 787a9a79dfeSJoe Perches ata_port_err(ap, 788a9a79dfeSJoe Perches "EH pending after %d tries, giving up\n", 789a9a79dfeSJoe Perches ATA_EH_MAX_TRIES); 790914616a3STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 791c6fd2807SJeff Garzik } 792c6fd2807SJeff Garzik 793c6fd2807SJeff Garzik /* this run is complete, make sure EH info is clear */ 7941eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 795cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 796c6fd2807SJeff Garzik 797e4a9c373SDan Williams /* end eh (clear host_eh_scheduled) while holding 798e4a9c373SDan Williams * ap->lock such that if exception occurs after this 799e4a9c373SDan Williams * point but before EH completion, SCSI midlayer will 800c6fd2807SJeff Garzik * re-initiate EH. 801c6fd2807SJeff Garzik */ 802e4a9c373SDan Williams ap->ops->end_eh(ap); 803c6fd2807SJeff Garzik 804c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 805c0c362b6STejun Heo ata_eh_release(ap); 806c6fd2807SJeff Garzik } else { 8079af5c9c9STejun Heo WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 808c6fd2807SJeff Garzik ap->ops->eng_timeout(ap); 809c6fd2807SJeff Garzik } 810c6fd2807SJeff Garzik 811c6fd2807SJeff Garzik scsi_eh_flush_done_q(&ap->eh_done_q); 812c6fd2807SJeff Garzik 813c6fd2807SJeff Garzik /* clean up */ 814c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 815c6fd2807SJeff Garzik 816c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_LOADING) 817c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_LOADING; 818c6fd2807SJeff Garzik else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 819ad72cf98STejun Heo schedule_delayed_work(&ap->hotplug_task, 0); 820c6fd2807SJeff Garzik 821c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_RECOVERED) 822a9a79dfeSJoe Perches ata_port_info(ap, "EH complete\n"); 823c6fd2807SJeff Garzik 824c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 825c6fd2807SJeff Garzik 826c6fd2807SJeff Garzik /* tell wait_eh that we're done */ 827c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 828c6fd2807SJeff Garzik wake_up_all(&ap->eh_wait_q); 829c6fd2807SJeff Garzik 830c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 831c6fd2807SJeff Garzik } 8320e0b494cSJames Bottomley EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler); 833c6fd2807SJeff Garzik 834c6fd2807SJeff Garzik /** 835c6fd2807SJeff Garzik * ata_port_wait_eh - Wait for the currently pending EH to complete 836c6fd2807SJeff Garzik * @ap: Port to wait EH for 837c6fd2807SJeff Garzik * 838c6fd2807SJeff Garzik * Wait until the currently pending EH is complete. 839c6fd2807SJeff Garzik * 840c6fd2807SJeff Garzik * LOCKING: 841c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
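 *
 * The body below sleeps on ap->eh_wait_q until both
 * ATA_PFLAG_EH_PENDING and ATA_PFLAG_EH_IN_PROGRESS are clear, then
 * retries if scsi_host_in_recovery() still reports the SCSI host busy,
 * so the caller returns only after SCSI EH has wound down as well.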
842c6fd2807SJeff Garzik */ 843c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap) 844c6fd2807SJeff Garzik { 845c6fd2807SJeff Garzik unsigned long flags; 846c6fd2807SJeff Garzik DEFINE_WAIT(wait); 847c6fd2807SJeff Garzik 848c6fd2807SJeff Garzik retry: 849c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 850c6fd2807SJeff Garzik 851c6fd2807SJeff Garzik while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 852c6fd2807SJeff Garzik prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 853c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 854c6fd2807SJeff Garzik schedule(); 855c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 856c6fd2807SJeff Garzik } 857c6fd2807SJeff Garzik finish_wait(&ap->eh_wait_q, &wait); 858c6fd2807SJeff Garzik 859c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 860c6fd2807SJeff Garzik 861c6fd2807SJeff Garzik /* make sure SCSI EH is complete */ 862cca3974eSJeff Garzik if (scsi_host_in_recovery(ap->scsi_host)) { 86397750cebSTejun Heo ata_msleep(ap, 10); 864c6fd2807SJeff Garzik goto retry; 865c6fd2807SJeff Garzik } 866c6fd2807SJeff Garzik } 86781c757bcSDan Williams EXPORT_SYMBOL_GPL(ata_port_wait_eh); 868c6fd2807SJeff Garzik 8695ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap) 8705ddf24c5STejun Heo { 8715ddf24c5STejun Heo unsigned int tag; 8725ddf24c5STejun Heo int nr = 0; 8735ddf24c5STejun Heo 8745ddf24c5STejun Heo /* count only non-internal commands */ 8755ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 8765ddf24c5STejun Heo if (ata_qc_from_tag(ap, tag)) 8775ddf24c5STejun Heo nr++; 8785ddf24c5STejun Heo 8795ddf24c5STejun Heo return nr; 8805ddf24c5STejun Heo } 8815ddf24c5STejun Heo 8825ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg) 8835ddf24c5STejun Heo { 8845ddf24c5STejun Heo struct ata_port *ap = (void *)arg; 8855ddf24c5STejun Heo unsigned long flags; 8865ddf24c5STejun Heo int cnt; 8875ddf24c5STejun Heo 8885ddf24c5STejun Heo spin_lock_irqsave(ap->lock, flags); 8895ddf24c5STejun Heo 8905ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8915ddf24c5STejun Heo 8925ddf24c5STejun Heo /* are we done? */ 8935ddf24c5STejun Heo if (!cnt) 8945ddf24c5STejun Heo goto out_unlock; 8955ddf24c5STejun Heo 8965ddf24c5STejun Heo if (cnt == ap->fastdrain_cnt) { 8975ddf24c5STejun Heo unsigned int tag; 8985ddf24c5STejun Heo 8995ddf24c5STejun Heo /* No progress during the last interval, tag all 9005ddf24c5STejun Heo * in-flight qcs as timed out and freeze the port. 
9015ddf24c5STejun Heo */ 9025ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { 9035ddf24c5STejun Heo struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 9045ddf24c5STejun Heo if (qc) 9055ddf24c5STejun Heo qc->err_mask |= AC_ERR_TIMEOUT; 9065ddf24c5STejun Heo } 9075ddf24c5STejun Heo 9085ddf24c5STejun Heo ata_port_freeze(ap); 9095ddf24c5STejun Heo } else { 9105ddf24c5STejun Heo /* some qcs have finished, give it another chance */ 9115ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 9125ddf24c5STejun Heo ap->fastdrain_timer.expires = 913341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 9145ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 9155ddf24c5STejun Heo } 9165ddf24c5STejun Heo 9175ddf24c5STejun Heo out_unlock: 9185ddf24c5STejun Heo spin_unlock_irqrestore(ap->lock, flags); 9195ddf24c5STejun Heo } 9205ddf24c5STejun Heo 9215ddf24c5STejun Heo /** 9225ddf24c5STejun Heo * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 9235ddf24c5STejun Heo * @ap: target ATA port 9245ddf24c5STejun Heo * @fastdrain: activate fast drain 9255ddf24c5STejun Heo * 9265ddf24c5STejun Heo * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 9275ddf24c5STejun Heo * is non-zero and EH wasn't pending before. Fast drain ensures 9285ddf24c5STejun Heo * that EH kicks in in timely manner. 9295ddf24c5STejun Heo * 9305ddf24c5STejun Heo * LOCKING: 9315ddf24c5STejun Heo * spin_lock_irqsave(host lock) 9325ddf24c5STejun Heo */ 9335ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 9345ddf24c5STejun Heo { 9355ddf24c5STejun Heo int cnt; 9365ddf24c5STejun Heo 9375ddf24c5STejun Heo /* already scheduled? */ 9385ddf24c5STejun Heo if (ap->pflags & ATA_PFLAG_EH_PENDING) 9395ddf24c5STejun Heo return; 9405ddf24c5STejun Heo 9415ddf24c5STejun Heo ap->pflags |= ATA_PFLAG_EH_PENDING; 9425ddf24c5STejun Heo 9435ddf24c5STejun Heo if (!fastdrain) 9445ddf24c5STejun Heo return; 9455ddf24c5STejun Heo 9465ddf24c5STejun Heo /* do we have in-flight qcs? */ 9475ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 9485ddf24c5STejun Heo if (!cnt) 9495ddf24c5STejun Heo return; 9505ddf24c5STejun Heo 9515ddf24c5STejun Heo /* activate fast drain */ 9525ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 953341c2c95STejun Heo ap->fastdrain_timer.expires = 954341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 9555ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 9565ddf24c5STejun Heo } 9575ddf24c5STejun Heo 958c6fd2807SJeff Garzik /** 959c6fd2807SJeff Garzik * ata_qc_schedule_eh - schedule qc for error handling 960c6fd2807SJeff Garzik * @qc: command to schedule error handling for 961c6fd2807SJeff Garzik * 962c6fd2807SJeff Garzik * Schedule error handling for @qc. EH will kick in as soon as 963c6fd2807SJeff Garzik * other commands are drained. 
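 *
 * Note, as the body below shows, that the qc is marked
 * ATA_QCFLAG_FAILED and the abort itself is driven through
 * blk_abort_request() on the associated SCSI request, i.e. the block
 * layer timeout path is what ultimately pushes the command into SCSI
 * EH.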
964c6fd2807SJeff Garzik * 965c6fd2807SJeff Garzik * LOCKING: 966cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 967c6fd2807SJeff Garzik */ 968c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 969c6fd2807SJeff Garzik { 970c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 971fa41efdaSTejun Heo struct request_queue *q = qc->scsicmd->device->request_queue; 972fa41efdaSTejun Heo unsigned long flags; 973c6fd2807SJeff Garzik 974c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 975c6fd2807SJeff Garzik 976c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 9775ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 978c6fd2807SJeff Garzik 979c6fd2807SJeff Garzik /* The following will fail if timeout has already expired. 980c6fd2807SJeff Garzik * ata_scsi_error() takes care of such scmds on EH entry. 981c6fd2807SJeff Garzik * Note that ATA_QCFLAG_FAILED is unconditionally set after 982c6fd2807SJeff Garzik * this function completes. 983c6fd2807SJeff Garzik */ 984fa41efdaSTejun Heo spin_lock_irqsave(q->queue_lock, flags); 985242f9dcbSJens Axboe blk_abort_request(qc->scsicmd->request); 986fa41efdaSTejun Heo spin_unlock_irqrestore(q->queue_lock, flags); 987c6fd2807SJeff Garzik } 988c6fd2807SJeff Garzik 989c6fd2807SJeff Garzik /** 990e4a9c373SDan Williams * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine 991e4a9c373SDan Williams * @ap: ATA port to schedule EH for 992e4a9c373SDan Williams * 993e4a9c373SDan Williams * LOCKING: inherited from ata_port_schedule_eh 994e4a9c373SDan Williams * spin_lock_irqsave(host lock) 995e4a9c373SDan Williams */ 996e4a9c373SDan Williams void ata_std_sched_eh(struct ata_port *ap) 997e4a9c373SDan Williams { 998e4a9c373SDan Williams WARN_ON(!ap->ops->error_handler); 999e4a9c373SDan Williams 1000e4a9c373SDan Williams if (ap->pflags & ATA_PFLAG_INITIALIZING) 1001e4a9c373SDan Williams return; 1002e4a9c373SDan Williams 1003e4a9c373SDan Williams ata_eh_set_pending(ap, 1); 1004e4a9c373SDan Williams scsi_schedule_eh(ap->scsi_host); 1005e4a9c373SDan Williams 1006e4a9c373SDan Williams DPRINTK("port EH scheduled\n"); 1007e4a9c373SDan Williams } 1008e4a9c373SDan Williams EXPORT_SYMBOL_GPL(ata_std_sched_eh); 1009e4a9c373SDan Williams 1010e4a9c373SDan Williams /** 1011e4a9c373SDan Williams * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine 1012e4a9c373SDan Williams * @ap: ATA port to end EH for 1013e4a9c373SDan Williams * 1014e4a9c373SDan Williams * In the libata object model there is a 1:1 mapping of ata_port to 1015e4a9c373SDan Williams * shost, so host fields can be directly manipulated under ap->lock, in 1016e4a9c373SDan Williams * the libsas case we need to hold a lock at the ha->level to coordinate 1017e4a9c373SDan Williams * these events. 
1018e4a9c373SDan Williams * 1019e4a9c373SDan Williams * LOCKING: 1020e4a9c373SDan Williams * spin_lock_irqsave(host lock) 1021e4a9c373SDan Williams */ 1022e4a9c373SDan Williams void ata_std_end_eh(struct ata_port *ap) 1023e4a9c373SDan Williams { 1024e4a9c373SDan Williams struct Scsi_Host *host = ap->scsi_host; 1025e4a9c373SDan Williams 1026e4a9c373SDan Williams host->host_eh_scheduled = 0; 1027e4a9c373SDan Williams } 1028e4a9c373SDan Williams EXPORT_SYMBOL(ata_std_end_eh); 1029e4a9c373SDan Williams 1030e4a9c373SDan Williams 1031e4a9c373SDan Williams /** 1032c6fd2807SJeff Garzik * ata_port_schedule_eh - schedule error handling without a qc 1033c6fd2807SJeff Garzik * @ap: ATA port to schedule EH for 1034c6fd2807SJeff Garzik * 1035c6fd2807SJeff Garzik * Schedule error handling for @ap. EH will kick in as soon as 1036c6fd2807SJeff Garzik * all commands are drained. 1037c6fd2807SJeff Garzik * 1038c6fd2807SJeff Garzik * LOCKING: 1039cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1040c6fd2807SJeff Garzik */ 1041c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap) 1042c6fd2807SJeff Garzik { 1043e4a9c373SDan Williams /* see: ata_std_sched_eh, unless you know better */ 1044e4a9c373SDan Williams ap->ops->sched_eh(ap); 1045c6fd2807SJeff Garzik } 1046c6fd2807SJeff Garzik 1047dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 1048c6fd2807SJeff Garzik { 1049c6fd2807SJeff Garzik int tag, nr_aborted = 0; 1050c6fd2807SJeff Garzik 1051c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1052c6fd2807SJeff Garzik 10535ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */ 10545ddf24c5STejun Heo ata_eh_set_pending(ap, 0); 10555ddf24c5STejun Heo 1056c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1057c6fd2807SJeff Garzik struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 1058c6fd2807SJeff Garzik 1059dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) { 1060c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 1061c6fd2807SJeff Garzik ata_qc_complete(qc); 1062c6fd2807SJeff Garzik nr_aborted++; 1063c6fd2807SJeff Garzik } 1064c6fd2807SJeff Garzik } 1065c6fd2807SJeff Garzik 1066c6fd2807SJeff Garzik if (!nr_aborted) 1067c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 1068c6fd2807SJeff Garzik 1069c6fd2807SJeff Garzik return nr_aborted; 1070c6fd2807SJeff Garzik } 1071c6fd2807SJeff Garzik 1072c6fd2807SJeff Garzik /** 1073dbd82616STejun Heo * ata_link_abort - abort all qc's on the link 1074dbd82616STejun Heo * @link: ATA link to abort qc's for 1075dbd82616STejun Heo * 1076dbd82616STejun Heo * Abort all active qc's active on @link and schedule EH. 1077dbd82616STejun Heo * 1078dbd82616STejun Heo * LOCKING: 1079dbd82616STejun Heo * spin_lock_irqsave(host lock) 1080dbd82616STejun Heo * 1081dbd82616STejun Heo * RETURNS: 1082dbd82616STejun Heo * Number of aborted qc's. 1083dbd82616STejun Heo */ 1084dbd82616STejun Heo int ata_link_abort(struct ata_link *link) 1085dbd82616STejun Heo { 1086dbd82616STejun Heo return ata_do_link_abort(link->ap, link); 1087dbd82616STejun Heo } 1088dbd82616STejun Heo 1089dbd82616STejun Heo /** 1090dbd82616STejun Heo * ata_port_abort - abort all qc's on the port 1091dbd82616STejun Heo * @ap: ATA port to abort qc's for 1092dbd82616STejun Heo * 1093dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH. 
1094dbd82616STejun Heo * 1095dbd82616STejun Heo * LOCKING: 1096dbd82616STejun Heo * spin_lock_irqsave(host_set lock) 1097dbd82616STejun Heo * 1098dbd82616STejun Heo * RETURNS: 1099dbd82616STejun Heo * Number of aborted qc's. 1100dbd82616STejun Heo */ 1101dbd82616STejun Heo int ata_port_abort(struct ata_port *ap) 1102dbd82616STejun Heo { 1103dbd82616STejun Heo return ata_do_link_abort(ap, NULL); 1104dbd82616STejun Heo } 1105dbd82616STejun Heo 1106dbd82616STejun Heo /** 1107c6fd2807SJeff Garzik * __ata_port_freeze - freeze port 1108c6fd2807SJeff Garzik * @ap: ATA port to freeze 1109c6fd2807SJeff Garzik * 1110c6fd2807SJeff Garzik * This function is called when HSM violation or some other 1111c6fd2807SJeff Garzik * condition disrupts normal operation of the port. Frozen port 1112c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is 1113c6fd2807SJeff Garzik * thawed, which usually follows a successful reset. 1114c6fd2807SJeff Garzik * 1115c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port 1116c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 1117c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler 1118c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port 1119c6fd2807SJeff Garzik * is frozen. 1120c6fd2807SJeff Garzik * 1121c6fd2807SJeff Garzik * LOCKING: 1122cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1123c6fd2807SJeff Garzik */ 1124c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap) 1125c6fd2807SJeff Garzik { 1126c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1127c6fd2807SJeff Garzik 1128c6fd2807SJeff Garzik if (ap->ops->freeze) 1129c6fd2807SJeff Garzik ap->ops->freeze(ap); 1130c6fd2807SJeff Garzik 1131c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN; 1132c6fd2807SJeff Garzik 113344877b4eSTejun Heo DPRINTK("ata%u port frozen\n", ap->print_id); 1134c6fd2807SJeff Garzik } 1135c6fd2807SJeff Garzik 1136c6fd2807SJeff Garzik /** 1137c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port 1138c6fd2807SJeff Garzik * @ap: ATA port to freeze 1139c6fd2807SJeff Garzik * 114054c38444SJeff Garzik * Abort and freeze @ap. The freeze operation must be called 114154c38444SJeff Garzik * first, because some hardware requires special operations 114254c38444SJeff Garzik * before the taskfile registers are accessible. 1143c6fd2807SJeff Garzik * 1144c6fd2807SJeff Garzik * LOCKING: 1145cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1146c6fd2807SJeff Garzik * 1147c6fd2807SJeff Garzik * RETURNS: 1148c6fd2807SJeff Garzik * Number of aborted commands. 1149c6fd2807SJeff Garzik */ 1150c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap) 1151c6fd2807SJeff Garzik { 1152c6fd2807SJeff Garzik int nr_aborted; 1153c6fd2807SJeff Garzik 1154c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 1155c6fd2807SJeff Garzik 1156c6fd2807SJeff Garzik __ata_port_freeze(ap); 115754c38444SJeff Garzik nr_aborted = ata_port_abort(ap); 1158c6fd2807SJeff Garzik 1159c6fd2807SJeff Garzik return nr_aborted; 1160c6fd2807SJeff Garzik } 1161c6fd2807SJeff Garzik 1162c6fd2807SJeff Garzik /** 11637d77b247STejun Heo * sata_async_notification - SATA async notification handler 11647d77b247STejun Heo * @ap: ATA port where async notification is received 11657d77b247STejun Heo * 11667d77b247STejun Heo * Handler to be called when async notification via SDB FIS is 11677d77b247STejun Heo * received. This function schedules EH if necessary. 
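 *
 * As implemented below: without a PMP the notification can only mean
 * ATAPI media change on ap->link.device; with a PMP but no usable SNTF
 * value the event is treated as a possible downstream PHY status
 * change and EH is scheduled; with a PMP and a valid SNTF mask the
 * flagged downstream links are checked individually and EH is
 * scheduled only if the PMP control port bit is set.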
11687d77b247STejun Heo * 11697d77b247STejun Heo * LOCKING: 11707d77b247STejun Heo * spin_lock_irqsave(host lock) 11717d77b247STejun Heo * 11727d77b247STejun Heo * RETURNS: 11737d77b247STejun Heo * 1 if EH is scheduled, 0 otherwise. 11747d77b247STejun Heo */ 11757d77b247STejun Heo int sata_async_notification(struct ata_port *ap) 11767d77b247STejun Heo { 11777d77b247STejun Heo u32 sntf; 11787d77b247STejun Heo int rc; 11797d77b247STejun Heo 11807d77b247STejun Heo if (!(ap->flags & ATA_FLAG_AN)) 11817d77b247STejun Heo return 0; 11827d77b247STejun Heo 11837d77b247STejun Heo rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 11847d77b247STejun Heo if (rc == 0) 11857d77b247STejun Heo sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 11867d77b247STejun Heo 1187071f44b1STejun Heo if (!sata_pmp_attached(ap) || rc) { 11887d77b247STejun Heo /* PMP is not attached or SNTF is not available */ 1189071f44b1STejun Heo if (!sata_pmp_attached(ap)) { 11907d77b247STejun Heo /* PMP is not attached. Check whether ATAPI 11917d77b247STejun Heo * AN is configured. If so, notify media 11927d77b247STejun Heo * change. 11937d77b247STejun Heo */ 11947d77b247STejun Heo struct ata_device *dev = ap->link.device; 11957d77b247STejun Heo 11967d77b247STejun Heo if ((dev->class == ATA_DEV_ATAPI) && 11977d77b247STejun Heo (dev->flags & ATA_DFLAG_AN)) 11987d77b247STejun Heo ata_scsi_media_change_notify(dev); 11997d77b247STejun Heo return 0; 12007d77b247STejun Heo } else { 12017d77b247STejun Heo /* PMP is attached but SNTF is not available. 12027d77b247STejun Heo * ATAPI async media change notification is 12037d77b247STejun Heo * not used. The PMP must be reporting PHY 12047d77b247STejun Heo * status change, schedule EH. 12057d77b247STejun Heo */ 12067d77b247STejun Heo ata_port_schedule_eh(ap); 12077d77b247STejun Heo return 1; 12087d77b247STejun Heo } 12097d77b247STejun Heo } else { 12107d77b247STejun Heo /* PMP is attached and SNTF is available */ 12117d77b247STejun Heo struct ata_link *link; 12127d77b247STejun Heo 12137d77b247STejun Heo /* check and notify ATAPI AN */ 12141eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 12157d77b247STejun Heo if (!(sntf & (1 << link->pmp))) 12167d77b247STejun Heo continue; 12177d77b247STejun Heo 12187d77b247STejun Heo if ((link->device->class == ATA_DEV_ATAPI) && 12197d77b247STejun Heo (link->device->flags & ATA_DFLAG_AN)) 12207d77b247STejun Heo ata_scsi_media_change_notify(link->device); 12217d77b247STejun Heo } 12227d77b247STejun Heo 12237d77b247STejun Heo /* If PMP is reporting that PHY status of some 12247d77b247STejun Heo * downstream ports has changed, schedule EH. 12257d77b247STejun Heo */ 12267d77b247STejun Heo if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 12277d77b247STejun Heo ata_port_schedule_eh(ap); 12287d77b247STejun Heo return 1; 12297d77b247STejun Heo } 12307d77b247STejun Heo 12317d77b247STejun Heo return 0; 12327d77b247STejun Heo } 12337d77b247STejun Heo } 12347d77b247STejun Heo 12357d77b247STejun Heo /** 1236c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port 1237c6fd2807SJeff Garzik * @ap: ATA port to freeze 1238c6fd2807SJeff Garzik * 1239c6fd2807SJeff Garzik * Freeze @ap. 1240c6fd2807SJeff Garzik * 1241c6fd2807SJeff Garzik * LOCKING: 1242c6fd2807SJeff Garzik * None. 
1243c6fd2807SJeff Garzik */ 1244c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1245c6fd2807SJeff Garzik { 1246c6fd2807SJeff Garzik unsigned long flags; 1247c6fd2807SJeff Garzik 1248c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1249c6fd2807SJeff Garzik return; 1250c6fd2807SJeff Garzik 1251c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1252c6fd2807SJeff Garzik __ata_port_freeze(ap); 1253c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1254c6fd2807SJeff Garzik } 1255c6fd2807SJeff Garzik 1256c6fd2807SJeff Garzik /** 1257c6fd2807SJeff Garzik * ata_port_thaw_port - EH helper to thaw port 1258c6fd2807SJeff Garzik * @ap: ATA port to thaw 1259c6fd2807SJeff Garzik * 1260c6fd2807SJeff Garzik * Thaw frozen port @ap. 1261c6fd2807SJeff Garzik * 1262c6fd2807SJeff Garzik * LOCKING: 1263c6fd2807SJeff Garzik * None. 1264c6fd2807SJeff Garzik */ 1265c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1266c6fd2807SJeff Garzik { 1267c6fd2807SJeff Garzik unsigned long flags; 1268c6fd2807SJeff Garzik 1269c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1270c6fd2807SJeff Garzik return; 1271c6fd2807SJeff Garzik 1272c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1273c6fd2807SJeff Garzik 1274c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1275c6fd2807SJeff Garzik 1276c6fd2807SJeff Garzik if (ap->ops->thaw) 1277c6fd2807SJeff Garzik ap->ops->thaw(ap); 1278c6fd2807SJeff Garzik 1279c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1280c6fd2807SJeff Garzik 128144877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id); 1282c6fd2807SJeff Garzik } 1283c6fd2807SJeff Garzik 1284c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1285c6fd2807SJeff Garzik { 1286c6fd2807SJeff Garzik /* nada */ 1287c6fd2807SJeff Garzik } 1288c6fd2807SJeff Garzik 1289c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1290c6fd2807SJeff Garzik { 1291c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1292c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1293c6fd2807SJeff Garzik unsigned long flags; 1294c6fd2807SJeff Garzik 1295c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1296c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1297c6fd2807SJeff Garzik __ata_qc_complete(qc); 1298c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1299c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1300c6fd2807SJeff Garzik 1301c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1302c6fd2807SJeff Garzik } 1303c6fd2807SJeff Garzik 1304c6fd2807SJeff Garzik /** 1305c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1306c6fd2807SJeff Garzik * @qc: Command to complete 1307c6fd2807SJeff Garzik * 1308c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1309c6fd2807SJeff Garzik * completed. To be used from EH. 
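 *
 * A sketch of the typical pattern (hypothetical recovery loop): once
 * recovery is finished, each failed command is either reported
 * upwards or handed back for a retry:
 *
 *	if (qc->err_mask)
 *		ata_eh_qc_complete(qc);
 *	else
 *		ata_eh_qc_retry(qc);
 *
 * Completing sets scmd->retries to scmd->allowed so the SCSI midlayer
 * reports the result instead of retrying, while ata_eh_qc_retry()
 * below leaves the retry budget available.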
1310c6fd2807SJeff Garzik */ 1311c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1312c6fd2807SJeff Garzik { 1313c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1314c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1315c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1316c6fd2807SJeff Garzik } 1317c6fd2807SJeff Garzik 1318c6fd2807SJeff Garzik /** 1319c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1320c6fd2807SJeff Garzik * @qc: Command to retry 1321c6fd2807SJeff Garzik * 1322c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1323c6fd2807SJeff Garzik * should be retried. To be used from EH. 1324c6fd2807SJeff Garzik * 1325c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1326f13e2201SGwendal Grignou * scmd->allowed is incremented for commands which get retried 1327c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1328c6fd2807SJeff Garzik */ 1329c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1330c6fd2807SJeff Garzik { 1331c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1332f13e2201SGwendal Grignou if (!qc->err_mask) 1333f13e2201SGwendal Grignou scmd->allowed++; 1334c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1335c6fd2807SJeff Garzik } 1336c6fd2807SJeff Garzik 1337c6fd2807SJeff Garzik /** 1338678afac6STejun Heo * ata_dev_disable - disable ATA device 1339678afac6STejun Heo * @dev: ATA device to disable 1340678afac6STejun Heo * 1341678afac6STejun Heo * Disable @dev. 1342678afac6STejun Heo * 1343678afac6STejun Heo * Locking: 1344678afac6STejun Heo * EH context. 1345678afac6STejun Heo */ 1346678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1347678afac6STejun Heo { 1348678afac6STejun Heo if (!ata_dev_enabled(dev)) 1349678afac6STejun Heo return; 1350678afac6STejun Heo 1351678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1352a9a79dfeSJoe Perches ata_dev_warn(dev, "disabled\n"); 1353678afac6STejun Heo ata_acpi_on_disable(dev); 1354678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1355678afac6STejun Heo dev->class++; 135699cf610aSTejun Heo 135799cf610aSTejun Heo /* From now till the next successful probe, ering is used to 135899cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 135999cf610aSTejun Heo */ 136099cf610aSTejun Heo ata_ering_clear(&dev->ering); 1361678afac6STejun Heo } 1362678afac6STejun Heo 1363678afac6STejun Heo /** 1364c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1365c6fd2807SJeff Garzik * @dev: ATA device to detach 1366c6fd2807SJeff Garzik * 1367c6fd2807SJeff Garzik * Detach @dev. 1368c6fd2807SJeff Garzik * 1369c6fd2807SJeff Garzik * LOCKING: 1370c6fd2807SJeff Garzik * None. 
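 *
 * As implemented below: the device is disabled first; if the
 * corresponding SCSI device can be taken offline, the device is
 * marked ATA_DFLAG_DETACHED and ATA_PFLAG_SCSI_HOTPLUG is raised on
 * the port so that the SCSI-side removal happens later.  Per-device
 * EH state (pending actions, saved transfer mode, saved NCQ setting)
 * is cleared as well.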
1371c6fd2807SJeff Garzik */ 1372fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1373c6fd2807SJeff Garzik { 1374f58229f8STejun Heo struct ata_link *link = dev->link; 1375f58229f8STejun Heo struct ata_port *ap = link->ap; 137690484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1377c6fd2807SJeff Garzik unsigned long flags; 1378c6fd2807SJeff Garzik 1379c6fd2807SJeff Garzik ata_dev_disable(dev); 1380c6fd2807SJeff Garzik 1381c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1382c6fd2807SJeff Garzik 1383c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1384c6fd2807SJeff Garzik 1385c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1386c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1387c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1388c6fd2807SJeff Garzik } 1389c6fd2807SJeff Garzik 139090484ebfSTejun Heo /* clear per-dev EH info */ 1391f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1392f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 139390484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 139490484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1395c6fd2807SJeff Garzik 1396c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1397c6fd2807SJeff Garzik } 1398c6fd2807SJeff Garzik 1399c6fd2807SJeff Garzik /** 1400c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1401955e57dfSTejun Heo * @link: target ATA link 1402c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1403c6fd2807SJeff Garzik * @action: action about to be performed 1404c6fd2807SJeff Garzik * 1405c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1406955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1407955e57dfSTejun Heo * repeated. 1408c6fd2807SJeff Garzik * 1409c6fd2807SJeff Garzik * LOCKING: 1410c6fd2807SJeff Garzik * None. 1411c6fd2807SJeff Garzik */ 1412fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1413c6fd2807SJeff Garzik unsigned int action) 1414c6fd2807SJeff Garzik { 1415955e57dfSTejun Heo struct ata_port *ap = link->ap; 1416955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1417955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1418c6fd2807SJeff Garzik unsigned long flags; 1419c6fd2807SJeff Garzik 1420c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1421c6fd2807SJeff Garzik 1422955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1423c6fd2807SJeff Garzik 1424a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1425a568d1d2STejun Heo * slave links as master will do them again. 
1426a568d1d2STejun Heo */ 1427a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1428c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1429c6fd2807SJeff Garzik 1430c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1431c6fd2807SJeff Garzik } 1432c6fd2807SJeff Garzik 1433c6fd2807SJeff Garzik /** 1434c6fd2807SJeff Garzik * ata_eh_done - EH action complete 14352f60e1abSJonathan Corbet * @link: ATA link for which EH actions are complete 1436c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1437c6fd2807SJeff Garzik * @action: action just completed 1438c6fd2807SJeff Garzik * 1439c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1440955e57dfSTejun Heo * in @link->eh_context. 1441c6fd2807SJeff Garzik * 1442c6fd2807SJeff Garzik * LOCKING: 1443c6fd2807SJeff Garzik * None. 1444c6fd2807SJeff Garzik */ 1445fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1446c6fd2807SJeff Garzik unsigned int action) 1447c6fd2807SJeff Garzik { 1448955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 14499af5c9c9STejun Heo 1450955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1451c6fd2807SJeff Garzik } 1452c6fd2807SJeff Garzik 1453c6fd2807SJeff Garzik /** 1454c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1455c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1456c6fd2807SJeff Garzik * 1457c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1458c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1459c6fd2807SJeff Garzik * error is reported. 1460c6fd2807SJeff Garzik * 1461c6fd2807SJeff Garzik * LOCKING: 1462c6fd2807SJeff Garzik * None. 1463c6fd2807SJeff Garzik * 1464c6fd2807SJeff Garzik * RETURNS: 1465c6fd2807SJeff Garzik * Descriptive string for @err_mask 1466c6fd2807SJeff Garzik */ 1467c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1468c6fd2807SJeff Garzik { 1469c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1470c6fd2807SJeff Garzik return "host bus error"; 1471c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1472c6fd2807SJeff Garzik return "ATA bus error"; 1473c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1474c6fd2807SJeff Garzik return "timeout"; 1475c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1476c6fd2807SJeff Garzik return "HSM violation"; 1477c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1478c6fd2807SJeff Garzik return "internal error"; 1479c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1480c6fd2807SJeff Garzik return "media error"; 1481c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1482c6fd2807SJeff Garzik return "invalid argument"; 1483c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1484c6fd2807SJeff Garzik return "device error"; 1485c6fd2807SJeff Garzik return "unknown error"; 1486c6fd2807SJeff Garzik } 1487c6fd2807SJeff Garzik 1488c6fd2807SJeff Garzik /** 1489c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1490c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1491c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1492c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1493c6fd2807SJeff Garzik * 1494c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1495c6fd2807SJeff Garzik * condition. 
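 *
 * Layout as parsed below: bit 7 of byte 0 set means no error entry is
 * queued (-ENOENT is returned); bits 4:0 of byte 0 hold the tag of
 * the failed command; bytes 2..13 hold the shadow taskfile registers;
 * bytes 14..16 hold the NCQ autosense sense key, asc and ascq when
 * the device advertises NCQ autosense support.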
1496c6fd2807SJeff Garzik * 1497c6fd2807SJeff Garzik * LOCKING: 1498c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1499c6fd2807SJeff Garzik * 1500c6fd2807SJeff Garzik * RETURNS: 1501c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 1502c6fd2807SJeff Garzik */ 1503c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1504c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1505c6fd2807SJeff Garzik { 15069af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1507c6fd2807SJeff Garzik unsigned int err_mask; 1508c6fd2807SJeff Garzik u8 csum; 1509c6fd2807SJeff Garzik int i; 1510c6fd2807SJeff Garzik 151165fe1f0fSShane Huang err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1); 1512c6fd2807SJeff Garzik if (err_mask) 1513c6fd2807SJeff Garzik return -EIO; 1514c6fd2807SJeff Garzik 1515c6fd2807SJeff Garzik csum = 0; 1516c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1517c6fd2807SJeff Garzik csum += buf[i]; 1518c6fd2807SJeff Garzik if (csum) 1519a9a79dfeSJoe Perches ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", 1520a9a79dfeSJoe Perches csum); 1521c6fd2807SJeff Garzik 1522c6fd2807SJeff Garzik if (buf[0] & 0x80) 1523c6fd2807SJeff Garzik return -ENOENT; 1524c6fd2807SJeff Garzik 1525c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1526c6fd2807SJeff Garzik 1527c6fd2807SJeff Garzik tf->command = buf[2]; 1528c6fd2807SJeff Garzik tf->feature = buf[3]; 1529c6fd2807SJeff Garzik tf->lbal = buf[4]; 1530c6fd2807SJeff Garzik tf->lbam = buf[5]; 1531c6fd2807SJeff Garzik tf->lbah = buf[6]; 1532c6fd2807SJeff Garzik tf->device = buf[7]; 1533c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1534c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1535c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1536c6fd2807SJeff Garzik tf->nsect = buf[12]; 1537c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 15385b01e4b9SHannes Reinecke if (ata_id_has_ncq_autosense(dev->id)) 15395b01e4b9SHannes Reinecke tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; 1540c6fd2807SJeff Garzik 1541c6fd2807SJeff Garzik return 0; 1542c6fd2807SJeff Garzik } 1543c6fd2807SJeff Garzik 1544c6fd2807SJeff Garzik /** 154511fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 154611fc33daSTejun Heo * @dev: target ATAPI device 154711fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 154811fc33daSTejun Heo * 154911fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 155011fc33daSTejun Heo * 155111fc33daSTejun Heo * LOCKING: 155211fc33daSTejun Heo * EH context (may sleep). 155311fc33daSTejun Heo * 155411fc33daSTejun Heo * RETURNS: 155511fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
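 *
 * A usage sketch (hypothetical caller): probe readiness and pick up
 * the sense key on failure; note that *r_sense_key is only written
 * when the result is AC_ERR_DEV:
 *
 *	u8 sense_key = 0;
 *
 *	if (atapi_eh_tur(dev, &sense_key) == AC_ERR_DEV &&
 *	    sense_key == UNIT_ATTENTION)
 *		... a pending unit attention was reported ...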
155611fc33daSTejun Heo */ 15573dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 155811fc33daSTejun Heo { 155911fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 156011fc33daSTejun Heo struct ata_taskfile tf; 156111fc33daSTejun Heo unsigned int err_mask; 156211fc33daSTejun Heo 156311fc33daSTejun Heo ata_tf_init(dev, &tf); 156411fc33daSTejun Heo 156511fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 156611fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 156711fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 156811fc33daSTejun Heo 156911fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 157011fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 157111fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 157211fc33daSTejun Heo return err_mask; 157311fc33daSTejun Heo } 157411fc33daSTejun Heo 157511fc33daSTejun Heo /** 1576e87fd28cSHannes Reinecke * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT 15772f60e1abSJonathan Corbet * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to 1578e87fd28cSHannes Reinecke * @cmd: scsi command for which the sense code should be set 1579e87fd28cSHannes Reinecke * 1580e87fd28cSHannes Reinecke * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK 1581e87fd28cSHannes Reinecke * SENSE. This function is an EH helper. 1582e87fd28cSHannes Reinecke * 1583e87fd28cSHannes Reinecke * LOCKING: 1584e87fd28cSHannes Reinecke * Kernel thread context (may sleep). 1585e87fd28cSHannes Reinecke */ 1586e87fd28cSHannes Reinecke static void ata_eh_request_sense(struct ata_queued_cmd *qc, 1587e87fd28cSHannes Reinecke struct scsi_cmnd *cmd) 1588e87fd28cSHannes Reinecke { 1589e87fd28cSHannes Reinecke struct ata_device *dev = qc->dev; 1590e87fd28cSHannes Reinecke struct ata_taskfile tf; 1591e87fd28cSHannes Reinecke unsigned int err_mask; 1592e87fd28cSHannes Reinecke 1593e87fd28cSHannes Reinecke if (qc->ap->pflags & ATA_PFLAG_FROZEN) { 1594e87fd28cSHannes Reinecke ata_dev_warn(dev, "sense data available but port frozen\n"); 1595e87fd28cSHannes Reinecke return; 1596e87fd28cSHannes Reinecke } 1597e87fd28cSHannes Reinecke 1598d238ffd5SHannes Reinecke if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID) 1599e87fd28cSHannes Reinecke return; 1600e87fd28cSHannes Reinecke 1601e87fd28cSHannes Reinecke if (!ata_id_sense_reporting_enabled(dev->id)) { 1602e87fd28cSHannes Reinecke ata_dev_warn(qc->dev, "sense data reporting disabled\n"); 1603e87fd28cSHannes Reinecke return; 1604e87fd28cSHannes Reinecke } 1605e87fd28cSHannes Reinecke 1606e87fd28cSHannes Reinecke DPRINTK("ATA request sense\n"); 1607e87fd28cSHannes Reinecke 1608e87fd28cSHannes Reinecke ata_tf_init(dev, &tf); 1609e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1610e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1611e87fd28cSHannes Reinecke tf.command = ATA_CMD_REQ_SENSE_DATA; 1612e87fd28cSHannes Reinecke tf.protocol = ATA_PROT_NODATA; 1613e87fd28cSHannes Reinecke 1614e87fd28cSHannes Reinecke err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1615e87fd28cSHannes Reinecke /* Ignore err_mask; ATA_ERR might be set */ 1616e87fd28cSHannes Reinecke if (tf.command & ATA_SENSE) { 161706dbde5fSHannes Reinecke ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); 1618e87fd28cSHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 1619e87fd28cSHannes Reinecke } else { 1620e87fd28cSHannes Reinecke ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", 1621e87fd28cSHannes 
Reinecke tf.command, err_mask); 1622e87fd28cSHannes Reinecke } 1623e87fd28cSHannes Reinecke } 1624e87fd28cSHannes Reinecke 1625e87fd28cSHannes Reinecke /** 1626c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1627c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1628c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 16293eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1630c6fd2807SJeff Garzik * 1631c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1632c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1633c6fd2807SJeff Garzik * 1634c6fd2807SJeff Garzik * LOCKING: 1635c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1636c6fd2807SJeff Garzik * 1637c6fd2807SJeff Garzik * RETURNS: 1638c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1639c6fd2807SJeff Garzik */ 16403dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev, 16413eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1642c6fd2807SJeff Garzik { 16433eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 16443eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 16459af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1646c6fd2807SJeff Garzik struct ata_taskfile tf; 1647c6fd2807SJeff Garzik 1648c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1649c6fd2807SJeff Garzik 1650c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1651c6fd2807SJeff Garzik 165256287768SAlbert Lee /* initialize sense_buf with the error register, 165356287768SAlbert Lee * for the case where they are -not- overwritten 165456287768SAlbert Lee */ 1655c6fd2807SJeff Garzik sense_buf[0] = 0x70; 16563eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 165756287768SAlbert Lee 165856287768SAlbert Lee /* some devices time out if garbage left in tf */ 165956287768SAlbert Lee ata_tf_init(dev, &tf); 1660c6fd2807SJeff Garzik 1661c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1662c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1663c6fd2807SJeff Garzik 1664c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1665c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 16660dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1667c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1668c6fd2807SJeff Garzik } else { 16690dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1670f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1671f2dfc1a1STejun Heo tf.lbah = 0; 1672c6fd2807SJeff Garzik } 1673c6fd2807SJeff Garzik 1674c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 16752b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1676c6fd2807SJeff Garzik } 1677c6fd2807SJeff Garzik 1678c6fd2807SJeff Garzik /** 1679c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 16800260731fSTejun Heo * @link: ATA link to analyze SError for 1681c6fd2807SJeff Garzik * 1682c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1683c6fd2807SJeff Garzik * failure. 1684c6fd2807SJeff Garzik * 1685c6fd2807SJeff Garzik * LOCKING: 1686c6fd2807SJeff Garzik * None. 
1687c6fd2807SJeff Garzik */ 16880260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1689c6fd2807SJeff Garzik { 16900260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1691c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1692c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1693f9df58cbSTejun Heo u32 hotplug_mask; 1694c6fd2807SJeff Garzik 1695e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1696c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1697cf480626STejun Heo action |= ATA_EH_RESET; 1698c6fd2807SJeff Garzik } 1699c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1700c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1701cf480626STejun Heo action |= ATA_EH_RESET; 1702c6fd2807SJeff Garzik } 1703c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1704c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1705cf480626STejun Heo action |= ATA_EH_RESET; 1706c6fd2807SJeff Garzik } 1707f9df58cbSTejun Heo 1708f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1709f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1710f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1711f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1712f9df58cbSTejun Heo */ 1713eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 17146b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 17156b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1716f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1717f9df58cbSTejun Heo else 1718f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1719f9df58cbSTejun Heo 1720f9df58cbSTejun Heo if (serror & hotplug_mask) 1721c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1722c6fd2807SJeff Garzik 1723c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1724c6fd2807SJeff Garzik ehc->i.action |= action; 1725c6fd2807SJeff Garzik } 1726c6fd2807SJeff Garzik 1727c6fd2807SJeff Garzik /** 1728c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 17290260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1730c6fd2807SJeff Garzik * 1731c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1732c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1733c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1734c6fd2807SJeff Garzik * care of the rest. 1735c6fd2807SJeff Garzik * 1736c6fd2807SJeff Garzik * LOCKING: 1737c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1738c6fd2807SJeff Garzik */ 173910acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1740c6fd2807SJeff Garzik { 17410260731fSTejun Heo struct ata_port *ap = link->ap; 17420260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 17430260731fSTejun Heo struct ata_device *dev = link->device; 1744c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1745c6fd2807SJeff Garzik struct ata_taskfile tf; 1746c6fd2807SJeff Garzik int tag, rc; 1747c6fd2807SJeff Garzik 1748c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1749c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1750c6fd2807SJeff Garzik return; 1751c6fd2807SJeff Garzik 1752c6fd2807SJeff Garzik /* is it NCQ device error? */ 17530260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1754c6fd2807SJeff Garzik return; 1755c6fd2807SJeff Garzik 1756c6fd2807SJeff Garzik /* has LLDD analyzed already? 
*/ 1757c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1758c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1759c6fd2807SJeff Garzik 1760c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1761c6fd2807SJeff Garzik continue; 1762c6fd2807SJeff Garzik 1763c6fd2807SJeff Garzik if (qc->err_mask) 1764c6fd2807SJeff Garzik return; 1765c6fd2807SJeff Garzik } 1766c6fd2807SJeff Garzik 1767c6fd2807SJeff Garzik /* okay, this error is ours */ 1768a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf)); 1769c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1770c6fd2807SJeff Garzik if (rc) { 1771a9a79dfeSJoe Perches ata_link_err(link, "failed to read log page 10h (errno=%d)\n", 1772a9a79dfeSJoe Perches rc); 1773c6fd2807SJeff Garzik return; 1774c6fd2807SJeff Garzik } 1775c6fd2807SJeff Garzik 17760260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 1777a9a79dfeSJoe Perches ata_link_err(link, "log page 10h reported inactive tag %d\n", 1778a9a79dfeSJoe Perches tag); 1779c6fd2807SJeff Garzik return; 1780c6fd2807SJeff Garzik } 1781c6fd2807SJeff Garzik 1782c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1783c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1784c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1785a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 17865335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1787d238ffd5SHannes Reinecke if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) { 17885b01e4b9SHannes Reinecke char sense_key, asc, ascq; 17895b01e4b9SHannes Reinecke 17905b01e4b9SHannes Reinecke sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; 17915b01e4b9SHannes Reinecke asc = (qc->result_tf.auxiliary >> 8) & 0xff; 17925b01e4b9SHannes Reinecke ascq = qc->result_tf.auxiliary & 0xff; 179306dbde5fSHannes Reinecke ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq); 1794492bf621SHannes Reinecke ata_scsi_set_sense_information(dev, qc->scsicmd, 1795492bf621SHannes Reinecke &qc->result_tf); 17965b01e4b9SHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 17975b01e4b9SHannes Reinecke } 17985b01e4b9SHannes Reinecke 1799c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1800c6fd2807SJeff Garzik } 1801c6fd2807SJeff Garzik 1802c6fd2807SJeff Garzik /** 1803c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1804c6fd2807SJeff Garzik * @qc: qc to analyze 1805c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1806c6fd2807SJeff Garzik * 1807c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1808c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 180925985edcSLucas De Marchi * available. 1810c6fd2807SJeff Garzik * 1811c6fd2807SJeff Garzik * LOCKING: 1812c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1813c6fd2807SJeff Garzik * 1814c6fd2807SJeff Garzik * RETURNS: 1815c6fd2807SJeff Garzik * Determined recovery action 1816c6fd2807SJeff Garzik */ 1817c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1818c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1819c6fd2807SJeff Garzik { 1820c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1821c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1822c6fd2807SJeff Garzik 1823c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1824c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1825cf480626STejun Heo return ATA_EH_RESET; 1826c6fd2807SJeff Garzik } 1827c6fd2807SJeff Garzik 1828e87fd28cSHannes Reinecke if (stat & (ATA_ERR | ATA_DF)) { 1829a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1830e87fd28cSHannes Reinecke /* 1831e87fd28cSHannes Reinecke * Sense data reporting does not work if the 1832e87fd28cSHannes Reinecke * device fault bit is set. 1833e87fd28cSHannes Reinecke */ 1834e87fd28cSHannes Reinecke if (stat & ATA_DF) 1835e87fd28cSHannes Reinecke stat &= ~ATA_SENSE; 1836e87fd28cSHannes Reinecke } else { 1837c6fd2807SJeff Garzik return 0; 1838e87fd28cSHannes Reinecke } 1839c6fd2807SJeff Garzik 1840c6fd2807SJeff Garzik switch (qc->dev->class) { 1841c6fd2807SJeff Garzik case ATA_DEV_ATA: 18429162c657SHannes Reinecke case ATA_DEV_ZAC: 1843e87fd28cSHannes Reinecke if (stat & ATA_SENSE) 1844e87fd28cSHannes Reinecke ata_eh_request_sense(qc, qc->scsicmd); 1845c6fd2807SJeff Garzik if (err & ATA_ICRC) 1846c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1847eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF)) 1848c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1849c6fd2807SJeff Garzik if (err & ATA_IDNF) 1850c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1851c6fd2807SJeff Garzik break; 1852c6fd2807SJeff Garzik 1853c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1854a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 18553eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 18563eabddb8STejun Heo qc->scsicmd->sense_buffer, 18573eabddb8STejun Heo qc->result_tf.feature >> 4); 18583852e373SHannes Reinecke if (!tmp) 1859c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 18603852e373SHannes Reinecke else 1861c6fd2807SJeff Garzik qc->err_mask |= tmp; 1862c6fd2807SJeff Garzik } 1863a569a30dSTejun Heo } 1864c6fd2807SJeff Garzik 18653852e373SHannes Reinecke if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 18663852e373SHannes Reinecke int ret = scsi_check_sense(qc->scsicmd); 18673852e373SHannes Reinecke /* 18683852e373SHannes Reinecke * SUCCESS here means that the sense code could 18693852e373SHannes Reinecke * be evaluated and should be passed to the upper layers 18703852e373SHannes Reinecke * for correct evaluation. 18713852e373SHannes Reinecke * FAILED means the sense code could not be interpreted 18723852e373SHannes Reinecke * and the device would need to be reset. 18733852e373SHannes Reinecke * NEEDS_RETRY and ADD_TO_MLQUEUE mean that the 18743852e373SHannes Reinecke * command would need to be retried.
18753852e373SHannes Reinecke */ 18763852e373SHannes Reinecke if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) { 18773852e373SHannes Reinecke qc->flags |= ATA_QCFLAG_RETRY; 18783852e373SHannes Reinecke qc->err_mask |= AC_ERR_OTHER; 18793852e373SHannes Reinecke } else if (ret != SUCCESS) { 18803852e373SHannes Reinecke qc->err_mask |= AC_ERR_HSM; 18813852e373SHannes Reinecke } 18823852e373SHannes Reinecke } 1883c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1884cf480626STejun Heo action |= ATA_EH_RESET; 1885c6fd2807SJeff Garzik 1886c6fd2807SJeff Garzik return action; 1887c6fd2807SJeff Garzik } 1888c6fd2807SJeff Garzik 188976326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 189076326ac1STejun Heo int *xfer_ok) 1891c6fd2807SJeff Garzik { 189276326ac1STejun Heo int base = 0; 189376326ac1STejun Heo 189476326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 189576326ac1STejun Heo *xfer_ok = 1; 189676326ac1STejun Heo 189776326ac1STejun Heo if (!*xfer_ok) 189875f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 189976326ac1STejun Heo 19007d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 190176326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1902c6fd2807SJeff Garzik 19037d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 190476326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 19057d47e8d4STejun Heo 19063884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 19077d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 190876326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 19097d47e8d4STejun Heo if ((err_mask & 19107d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 191176326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1912c6fd2807SJeff Garzik } 1913c6fd2807SJeff Garzik 1914c6fd2807SJeff Garzik return 0; 1915c6fd2807SJeff Garzik } 1916c6fd2807SJeff Garzik 19177d47e8d4STejun Heo struct speed_down_verdict_arg { 1918c6fd2807SJeff Garzik u64 since; 191976326ac1STejun Heo int xfer_ok; 19203884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1921c6fd2807SJeff Garzik }; 1922c6fd2807SJeff Garzik 19237d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1924c6fd2807SJeff Garzik { 19257d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 192676326ac1STejun Heo int cat; 1927c6fd2807SJeff Garzik 1928d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1929c6fd2807SJeff Garzik return -1; 1930c6fd2807SJeff Garzik 193176326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 193276326ac1STejun Heo &arg->xfer_ok); 19337d47e8d4STejun Heo arg->nr_errors[cat]++; 193476326ac1STejun Heo 1935c6fd2807SJeff Garzik return 0; 1936c6fd2807SJeff Garzik } 1937c6fd2807SJeff Garzik 1938c6fd2807SJeff Garzik /** 19397d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1940c6fd2807SJeff Garzik * @dev: Device of interest 1941c6fd2807SJeff Garzik * 1942c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 19437d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 19447d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
1945c6fd2807SJeff Garzik * 19463884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1947c6fd2807SJeff Garzik * 19483884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 19493884f7b0STejun Heo * IO commands 19507d47e8d4STejun Heo * 19513884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1952c6fd2807SJeff Garzik * 195376326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 195476326ac1STejun Heo * data transfer hasn't been verified. 195576326ac1STejun Heo * 19563884f7b0STejun Heo * Verdicts are 19577d47e8d4STejun Heo * 19583884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 19597d47e8d4STejun Heo * 19603884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 19613884f7b0STejun Heo * to PIO. 19623884f7b0STejun Heo * 19633884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 19643884f7b0STejun Heo * 19653884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 196676326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 196776326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 196876326ac1STejun Heo * This is to expedite speed down decisions right after device is 196976326ac1STejun Heo * initially configured. 19703884f7b0STejun Heo * 19714091fb95SMasahiro Yamada * The following are speed down rules. #1 and #2 deal with 197276326ac1STejun Heo * DUBIOUS errors. 197376326ac1STejun Heo * 197476326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 197576326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 197676326ac1STejun Heo * 197776326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 197876326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 197976326ac1STejun Heo * 198076326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 198125985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO 19823884f7b0STejun Heo * 198376326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 19843884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 19853884f7b0STejun Heo * 198676326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 19873884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 19887d47e8d4STejun Heo * 1989c6fd2807SJeff Garzik * LOCKING: 1990c6fd2807SJeff Garzik * Inherited from caller. 1991c6fd2807SJeff Garzik * 1992c6fd2807SJeff Garzik * RETURNS: 19937d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 
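 *
 * Worked example (hypothetical error history): a device that logged
 * two DUBIOUS_TOUT_HSM errors within the last five minutes matches
 * rules #1 and #2, so the verdict is SPEED_DOWN | FALLBACK_TO_PIO |
 * NCQ_OFF, each with KEEP_ERRORS so the error ring is preserved.  A
 * device with four TOUT_HSM errors spread over the last ten minutes
 * matches rules #4 and #5 and gets NCQ_OFF | SPEED_DOWN.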
1994c6fd2807SJeff Garzik */ 19957d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1996c6fd2807SJeff Garzik { 19977d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 19987d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 19997d47e8d4STejun Heo struct speed_down_verdict_arg arg; 20007d47e8d4STejun Heo unsigned int verdict = 0; 2001c6fd2807SJeff Garzik 20023884f7b0STejun Heo /* scan past 5 mins of error history */ 20033884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 20043884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 20053884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 20063884f7b0STejun Heo 200776326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 200876326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 200976326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 201076326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 201176326ac1STejun Heo 201276326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 201376326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 201476326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 201576326ac1STejun Heo 20163884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 20173884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 2018663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 20193884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 20203884f7b0STejun Heo 20217d47e8d4STejun Heo /* scan past 10 mins of error history */ 2022c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 20237d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 20247d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 2025c6fd2807SJeff Garzik 20263884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 20273884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 20287d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 20293884f7b0STejun Heo 20303884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 20313884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 2032663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 20337d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 2034c6fd2807SJeff Garzik 20357d47e8d4STejun Heo return verdict; 2036c6fd2807SJeff Garzik } 2037c6fd2807SJeff Garzik 2038c6fd2807SJeff Garzik /** 2039c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 2040c6fd2807SJeff Garzik * @dev: Failed device 20413884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 2042c6fd2807SJeff Garzik * @err_mask: err_mask of the error 2043c6fd2807SJeff Garzik * 2044c6fd2807SJeff Garzik * Record error and examine error history to determine whether 2045c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 2046c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 2047c6fd2807SJeff Garzik * necessary. 2048c6fd2807SJeff Garzik * 2049c6fd2807SJeff Garzik * LOCKING: 2050c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2051c6fd2807SJeff Garzik * 2052c6fd2807SJeff Garzik * RETURNS: 20537d47e8d4STejun Heo * Determined recovery action. 
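 *
 * ata_eh_link_autopsy() below simply ORs the result into the EH
 * context:
 *
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
 *
 * so any verdict that lowered a limit also requests ATA_EH_RESET,
 * letting the rest of EH apply the new limit.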
2054c6fd2807SJeff Garzik */ 20553884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 20563884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 2057c6fd2807SJeff Garzik { 2058b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 205976326ac1STejun Heo int xfer_ok = 0; 20607d47e8d4STejun Heo unsigned int verdict; 20617d47e8d4STejun Heo unsigned int action = 0; 20627d47e8d4STejun Heo 20637d47e8d4STejun Heo /* don't bother if Cat-0 error */ 206476326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 2065c6fd2807SJeff Garzik return 0; 2066c6fd2807SJeff Garzik 2067c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 20683884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 20697d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 2070c6fd2807SJeff Garzik 20717d47e8d4STejun Heo /* turn off NCQ? */ 20727d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 20737d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 20747d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 20757d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 2076a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 20777d47e8d4STejun Heo goto done; 20787d47e8d4STejun Heo } 2079c6fd2807SJeff Garzik 20807d47e8d4STejun Heo /* speed down? */ 20817d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 2082c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 2083a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 2084cf480626STejun Heo action |= ATA_EH_RESET; 20857d47e8d4STejun Heo goto done; 20867d47e8d4STejun Heo } 2087c6fd2807SJeff Garzik 2088c6fd2807SJeff Garzik /* lower transfer mode */ 20897d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 20907d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 20917d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 20927d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 20937d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 20947d47e8d4STejun Heo int sel; 2095c6fd2807SJeff Garzik 20967d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 20977d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 20987d47e8d4STejun Heo else 20997d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 21007d47e8d4STejun Heo 21017d47e8d4STejun Heo dev->spdn_cnt++; 21027d47e8d4STejun Heo 21037d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 2104cf480626STejun Heo action |= ATA_EH_RESET; 21057d47e8d4STejun Heo goto done; 21067d47e8d4STejun Heo } 21077d47e8d4STejun Heo } 21087d47e8d4STejun Heo } 21097d47e8d4STejun Heo 21107d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 2111663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
21127d47e8d4STejun Heo */ 21137d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 2114663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 21157d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 21167d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 21177d47e8d4STejun Heo dev->spdn_cnt = 0; 2118cf480626STejun Heo action |= ATA_EH_RESET; 21197d47e8d4STejun Heo goto done; 21207d47e8d4STejun Heo } 21217d47e8d4STejun Heo } 21227d47e8d4STejun Heo 2123c6fd2807SJeff Garzik return 0; 21247d47e8d4STejun Heo done: 21257d47e8d4STejun Heo /* device has been slowed down, blow error history */ 212676326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 21277d47e8d4STejun Heo ata_ering_clear(&dev->ering); 21287d47e8d4STejun Heo return action; 2129c6fd2807SJeff Garzik } 2130c6fd2807SJeff Garzik 2131c6fd2807SJeff Garzik /** 21328d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry 21338d899e70SMark Lord * @qc: qc to possibly retry 21348d899e70SMark Lord * 21358d899e70SMark Lord * Look at the cause of the error and decide if a retry 21368d899e70SMark Lord * might be useful or not. We don't want to retry media errors 21378d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds 21388d899e70SMark Lord * doing its own internal retries before reporting the failure. 21398d899e70SMark Lord */ 21408d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 21418d899e70SMark Lord { 21421eaca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA) 21438d899e70SMark Lord return 0; /* don't retry media errors */ 21448d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO) 21458d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */ 21468d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID) 21478d899e70SMark Lord return 0; /* don't retry these */ 21488d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ 21498d899e70SMark Lord } 21508d899e70SMark Lord 21518d899e70SMark Lord /** 21529b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 21539b1e2658STejun Heo * @link: host link to perform autopsy on 2154c6fd2807SJeff Garzik * 21550260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 21560260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 21570260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 2158c6fd2807SJeff Garzik * 2159c6fd2807SJeff Garzik * LOCKING: 2160c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
2161c6fd2807SJeff Garzik */ 21629b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 2163c6fd2807SJeff Garzik { 21640260731fSTejun Heo struct ata_port *ap = link->ap; 2165936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2166dfcc173dSTejun Heo struct ata_device *dev; 21673884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 21683884f7b0STejun Heo int tag; 2169c6fd2807SJeff Garzik u32 serror; 2170c6fd2807SJeff Garzik int rc; 2171c6fd2807SJeff Garzik 2172c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2173c6fd2807SJeff Garzik 2174c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2175c6fd2807SJeff Garzik return; 2176c6fd2807SJeff Garzik 2177c6fd2807SJeff Garzik /* obtain and analyze SError */ 2178936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2179c6fd2807SJeff Garzik if (rc == 0) { 2180c6fd2807SJeff Garzik ehc->i.serror |= serror; 21810260731fSTejun Heo ata_eh_analyze_serror(link); 21824e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2183cf480626STejun Heo /* SError read failed, force reset and probing */ 2184b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2185cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 21864e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 21874e57c517STejun Heo } 2188c6fd2807SJeff Garzik 2189c6fd2807SJeff Garzik /* analyze NCQ failure */ 21900260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2191c6fd2807SJeff Garzik 2192c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2193c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2194c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2195c6fd2807SJeff Garzik 2196c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2197c6fd2807SJeff Garzik 2198c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2199c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2200c6fd2807SJeff Garzik 2201b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2202b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2203c6fd2807SJeff Garzik continue; 2204c6fd2807SJeff Garzik 2205c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2206c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2207c6fd2807SJeff Garzik 2208c6fd2807SJeff Garzik /* analyze TF */ 2209c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2210c6fd2807SJeff Garzik 2211c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2212c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2213c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2214c6fd2807SJeff Garzik AC_ERR_INVALID); 2215c6fd2807SJeff Garzik 2216c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2217c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2218c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2219c6fd2807SJeff Garzik 2220c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2221f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2222c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2223c6fd2807SJeff Garzik 222403faab78STejun Heo /* determine whether the command is worth retrying */ 22258d899e70SMark Lord if (ata_eh_worth_retry(qc)) 222603faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 222703faab78STejun Heo 2228c6fd2807SJeff Garzik /* accumulate error info */ 2229c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2230c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2231c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 22323884f7b0STejun 
Heo eflags |= ATA_EFLAG_IS_IO; 2233255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc); 2234c6fd2807SJeff Garzik } 2235c6fd2807SJeff Garzik 2236c6fd2807SJeff Garzik /* enforce default EH actions */ 2237c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2238c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2239cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 22403884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 22413884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2242c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2243c6fd2807SJeff Garzik 2244dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2245dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2246dfcc173dSTejun Heo */ 2247c6fd2807SJeff Garzik if (ehc->i.dev) { 2248c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2249c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2250c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2251c6fd2807SJeff Garzik } 2252c6fd2807SJeff Garzik 22532695e366STejun Heo /* propagate timeout to host link */ 22542695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 22552695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 22562695e366STejun Heo 22572695e366STejun Heo /* record error and consider speeding down */ 2258dfcc173dSTejun Heo dev = ehc->i.dev; 22592695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 22602695e366STejun Heo ata_dev_enabled(link->device)))) 2261dfcc173dSTejun Heo dev = link->device; 2262dfcc173dSTejun Heo 226376326ac1STejun Heo if (dev) { 226476326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 226576326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 22663884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2267255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2268*f1601113SRameshwar Prasad Sahu } 2269c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2270c6fd2807SJeff Garzik } 2271c6fd2807SJeff Garzik 2272c6fd2807SJeff Garzik /** 22739b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 22749b1e2658STejun Heo * @ap: host port to perform autopsy on 22759b1e2658STejun Heo * 22769b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 22779b1e2658STejun Heo * which recovery actions are needed. 22789b1e2658STejun Heo * 22799b1e2658STejun Heo * LOCKING: 22809b1e2658STejun Heo * Kernel thread context (may sleep). 22819b1e2658STejun Heo */ 2282fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 22839b1e2658STejun Heo { 22849b1e2658STejun Heo struct ata_link *link; 22859b1e2658STejun Heo 22861eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 22879b1e2658STejun Heo ata_eh_link_autopsy(link); 22882695e366STejun Heo 2289b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2290b1c72916STejun Heo * but actions and flags are transferred over to the master 2291b1c72916STejun Heo * link and handled from there. 
2292b1c72916STejun Heo */ 2293b1c72916STejun Heo if (ap->slave_link) { 2294b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2295b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2296b1c72916STejun Heo 2297848e4c68STejun Heo /* transfer control flags from master to slave */ 2298848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2299848e4c68STejun Heo 2300848e4c68STejun Heo /* perform autopsy on the slave link */ 2301b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2302b1c72916STejun Heo 2303848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2304b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2305b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2306b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2307b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2308b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2309b1c72916STejun Heo } 2310b1c72916STejun Heo 23112695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 23122695e366STejun Heo * Perform host link autopsy last. 23132695e366STejun Heo */ 2314071f44b1STejun Heo if (sata_pmp_attached(ap)) 23152695e366STejun Heo ata_eh_link_autopsy(&ap->link); 23169b1e2658STejun Heo } 23179b1e2658STejun Heo 23189b1e2658STejun Heo /** 23196521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 23206521148cSRobert Hancock * @command: ATA command code to get description for 23216521148cSRobert Hancock * 23226521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 23236521148cSRobert Hancock * command is not known. 23246521148cSRobert Hancock * 23256521148cSRobert Hancock * LOCKING: 23266521148cSRobert Hancock * None 23276521148cSRobert Hancock */ 23286521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 23296521148cSRobert Hancock { 23306521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23316521148cSRobert Hancock static const struct 23326521148cSRobert Hancock { 23336521148cSRobert Hancock u8 command; 23346521148cSRobert Hancock const char *text; 23356521148cSRobert Hancock } cmd_descr[] = { 23366521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 23376521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 23386521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 23396521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 23406521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 23416521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 23423915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 23436521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 23446521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 23456521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 23466521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 23476521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 23486521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 23496521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 23506521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 23516521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 23526521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 23536521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 23546521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 
23556521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 23566521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 23576521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 23586521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 23596521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 23606521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 23616521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 23626521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 23633915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, 23643915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, 23656521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 23666521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 23676521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 23686521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 23696521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 23706521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 23716521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 23726521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 23736521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 23746521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 23756521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 23766521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 23776521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 23786521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 23796521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 23806521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 23816521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 23826521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 23836521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 23846521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 23856521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 23866521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 23876521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 23886521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 23896521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 23906521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 23913915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 23926521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 23936521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 23946521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 23956521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 23966521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 23973915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 23986521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 23993915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, 24006521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 24016521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET 
PASSWORD" }, 24026521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 24036521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 24046521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 24056521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 24066521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 24076521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 24086521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 24096521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 24106521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2411acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 24126521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 24136521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 24146521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 24156521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 24166521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 24176521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 24183915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 24193915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 242028a3fc22SHannes Reinecke { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" }, 242127708a95SHannes Reinecke { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" }, 24226521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 24236521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 24246521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 24256521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 24266521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 24276521148cSRobert Hancock { 0, NULL } /* terminate list */ 24286521148cSRobert Hancock }; 24296521148cSRobert Hancock 24306521148cSRobert Hancock unsigned int i; 24316521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 24326521148cSRobert Hancock if (cmd_descr[i].command == command) 24336521148cSRobert Hancock return cmd_descr[i].text; 24346521148cSRobert Hancock #endif 24356521148cSRobert Hancock 24366521148cSRobert Hancock return NULL; 24376521148cSRobert Hancock } 243836aae28eSAndy Shevchenko EXPORT_SYMBOL_GPL(ata_get_cmd_descript); 24396521148cSRobert Hancock 24406521148cSRobert Hancock /** 24419b1e2658STejun Heo * ata_eh_link_report - report error handling to user 24420260731fSTejun Heo * @link: ATA link EH is going on 2443c6fd2807SJeff Garzik * 2444c6fd2807SJeff Garzik * Report EH to user. 2445c6fd2807SJeff Garzik * 2446c6fd2807SJeff Garzik * LOCKING: 2447c6fd2807SJeff Garzik * None. 
2448c6fd2807SJeff Garzik */ 24499b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2450c6fd2807SJeff Garzik { 24510260731fSTejun Heo struct ata_port *ap = link->ap; 24520260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2453c6fd2807SJeff Garzik const char *frozen, *desc; 2454462098b0SLevente Kurusa char tries_buf[6] = ""; 2455c6fd2807SJeff Garzik int tag, nr_failed = 0; 2456c6fd2807SJeff Garzik 245794ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 245894ff3d54STejun Heo return; 245994ff3d54STejun Heo 2460c6fd2807SJeff Garzik desc = NULL; 2461c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2462c6fd2807SJeff Garzik desc = ehc->i.desc; 2463c6fd2807SJeff Garzik 2464c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2465c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2466c6fd2807SJeff Garzik 2467b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2468b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2469e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2470e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2471c6fd2807SJeff Garzik continue; 2472c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2473c6fd2807SJeff Garzik continue; 2474c6fd2807SJeff Garzik 2475c6fd2807SJeff Garzik nr_failed++; 2476c6fd2807SJeff Garzik } 2477c6fd2807SJeff Garzik 2478c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2479c6fd2807SJeff Garzik return; 2480c6fd2807SJeff Garzik 2481c6fd2807SJeff Garzik frozen = ""; 2482c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2483c6fd2807SJeff Garzik frozen = " frozen"; 2484c6fd2807SJeff Garzik 2485a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2486462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d", 2487a1e10f7eSTejun Heo ap->eh_tries); 2488a1e10f7eSTejun Heo 2489c6fd2807SJeff Garzik if (ehc->i.dev) { 2490a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2491a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2492a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2493a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2494c6fd2807SJeff Garzik if (desc) 2495a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2496c6fd2807SJeff Garzik } else { 2497a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2498a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2499a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2500a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2501c6fd2807SJeff Garzik if (desc) 2502a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2503c6fd2807SJeff Garzik } 2504c6fd2807SJeff Garzik 25056521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 25061333e194SRobert Hancock if (ehc->i.serror) 2507a9a79dfeSJoe Perches ata_link_err(link, 25081333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 25091333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 25101333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 25111333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 25121333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 25131333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 25141333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 25151333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? 
"PHYRdyChg " : "", 25161333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 25171333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 25181333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 25191333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 25201333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 25211333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 25221333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 25231333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 25241333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 25251333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 25266521148cSRobert Hancock #endif 25271333e194SRobert Hancock 2528c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2529c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 25308a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2531abb6a889STejun Heo char data_buf[20] = ""; 2532abb6a889STejun Heo char cdb_buf[70] = ""; 2533c6fd2807SJeff Garzik 25340260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2535b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2536c6fd2807SJeff Garzik continue; 2537c6fd2807SJeff Garzik 2538abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2539abb6a889STejun Heo static const char *dma_str[] = { 2540abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2541abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2542abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2543abb6a889STejun Heo }; 2544fb1b8b11SGeert Uytterhoeven const char *prot_str = NULL; 2545abb6a889STejun Heo 2546fb1b8b11SGeert Uytterhoeven switch (qc->tf.protocol) { 2547fb1b8b11SGeert Uytterhoeven case ATA_PROT_UNKNOWN: 2548fb1b8b11SGeert Uytterhoeven prot_str = "unknown"; 2549fb1b8b11SGeert Uytterhoeven break; 2550fb1b8b11SGeert Uytterhoeven case ATA_PROT_NODATA: 2551fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2552fb1b8b11SGeert Uytterhoeven break; 2553fb1b8b11SGeert Uytterhoeven case ATA_PROT_PIO: 2554fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2555fb1b8b11SGeert Uytterhoeven break; 2556fb1b8b11SGeert Uytterhoeven case ATA_PROT_DMA: 2557fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2558fb1b8b11SGeert Uytterhoeven break; 2559fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ: 2560fb1b8b11SGeert Uytterhoeven prot_str = "ncq dma"; 2561fb1b8b11SGeert Uytterhoeven break; 2562fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ_NODATA: 2563fb1b8b11SGeert Uytterhoeven prot_str = "ncq nodata"; 2564fb1b8b11SGeert Uytterhoeven break; 2565fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_NODATA: 2566fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2567fb1b8b11SGeert Uytterhoeven break; 2568fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_PIO: 2569fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2570fb1b8b11SGeert Uytterhoeven break; 2571fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_DMA: 2572fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2573fb1b8b11SGeert Uytterhoeven break; 2574fb1b8b11SGeert Uytterhoeven } 2575abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2576fb1b8b11SGeert Uytterhoeven prot_str, qc->nbytes, dma_str[qc->dma_dir]); 2577abb6a889STejun Heo } 2578abb6a889STejun Heo 25796521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2580a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb; 2581a13b0c9dSHannes Reinecke size_t cdb_len = 
qc->dev->cdb_len; 2582a13b0c9dSHannes Reinecke 2583cbba5b0eSHannes Reinecke if (qc->scsicmd) { 2584cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd; 2585cbba5b0eSHannes Reinecke cdb_len = qc->scsicmd->cmd_len; 2586cbba5b0eSHannes Reinecke } 2587cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf), 2588cbba5b0eSHannes Reinecke cdb, cdb_len); 25896521148cSRobert Hancock } else { 25906521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 25916521148cSRobert Hancock if (descr) 2592a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2593a9a79dfeSJoe Perches descr); 25946521148cSRobert Hancock } 2595abb6a889STejun Heo 2596a9a79dfeSJoe Perches ata_dev_err(qc->dev, 25978a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2598abb6a889STejun Heo "tag %d%s\n %s" 25998a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 26005335b729STejun Heo "Emask 0x%x (%s)%s\n", 26018a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 26028a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 26038a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 26048a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2605abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 26068a937581STejun Heo res->command, res->feature, res->nsect, 26078a937581STejun Heo res->lbal, res->lbam, res->lbah, 26088a937581STejun Heo res->hob_feature, res->hob_nsect, 26098a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 26105335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 26115335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 26121333e194SRobert Hancock 26136521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 26141333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2615e87fd28cSHannes Reinecke ATA_SENSE | ATA_ERR)) { 26161333e194SRobert Hancock if (res->command & ATA_BUSY) 2617a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 26181333e194SRobert Hancock else 2619e87fd28cSHannes Reinecke ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 26201333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 26211333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 26221333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 2623e87fd28cSHannes Reinecke res->command & ATA_SENSE ? "SENSE " : "", 26241333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 26251333e194SRobert Hancock } 26261333e194SRobert Hancock 26271333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 2628eec7e1c1SAlexey Asemov (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | 2629eec7e1c1SAlexey Asemov ATA_IDNF | ATA_ABORTED))) 2630eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", 26311333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 26321333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 2633eec7e1c1SAlexey Asemov res->feature & ATA_AMNF ? "AMNF " : "", 26341333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 26351333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 26366521148cSRobert Hancock #endif 2637c6fd2807SJeff Garzik } 2638c6fd2807SJeff Garzik } 2639c6fd2807SJeff Garzik 26409b1e2658STejun Heo /** 26419b1e2658STejun Heo * ata_eh_report - report error handling to user 26429b1e2658STejun Heo * @ap: ATA port to report EH about 26439b1e2658STejun Heo * 26449b1e2658STejun Heo * Report EH to user. 
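 * (Hedged note: this is a thin wrapper; as the body below shows, it
 * walks the port's links with ata_for_each_link(..., HOST_FIRST) and
 * calls ata_eh_link_report() on each, so the host link is reported
 * before any PMP fan-out links.)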
26459b1e2658STejun Heo * 26469b1e2658STejun Heo * LOCKING: 26479b1e2658STejun Heo * None. 26489b1e2658STejun Heo */ 2649fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 26509b1e2658STejun Heo { 26519b1e2658STejun Heo struct ata_link *link; 26529b1e2658STejun Heo 26531eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 26549b1e2658STejun Heo ata_eh_link_report(link); 26559b1e2658STejun Heo } 26569b1e2658STejun Heo 2657cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2658b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2659b1c72916STejun Heo bool clear_classes) 2660c6fd2807SJeff Garzik { 2661f58229f8STejun Heo struct ata_device *dev; 2662c6fd2807SJeff Garzik 2663b1c72916STejun Heo if (clear_classes) 26641eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2665f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2666c6fd2807SJeff Garzik 2667f046519fSTejun Heo return reset(link, classes, deadline); 2668c6fd2807SJeff Garzik } 2669c6fd2807SJeff Garzik 2670e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2671c6fd2807SJeff Garzik { 267245db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2673ae791c05STejun Heo return 0; 26745dbfc9cbSTejun Heo if (rc == -EAGAIN) 2675c6fd2807SJeff Garzik return 1; 2676071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 26773495de73STejun Heo return 1; 2678c6fd2807SJeff Garzik return 0; 2679c6fd2807SJeff Garzik } 2680c6fd2807SJeff Garzik 2681fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2682c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2683c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2684c6fd2807SJeff Garzik { 2685afaa5c37STejun Heo struct ata_port *ap = link->ap; 2686b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2687936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2688705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2689c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2690416dc9edSTejun Heo unsigned int lflags = link->flags; 2691c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2692d8af0eb6STejun Heo int max_tries = 0, try = 0; 2693b1c72916STejun Heo struct ata_link *failed_link; 2694f58229f8STejun Heo struct ata_device *dev; 2695416dc9edSTejun Heo unsigned long deadline, now; 2696c6fd2807SJeff Garzik ata_reset_fn_t reset; 2697afaa5c37STejun Heo unsigned long flags; 2698416dc9edSTejun Heo u32 sstatus; 2699b1c72916STejun Heo int nr_unknown, rc; 2700c6fd2807SJeff Garzik 2701932648b0STejun Heo /* 2702932648b0STejun Heo * Prepare to reset 2703932648b0STejun Heo */ 2704d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2705d8af0eb6STejun Heo max_tries++; 2706ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE) 2707ca6d43b0SDan Williams max_tries = 1; 270805944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 270905944bdfSTejun Heo hardreset = NULL; 271005944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 271105944bdfSTejun Heo softreset = NULL; 2712d8af0eb6STejun Heo 271325985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 271419b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 27150a2c0f56STejun Heo now = jiffies; 271619b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 271719b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 271819b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 27190a2c0f56STejun Heo if (time_before(now, deadline)) 27200a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 272119b72321STejun Heo } 27220a2c0f56STejun Heo 2723afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2724afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2725afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2726afaa5c37STejun Heo 2727cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2728c6fd2807SJeff Garzik 27291eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2730cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2731cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2732cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2733cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2734cdeab114STejun Heo * suitable controller mode we should not touch the 2735cdeab114STejun Heo * bus as we may be talking too fast. 2736cdeab114STejun Heo */ 2737cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 27385416912aSAaron Lu dev->dma_mode = 0xff; 2739cdeab114STejun Heo 2740cdeab114STejun Heo /* If the controller has a pio mode setup function 2741cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2742cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2743cdeab114STejun Heo * configuring devices. 
2744cdeab114STejun Heo */ 2745cdeab114STejun Heo if (ap->ops->set_piomode) 2746cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2747cdeab114STejun Heo } 2748cdeab114STejun Heo 2749cf480626STejun Heo /* prefer hardreset */ 2750932648b0STejun Heo reset = NULL; 2751cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2752cf480626STejun Heo if (hardreset) { 2753cf480626STejun Heo reset = hardreset; 2754a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 27554f7faa3fSTejun Heo } else if (softreset) { 2756cf480626STejun Heo reset = softreset; 2757a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2758cf480626STejun Heo } 2759c6fd2807SJeff Garzik 2760c6fd2807SJeff Garzik if (prereset) { 2761b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2762b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2763b1c72916STejun Heo 2764b1c72916STejun Heo if (slave) { 2765b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2766b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2767b1c72916STejun Heo } 2768b1c72916STejun Heo 2769b1c72916STejun Heo rc = prereset(link, deadline); 2770b1c72916STejun Heo 2771b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2772b1c72916STejun Heo * is skipped iff both master and slave links report 2773b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2774b1c72916STejun Heo */ 2775b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2776b1c72916STejun Heo int tmp; 2777b1c72916STejun Heo 2778b1c72916STejun Heo tmp = prereset(slave, deadline); 2779b1c72916STejun Heo if (tmp != -ENOENT) 2780b1c72916STejun Heo rc = tmp; 2781b1c72916STejun Heo 2782b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2783b1c72916STejun Heo } 2784b1c72916STejun Heo 2785c6fd2807SJeff Garzik if (rc) { 2786c961922bSAlan Cox if (rc == -ENOENT) { 2787a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2788cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 27894aa9ab67STejun Heo 27901eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2791f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 27924aa9ab67STejun Heo 27934aa9ab67STejun Heo rc = 0; 2794c961922bSAlan Cox } else 2795a9a79dfeSJoe Perches ata_link_err(link, 2796a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2797a9a79dfeSJoe Perches rc); 2798fccb6ea5STejun Heo goto out; 2799c6fd2807SJeff Garzik } 2800c6fd2807SJeff Garzik 2801932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2802d6515e6fSTejun Heo * bang classes, thaw and return. 2803932648b0STejun Heo */ 2804932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 28051eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2806f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2807d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2808d6515e6fSTejun Heo ata_is_host_link(link)) 2809d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2810fccb6ea5STejun Heo rc = 0; 2811fccb6ea5STejun Heo goto out; 2812c6fd2807SJeff Garzik } 2813932648b0STejun Heo } 2814c6fd2807SJeff Garzik 2815c6fd2807SJeff Garzik retry: 2816932648b0STejun Heo /* 2817932648b0STejun Heo * Perform reset 2818932648b0STejun Heo */ 2819dc98c32cSTejun Heo if (ata_is_host_link(link)) 2820dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2821dc98c32cSTejun Heo 2822341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 282331daabdaSTejun Heo 2824932648b0STejun Heo if (reset) { 2825c6fd2807SJeff Garzik if (verbose) 2826a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2827c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2828c6fd2807SJeff Garzik 2829c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 283019b72321STejun Heo ehc->last_reset = jiffies; 28310d64a233STejun Heo if (reset == hardreset) 28320d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 28330d64a233STejun Heo else 28340d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2835c6fd2807SJeff Garzik 2836b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2837b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2838b1c72916STejun Heo failed_link = link; 28395dbfc9cbSTejun Heo goto fail; 2840b1c72916STejun Heo } 2841c6fd2807SJeff Garzik 2842b1c72916STejun Heo /* hardreset slave link if existent */ 2843b1c72916STejun Heo if (slave && reset == hardreset) { 2844b1c72916STejun Heo int tmp; 2845b1c72916STejun Heo 2846b1c72916STejun Heo if (verbose) 2847a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2848b1c72916STejun Heo 2849b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2850b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2851b1c72916STejun Heo false); 2852b1c72916STejun Heo switch (tmp) { 2853b1c72916STejun Heo case -EAGAIN: 2854b1c72916STejun Heo rc = -EAGAIN; 2855b1c72916STejun Heo case 0: 2856b1c72916STejun Heo break; 2857b1c72916STejun Heo default: 2858b1c72916STejun Heo failed_link = slave; 2859b1c72916STejun Heo rc = tmp; 2860b1c72916STejun Heo goto fail; 2861b1c72916STejun Heo } 2862b1c72916STejun Heo } 2863b1c72916STejun Heo 2864b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2865c6fd2807SJeff Garzik if (reset == hardreset && 2866e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2867c6fd2807SJeff Garzik reset = softreset; 2868c6fd2807SJeff Garzik 2869c6fd2807SJeff Garzik if (!reset) { 2870a9a79dfeSJoe Perches ata_link_err(link, 2871a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2872b1c72916STejun Heo failed_link = link; 2873fccb6ea5STejun Heo rc = -EINVAL; 287408cf69d0STejun Heo goto fail; 2875c6fd2807SJeff Garzik } 2876c6fd2807SJeff Garzik 2877cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2878b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2879fe2c4d01STejun Heo if (rc) { 2880fe2c4d01STejun Heo failed_link = link; 2881fe2c4d01STejun Heo goto fail; 2882fe2c4d01STejun Heo } 2883c6fd2807SJeff Garzik } 2884932648b0STejun Heo } else { 2885932648b0STejun Heo if (verbose) 2886a9a79dfeSJoe Perches ata_link_info(link, 2887a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2888932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2889932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2890932648b0STejun Heo } 2891008a7896STejun Heo 2892932648b0STejun Heo /* 2893932648b0STejun Heo * Post-reset processing 2894932648b0STejun Heo */ 28951eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2896416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2897416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2898416dc9edSTejun Heo * drives from sleeping mode. 
2899c6fd2807SJeff Garzik */ 2900f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2901054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2902c6fd2807SJeff Garzik 29033b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 29043b761d3dSTejun Heo continue; 29053b761d3dSTejun Heo 29064ccd3329STejun Heo /* apply class override */ 2907416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2908ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2909416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2910816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2911ae791c05STejun Heo } 2912ae791c05STejun Heo 2913008a7896STejun Heo /* record current link speed */ 2914936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2915936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2916b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2917b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2918008a7896STejun Heo 2919dc98c32cSTejun Heo /* thaw the port */ 2920dc98c32cSTejun Heo if (ata_is_host_link(link)) 2921dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2922dc98c32cSTejun Heo 2923f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2924f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2925f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2926f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2927f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2928f046519fSTejun Heo * link onlineness and classification result later. 2929f046519fSTejun Heo */ 2930b1c72916STejun Heo if (postreset) { 2931cc0680a5STejun Heo postreset(link, classes); 2932b1c72916STejun Heo if (slave) 2933b1c72916STejun Heo postreset(slave, classes); 2934b1c72916STejun Heo } 2935c6fd2807SJeff Garzik 29361e641060STejun Heo /* 29378c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 29388c56caccSTejun Heo * error conditions during reset. Clear accumulated error 29398c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 29408c56caccSTejun Heo * final recovery action and we cross check link onlineness against 29418c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 29421e641060STejun Heo */ 2943f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 29441e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2945b1c72916STejun Heo if (slave) 29461e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 29471e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2948f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2949f046519fSTejun Heo 29508c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 29518c56caccSTejun Heo ata_eh_thaw_port(ap); 29528c56caccSTejun Heo 29533b761d3dSTejun Heo /* 29543b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2955f046519fSTejun Heo * Hotplug could have happened during reset and some 2956f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2957f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 29583b761d3dSTejun Heo * link on/offlineness and classification result, those 29593b761d3dSTejun Heo * conditions can be reliably detected and retried. 
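 * For example (illustrative scenario): a drive hotplugged while the
 * reset was in flight may leave the link reporting online while its
 * class is still ATA_DEV_UNKNOWN; the loop below counts such devices
 * in nr_unknown and, when @classify is set, the reset is retried
 * instead of silently dropping the device.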
2960f046519fSTejun Heo */ 2961b1c72916STejun Heo nr_unknown = 0; 29621eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 29633b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2964b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2965a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2966f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2967b1c72916STejun Heo nr_unknown++; 2968b1c72916STejun Heo } 29693b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 29703b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2971a9a79dfeSJoe Perches ata_dev_dbg(dev, 2972a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 29733b761d3dSTejun Heo classes[dev->devno]); 29743b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 29753b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2976a9a79dfeSJoe Perches ata_dev_dbg(dev, 2977a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 29783b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 29793b761d3dSTejun Heo } 2980f046519fSTejun Heo } 2981f046519fSTejun Heo 2982b1c72916STejun Heo if (classify && nr_unknown) { 2983f046519fSTejun Heo if (try < max_tries) { 2984a9a79dfeSJoe Perches ata_link_warn(link, 2985a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 29863b761d3dSTejun Heo nr_unknown); 2987b1c72916STejun Heo failed_link = link; 2988f046519fSTejun Heo rc = -EAGAIN; 2989f046519fSTejun Heo goto fail; 2990f046519fSTejun Heo } 2991a9a79dfeSJoe Perches ata_link_warn(link, 29923b761d3dSTejun Heo "link online but %d devices misclassified, " 29933b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2994f046519fSTejun Heo } 2995f046519fSTejun Heo 2996c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2997cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2998b1c72916STejun Heo if (slave) 2999b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 300019b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 3001c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 30026b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 3003416dc9edSTejun Heo 3004416dc9edSTejun Heo rc = 0; 3005fccb6ea5STejun Heo out: 3006fccb6ea5STejun Heo /* clear hotplug flag */ 3007fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 3008b1c72916STejun Heo if (slave) 3009b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 3010afaa5c37STejun Heo 3011afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 3012afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 3013afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 3014afaa5c37STejun Heo 3015c6fd2807SJeff Garzik return rc; 3016416dc9edSTejun Heo 3017416dc9edSTejun Heo fail: 30185958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 30195958e302STejun Heo if (!ata_is_host_link(link) && 30205958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 30215958e302STejun Heo rc = -ERESTART; 30225958e302STejun Heo 30237a46c078SGwendal Grignou if (try >= max_tries) { 30248ea7645cSTejun Heo /* 30258ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 30268ea7645cSTejun Heo * can be retried on the next phy event. This risks 30278ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 30288ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
30298ea7645cSTejun Heo */ 30308ea7645cSTejun Heo if (ata_is_host_link(link)) 30318ea7645cSTejun Heo ata_eh_thaw_port(ap); 3032416dc9edSTejun Heo goto out; 30338ea7645cSTejun Heo } 3034416dc9edSTejun Heo 3035416dc9edSTejun Heo now = jiffies; 3036416dc9edSTejun Heo if (time_before(now, deadline)) { 3037416dc9edSTejun Heo unsigned long delta = deadline - now; 3038416dc9edSTejun Heo 3039a9a79dfeSJoe Perches ata_link_warn(failed_link, 30400a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 30410a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 3042416dc9edSTejun Heo 3043c0c362b6STejun Heo ata_eh_release(ap); 3044416dc9edSTejun Heo while (delta) 3045416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 3046c0c362b6STejun Heo ata_eh_acquire(ap); 3047416dc9edSTejun Heo } 3048416dc9edSTejun Heo 30497a46c078SGwendal Grignou /* 30507a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 30517a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 30527a46c078SGwendal Grignou */ 30537a46c078SGwendal Grignou if (rc == -ERESTART) { 30547a46c078SGwendal Grignou if (ata_is_host_link(link)) 30557a46c078SGwendal Grignou ata_eh_thaw_port(ap); 30567a46c078SGwendal Grignou goto out; 30577a46c078SGwendal Grignou } 30587a46c078SGwendal Grignou 3059b1c72916STejun Heo if (try == max_tries - 1) { 3060a07d499bSTejun Heo sata_down_spd_limit(link, 0); 3061b1c72916STejun Heo if (slave) 3062a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 3063b1c72916STejun Heo } else if (rc == -EPIPE) 3064a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 3065b1c72916STejun Heo 3066416dc9edSTejun Heo if (hardreset) 3067416dc9edSTejun Heo reset = hardreset; 3068416dc9edSTejun Heo goto retry; 3069c6fd2807SJeff Garzik } 3070c6fd2807SJeff Garzik 307145fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 307245fabbb7SElias Oltmanns { 307345fabbb7SElias Oltmanns struct ata_link *link; 307445fabbb7SElias Oltmanns struct ata_device *dev; 307545fabbb7SElias Oltmanns unsigned long flags; 307645fabbb7SElias Oltmanns 307745fabbb7SElias Oltmanns /* 307845fabbb7SElias Oltmanns * This function can be thought of as an extended version of 307945fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 308045fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 308145fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 308245fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 308345fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 308445fabbb7SElias Oltmanns * up park requests to other devices on the same port or 308545fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 308645fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 308745fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 308845fabbb7SElias Oltmanns * 308945fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 309016735d02SWolfram Sang * through reinit_completion() (see below) or complete_all() 309145fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 309245fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 309345fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 309445fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 309545fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 309645fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 309745fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 309845fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 309945fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 310045fabbb7SElias Oltmanns * ata_eh_recover() again. 310145fabbb7SElias Oltmanns */ 310245fabbb7SElias Oltmanns 310345fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 310416735d02SWolfram Sang reinit_completion(&ap->park_req_pending); 31051eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 31061eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 310745fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 310845fabbb7SElias Oltmanns 310945fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 311045fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 311145fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 311245fabbb7SElias Oltmanns } 311345fabbb7SElias Oltmanns } 311445fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 311545fabbb7SElias Oltmanns } 311645fabbb7SElias Oltmanns 311745fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 311845fabbb7SElias Oltmanns { 311945fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 312045fabbb7SElias Oltmanns struct ata_taskfile tf; 312145fabbb7SElias Oltmanns unsigned int err_mask; 312245fabbb7SElias Oltmanns 312345fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 312445fabbb7SElias Oltmanns if (park) { 312545fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 312645fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 312745fabbb7SElias Oltmanns tf.feature = 0x44; 312845fabbb7SElias Oltmanns tf.lbal = 0x4c; 312945fabbb7SElias Oltmanns tf.lbam = 0x4e; 313045fabbb7SElias Oltmanns tf.lbah = 0x55; 313145fabbb7SElias Oltmanns } else { 313245fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 313345fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 313445fabbb7SElias Oltmanns } 313545fabbb7SElias Oltmanns 313645fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 3137bd18bc04SHannes Reinecke tf.protocol = ATA_PROT_NODATA; 313845fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 313945fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 3140a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 314145fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 314245fabbb7SElias Oltmanns } 314345fabbb7SElias Oltmanns } 314445fabbb7SElias Oltmanns 31450260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 3146c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 3147c6fd2807SJeff Garzik { 31480260731fSTejun Heo struct ata_port *ap = link->ap; 31490260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3150c6fd2807SJeff Garzik struct ata_device *dev; 31518c3c52a8STejun Heo unsigned int new_mask = 0; 3152c6fd2807SJeff Garzik unsigned long flags; 3153f58229f8STejun Heo int rc = 0; 3154c6fd2807SJeff Garzik 3155c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3156c6fd2807SJeff Garzik 31578c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY 
must 31588c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 31598c3c52a8STejun Heo * device before the master device is identified. 31608c3c52a8STejun Heo */ 31611eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 3162f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 3163f58229f8STejun Heo unsigned int readid_flags = 0; 3164c6fd2807SJeff Garzik 3165bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 3166bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 3167bff04647STejun Heo 31689666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 3169633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 3170633273a3STejun Heo 3171b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 3172c6fd2807SJeff Garzik rc = -EIO; 31738c3c52a8STejun Heo goto err; 3174c6fd2807SJeff Garzik } 3175c6fd2807SJeff Garzik 31760260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 3177422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 3178422c9daaSTejun Heo readid_flags); 3179c6fd2807SJeff Garzik if (rc) 31808c3c52a8STejun Heo goto err; 3181c6fd2807SJeff Garzik 31820260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 3183c6fd2807SJeff Garzik 3184baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 3185baa1e78aSTejun Heo * transfer mode. 3186baa1e78aSTejun Heo */ 3187baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3188baa1e78aSTejun Heo 3189c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 3190ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 3191c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 3192c6fd2807SJeff Garzik ehc->tries[dev->devno] && 3193c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3194842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3195842faa6cSTejun Heo * permanently set once all configurations are 3196842faa6cSTejun Heo * complete. This is necessary because new 3197842faa6cSTejun Heo * device configuration is done in two 3198842faa6cSTejun Heo * separate loops. 3199842faa6cSTejun Heo */ 3200c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3201c6fd2807SJeff Garzik 3202633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3203633273a3STejun Heo rc = sata_pmp_attach(dev); 3204633273a3STejun Heo else 3205633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3206633273a3STejun Heo readid_flags, dev->id); 3207842faa6cSTejun Heo 3208842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3209842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3210842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3211842faa6cSTejun Heo 32128c3c52a8STejun Heo switch (rc) { 32138c3c52a8STejun Heo case 0: 321499cf610aSTejun Heo /* clear error info accumulated during probe */ 321599cf610aSTejun Heo ata_ering_clear(&dev->ering); 3216f58229f8STejun Heo new_mask |= 1 << dev->devno; 32178c3c52a8STejun Heo break; 32188c3c52a8STejun Heo case -ENOENT: 321955a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 322055a8e2c8STejun Heo * device. No need to reset. Just 3221842faa6cSTejun Heo * thaw and ignore the device. 
322255a8e2c8STejun Heo */ 322355a8e2c8STejun Heo ata_eh_thaw_port(ap); 3224c6fd2807SJeff Garzik break; 32258c3c52a8STejun Heo default: 32268c3c52a8STejun Heo goto err; 32278c3c52a8STejun Heo } 32288c3c52a8STejun Heo } 3229c6fd2807SJeff Garzik } 3230c6fd2807SJeff Garzik 3231c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 323233267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 323333267325STejun Heo if (ap->ops->cable_detect) 3234c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 323533267325STejun Heo ata_force_cbl(ap); 323633267325STejun Heo } 3237c1c4e8d5STejun Heo 32388c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 32398c3c52a8STejun Heo * device detection messages backwards. 32408c3c52a8STejun Heo */ 32411eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 32424f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 32438c3c52a8STejun Heo continue; 32448c3c52a8STejun Heo 3245842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3246842faa6cSTejun Heo 32474f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 32484f7c2874STejun Heo continue; 32494f7c2874STejun Heo 32508c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 32518c3c52a8STejun Heo rc = ata_dev_configure(dev); 32528c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3253842faa6cSTejun Heo if (rc) { 3254842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 32558c3c52a8STejun Heo goto err; 3256842faa6cSTejun Heo } 32578c3c52a8STejun Heo 3258c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3259c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3260c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3261baa1e78aSTejun Heo 326255a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3263baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3264c6fd2807SJeff Garzik } 3265c6fd2807SJeff Garzik 32668c3c52a8STejun Heo return 0; 32678c3c52a8STejun Heo 32688c3c52a8STejun Heo err: 3269c6fd2807SJeff Garzik *r_failed_dev = dev; 32708c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3271c6fd2807SJeff Garzik return rc; 3272c6fd2807SJeff Garzik } 3273c6fd2807SJeff Garzik 32746f1d1e3aSTejun Heo /** 32756f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 32766f1d1e3aSTejun Heo * @link: link on which timings will be programmed 327798a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 32786f1d1e3aSTejun Heo * 32796f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 32806f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 32816f1d1e3aSTejun Heo * returned in @r_failed_dev. 32826f1d1e3aSTejun Heo * 32836f1d1e3aSTejun Heo * LOCKING: 32846f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
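 *
 * Hedged usage sketch (the caller shape and the 'fail' label are
 * assumptions for illustration, not lifted from this file):
 *
 *	if (ehc->i.flags & ATA_EHI_SETMODE) {
 *		rc = ata_set_mode(link, &dev);
 *		if (rc)
 *			goto fail;
 *		ehc->i.flags &= ~ATA_EHI_SETMODE;
 *	}
 *
 * On failure, @r_failed_dev (here &dev) points at the device that
 * refused the new transfer mode so EH can speed it down or disable it.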
32856f1d1e3aSTejun Heo * 32866f1d1e3aSTejun Heo * RETURNS: 32876f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 32886f1d1e3aSTejun Heo */ 32896f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 32906f1d1e3aSTejun Heo { 32916f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 329200115e0fSTejun Heo struct ata_device *dev; 329300115e0fSTejun Heo int rc; 32946f1d1e3aSTejun Heo 329576326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 32961eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 329776326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 329876326ac1STejun Heo struct ata_ering_entry *ent; 329976326ac1STejun Heo 330076326ac1STejun Heo ent = ata_ering_top(&dev->ering); 330176326ac1STejun Heo if (ent) 330276326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 330376326ac1STejun Heo } 330476326ac1STejun Heo } 330576326ac1STejun Heo 33066f1d1e3aSTejun Heo /* has private set_mode? */ 33076f1d1e3aSTejun Heo if (ap->ops->set_mode) 330800115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 330900115e0fSTejun Heo else 331000115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 331100115e0fSTejun Heo 331200115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 33131eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 331400115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 331500115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 331600115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 331700115e0fSTejun Heo 331800115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 331900115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 332000115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 332100115e0fSTejun Heo } 332200115e0fSTejun Heo 332300115e0fSTejun Heo return rc; 33246f1d1e3aSTejun Heo } 33256f1d1e3aSTejun Heo 332611fc33daSTejun Heo /** 332711fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 332811fc33daSTejun Heo * @dev: ATAPI device to clear UA for 332911fc33daSTejun Heo * 333011fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 333111fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 333211fc33daSTejun Heo * function clears UA. 333311fc33daSTejun Heo * 333411fc33daSTejun Heo * LOCKING: 333511fc33daSTejun Heo * EH context (may sleep). 333611fc33daSTejun Heo * 333711fc33daSTejun Heo * RETURNS: 333811fc33daSTejun Heo * 0 on success, -errno on failure. 
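 *
 * (Hedged summary of the loop below: up to ATA_EH_UA_TRIES times it
 * issues TEST UNIT READY via atapi_eh_tur(); if that fails with a
 * UNIT ATTENTION sense key it issues REQUEST SENSE via
 * atapi_eh_request_sense() to consume the condition and tries again.)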
333911fc33daSTejun Heo */ 334011fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 334111fc33daSTejun Heo { 334211fc33daSTejun Heo int i; 334311fc33daSTejun Heo 334411fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3345b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 334611fc33daSTejun Heo u8 sense_key = 0; 334711fc33daSTejun Heo unsigned int err_mask; 334811fc33daSTejun Heo 334911fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 335011fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3351a9a79dfeSJoe Perches ata_dev_warn(dev, 3352a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3353a9a79dfeSJoe Perches err_mask); 335411fc33daSTejun Heo return -EIO; 335511fc33daSTejun Heo } 335611fc33daSTejun Heo 335711fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 335811fc33daSTejun Heo return 0; 335911fc33daSTejun Heo 336011fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 336111fc33daSTejun Heo if (err_mask) { 3362a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 336311fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 336411fc33daSTejun Heo return -EIO; 336511fc33daSTejun Heo } 336611fc33daSTejun Heo } 336711fc33daSTejun Heo 3368a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3369a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 337011fc33daSTejun Heo 337111fc33daSTejun Heo return 0; 337211fc33daSTejun Heo } 337311fc33daSTejun Heo 33746013efd8STejun Heo /** 33756013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 33766013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 33776013efd8STejun Heo * 33786013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 33796013efd8STejun Heo * immediately as it means that @dev failed to remap and already 33806013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 33816013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 33826013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 33836013efd8STejun Heo * to be retried. 33846013efd8STejun Heo * 33856013efd8STejun Heo * This function determines whether FLUSH failure retry is 33866013efd8STejun Heo * necessary and performs it if so. 33876013efd8STejun Heo * 33886013efd8STejun Heo * RETURNS: 33896013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 33906013efd8STejun Heo */ 33916013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 33926013efd8STejun Heo { 33936013efd8STejun Heo struct ata_link *link = dev->link; 33946013efd8STejun Heo struct ata_port *ap = link->ap; 33956013efd8STejun Heo struct ata_queued_cmd *qc; 33966013efd8STejun Heo struct ata_taskfile tf; 33976013efd8STejun Heo unsigned int err_mask; 33986013efd8STejun Heo int rc = 0; 33996013efd8STejun Heo 34006013efd8STejun Heo /* did flush fail for this device? 
*/ 34016013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 34026013efd8STejun Heo return 0; 34036013efd8STejun Heo 34046013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 34056013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 34066013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 34076013efd8STejun Heo return 0; 34086013efd8STejun Heo 34096013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 34106013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 34116013efd8STejun Heo return 0; 34126013efd8STejun Heo 34136013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 34146013efd8STejun Heo ata_tf_init(dev, &tf); 34156013efd8STejun Heo 34166013efd8STejun Heo tf.command = qc->tf.command; 34176013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 34186013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 34196013efd8STejun Heo 3420a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 34216013efd8STejun Heo tf.command, qc->err_mask); 34226013efd8STejun Heo 34236013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 34246013efd8STejun Heo if (!err_mask) { 34256013efd8STejun Heo /* 34266013efd8STejun Heo * FLUSH is complete but there's no way to 34276013efd8STejun Heo * successfully complete a failed command from EH. 34286013efd8STejun Heo * Making sure retry is allowed at least once and 34296013efd8STejun Heo * retrying it should do the trick - whatever was in 34306013efd8STejun Heo * the cache is already on the platter and this won't 34316013efd8STejun Heo * cause infinite loop. 34326013efd8STejun Heo */ 34336013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 34346013efd8STejun Heo } else { 3435a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 34366013efd8STejun Heo err_mask); 34376013efd8STejun Heo rc = -EIO; 34386013efd8STejun Heo 34396013efd8STejun Heo /* if device failed it, report it to upper layers */ 34406013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 34416013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 34426013efd8STejun Heo qc->result_tf = tf; 34436013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 34446013efd8STejun Heo rc = 0; 34456013efd8STejun Heo } 34466013efd8STejun Heo } 34476013efd8STejun Heo return rc; 34486013efd8STejun Heo } 34496013efd8STejun Heo 34506b7ae954STejun Heo /** 34516b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 34526b7ae954STejun Heo * @link: link to configure power management 34536b7ae954STejun Heo * @policy: the link power management policy 34546b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 34556b7ae954STejun Heo * 34566b7ae954STejun Heo * Enable SATA Interface power management. This will enable 3457f4ac6476SHans de Goede * Device Interface Power Management (DIPM) for min_power and 3458f4ac6476SHans de Goede * medium_power_with_dipm policies, and then call driver specific 3459f4ac6476SHans de Goede * callbacks for enabling Host Initiated Power management. 34606b7ae954STejun Heo * 34616b7ae954STejun Heo * LOCKING: 34626b7ae954STejun Heo * EH context. 34636b7ae954STejun Heo * 34646b7ae954STejun Heo * RETURNS: 34656b7ae954STejun Heo * 0 on success, -errno on failure. 
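 *
 * (Hedged note on ordering, mirroring the body below: DIPM is disabled
 * on all devices before the link is moved to a lower-power policy and
 * only re-enabled once the controller has accepted the new policy, so
 * the host never NACKs a device-initiated transition to SLUMBER; on
 * any failure the previous lpm_policy is restored before bailing out.)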
34666b7ae954STejun Heo */ 34676b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 34686b7ae954STejun Heo struct ata_device **r_failed_dev) 34696b7ae954STejun Heo { 34706c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 34716b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 34726b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3473e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 34745f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 34756b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 34766b7ae954STejun Heo unsigned int err_mask; 34776b7ae954STejun Heo int rc; 34786b7ae954STejun Heo 34796b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 34806b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 34816b7ae954STejun Heo return 0; 34826b7ae954STejun Heo 34836b7ae954STejun Heo /* 34846b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 34856b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 34866b7ae954STejun Heo * device and link configurations such that the host always 34876b7ae954STejun Heo * allows DIPM requests. 34886b7ae954STejun Heo */ 34896b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 34906b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3491ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 34926b7ae954STejun Heo 34936b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 34946b7ae954STejun Heo if (!link_dev) 34956b7ae954STejun Heo link_dev = dev; 34966b7ae954STejun Heo 34976b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 34986b7ae954STejun Heo lpm_dev = dev; 34996b7ae954STejun Heo 35006b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 35016b7ae954STejun Heo if (!hipm) 35026b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 35036b7ae954STejun Heo 35046b7ae954STejun Heo /* disable DIPM before changing link config */ 3505f4ac6476SHans de Goede if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) { 35066b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 35076b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 35086b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3509a9a79dfeSJoe Perches ata_dev_warn(dev, 35106b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 35116b7ae954STejun Heo err_mask); 35126b7ae954STejun Heo rc = -EIO; 35136b7ae954STejun Heo goto fail; 35146b7ae954STejun Heo } 35156b7ae954STejun Heo } 35166b7ae954STejun Heo } 35176b7ae954STejun Heo 35186c8ea89cSTejun Heo if (ap) { 35196b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 35206b7ae954STejun Heo if (!rc && ap->slave_link) 35216b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 35226c8ea89cSTejun Heo } else 35236c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 35246b7ae954STejun Heo 35256b7ae954STejun Heo /* 35266b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 35276b7ae954STejun Heo * device on the link. 35286b7ae954STejun Heo */ 35296b7ae954STejun Heo if (rc) { 35306b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 35316b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 35326b7ae954STejun Heo return 0; 35336b7ae954STejun Heo } 35346b7ae954STejun Heo dev = lpm_dev ? 
lpm_dev : link_dev; 35356b7ae954STejun Heo goto fail; 35366b7ae954STejun Heo } 35376b7ae954STejun Heo 3538e5005b15STejun Heo /* 3539e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3540e5005b15STejun Heo * with the new policy set. 3541e5005b15STejun Heo */ 3542e5005b15STejun Heo link->lpm_policy = policy; 3543e5005b15STejun Heo if (ap && ap->slave_link) 3544e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3545e5005b15STejun Heo 35466b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 35476b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3548f4ac6476SHans de Goede if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm && 3549ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 35506b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 35516b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 35526b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3553a9a79dfeSJoe Perches ata_dev_warn(dev, 35546b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 35556b7ae954STejun Heo err_mask); 35566b7ae954STejun Heo rc = -EIO; 35576b7ae954STejun Heo goto fail; 35586b7ae954STejun Heo } 35596b7ae954STejun Heo } 35606b7ae954STejun Heo } 35616b7ae954STejun Heo 356209c5b480SGabriele Mazzotta link->last_lpm_change = jiffies; 356309c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED; 356409c5b480SGabriele Mazzotta 35656b7ae954STejun Heo return 0; 35666b7ae954STejun Heo 35676b7ae954STejun Heo fail: 3568e5005b15STejun Heo /* restore the old policy */ 3569e5005b15STejun Heo link->lpm_policy = old_policy; 3570e5005b15STejun Heo if (ap && ap->slave_link) 3571e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3572e5005b15STejun Heo 35736b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 35746b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3575a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 35766b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 35776b7ae954STejun Heo } 35786b7ae954STejun Heo if (r_failed_dev) 35796b7ae954STejun Heo *r_failed_dev = dev; 35806b7ae954STejun Heo return rc; 35816b7ae954STejun Heo } 35826b7ae954STejun Heo 35838a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3584c6fd2807SJeff Garzik { 3585f58229f8STejun Heo struct ata_device *dev; 3586f58229f8STejun Heo int cnt = 0; 3587c6fd2807SJeff Garzik 35881eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3589c6fd2807SJeff Garzik cnt++; 3590c6fd2807SJeff Garzik return cnt; 3591c6fd2807SJeff Garzik } 3592c6fd2807SJeff Garzik 35930260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3594c6fd2807SJeff Garzik { 3595f58229f8STejun Heo struct ata_device *dev; 3596f58229f8STejun Heo int cnt = 0; 3597c6fd2807SJeff Garzik 35981eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3599f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3600c6fd2807SJeff Garzik cnt++; 3601c6fd2807SJeff Garzik return cnt; 3602c6fd2807SJeff Garzik } 3603c6fd2807SJeff Garzik 36040260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3605c6fd2807SJeff Garzik { 3606672b2d65STejun Heo struct ata_port *ap = link->ap; 36070260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3608f58229f8STejun Heo struct ata_device *dev; 3609c6fd2807SJeff Garzik 3610f9df58cbSTejun Heo /* skip disabled links */ 3611f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3612f9df58cbSTejun Heo return 1; 3613f9df58cbSTejun Heo 3614e2f3d75fSTejun Heo /* 
skip if explicitly requested */
3615e2f3d75fSTejun Heo 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3616e2f3d75fSTejun Heo 		return 1;
3617e2f3d75fSTejun Heo 
3618672b2d65STejun Heo 	/* thaw frozen port and recover failed devices */
3619672b2d65STejun Heo 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3620672b2d65STejun Heo 		return 0;
3621672b2d65STejun Heo 
3622672b2d65STejun Heo 	/* reset at least once if reset is requested */
3623672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3624672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3625c6fd2807SJeff Garzik 		return 0;
3626c6fd2807SJeff Garzik 
3627c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
36281eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3629c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3630c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3631c6fd2807SJeff Garzik 			return 0;
3632c6fd2807SJeff Garzik 	}
3633c6fd2807SJeff Garzik 
3634c6fd2807SJeff Garzik 	return 1;
3635c6fd2807SJeff Garzik }
3636c6fd2807SJeff Garzik 
3637c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3638c2c7a89cSTejun Heo {
3639c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3640c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3641c2c7a89cSTejun Heo 	int *trials = void_arg;
3642c2c7a89cSTejun Heo 
36436868225eSLin Ming 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
36446868225eSLin Ming 	    (ent->timestamp < now - min(now, interval)))
3645c2c7a89cSTejun Heo 		return -1;
3646c2c7a89cSTejun Heo 
3647c2c7a89cSTejun Heo 	(*trials)++;
3648c2c7a89cSTejun Heo 	return 0;
3649c2c7a89cSTejun Heo }
3650c2c7a89cSTejun Heo 
365102c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
365202c05a27STejun Heo {
365302c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3654c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3655c2c7a89cSTejun Heo 	int trials = 0;
365602c05a27STejun Heo 
365702c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
365802c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
365902c05a27STejun Heo 		return 0;
366002c05a27STejun Heo 
366102c05a27STejun Heo 	ata_eh_detach_dev(dev);
366202c05a27STejun Heo 	ata_dev_init(dev);
366302c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3664cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
366500115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
366600115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
366702c05a27STejun Heo 
36686b7ae954STejun Heo 	/* the link may be in a deep sleep, wake it up */
36696c8ea89cSTejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
36706c8ea89cSTejun Heo 		if (ata_is_host_link(link))
36716c8ea89cSTejun Heo 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
36726c8ea89cSTejun Heo 					       ATA_LPM_EMPTY);
36736c8ea89cSTejun Heo 		else
36746c8ea89cSTejun Heo 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
36756c8ea89cSTejun Heo 					 ATA_LPM_EMPTY);
36766c8ea89cSTejun Heo 	}
36776b7ae954STejun Heo 
3678c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering. The specific
3679c2c7a89cSTejun Heo 	 * error mask used is irrelevant. Because a successful device
3680c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3681c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3682c2c7a89cSTejun Heo * 3683c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3684c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3685c2c7a89cSTejun Heo * forced to 1.5Gbps. 3686c2c7a89cSTejun Heo * 3687c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3688c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3689c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3690c2c7a89cSTejun Heo */ 3691c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3692c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3693c2c7a89cSTejun Heo 3694c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3695c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3696c2c7a89cSTejun Heo 369702c05a27STejun Heo return 1; 369802c05a27STejun Heo } 369902c05a27STejun Heo 37009b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3701fee7ca72STejun Heo { 37029af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3703fee7ca72STejun Heo 3704cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3705cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3706cf9a590aSTejun Heo */ 3707cf9a590aSTejun Heo if (err != -EAGAIN) 3708fee7ca72STejun Heo ehc->tries[dev->devno]--; 3709fee7ca72STejun Heo 3710fee7ca72STejun Heo switch (err) { 3711fee7ca72STejun Heo case -ENODEV: 3712fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3713fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 371405b83605SGustavo A. R. Silva /* fall through */ 3715fee7ca72STejun Heo case -EINVAL: 3716fee7ca72STejun Heo /* give it just one more chance */ 3717fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 371805b83605SGustavo A. R. Silva /* fall through */ 3719fee7ca72STejun Heo case -EIO: 3720d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3721fee7ca72STejun Heo /* This is the last chance, better to slow 3722fee7ca72STejun Heo * down than lose it. 
3723fee7ca72STejun Heo 			 */
3724a07d499bSTejun Heo 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3725d89293abSTejun Heo 			if (dev->pio_mode > XFER_PIO_0)
3726fee7ca72STejun Heo 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3727fee7ca72STejun Heo 		}
3728fee7ca72STejun Heo 	}
3729fee7ca72STejun Heo 
3730fee7ca72STejun Heo 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3731fee7ca72STejun Heo 		/* disable device if it has used up all its chances */
3732fee7ca72STejun Heo 		ata_dev_disable(dev);
3733fee7ca72STejun Heo 
3734fee7ca72STejun Heo 		/* detach if offline */
3735b1c72916STejun Heo 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3736fee7ca72STejun Heo 			ata_eh_detach_dev(dev);
3737fee7ca72STejun Heo 
373802c05a27STejun Heo 		/* schedule probe if necessary */
373987fbc5a0STejun Heo 		if (ata_eh_schedule_probe(dev)) {
3740fee7ca72STejun Heo 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
374187fbc5a0STejun Heo 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
374287fbc5a0STejun Heo 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
374387fbc5a0STejun Heo 		}
37449b1e2658STejun Heo 
37459b1e2658STejun Heo 		return 1;
3746fee7ca72STejun Heo 	} else {
3747cf480626STejun Heo 		ehc->i.action |= ATA_EH_RESET;
37489b1e2658STejun Heo 		return 0;
3749fee7ca72STejun Heo 	}
3750fee7ca72STejun Heo }
3751fee7ca72STejun Heo 
3752c6fd2807SJeff Garzik /**
3753c6fd2807SJeff Garzik  * ata_eh_recover - recover host port after error
3754c6fd2807SJeff Garzik  * @ap: host port to recover
3755c6fd2807SJeff Garzik  * @prereset: prereset method (can be NULL)
3756c6fd2807SJeff Garzik  * @softreset: softreset method (can be NULL)
3757c6fd2807SJeff Garzik  * @hardreset: hardreset method (can be NULL)
3758c6fd2807SJeff Garzik  * @postreset: postreset method (can be NULL)
37599b1e2658STejun Heo  * @r_failed_link: out parameter for failed link
3760c6fd2807SJeff Garzik  *
3761c6fd2807SJeff Garzik  * This is the alpha and omega, yin and yang, heart and soul of
3762c6fd2807SJeff Garzik  * libata exception handling. On entry, actions required to
37639b1e2658STejun Heo  * recover each link and hotplug requests are recorded in the
37649b1e2658STejun Heo  * link's eh_context. This function executes all the operations
37659b1e2658STejun Heo  * with appropriate retries and fallbacks to resurrect failed
3766c6fd2807SJeff Garzik  * devices, detach goners and greet newcomers.
3767c6fd2807SJeff Garzik  *
3768c6fd2807SJeff Garzik  * LOCKING:
3769c6fd2807SJeff Garzik  * Kernel thread context (may sleep).
3770c6fd2807SJeff Garzik  *
3771c6fd2807SJeff Garzik  * RETURNS:
3772c6fd2807SJeff Garzik  * 0 on success, -errno on failure.
3773c6fd2807SJeff Garzik  */
3774fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3775c6fd2807SJeff Garzik 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
37769b1e2658STejun Heo 		   ata_postreset_fn_t postreset,
37779b1e2658STejun Heo 		   struct ata_link **r_failed_link)
3778c6fd2807SJeff Garzik {
37799b1e2658STejun Heo 	struct ata_link *link;
3780c6fd2807SJeff Garzik 	struct ata_device *dev;
37816b7ae954STejun Heo 	int rc, nr_fails;
378245fabbb7SElias Oltmanns 	unsigned long flags, deadline;
3783c6fd2807SJeff Garzik 
3784c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3785c6fd2807SJeff Garzik 
3786c6fd2807SJeff Garzik 	/* prep for recovery */
37871eca4365STejun Heo 	ata_for_each_link(link, ap, EDGE) {
37889b1e2658STejun Heo 		struct ata_eh_context *ehc = &link->eh_context;
37899b1e2658STejun Heo 
3790f9df58cbSTejun Heo 		/* re-enable link?
*/ 3791f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3792f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3793f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3794f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3795f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3796f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3797f9df58cbSTejun Heo } 3798f9df58cbSTejun Heo 37991eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3800fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3801fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3802fd995f70STejun Heo else 3803c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3804c6fd2807SJeff Garzik 380579a55b72STejun Heo /* collect port action mask recorded in dev actions */ 38069b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 38079b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3808f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 380979a55b72STejun Heo 3810c6fd2807SJeff Garzik /* process hotplug request */ 3811c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3812c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3813c6fd2807SJeff Garzik 381402c05a27STejun Heo /* schedule probe if necessary */ 381502c05a27STejun Heo if (!ata_dev_enabled(dev)) 381602c05a27STejun Heo ata_eh_schedule_probe(dev); 3817c6fd2807SJeff Garzik } 38189b1e2658STejun Heo } 3819c6fd2807SJeff Garzik 3820c6fd2807SJeff Garzik retry: 3821c6fd2807SJeff Garzik rc = 0; 3822c6fd2807SJeff Garzik 3823c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3824c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3825c6fd2807SJeff Garzik goto out; 3826c6fd2807SJeff Garzik 38279b1e2658STejun Heo /* prep for EH */ 38281eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38299b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 38309b1e2658STejun Heo 3831c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 38320260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3833c6fd2807SJeff Garzik ehc->i.action = 0; 3834c6fd2807SJeff Garzik 38351eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3836f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 38379b1e2658STejun Heo } 3838c6fd2807SJeff Garzik 3839c6fd2807SJeff Garzik /* reset */ 38401eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38419b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 38429b1e2658STejun Heo 3843cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 38449b1e2658STejun Heo continue; 38459b1e2658STejun Heo 38469b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3847dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3848c6fd2807SJeff Garzik if (rc) { 3849a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3850c6fd2807SJeff Garzik goto out; 3851c6fd2807SJeff Garzik } 38529b1e2658STejun Heo } 3853c6fd2807SJeff Garzik 385445fabbb7SElias Oltmanns do { 385545fabbb7SElias Oltmanns unsigned long now; 385645fabbb7SElias Oltmanns 385745fabbb7SElias Oltmanns /* 385845fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 385945fabbb7SElias Oltmanns * ap->park_req_pending 386045fabbb7SElias Oltmanns */ 386145fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 386245fabbb7SElias Oltmanns 386345fabbb7SElias Oltmanns deadline = jiffies; 38641eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38651eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 386645fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 386745fabbb7SElias Oltmanns unsigned long tmp; 386845fabbb7SElias Oltmanns 38699162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 38709162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 387145fabbb7SElias Oltmanns continue; 387245fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 387345fabbb7SElias Oltmanns ATA_EH_PARK)) 387445fabbb7SElias Oltmanns continue; 387545fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 387645fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 387745fabbb7SElias Oltmanns deadline = tmp; 387845fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 387945fabbb7SElias Oltmanns continue; 388045fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 388145fabbb7SElias Oltmanns continue; 388245fabbb7SElias Oltmanns 388345fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 388445fabbb7SElias Oltmanns } 388545fabbb7SElias Oltmanns } 388645fabbb7SElias Oltmanns 388745fabbb7SElias Oltmanns now = jiffies; 388845fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 388945fabbb7SElias Oltmanns break; 389045fabbb7SElias Oltmanns 3891c0c362b6STejun Heo ata_eh_release(ap); 389245fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 389345fabbb7SElias Oltmanns deadline - now); 3894c0c362b6STejun Heo ata_eh_acquire(ap); 389545fabbb7SElias Oltmanns } while (deadline); 38961eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38971eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 389845fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 389945fabbb7SElias Oltmanns (1 << dev->devno))) 390045fabbb7SElias Oltmanns continue; 390145fabbb7SElias Oltmanns 390245fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 390345fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 390445fabbb7SElias Oltmanns } 390545fabbb7SElias Oltmanns } 390645fabbb7SElias Oltmanns 39079b1e2658STejun Heo /* the rest */ 39086b7ae954STejun Heo nr_fails = 0; 39096b7ae954STejun 
Heo ata_for_each_link(link, ap, PMP_FIRST) { 39109b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 39119b1e2658STejun Heo 39126b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 39136b7ae954STejun Heo goto config_lpm; 39146b7ae954STejun Heo 3915c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 39160260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3917c6fd2807SJeff Garzik if (rc) 39186b7ae954STejun Heo goto rest_fail; 3919c6fd2807SJeff Garzik 3920633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3921633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3922633273a3STejun Heo ehc->i.action = 0; 3923633273a3STejun Heo return 0; 3924633273a3STejun Heo } 3925633273a3STejun Heo 3926baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3927baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 39280260731fSTejun Heo rc = ata_set_mode(link, &dev); 39294ae72a1eSTejun Heo if (rc) 39306b7ae954STejun Heo goto rest_fail; 3931baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3932c6fd2807SJeff Garzik } 3933c6fd2807SJeff Garzik 393411fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 393511fc33daSTejun Heo * disrupting the current users of the device. 393611fc33daSTejun Heo */ 393711fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 39381eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 393911fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 394011fc33daSTejun Heo continue; 394111fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 394211fc33daSTejun Heo if (rc) 39436b7ae954STejun Heo goto rest_fail; 394421334205SAaron Lu if (zpodd_dev_enabled(dev)) 394521334205SAaron Lu zpodd_post_poweron(dev); 394611fc33daSTejun Heo } 394711fc33daSTejun Heo } 394811fc33daSTejun Heo 39496013efd8STejun Heo /* retry flush if necessary */ 39506013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 39519162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 39529162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 39536013efd8STejun Heo continue; 39546013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 39556013efd8STejun Heo if (rc) 39566b7ae954STejun Heo goto rest_fail; 39576013efd8STejun Heo } 39586013efd8STejun Heo 39596b7ae954STejun Heo config_lpm: 396011fc33daSTejun Heo /* configure link power saving */ 39616b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 39626b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 39636b7ae954STejun Heo if (rc) 39646b7ae954STejun Heo goto rest_fail; 39656b7ae954STejun Heo } 3966ca77329fSKristen Carlson Accardi 39679b1e2658STejun Heo /* this link is okay now */ 39689b1e2658STejun Heo ehc->i.flags = 0; 39699b1e2658STejun Heo continue; 3970c6fd2807SJeff Garzik 39716b7ae954STejun Heo rest_fail: 39726b7ae954STejun Heo nr_fails++; 39736b7ae954STejun Heo if (dev) 39740a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3975c6fd2807SJeff Garzik 3976b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3977b06ce3e5STejun Heo /* PMP reset requires working host port. 3978b06ce3e5STejun Heo * Can't retry if it's frozen. 
3979b06ce3e5STejun Heo */ 3980071f44b1STejun Heo if (sata_pmp_attached(ap)) 3981b06ce3e5STejun Heo goto out; 39829b1e2658STejun Heo break; 39839b1e2658STejun Heo } 3984b06ce3e5STejun Heo } 39859b1e2658STejun Heo 39866b7ae954STejun Heo if (nr_fails) 3987c6fd2807SJeff Garzik goto retry; 3988c6fd2807SJeff Garzik 3989c6fd2807SJeff Garzik out: 39909b1e2658STejun Heo if (rc && r_failed_link) 39919b1e2658STejun Heo *r_failed_link = link; 3992c6fd2807SJeff Garzik 3993c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3994c6fd2807SJeff Garzik return rc; 3995c6fd2807SJeff Garzik } 3996c6fd2807SJeff Garzik 3997c6fd2807SJeff Garzik /** 3998c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3999c6fd2807SJeff Garzik * @ap: host port to finish EH for 4000c6fd2807SJeff Garzik * 4001c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 4002c6fd2807SJeff Garzik * failed qcs. 4003c6fd2807SJeff Garzik * 4004c6fd2807SJeff Garzik * LOCKING: 4005c6fd2807SJeff Garzik * None. 4006c6fd2807SJeff Garzik */ 4007fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 4008c6fd2807SJeff Garzik { 4009c6fd2807SJeff Garzik int tag; 4010c6fd2807SJeff Garzik 4011c6fd2807SJeff Garzik /* retry or finish qcs */ 4012c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 4013c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 4014c6fd2807SJeff Garzik 4015c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 4016c6fd2807SJeff Garzik continue; 4017c6fd2807SJeff Garzik 4018c6fd2807SJeff Garzik if (qc->err_mask) { 4019c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 4020c6fd2807SJeff Garzik * generate sense data in this function, 4021c6fd2807SJeff Garzik * considering both err_mask and tf. 4022c6fd2807SJeff Garzik */ 402303faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 4024c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 402503faab78STejun Heo else 402603faab78STejun Heo ata_eh_qc_complete(qc); 4027c6fd2807SJeff Garzik } else { 4028c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 4029c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 4030c6fd2807SJeff Garzik } else { 4031c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 4032c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 4033c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 4034c6fd2807SJeff Garzik } 4035c6fd2807SJeff Garzik } 4036c6fd2807SJeff Garzik } 4037da917d69STejun Heo 4038da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 4039da917d69STejun Heo WARN_ON(ap->nr_active_links); 4040da917d69STejun Heo ap->nr_active_links = 0; 4041c6fd2807SJeff Garzik } 4042c6fd2807SJeff Garzik 4043c6fd2807SJeff Garzik /** 4044c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 4045c6fd2807SJeff Garzik * @ap: host port to handle error for 4046a1efdabaSTejun Heo * 4047c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 4048c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 4049c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 4050c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 4051c6fd2807SJeff Garzik * 4052c6fd2807SJeff Garzik * Perform standard error handling sequence. 4053c6fd2807SJeff Garzik * 4054c6fd2807SJeff Garzik * LOCKING: 4055c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
4056c6fd2807SJeff Garzik */ 4057c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 4058c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 4059c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 4060c6fd2807SJeff Garzik { 40619b1e2658STejun Heo struct ata_device *dev; 40629b1e2658STejun Heo int rc; 40639b1e2658STejun Heo 40649b1e2658STejun Heo ata_eh_autopsy(ap); 40659b1e2658STejun Heo ata_eh_report(ap); 40669b1e2658STejun Heo 40679b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 40689b1e2658STejun Heo NULL); 40699b1e2658STejun Heo if (rc) { 40701eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 40719b1e2658STejun Heo ata_dev_disable(dev); 40729b1e2658STejun Heo } 40739b1e2658STejun Heo 4074c6fd2807SJeff Garzik ata_eh_finish(ap); 4075c6fd2807SJeff Garzik } 4076c6fd2807SJeff Garzik 4077a1efdabaSTejun Heo /** 4078a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 4079a1efdabaSTejun Heo * @ap: host port to handle error for 4080a1efdabaSTejun Heo * 4081a1efdabaSTejun Heo * Standard error handler 4082a1efdabaSTejun Heo * 4083a1efdabaSTejun Heo * LOCKING: 4084a1efdabaSTejun Heo * Kernel thread context (may sleep). 4085a1efdabaSTejun Heo */ 4086a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 4087a1efdabaSTejun Heo { 4088a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 4089a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 4090a1efdabaSTejun Heo 409157c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 4092fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 4093a1efdabaSTejun Heo hardreset = NULL; 4094a1efdabaSTejun Heo 4095a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 4096a1efdabaSTejun Heo } 4097a1efdabaSTejun Heo 40986ffa01d8STejun Heo #ifdef CONFIG_PM 4099c6fd2807SJeff Garzik /** 4100c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 4101c6fd2807SJeff Garzik * @ap: port to suspend 4102c6fd2807SJeff Garzik * 4103c6fd2807SJeff Garzik * Suspend @ap. 4104c6fd2807SJeff Garzik * 4105c6fd2807SJeff Garzik * LOCKING: 4106c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4107c6fd2807SJeff Garzik */ 4108c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 4109c6fd2807SJeff Garzik { 4110c6fd2807SJeff Garzik unsigned long flags; 4111c6fd2807SJeff Garzik int rc = 0; 41123dc67440SAaron Lu struct ata_device *dev; 4113c6fd2807SJeff Garzik 4114c6fd2807SJeff Garzik /* are we suspending? */ 4115c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4116c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4117a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) { 4118c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4119c6fd2807SJeff Garzik return; 4120c6fd2807SJeff Garzik } 4121c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4122c6fd2807SJeff Garzik 4123c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 4124c6fd2807SJeff Garzik 41253dc67440SAaron Lu /* 41263dc67440SAaron Lu * If we have a ZPODD attached, check its zero 41273dc67440SAaron Lu * power ready status before the port is frozen. 4128a7ff60dbSAaron Lu * Only needed for runtime suspend. 
41293dc67440SAaron Lu 	 */
4130a7ff60dbSAaron Lu 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
41313dc67440SAaron Lu 		ata_for_each_dev(dev, &ap->link, ENABLED) {
41323dc67440SAaron Lu 			if (zpodd_dev_enabled(dev))
41333dc67440SAaron Lu 				zpodd_on_suspend(dev);
41343dc67440SAaron Lu 		}
4135a7ff60dbSAaron Lu 	}
41363dc67440SAaron Lu 
413764578a3dSTejun Heo 	/* tell ACPI we're suspending */
413864578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
413964578a3dSTejun Heo 	if (rc)
414064578a3dSTejun Heo 		goto out;
414164578a3dSTejun Heo 
4142c6fd2807SJeff Garzik 	/* suspend */
4143c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
4144c6fd2807SJeff Garzik 
4145c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
4146c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4147c6fd2807SJeff Garzik 
4148a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
414964578a3dSTejun Heo  out:
4150bc6e7c4bSDan Williams 	/* update the flags */
4151c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4152c6fd2807SJeff Garzik 
4153c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4154c6fd2807SJeff Garzik 	if (rc == 0)
4155c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
415664578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4157c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
4158c6fd2807SJeff Garzik 
4159c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4160c6fd2807SJeff Garzik 
4161c6fd2807SJeff Garzik 	return;
4162c6fd2807SJeff Garzik }
4163c6fd2807SJeff Garzik 
4164c6fd2807SJeff Garzik /**
4165c6fd2807SJeff Garzik  * ata_eh_handle_port_resume - perform port resume operation
4166c6fd2807SJeff Garzik  * @ap: port to resume
4167c6fd2807SJeff Garzik  *
4168c6fd2807SJeff Garzik  * Resume @ap.
4169c6fd2807SJeff Garzik  *
4170c6fd2807SJeff Garzik  * LOCKING:
4171c6fd2807SJeff Garzik  * Kernel thread context (may sleep).
4172c6fd2807SJeff Garzik  */
4173c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
4174c6fd2807SJeff Garzik {
41756f9c1ea2STejun Heo 	struct ata_link *link;
41766f9c1ea2STejun Heo 	struct ata_device *dev;
4177c6fd2807SJeff Garzik 	unsigned long flags;
4178c6fd2807SJeff Garzik 
4179c6fd2807SJeff Garzik 	/* are we resuming? */
4180c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4181c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4182a7ff60dbSAaron Lu 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4183c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4184c6fd2807SJeff Garzik 		return;
4185c6fd2807SJeff Garzik 	}
4186c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4187c6fd2807SJeff Garzik 
41889666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4189c6fd2807SJeff Garzik 
41906f9c1ea2STejun Heo 	/*
41916f9c1ea2STejun Heo 	 * Error timestamps are in jiffies which doesn't run while
41926f9c1ea2STejun Heo 	 * suspended, and PHY events during resume aren't too uncommon.
41936f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
41946f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
41956f9c1ea2STejun Heo 	 * Clear error history.
41966f9c1ea2STejun Heo */ 41976f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 41986f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 41996f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 42006f9c1ea2STejun Heo 4201a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 4202bd3adca5SShaohua Li 4203c6fd2807SJeff Garzik if (ap->ops->port_resume) 4204ae867937SKefeng Wang ap->ops->port_resume(ap); 4205c6fd2807SJeff Garzik 42066746544cSTejun Heo /* tell ACPI that we're resuming */ 42076746544cSTejun Heo ata_acpi_on_resume(ap); 42086746544cSTejun Heo 4209bc6e7c4bSDan Williams /* update the flags */ 4210c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4211c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 4212c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4213c6fd2807SJeff Garzik } 42146ffa01d8STejun Heo #endif /* CONFIG_PM */ 4215
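
Editorial note, not part of the annotated file above: the EH entry points in this
file (ata_do_eh(), ata_std_error_handler(), and the reset callbacks they take) are
normally reached through a low-level driver's struct ata_port_operations. The sketch
below is illustrative only; the "my_drv_*" names are invented for this example, while
sata_port_ops, sata_std_hardreset(), ata_std_prereset(), ata_std_postreset(),
ata_do_eh() and ata_std_error_handler() are existing libata symbols declared in
<linux/libata.h>.

#include <linux/libata.h>

/* illustrative hardreset wrapper; a real driver would add controller quirks here */
static int my_drv_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	/* fall back to the stock SATA PHY reset */
	return sata_std_hardreset(link, class, deadline);
}

static void my_drv_error_handler(struct ata_port *ap)
{
	/*
	 * Run the standard autopsy/report/recover/finish sequence with this
	 * driver's reset methods.  A driver with no special needs can instead
	 * point ->error_handler directly at ata_std_error_handler, which picks
	 * the reset methods from ap->ops and drops the built-in hardreset when
	 * SCR registers are not accessible.
	 */
	ata_do_eh(ap, ata_std_prereset, NULL, my_drv_hardreset,
		  ata_std_postreset);
}

static struct ata_port_operations my_drv_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= my_drv_hardreset,
	.error_handler	= my_drv_error_handler,
};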