/*
 * libata-eh.c - libata error handling
 *
 * Maintained by:  Tejun Heo <tj@kernel.org>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>
#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 * ata_ehi_clear_desc - clear error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only the name and the offset address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

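/* Iterate over recorded error ring entries, newest first.  @map_fn is
 * invoked on each entry that still carries an error mask; iteration
 * stops early at an empty slot or when @map_fn returns non-zero, and
 * that return value is propagated to the caller.
 */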
int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 * ata_eh_acquire - acquire EH ownership
 * @ap: ATA port to acquire EH ownership for
 *
 * Acquire EH ownership for @ap.  This is the basic exclusion
 * mechanism for ports sharing a host.  Only one port hanging off
 * the same host can claim the ownership of EH.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 * ata_eh_release - release EH ownership
 * @ap: ATA port to release EH ownership for
 *
 * Release EH ownership for @ap.  The caller must have acquired
 * EH ownership using ata_eh_acquire() previously.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}

/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
EXPORT_SYMBOL(ata_scsi_timed_out);

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If the timeout raced with normal completion and there is nothing
	   to recover (nr_timedout == 0), why exactly are we doing error
	   recovery here? */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host: scsi host containing the port
 * @ap: ATA port within the host
 * @eh_work_q: list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler.

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host: SCSI host containing the port
 * @ap: the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 * LOCKING: inherited from ata_port_schedule_eh
 * spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock; in
 * the libsas case we need to hold a lock at the ha level to coordinate
 * these events.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);


/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all active qc's on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
1245c6fd2807SJeff Garzik */ 1246c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1247c6fd2807SJeff Garzik { 1248c6fd2807SJeff Garzik unsigned long flags; 1249c6fd2807SJeff Garzik 1250c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1251c6fd2807SJeff Garzik return; 1252c6fd2807SJeff Garzik 1253c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1254c6fd2807SJeff Garzik __ata_port_freeze(ap); 1255c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1256c6fd2807SJeff Garzik } 1257c6fd2807SJeff Garzik 1258c6fd2807SJeff Garzik /** 1259c6fd2807SJeff Garzik * ata_eh_thaw_port - EH helper to thaw port 1260c6fd2807SJeff Garzik * @ap: ATA port to thaw 1261c6fd2807SJeff Garzik * 1262c6fd2807SJeff Garzik * Thaw frozen port @ap. 1263c6fd2807SJeff Garzik * 1264c6fd2807SJeff Garzik * LOCKING: 1265c6fd2807SJeff Garzik * None. 1266c6fd2807SJeff Garzik */ 1267c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1268c6fd2807SJeff Garzik { 1269c6fd2807SJeff Garzik unsigned long flags; 1270c6fd2807SJeff Garzik 1271c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1272c6fd2807SJeff Garzik return; 1273c6fd2807SJeff Garzik 1274c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1275c6fd2807SJeff Garzik 1276c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1277c6fd2807SJeff Garzik 1278c6fd2807SJeff Garzik if (ap->ops->thaw) 1279c6fd2807SJeff Garzik ap->ops->thaw(ap); 1280c6fd2807SJeff Garzik 1281c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1282c6fd2807SJeff Garzik 128344877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id); 1284c6fd2807SJeff Garzik } 1285c6fd2807SJeff Garzik 1286c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1287c6fd2807SJeff Garzik { 1288c6fd2807SJeff Garzik /* nada */ 1289c6fd2807SJeff Garzik } 1290c6fd2807SJeff Garzik 1291c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1292c6fd2807SJeff Garzik { 1293c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1294c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1295c6fd2807SJeff Garzik unsigned long flags; 1296c6fd2807SJeff Garzik 1297c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1298c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1299c6fd2807SJeff Garzik __ata_qc_complete(qc); 1300c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1301c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1302c6fd2807SJeff Garzik 1303c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1304c6fd2807SJeff Garzik } 1305c6fd2807SJeff Garzik 1306c6fd2807SJeff Garzik /** 1307c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1308c6fd2807SJeff Garzik * @qc: Command to complete 1309c6fd2807SJeff Garzik * 1310c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1311c6fd2807SJeff Garzik * completed. To be used from EH.
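/*
 * A minimal sketch of the ->freeze()/->thaw() callbacks mentioned in the
 * __ata_port_freeze() comment above, for a hypothetical MMIO controller:
 * mask the port's interrupt sources while frozen and unmask them on thaw.
 * EXAMPLE_PORT_IRQ_MASK, the use of iomap[0] and the all-ones unmask value
 * are assumptions of the sketch; a real driver would plug such functions
 * into its struct ata_port_operations .freeze and .thaw members.
 */
#define EXAMPLE_PORT_IRQ_MASK	0x14	/* hypothetical register offset */

static void example_port_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[0];

	/* keep a wedged device from storming the interrupt handler */
	writel(0, mmio + EXAMPLE_PORT_IRQ_MASK);
}

static void example_port_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[0];

	/* re-enable the interrupt sources the driver normally uses */
	writel(~0, mmio + EXAMPLE_PORT_IRQ_MASK);
}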
1312c6fd2807SJeff Garzik */ 1313c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1314c6fd2807SJeff Garzik { 1315c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1316c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1317c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1318c6fd2807SJeff Garzik } 1319c6fd2807SJeff Garzik 1320c6fd2807SJeff Garzik /** 1321c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1322c6fd2807SJeff Garzik * @qc: Command to retry 1323c6fd2807SJeff Garzik * 1324c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1325c6fd2807SJeff Garzik * should be retried. To be used from EH. 1326c6fd2807SJeff Garzik * 1327c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1328f13e2201SGwendal Grignou * scmd->allowed is incremented for commands which get retried 1329c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1330c6fd2807SJeff Garzik */ 1331c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1332c6fd2807SJeff Garzik { 1333c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1334f13e2201SGwendal Grignou if (!qc->err_mask) 1335f13e2201SGwendal Grignou scmd->allowed++; 1336c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1337c6fd2807SJeff Garzik } 1338c6fd2807SJeff Garzik 1339c6fd2807SJeff Garzik /** 1340678afac6STejun Heo * ata_dev_disable - disable ATA device 1341678afac6STejun Heo * @dev: ATA device to disable 1342678afac6STejun Heo * 1343678afac6STejun Heo * Disable @dev. 1344678afac6STejun Heo * 1345678afac6STejun Heo * Locking: 1346678afac6STejun Heo * EH context. 1347678afac6STejun Heo */ 1348678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1349678afac6STejun Heo { 1350678afac6STejun Heo if (!ata_dev_enabled(dev)) 1351678afac6STejun Heo return; 1352678afac6STejun Heo 1353678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1354a9a79dfeSJoe Perches ata_dev_warn(dev, "disabled\n"); 1355678afac6STejun Heo ata_acpi_on_disable(dev); 1356678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1357678afac6STejun Heo dev->class++; 135899cf610aSTejun Heo 135999cf610aSTejun Heo /* From now till the next successful probe, ering is used to 136099cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 136199cf610aSTejun Heo */ 136299cf610aSTejun Heo ata_ering_clear(&dev->ering); 1363678afac6STejun Heo } 1364678afac6STejun Heo 1365678afac6STejun Heo /** 1366c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1367c6fd2807SJeff Garzik * @dev: ATA device to detach 1368c6fd2807SJeff Garzik * 1369c6fd2807SJeff Garzik * Detach @dev. 1370c6fd2807SJeff Garzik * 1371c6fd2807SJeff Garzik * LOCKING: 1372c6fd2807SJeff Garzik * None. 
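/*
 * A simplified sketch of how an EH path can dispose of failed commands with
 * the two helpers above: commands that failed through no fault of their own
 * (err_mask clear) are retried without consuming one of the midlayer's
 * retries, everything else is completed with the status it carries. This
 * illustrates the pattern only and is not a copy of libata's own finishing
 * logic.
 */
static void example_finish_failed_qcs(struct ata_port *ap)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (!qc->err_mask)
			ata_eh_qc_retry(qc);	/* scmd->allowed is bumped, see above */
		else
			ata_eh_qc_complete(qc);
	}
}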
1373c6fd2807SJeff Garzik */ 1374fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1375c6fd2807SJeff Garzik { 1376f58229f8STejun Heo struct ata_link *link = dev->link; 1377f58229f8STejun Heo struct ata_port *ap = link->ap; 137890484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1379c6fd2807SJeff Garzik unsigned long flags; 1380c6fd2807SJeff Garzik 1381c6fd2807SJeff Garzik ata_dev_disable(dev); 1382c6fd2807SJeff Garzik 1383c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1384c6fd2807SJeff Garzik 1385c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1386c6fd2807SJeff Garzik 1387c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1388c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1389c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1390c6fd2807SJeff Garzik } 1391c6fd2807SJeff Garzik 139290484ebfSTejun Heo /* clear per-dev EH info */ 1393f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1394f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 139590484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 139690484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1397c6fd2807SJeff Garzik 1398c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1399c6fd2807SJeff Garzik } 1400c6fd2807SJeff Garzik 1401c6fd2807SJeff Garzik /** 1402c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1403955e57dfSTejun Heo * @link: target ATA link 1404c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1405c6fd2807SJeff Garzik * @action: action about to be performed 1406c6fd2807SJeff Garzik * 1407c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1408955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1409955e57dfSTejun Heo * repeated. 1410c6fd2807SJeff Garzik * 1411c6fd2807SJeff Garzik * LOCKING: 1412c6fd2807SJeff Garzik * None. 1413c6fd2807SJeff Garzik */ 1414fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1415c6fd2807SJeff Garzik unsigned int action) 1416c6fd2807SJeff Garzik { 1417955e57dfSTejun Heo struct ata_port *ap = link->ap; 1418955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1419955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1420c6fd2807SJeff Garzik unsigned long flags; 1421c6fd2807SJeff Garzik 1422c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1423c6fd2807SJeff Garzik 1424955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1425c6fd2807SJeff Garzik 1426a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1427a568d1d2STejun Heo * slave links as master will do them again. 1428a568d1d2STejun Heo */ 1429a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1430c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1431c6fd2807SJeff Garzik 1432c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1433c6fd2807SJeff Garzik } 1434c6fd2807SJeff Garzik 1435c6fd2807SJeff Garzik /** 1436c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1437c6fd2807SJeff Garzik * @ap: target ATA port 1438c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1439c6fd2807SJeff Garzik * @action: action just completed 1440c6fd2807SJeff Garzik * 1441c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1442955e57dfSTejun Heo * in @link->eh_context. 
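/*
 * A minimal sketch of the usual bracketing of an EH action with
 * ata_eh_about_to_do() above and ata_eh_done() (defined just below). The
 * worker example_revalidate_device() is hypothetical; the point is the
 * ordering: clear the request from eh_info before acting, and clear it from
 * eh_context only once the action succeeded, so a failed attempt is
 * naturally retried on the next EH pass.
 */
static int example_revalidate_device(struct ata_device *dev);	/* hypothetical */

static int example_run_revalidate(struct ata_link *link, struct ata_device *dev)
{
	int rc;

	ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);

	rc = example_revalidate_device(dev);
	if (rc)
		return rc;	/* action stays pending in link->eh_context */

	ata_eh_done(link, dev, ATA_EH_REVALIDATE);
	return 0;
}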
1443c6fd2807SJeff Garzik * 1444c6fd2807SJeff Garzik * LOCKING: 1445c6fd2807SJeff Garzik * None. 1446c6fd2807SJeff Garzik */ 1447fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1448c6fd2807SJeff Garzik unsigned int action) 1449c6fd2807SJeff Garzik { 1450955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 14519af5c9c9STejun Heo 1452955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1453c6fd2807SJeff Garzik } 1454c6fd2807SJeff Garzik 1455c6fd2807SJeff Garzik /** 1456c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1457c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1458c6fd2807SJeff Garzik * 1459c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1460c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1461c6fd2807SJeff Garzik * error is reported. 1462c6fd2807SJeff Garzik * 1463c6fd2807SJeff Garzik * LOCKING: 1464c6fd2807SJeff Garzik * None. 1465c6fd2807SJeff Garzik * 1466c6fd2807SJeff Garzik * RETURNS: 1467c6fd2807SJeff Garzik * Descriptive string for @err_mask 1468c6fd2807SJeff Garzik */ 1469c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1470c6fd2807SJeff Garzik { 1471c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1472c6fd2807SJeff Garzik return "host bus error"; 1473c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1474c6fd2807SJeff Garzik return "ATA bus error"; 1475c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1476c6fd2807SJeff Garzik return "timeout"; 1477c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1478c6fd2807SJeff Garzik return "HSM violation"; 1479c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1480c6fd2807SJeff Garzik return "internal error"; 1481c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1482c6fd2807SJeff Garzik return "media error"; 1483c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1484c6fd2807SJeff Garzik return "invalid argument"; 1485c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1486c6fd2807SJeff Garzik return "device error"; 1487c6fd2807SJeff Garzik return "unknown error"; 1488c6fd2807SJeff Garzik } 1489c6fd2807SJeff Garzik 1490c6fd2807SJeff Garzik /** 1491c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1492c6fd2807SJeff Garzik * @dev: target device 149365fe1f0fSShane Huang * @log: log to read 1494c6fd2807SJeff Garzik * @page: page to read 1495c6fd2807SJeff Garzik * @buf: buffer to store read page 1496c6fd2807SJeff Garzik * @sectors: number of sectors to read 1497c6fd2807SJeff Garzik * 1498c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1499c6fd2807SJeff Garzik * 1500c6fd2807SJeff Garzik * LOCKING: 1501c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1502c6fd2807SJeff Garzik * 1503c6fd2807SJeff Garzik * RETURNS: 1504c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 1505c6fd2807SJeff Garzik */ 150665fe1f0fSShane Huang unsigned int ata_read_log_page(struct ata_device *dev, u8 log, 1507c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1508c6fd2807SJeff Garzik { 1509ea013a9bSAndreas Werner unsigned long ap_flags = dev->link->ap->flags; 1510c6fd2807SJeff Garzik struct ata_taskfile tf; 1511c6fd2807SJeff Garzik unsigned int err_mask; 15125d3abf8fSMartin K. 
Petersen bool dma = false; 1513c6fd2807SJeff Garzik 151465fe1f0fSShane Huang DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); 1515c6fd2807SJeff Garzik 1516ea013a9bSAndreas Werner /* 1517ea013a9bSAndreas Werner * Return error without actually issuing the command on controllers 1518ea013a9bSAndreas Werner * which e.g. lockup on a read log page. 1519ea013a9bSAndreas Werner */ 1520ea013a9bSAndreas Werner if (ap_flags & ATA_FLAG_NO_LOG_PAGE) 1521ea013a9bSAndreas Werner return AC_ERR_DEV; 1522ea013a9bSAndreas Werner 15235d3abf8fSMartin K. Petersen retry: 1524c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 15255d3abf8fSMartin K. Petersen if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && 15265d3abf8fSMartin K. Petersen !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) { 15279faa6438SHannes Reinecke tf.command = ATA_CMD_READ_LOG_DMA_EXT; 15289faa6438SHannes Reinecke tf.protocol = ATA_PROT_DMA; 15295d3abf8fSMartin K. Petersen dma = true; 15309faa6438SHannes Reinecke } else { 1531c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 15329faa6438SHannes Reinecke tf.protocol = ATA_PROT_PIO; 1533eab6ee1cSMartin K. Petersen dma = false; 15349faa6438SHannes Reinecke } 153565fe1f0fSShane Huang tf.lbal = log; 153665fe1f0fSShane Huang tf.lbam = page; 1537c6fd2807SJeff Garzik tf.nsect = sectors; 1538c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1539c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1540c6fd2807SJeff Garzik 1541c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 15422b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1543c6fd2807SJeff Garzik 15445d3abf8fSMartin K. Petersen if (err_mask && dma) { 15455d3abf8fSMartin K. Petersen dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG; 15465d3abf8fSMartin K. Petersen ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n"); 15475d3abf8fSMartin K. Petersen goto retry; 15485d3abf8fSMartin K. Petersen } 15495d3abf8fSMartin K. Petersen 1550c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1551c6fd2807SJeff Garzik return err_mask; 1552c6fd2807SJeff Garzik } 1553c6fd2807SJeff Garzik 1554c6fd2807SJeff Garzik /** 1555c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1556c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1557c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1558c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1559c6fd2807SJeff Garzik * 1560c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1561c6fd2807SJeff Garzik * condition. 1562c6fd2807SJeff Garzik * 1563c6fd2807SJeff Garzik * LOCKING: 1564c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1565c6fd2807SJeff Garzik * 1566c6fd2807SJeff Garzik * RETURNS: 1567c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
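/*
 * A minimal usage sketch for ata_read_log_page() above: read one sector of
 * the SATA NCQ error log into the port's scratch buffer. The helper prefers
 * READ LOG DMA EXT when the device advertises it and quietly falls back to
 * the PIO variant (marking ATA_HORKAGE_NO_NCQ_LOG) if the DMA form fails,
 * so the caller only needs to check the returned err_mask.
 */
static int example_read_ncq_error_log(struct ata_device *dev)
{
	u8 *buf = dev->link->ap->sector_buf;	/* 512-byte per-port scratch buffer */
	unsigned int err_mask;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask) {
		ata_dev_warn(dev, "failed to read NCQ error log (emask=0x%x)\n",
			     err_mask);
		return -EIO;
	}

	return 0;
}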
1568c6fd2807SJeff Garzik */ 1569c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1570c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1571c6fd2807SJeff Garzik { 15729af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1573c6fd2807SJeff Garzik unsigned int err_mask; 1574c6fd2807SJeff Garzik u8 csum; 1575c6fd2807SJeff Garzik int i; 1576c6fd2807SJeff Garzik 157765fe1f0fSShane Huang err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1); 1578c6fd2807SJeff Garzik if (err_mask) 1579c6fd2807SJeff Garzik return -EIO; 1580c6fd2807SJeff Garzik 1581c6fd2807SJeff Garzik csum = 0; 1582c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1583c6fd2807SJeff Garzik csum += buf[i]; 1584c6fd2807SJeff Garzik if (csum) 1585a9a79dfeSJoe Perches ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", 1586a9a79dfeSJoe Perches csum); 1587c6fd2807SJeff Garzik 1588c6fd2807SJeff Garzik if (buf[0] & 0x80) 1589c6fd2807SJeff Garzik return -ENOENT; 1590c6fd2807SJeff Garzik 1591c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1592c6fd2807SJeff Garzik 1593c6fd2807SJeff Garzik tf->command = buf[2]; 1594c6fd2807SJeff Garzik tf->feature = buf[3]; 1595c6fd2807SJeff Garzik tf->lbal = buf[4]; 1596c6fd2807SJeff Garzik tf->lbam = buf[5]; 1597c6fd2807SJeff Garzik tf->lbah = buf[6]; 1598c6fd2807SJeff Garzik tf->device = buf[7]; 1599c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1600c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1601c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1602c6fd2807SJeff Garzik tf->nsect = buf[12]; 1603c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 16045b01e4b9SHannes Reinecke if (ata_id_has_ncq_autosense(dev->id)) 16055b01e4b9SHannes Reinecke tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16]; 1606c6fd2807SJeff Garzik 1607c6fd2807SJeff Garzik return 0; 1608c6fd2807SJeff Garzik } 1609c6fd2807SJeff Garzik 1610c6fd2807SJeff Garzik /** 161111fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 161211fc33daSTejun Heo * @dev: target ATAPI device 161311fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 161411fc33daSTejun Heo * 161511fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 161611fc33daSTejun Heo * 161711fc33daSTejun Heo * LOCKING: 161811fc33daSTejun Heo * EH context (may sleep). 161911fc33daSTejun Heo * 162011fc33daSTejun Heo * RETURNS: 162111fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
162211fc33daSTejun Heo */ 16233dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 162411fc33daSTejun Heo { 162511fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 162611fc33daSTejun Heo struct ata_taskfile tf; 162711fc33daSTejun Heo unsigned int err_mask; 162811fc33daSTejun Heo 162911fc33daSTejun Heo ata_tf_init(dev, &tf); 163011fc33daSTejun Heo 163111fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 163211fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 163311fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 163411fc33daSTejun Heo 163511fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 163611fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 163711fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 163811fc33daSTejun Heo return err_mask; 163911fc33daSTejun Heo } 164011fc33daSTejun Heo 164111fc33daSTejun Heo /** 1642e87fd28cSHannes Reinecke * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT 1643e87fd28cSHannes Reinecke * @qc: qc to perform REQUEST_SENSE_DATA_EXT to 1644e87fd28cSHannes Reinecke * @cmd: scsi command for which the sense code should be set 1645e87fd28cSHannes Reinecke * 1646e87fd28cSHannes Reinecke * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK 1647e87fd28cSHannes Reinecke * SENSE. This function is an EH helper. 1648e87fd28cSHannes Reinecke * 1649e87fd28cSHannes Reinecke * LOCKING: 1650e87fd28cSHannes Reinecke * Kernel thread context (may sleep). 1651e87fd28cSHannes Reinecke */ 1652e87fd28cSHannes Reinecke static void ata_eh_request_sense(struct ata_queued_cmd *qc, 1653e87fd28cSHannes Reinecke struct scsi_cmnd *cmd) 1654e87fd28cSHannes Reinecke { 1655e87fd28cSHannes Reinecke struct ata_device *dev = qc->dev; 1656e87fd28cSHannes Reinecke struct ata_taskfile tf; 1657e87fd28cSHannes Reinecke unsigned int err_mask; 1658e87fd28cSHannes Reinecke 1659e87fd28cSHannes Reinecke if (qc->ap->pflags & ATA_PFLAG_FROZEN) { 1660e87fd28cSHannes Reinecke ata_dev_warn(dev, "sense data available but port frozen\n"); 1661e87fd28cSHannes Reinecke return; 1662e87fd28cSHannes Reinecke } 1663e87fd28cSHannes Reinecke 1664d238ffd5SHannes Reinecke if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID) 1665e87fd28cSHannes Reinecke return; 1666e87fd28cSHannes Reinecke 1667e87fd28cSHannes Reinecke if (!ata_id_sense_reporting_enabled(dev->id)) { 1668e87fd28cSHannes Reinecke ata_dev_warn(qc->dev, "sense data reporting disabled\n"); 1669e87fd28cSHannes Reinecke return; 1670e87fd28cSHannes Reinecke } 1671e87fd28cSHannes Reinecke 1672e87fd28cSHannes Reinecke DPRINTK("ATA request sense\n"); 1673e87fd28cSHannes Reinecke 1674e87fd28cSHannes Reinecke ata_tf_init(dev, &tf); 1675e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1676e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1677e87fd28cSHannes Reinecke tf.command = ATA_CMD_REQ_SENSE_DATA; 1678e87fd28cSHannes Reinecke tf.protocol = ATA_PROT_NODATA; 1679e87fd28cSHannes Reinecke 1680e87fd28cSHannes Reinecke err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1681e87fd28cSHannes Reinecke /* Ignore err_mask; ATA_ERR might be set */ 1682e87fd28cSHannes Reinecke if (tf.command & ATA_SENSE) { 168306dbde5fSHannes Reinecke ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); 1684e87fd28cSHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 1685e87fd28cSHannes Reinecke } else { 1686e87fd28cSHannes Reinecke ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1687e87fd28cSHannes Reinecke tf.command, err_mask); 1688e87fd28cSHannes Reinecke } 1689e87fd28cSHannes Reinecke } 1690e87fd28cSHannes Reinecke 1691e87fd28cSHannes Reinecke /** 1692c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1693c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1694c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 16953eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1696c6fd2807SJeff Garzik * 1697c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1698c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1699c6fd2807SJeff Garzik * 1700c6fd2807SJeff Garzik * LOCKING: 1701c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1702c6fd2807SJeff Garzik * 1703c6fd2807SJeff Garzik * RETURNS: 1704c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1705c6fd2807SJeff Garzik */ 17063dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev, 17073eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1708c6fd2807SJeff Garzik { 17093eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 17103eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 17119af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1712c6fd2807SJeff Garzik struct ata_taskfile tf; 1713c6fd2807SJeff Garzik 1714c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1715c6fd2807SJeff Garzik 1716c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1717c6fd2807SJeff Garzik 171856287768SAlbert Lee /* initialize sense_buf with the error register, 171956287768SAlbert Lee * for the case where they are -not- overwritten 172056287768SAlbert Lee */ 1721c6fd2807SJeff Garzik sense_buf[0] = 0x70; 17223eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 172356287768SAlbert Lee 172456287768SAlbert Lee /* some devices time out if garbage left in tf */ 172556287768SAlbert Lee ata_tf_init(dev, &tf); 1726c6fd2807SJeff Garzik 1727c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1728c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1729c6fd2807SJeff Garzik 1730c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1731c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 17320dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1733c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1734c6fd2807SJeff Garzik } else { 17350dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1736f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1737f2dfc1a1STejun Heo tf.lbah = 0; 1738c6fd2807SJeff Garzik } 1739c6fd2807SJeff Garzik 1740c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 17412b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1742c6fd2807SJeff Garzik } 1743c6fd2807SJeff Garzik 1744c6fd2807SJeff Garzik /** 1745c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 17460260731fSTejun Heo * @link: ATA link to analyze SError for 1747c6fd2807SJeff Garzik * 1748c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1749c6fd2807SJeff Garzik * failure. 1750c6fd2807SJeff Garzik * 1751c6fd2807SJeff Garzik * LOCKING: 1752c6fd2807SJeff Garzik * None. 
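/*
 * A sketch of polling an ATAPI device with atapi_eh_tur() above until it
 * becomes ready. The retry count, the 500 ms pause, msleep() (linux/delay.h)
 * and the choice of which sense keys count as "not ready yet" are assumptions
 * of the sketch, not libata policy.
 */
static int example_wait_for_atapi_ready(struct ata_device *dev)
{
	int tries = 5;

	while (tries--) {
		u8 sense_key = 0;
		unsigned int err_mask = atapi_eh_tur(dev, &sense_key);

		if (!err_mask)
			return 0;		/* TEST UNIT READY succeeded */
		if (err_mask != AC_ERR_DEV)
			return -EIO;		/* transport trouble, give up */
		/* sense_key is only valid for plain device errors */
		if (sense_key != NOT_READY && sense_key != UNIT_ATTENTION)
			return -EIO;
		msleep(500);
	}

	return -EBUSY;
}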
1753c6fd2807SJeff Garzik */ 17540260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1755c6fd2807SJeff Garzik { 17560260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1757c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1758c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1759f9df58cbSTejun Heo u32 hotplug_mask; 1760c6fd2807SJeff Garzik 1761e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1762c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1763cf480626STejun Heo action |= ATA_EH_RESET; 1764c6fd2807SJeff Garzik } 1765c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1766c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1767cf480626STejun Heo action |= ATA_EH_RESET; 1768c6fd2807SJeff Garzik } 1769c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1770c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1771cf480626STejun Heo action |= ATA_EH_RESET; 1772c6fd2807SJeff Garzik } 1773f9df58cbSTejun Heo 1774f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1775f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1776f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1777f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1778f9df58cbSTejun Heo */ 1779eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 17806b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 17816b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1782f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1783f9df58cbSTejun Heo else 1784f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1785f9df58cbSTejun Heo 1786f9df58cbSTejun Heo if (serror & hotplug_mask) 1787c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1788c6fd2807SJeff Garzik 1789c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1790c6fd2807SJeff Garzik ehc->i.action |= action; 1791c6fd2807SJeff Garzik } 1792c6fd2807SJeff Garzik 1793c6fd2807SJeff Garzik /** 1794c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 17950260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1796c6fd2807SJeff Garzik * 1797c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1798c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1799c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1800c6fd2807SJeff Garzik * care of the rest. 1801c6fd2807SJeff Garzik * 1802c6fd2807SJeff Garzik * LOCKING: 1803c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1804c6fd2807SJeff Garzik */ 180510acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1806c6fd2807SJeff Garzik { 18070260731fSTejun Heo struct ata_port *ap = link->ap; 18080260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 18090260731fSTejun Heo struct ata_device *dev = link->device; 1810c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1811c6fd2807SJeff Garzik struct ata_taskfile tf; 1812c6fd2807SJeff Garzik int tag, rc; 1813c6fd2807SJeff Garzik 1814c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1815c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1816c6fd2807SJeff Garzik return; 1817c6fd2807SJeff Garzik 1818c6fd2807SJeff Garzik /* is it NCQ device error? */ 18190260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1820c6fd2807SJeff Garzik return; 1821c6fd2807SJeff Garzik 1822c6fd2807SJeff Garzik /* has LLDD analyzed already? 
*/ 1823c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1824c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1825c6fd2807SJeff Garzik 1826c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1827c6fd2807SJeff Garzik continue; 1828c6fd2807SJeff Garzik 1829c6fd2807SJeff Garzik if (qc->err_mask) 1830c6fd2807SJeff Garzik return; 1831c6fd2807SJeff Garzik } 1832c6fd2807SJeff Garzik 1833c6fd2807SJeff Garzik /* okay, this error is ours */ 1834a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf)); 1835c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1836c6fd2807SJeff Garzik if (rc) { 1837a9a79dfeSJoe Perches ata_link_err(link, "failed to read log page 10h (errno=%d)\n", 1838a9a79dfeSJoe Perches rc); 1839c6fd2807SJeff Garzik return; 1840c6fd2807SJeff Garzik } 1841c6fd2807SJeff Garzik 18420260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 1843a9a79dfeSJoe Perches ata_link_err(link, "log page 10h reported inactive tag %d\n", 1844a9a79dfeSJoe Perches tag); 1845c6fd2807SJeff Garzik return; 1846c6fd2807SJeff Garzik } 1847c6fd2807SJeff Garzik 1848c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1849c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1850c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1851a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 18525335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1853d238ffd5SHannes Reinecke if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) { 18545b01e4b9SHannes Reinecke char sense_key, asc, ascq; 18555b01e4b9SHannes Reinecke 18565b01e4b9SHannes Reinecke sense_key = (qc->result_tf.auxiliary >> 16) & 0xff; 18575b01e4b9SHannes Reinecke asc = (qc->result_tf.auxiliary >> 8) & 0xff; 18585b01e4b9SHannes Reinecke ascq = qc->result_tf.auxiliary & 0xff; 185906dbde5fSHannes Reinecke ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq); 1860492bf621SHannes Reinecke ata_scsi_set_sense_information(dev, qc->scsicmd, 1861492bf621SHannes Reinecke &qc->result_tf); 18625b01e4b9SHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 18635b01e4b9SHannes Reinecke } 18645b01e4b9SHannes Reinecke 1865c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1866c6fd2807SJeff Garzik } 1867c6fd2807SJeff Garzik 1868c6fd2807SJeff Garzik /** 1869c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1870c6fd2807SJeff Garzik * @qc: qc to analyze 1871c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1872c6fd2807SJeff Garzik * 1873c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1874c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 187525985edcSLucas De Marchi * available. 1876c6fd2807SJeff Garzik * 1877c6fd2807SJeff Garzik * LOCKING: 1878c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
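/*
 * A sketch of the driver-side half of the contract described above: on an
 * NCQ device error the low-level driver only has to note AC_ERR_DEV in the
 * link's eh_info and push the active commands into EH;
 * ata_eh_analyze_ncq_error() then reads log page 10h and pins the failure on
 * the offending tag. The function below is hypothetical and assumes it is
 * called from the driver's interrupt handler with ap->lock held, as
 * ata_port_abort() requires.
 */
static void example_report_ncq_device_error(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ehi->err_mask |= AC_ERR_DEV;
	ata_port_abort(ap);	/* fail the active qcs so EH gets to run */
}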
1879c6fd2807SJeff Garzik * 1880c6fd2807SJeff Garzik * RETURNS: 1881c6fd2807SJeff Garzik * Determined recovery action 1882c6fd2807SJeff Garzik */ 1883c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1884c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1885c6fd2807SJeff Garzik { 1886c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1887c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1888c6fd2807SJeff Garzik 1889c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1890c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1891cf480626STejun Heo return ATA_EH_RESET; 1892c6fd2807SJeff Garzik } 1893c6fd2807SJeff Garzik 1894e87fd28cSHannes Reinecke if (stat & (ATA_ERR | ATA_DF)) { 1895a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1896e87fd28cSHannes Reinecke /* 1897e87fd28cSHannes Reinecke * Sense data reporting does not work if the 1898e87fd28cSHannes Reinecke * device fault bit is set. 1899e87fd28cSHannes Reinecke */ 1900e87fd28cSHannes Reinecke if (stat & ATA_DF) 1901e87fd28cSHannes Reinecke stat &= ~ATA_SENSE; 1902e87fd28cSHannes Reinecke } else { 1903c6fd2807SJeff Garzik return 0; 1904e87fd28cSHannes Reinecke } 1905c6fd2807SJeff Garzik 1906c6fd2807SJeff Garzik switch (qc->dev->class) { 1907c6fd2807SJeff Garzik case ATA_DEV_ATA: 19089162c657SHannes Reinecke case ATA_DEV_ZAC: 1909e87fd28cSHannes Reinecke if (stat & ATA_SENSE) 1910e87fd28cSHannes Reinecke ata_eh_request_sense(qc, qc->scsicmd); 1911c6fd2807SJeff Garzik if (err & ATA_ICRC) 1912c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1913eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF)) 1914c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1915c6fd2807SJeff Garzik if (err & ATA_IDNF) 1916c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1917c6fd2807SJeff Garzik break; 1918c6fd2807SJeff Garzik 1919c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1920a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 19213eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 19223eabddb8STejun Heo qc->scsicmd->sense_buffer, 19233eabddb8STejun Heo qc->result_tf.feature >> 4); 19243852e373SHannes Reinecke if (!tmp) 1925c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 19263852e373SHannes Reinecke else 1927c6fd2807SJeff Garzik qc->err_mask |= tmp; 1928c6fd2807SJeff Garzik } 1929a569a30dSTejun Heo } 1930c6fd2807SJeff Garzik 19313852e373SHannes Reinecke if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 19323852e373SHannes Reinecke int ret = scsi_check_sense(qc->scsicmd); 19333852e373SHannes Reinecke /* 19343852e373SHannes Reinecke * SUCCESS here means that the sense code could 19353852e373SHannes Reinecke * be evaluated and should be passed to the upper layers 19363852e373SHannes Reinecke * for correct evaluation. 19373852e373SHannes Reinecke * FAILED means the sense code could not be interpreted 19383852e373SHannes Reinecke * and the device would need to be reset. 19393852e373SHannes Reinecke * NEEDS_RETRY and ADD_TO_MLQUEUE mean that the 19403852e373SHannes Reinecke * command would need to be retried.
19413852e373SHannes Reinecke */ 19423852e373SHannes Reinecke if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) { 19433852e373SHannes Reinecke qc->flags |= ATA_QCFLAG_RETRY; 19443852e373SHannes Reinecke qc->err_mask |= AC_ERR_OTHER; 19453852e373SHannes Reinecke } else if (ret != SUCCESS) { 19463852e373SHannes Reinecke qc->err_mask |= AC_ERR_HSM; 19473852e373SHannes Reinecke } 19483852e373SHannes Reinecke } 1949c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1950cf480626STejun Heo action |= ATA_EH_RESET; 1951c6fd2807SJeff Garzik 1952c6fd2807SJeff Garzik return action; 1953c6fd2807SJeff Garzik } 1954c6fd2807SJeff Garzik 195576326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 195676326ac1STejun Heo int *xfer_ok) 1957c6fd2807SJeff Garzik { 195876326ac1STejun Heo int base = 0; 195976326ac1STejun Heo 196076326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 196176326ac1STejun Heo *xfer_ok = 1; 196276326ac1STejun Heo 196376326ac1STejun Heo if (!*xfer_ok) 196475f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 196576326ac1STejun Heo 19667d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 196776326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1968c6fd2807SJeff Garzik 19697d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 197076326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 19717d47e8d4STejun Heo 19723884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 19737d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 197476326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 19757d47e8d4STejun Heo if ((err_mask & 19767d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 197776326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1978c6fd2807SJeff Garzik } 1979c6fd2807SJeff Garzik 1980c6fd2807SJeff Garzik return 0; 1981c6fd2807SJeff Garzik } 1982c6fd2807SJeff Garzik 19837d47e8d4STejun Heo struct speed_down_verdict_arg { 1984c6fd2807SJeff Garzik u64 since; 198576326ac1STejun Heo int xfer_ok; 19863884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1987c6fd2807SJeff Garzik }; 1988c6fd2807SJeff Garzik 19897d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1990c6fd2807SJeff Garzik { 19917d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 199276326ac1STejun Heo int cat; 1993c6fd2807SJeff Garzik 1994d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1995c6fd2807SJeff Garzik return -1; 1996c6fd2807SJeff Garzik 199776326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 199876326ac1STejun Heo &arg->xfer_ok); 19997d47e8d4STejun Heo arg->nr_errors[cat]++; 200076326ac1STejun Heo 2001c6fd2807SJeff Garzik return 0; 2002c6fd2807SJeff Garzik } 2003c6fd2807SJeff Garzik 2004c6fd2807SJeff Garzik /** 20057d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 2006c6fd2807SJeff Garzik * @dev: Device of interest 2007c6fd2807SJeff Garzik * 2008c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 20097d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 20107d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
2011c6fd2807SJeff Garzik * 20123884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 2013c6fd2807SJeff Garzik * 20143884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 20153884f7b0STejun Heo * IO commands 20167d47e8d4STejun Heo * 20173884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 2018c6fd2807SJeff Garzik * 201976326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 202076326ac1STejun Heo * data transfer hasn't been verified. 202176326ac1STejun Heo * 20223884f7b0STejun Heo * Verdicts are 20237d47e8d4STejun Heo * 20243884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 20257d47e8d4STejun Heo * 20263884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 20273884f7b0STejun Heo * to PIO. 20283884f7b0STejun Heo * 20293884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 20303884f7b0STejun Heo * 20313884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 203276326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 203376326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 203476326ac1STejun Heo * This is to expedite speed down decisions right after device is 203576326ac1STejun Heo * initially configured. 20363884f7b0STejun Heo * 2037*4091fb95SMasahiro Yamada * The following are speed down rules. #1 and #2 deal with 203876326ac1STejun Heo * DUBIOUS errors. 203976326ac1STejun Heo * 204076326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 204176326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 204276326ac1STejun Heo * 204376326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 204476326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 204576326ac1STejun Heo * 204676326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 204725985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO 20483884f7b0STejun Heo * 204976326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 20503884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 20513884f7b0STejun Heo * 205276326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 20533884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 20547d47e8d4STejun Heo * 2055c6fd2807SJeff Garzik * LOCKING: 2056c6fd2807SJeff Garzik * Inherited from caller. 2057c6fd2807SJeff Garzik * 2058c6fd2807SJeff Garzik * RETURNS: 20597d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 
2060c6fd2807SJeff Garzik */ 20617d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 2062c6fd2807SJeff Garzik { 20637d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 20647d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 20657d47e8d4STejun Heo struct speed_down_verdict_arg arg; 20667d47e8d4STejun Heo unsigned int verdict = 0; 2067c6fd2807SJeff Garzik 20683884f7b0STejun Heo /* scan past 5 mins of error history */ 20693884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 20703884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 20713884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 20723884f7b0STejun Heo 207376326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 207476326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 207576326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 207676326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 207776326ac1STejun Heo 207876326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 207976326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 208076326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 208176326ac1STejun Heo 20823884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 20833884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 2084663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 20853884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 20863884f7b0STejun Heo 20877d47e8d4STejun Heo /* scan past 10 mins of error history */ 2088c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 20897d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 20907d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 2091c6fd2807SJeff Garzik 20923884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 20933884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 20947d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 20953884f7b0STejun Heo 20963884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 20973884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 2098663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 20997d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 2100c6fd2807SJeff Garzik 21017d47e8d4STejun Heo return verdict; 2102c6fd2807SJeff Garzik } 2103c6fd2807SJeff Garzik 2104c6fd2807SJeff Garzik /** 2105c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 2106c6fd2807SJeff Garzik * @dev: Failed device 21073884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 2108c6fd2807SJeff Garzik * @err_mask: err_mask of the error 2109c6fd2807SJeff Garzik * 2110c6fd2807SJeff Garzik * Record error and examine error history to determine whether 2111c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 2112c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 2113c6fd2807SJeff Garzik * necessary. 2114c6fd2807SJeff Garzik * 2115c6fd2807SJeff Garzik * LOCKING: 2116c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2117c6fd2807SJeff Garzik * 2118c6fd2807SJeff Garzik * RETURNS: 21197d47e8d4STejun Heo * Determined recovery action. 
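/*
 * A worked example of the categorization feeding the verdict rules above.
 * An I/O command failing with a bare device error normally lands in
 * ATA_ECAT_UNK_DEV; when no transfer has been verified yet on the device
 * (ATA_EFLAG_DUBIOUS_XFER) the same error becomes DUBIOUS_UNK_DEV, which is
 * what lets rule #2 turn NCQ off after the second such error instead of
 * waiting for the fourth under rule #4.
 */
static int example_categorize_one_error(void)
{
	int xfer_ok = 0;

	/* returns ATA_ECAT_DUBIOUS_UNK_DEV; without ATA_EFLAG_DUBIOUS_XFER it
	 * would return ATA_ECAT_UNK_DEV and set xfer_ok to 1 */
	return ata_eh_categorize_error(ATA_EFLAG_IS_IO | ATA_EFLAG_DUBIOUS_XFER,
				       AC_ERR_DEV, &xfer_ok);
}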
2120c6fd2807SJeff Garzik */ 21213884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 21223884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 2123c6fd2807SJeff Garzik { 2124b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 212576326ac1STejun Heo int xfer_ok = 0; 21267d47e8d4STejun Heo unsigned int verdict; 21277d47e8d4STejun Heo unsigned int action = 0; 21287d47e8d4STejun Heo 21297d47e8d4STejun Heo /* don't bother if Cat-0 error */ 213076326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 2131c6fd2807SJeff Garzik return 0; 2132c6fd2807SJeff Garzik 2133c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 21343884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 21357d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 2136c6fd2807SJeff Garzik 21377d47e8d4STejun Heo /* turn off NCQ? */ 21387d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 21397d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 21407d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 21417d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 2142a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 21437d47e8d4STejun Heo goto done; 21447d47e8d4STejun Heo } 2145c6fd2807SJeff Garzik 21467d47e8d4STejun Heo /* speed down? */ 21477d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 2148c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 2149a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 2150cf480626STejun Heo action |= ATA_EH_RESET; 21517d47e8d4STejun Heo goto done; 21527d47e8d4STejun Heo } 2153c6fd2807SJeff Garzik 2154c6fd2807SJeff Garzik /* lower transfer mode */ 21557d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 21567d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 21577d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 21587d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 21597d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 21607d47e8d4STejun Heo int sel; 2161c6fd2807SJeff Garzik 21627d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 21637d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 21647d47e8d4STejun Heo else 21657d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 21667d47e8d4STejun Heo 21677d47e8d4STejun Heo dev->spdn_cnt++; 21687d47e8d4STejun Heo 21697d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 2170cf480626STejun Heo action |= ATA_EH_RESET; 21717d47e8d4STejun Heo goto done; 21727d47e8d4STejun Heo } 21737d47e8d4STejun Heo } 21747d47e8d4STejun Heo } 21757d47e8d4STejun Heo 21767d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 2177663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
21787d47e8d4STejun Heo */ 21797d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 2180663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 21817d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 21827d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 21837d47e8d4STejun Heo dev->spdn_cnt = 0; 2184cf480626STejun Heo action |= ATA_EH_RESET; 21857d47e8d4STejun Heo goto done; 21867d47e8d4STejun Heo } 21877d47e8d4STejun Heo } 21887d47e8d4STejun Heo 2189c6fd2807SJeff Garzik return 0; 21907d47e8d4STejun Heo done: 21917d47e8d4STejun Heo /* device has been slowed down, blow error history */ 219276326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 21937d47e8d4STejun Heo ata_ering_clear(&dev->ering); 21947d47e8d4STejun Heo return action; 2195c6fd2807SJeff Garzik } 2196c6fd2807SJeff Garzik 2197c6fd2807SJeff Garzik /** 21988d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry 21998d899e70SMark Lord * @qc: qc to possibly retry 22008d899e70SMark Lord * 22018d899e70SMark Lord * Look at the cause of the error and decide if a retry 22028d899e70SMark Lord * might be useful or not. We don't want to retry media errors 22038d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds 22048d899e70SMark Lord * doing its own internal retries before reporting the failure. 22058d899e70SMark Lord */ 22068d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 22078d899e70SMark Lord { 22081eaca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA) 22098d899e70SMark Lord return 0; /* don't retry media errors */ 22108d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO) 22118d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */ 22128d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID) 22138d899e70SMark Lord return 0; /* don't retry these */ 22148d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ 22158d899e70SMark Lord } 22168d899e70SMark Lord 22178d899e70SMark Lord /** 22189b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 22199b1e2658STejun Heo * @link: host link to perform autopsy on 2220c6fd2807SJeff Garzik * 22210260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 22220260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 22230260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 2224c6fd2807SJeff Garzik * 2225c6fd2807SJeff Garzik * LOCKING: 2226c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
2227c6fd2807SJeff Garzik */ 22289b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 2229c6fd2807SJeff Garzik { 22300260731fSTejun Heo struct ata_port *ap = link->ap; 2231936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2232dfcc173dSTejun Heo struct ata_device *dev; 22333884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 22343884f7b0STejun Heo int tag; 2235c6fd2807SJeff Garzik u32 serror; 2236c6fd2807SJeff Garzik int rc; 2237c6fd2807SJeff Garzik 2238c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2239c6fd2807SJeff Garzik 2240c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2241c6fd2807SJeff Garzik return; 2242c6fd2807SJeff Garzik 2243c6fd2807SJeff Garzik /* obtain and analyze SError */ 2244936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2245c6fd2807SJeff Garzik if (rc == 0) { 2246c6fd2807SJeff Garzik ehc->i.serror |= serror; 22470260731fSTejun Heo ata_eh_analyze_serror(link); 22484e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2249cf480626STejun Heo /* SError read failed, force reset and probing */ 2250b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2251cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 22524e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 22534e57c517STejun Heo } 2254c6fd2807SJeff Garzik 2255c6fd2807SJeff Garzik /* analyze NCQ failure */ 22560260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2257c6fd2807SJeff Garzik 2258c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2259c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2260c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2261c6fd2807SJeff Garzik 2262c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2263c6fd2807SJeff Garzik 2264c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2265c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2266c6fd2807SJeff Garzik 2267b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2268b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2269c6fd2807SJeff Garzik continue; 2270c6fd2807SJeff Garzik 2271c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2272c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2273c6fd2807SJeff Garzik 2274c6fd2807SJeff Garzik /* analyze TF */ 2275c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2276c6fd2807SJeff Garzik 2277c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2278c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2279c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2280c6fd2807SJeff Garzik AC_ERR_INVALID); 2281c6fd2807SJeff Garzik 2282c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2283c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2284c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2285c6fd2807SJeff Garzik 2286c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2287f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2288c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2289c6fd2807SJeff Garzik 229003faab78STejun Heo /* determine whether the command is worth retrying */ 22918d899e70SMark Lord if (ata_eh_worth_retry(qc)) 229203faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 229303faab78STejun Heo 2294c6fd2807SJeff Garzik /* accumulate error info */ 2295c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2296c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2297c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 22983884f7b0STejun 
Heo eflags |= ATA_EFLAG_IS_IO; 2299255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc); 2300c6fd2807SJeff Garzik } 2301c6fd2807SJeff Garzik 2302c6fd2807SJeff Garzik /* enforce default EH actions */ 2303c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2304c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2305cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 23063884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 23073884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2308c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2309c6fd2807SJeff Garzik 2310dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2311dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2312dfcc173dSTejun Heo */ 2313c6fd2807SJeff Garzik if (ehc->i.dev) { 2314c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2315c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2316c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2317c6fd2807SJeff Garzik } 2318c6fd2807SJeff Garzik 23192695e366STejun Heo /* propagate timeout to host link */ 23202695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 23212695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 23222695e366STejun Heo 23232695e366STejun Heo /* record error and consider speeding down */ 2324dfcc173dSTejun Heo dev = ehc->i.dev; 23252695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 23262695e366STejun Heo ata_dev_enabled(link->device)))) 2327dfcc173dSTejun Heo dev = link->device; 2328dfcc173dSTejun Heo 232976326ac1STejun Heo if (dev) { 233076326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 233176326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 23323884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 233376326ac1STejun Heo } 2334255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2335c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2336c6fd2807SJeff Garzik } 2337c6fd2807SJeff Garzik 2338c6fd2807SJeff Garzik /** 23399b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 23409b1e2658STejun Heo * @ap: host port to perform autopsy on 23419b1e2658STejun Heo * 23429b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 23439b1e2658STejun Heo * which recovery actions are needed. 23449b1e2658STejun Heo * 23459b1e2658STejun Heo * LOCKING: 23469b1e2658STejun Heo * Kernel thread context (may sleep). 23479b1e2658STejun Heo */ 2348fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 23499b1e2658STejun Heo { 23509b1e2658STejun Heo struct ata_link *link; 23519b1e2658STejun Heo 23521eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 23539b1e2658STejun Heo ata_eh_link_autopsy(link); 23542695e366STejun Heo 2355b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2356b1c72916STejun Heo * but actions and flags are transferred over to the master 2357b1c72916STejun Heo * link and handled from there. 
2358b1c72916STejun Heo */ 2359b1c72916STejun Heo if (ap->slave_link) { 2360b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2361b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2362b1c72916STejun Heo 2363848e4c68STejun Heo /* transfer control flags from master to slave */ 2364848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2365848e4c68STejun Heo 2366848e4c68STejun Heo /* perform autopsy on the slave link */ 2367b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2368b1c72916STejun Heo 2369848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2370b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2371b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2372b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2373b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2374b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2375b1c72916STejun Heo } 2376b1c72916STejun Heo 23772695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 23782695e366STejun Heo * Perform host link autopsy last. 23792695e366STejun Heo */ 2380071f44b1STejun Heo if (sata_pmp_attached(ap)) 23812695e366STejun Heo ata_eh_link_autopsy(&ap->link); 23829b1e2658STejun Heo } 23839b1e2658STejun Heo 23849b1e2658STejun Heo /** 23856521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 23866521148cSRobert Hancock * @command: ATA command code to get description for 23876521148cSRobert Hancock * 23886521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 23896521148cSRobert Hancock * command is not known. 23906521148cSRobert Hancock * 23916521148cSRobert Hancock * LOCKING: 23926521148cSRobert Hancock * None 23936521148cSRobert Hancock */ 23946521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 23956521148cSRobert Hancock { 23966521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23976521148cSRobert Hancock static const struct 23986521148cSRobert Hancock { 23996521148cSRobert Hancock u8 command; 24006521148cSRobert Hancock const char *text; 24016521148cSRobert Hancock } cmd_descr[] = { 24026521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 24036521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 24046521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 24056521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 24066521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 24076521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 24083915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 24096521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 24106521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 24116521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 24126521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 24136521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 24146521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 24156521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 24166521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 24176521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 24186521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 24196521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 24206521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 
24216521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 24226521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 24236521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 24246521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 24256521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 24266521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 24276521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 24286521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 24293915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, 24303915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, 24316521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 24326521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 24336521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 24346521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 24356521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 24366521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 24376521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 24386521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 24396521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 24406521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 24416521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 24426521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 24436521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 24446521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 24456521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 24466521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 24476521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 24486521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 24496521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 24506521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 24516521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 24526521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 24536521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 24546521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 24556521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 24566521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 24573915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 24586521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 24596521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 24606521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 24616521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 24626521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 24633915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 24646521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 24653915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, 24666521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 24676521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET 
PASSWORD" }, 24686521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 24696521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 24706521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 24716521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 24726521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 24736521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 24746521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 24756521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 24766521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2477acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 24786521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 24796521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 24806521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 24816521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 24826521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 24836521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 24843915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 24853915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 248628a3fc22SHannes Reinecke { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" }, 248727708a95SHannes Reinecke { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" }, 24886521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 24896521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 24906521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 24916521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 24926521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 24936521148cSRobert Hancock { 0, NULL } /* terminate list */ 24946521148cSRobert Hancock }; 24956521148cSRobert Hancock 24966521148cSRobert Hancock unsigned int i; 24976521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 24986521148cSRobert Hancock if (cmd_descr[i].command == command) 24996521148cSRobert Hancock return cmd_descr[i].text; 25006521148cSRobert Hancock #endif 25016521148cSRobert Hancock 25026521148cSRobert Hancock return NULL; 25036521148cSRobert Hancock } 250436aae28eSAndy Shevchenko EXPORT_SYMBOL_GPL(ata_get_cmd_descript); 25056521148cSRobert Hancock 25066521148cSRobert Hancock /** 25079b1e2658STejun Heo * ata_eh_link_report - report error handling to user 25080260731fSTejun Heo * @link: ATA link EH is going on 2509c6fd2807SJeff Garzik * 2510c6fd2807SJeff Garzik * Report EH to user. 2511c6fd2807SJeff Garzik * 2512c6fd2807SJeff Garzik * LOCKING: 2513c6fd2807SJeff Garzik * None. 
2514c6fd2807SJeff Garzik */ 25159b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2516c6fd2807SJeff Garzik { 25170260731fSTejun Heo struct ata_port *ap = link->ap; 25180260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2519c6fd2807SJeff Garzik const char *frozen, *desc; 2520462098b0SLevente Kurusa char tries_buf[6] = ""; 2521c6fd2807SJeff Garzik int tag, nr_failed = 0; 2522c6fd2807SJeff Garzik 252394ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 252494ff3d54STejun Heo return; 252594ff3d54STejun Heo 2526c6fd2807SJeff Garzik desc = NULL; 2527c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2528c6fd2807SJeff Garzik desc = ehc->i.desc; 2529c6fd2807SJeff Garzik 2530c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2531c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2532c6fd2807SJeff Garzik 2533b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2534b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2535e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2536e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2537c6fd2807SJeff Garzik continue; 2538c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2539c6fd2807SJeff Garzik continue; 2540c6fd2807SJeff Garzik 2541c6fd2807SJeff Garzik nr_failed++; 2542c6fd2807SJeff Garzik } 2543c6fd2807SJeff Garzik 2544c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2545c6fd2807SJeff Garzik return; 2546c6fd2807SJeff Garzik 2547c6fd2807SJeff Garzik frozen = ""; 2548c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2549c6fd2807SJeff Garzik frozen = " frozen"; 2550c6fd2807SJeff Garzik 2551a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2552462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d", 2553a1e10f7eSTejun Heo ap->eh_tries); 2554a1e10f7eSTejun Heo 2555c6fd2807SJeff Garzik if (ehc->i.dev) { 2556a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2557a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2558a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2559a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2560c6fd2807SJeff Garzik if (desc) 2561a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2562c6fd2807SJeff Garzik } else { 2563a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2564a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2565a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2566a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2567c6fd2807SJeff Garzik if (desc) 2568a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2569c6fd2807SJeff Garzik } 2570c6fd2807SJeff Garzik 25716521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 25721333e194SRobert Hancock if (ehc->i.serror) 2573a9a79dfeSJoe Perches ata_link_err(link, 25741333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 25751333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 25761333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 25771333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 25781333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 25791333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 25801333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 25811333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? 
"PHYRdyChg " : "", 25821333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 25831333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 25841333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 25851333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 25861333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 25871333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 25881333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 25891333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 25901333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 25911333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 25926521148cSRobert Hancock #endif 25931333e194SRobert Hancock 2594c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2595c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 25968a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2597abb6a889STejun Heo char data_buf[20] = ""; 2598abb6a889STejun Heo char cdb_buf[70] = ""; 2599c6fd2807SJeff Garzik 26000260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2601b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2602c6fd2807SJeff Garzik continue; 2603c6fd2807SJeff Garzik 2604abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2605abb6a889STejun Heo static const char *dma_str[] = { 2606abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2607abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2608abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2609abb6a889STejun Heo }; 2610fb1b8b11SGeert Uytterhoeven const char *prot_str = NULL; 2611abb6a889STejun Heo 2612fb1b8b11SGeert Uytterhoeven switch (qc->tf.protocol) { 2613fb1b8b11SGeert Uytterhoeven case ATA_PROT_UNKNOWN: 2614fb1b8b11SGeert Uytterhoeven prot_str = "unknown"; 2615fb1b8b11SGeert Uytterhoeven break; 2616fb1b8b11SGeert Uytterhoeven case ATA_PROT_NODATA: 2617fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2618fb1b8b11SGeert Uytterhoeven break; 2619fb1b8b11SGeert Uytterhoeven case ATA_PROT_PIO: 2620fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2621fb1b8b11SGeert Uytterhoeven break; 2622fb1b8b11SGeert Uytterhoeven case ATA_PROT_DMA: 2623fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2624fb1b8b11SGeert Uytterhoeven break; 2625fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ: 2626fb1b8b11SGeert Uytterhoeven prot_str = "ncq dma"; 2627fb1b8b11SGeert Uytterhoeven break; 2628fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ_NODATA: 2629fb1b8b11SGeert Uytterhoeven prot_str = "ncq nodata"; 2630fb1b8b11SGeert Uytterhoeven break; 2631fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_NODATA: 2632fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2633fb1b8b11SGeert Uytterhoeven break; 2634fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_PIO: 2635fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2636fb1b8b11SGeert Uytterhoeven break; 2637fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_DMA: 2638fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2639fb1b8b11SGeert Uytterhoeven break; 2640fb1b8b11SGeert Uytterhoeven } 2641abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2642fb1b8b11SGeert Uytterhoeven prot_str, qc->nbytes, dma_str[qc->dma_dir]); 2643abb6a889STejun Heo } 2644abb6a889STejun Heo 26456521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2646a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb; 2647a13b0c9dSHannes Reinecke size_t cdb_len = 
qc->dev->cdb_len; 2648a13b0c9dSHannes Reinecke 2649cbba5b0eSHannes Reinecke if (qc->scsicmd) { 2650cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd; 2651cbba5b0eSHannes Reinecke cdb_len = qc->scsicmd->cmd_len; 2652cbba5b0eSHannes Reinecke } 2653cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf), 2654cbba5b0eSHannes Reinecke cdb, cdb_len); 26556521148cSRobert Hancock } else { 26566521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 26576521148cSRobert Hancock if (descr) 2658a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2659a9a79dfeSJoe Perches descr); 26606521148cSRobert Hancock } 2661abb6a889STejun Heo 2662a9a79dfeSJoe Perches ata_dev_err(qc->dev, 26638a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2664abb6a889STejun Heo "tag %d%s\n %s" 26658a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 26665335b729STejun Heo "Emask 0x%x (%s)%s\n", 26678a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 26688a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 26698a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 26708a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2671abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 26728a937581STejun Heo res->command, res->feature, res->nsect, 26738a937581STejun Heo res->lbal, res->lbam, res->lbah, 26748a937581STejun Heo res->hob_feature, res->hob_nsect, 26758a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 26765335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 26775335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 26781333e194SRobert Hancock 26796521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 26801333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2681e87fd28cSHannes Reinecke ATA_SENSE | ATA_ERR)) { 26821333e194SRobert Hancock if (res->command & ATA_BUSY) 2683a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 26841333e194SRobert Hancock else 2685e87fd28cSHannes Reinecke ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 26861333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 26871333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 26881333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 2689e87fd28cSHannes Reinecke res->command & ATA_SENSE ? "SENSE " : "", 26901333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 26911333e194SRobert Hancock } 26921333e194SRobert Hancock 26931333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 2694eec7e1c1SAlexey Asemov (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | 2695eec7e1c1SAlexey Asemov ATA_IDNF | ATA_ABORTED))) 2696eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", 26971333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 26981333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 2699eec7e1c1SAlexey Asemov res->feature & ATA_AMNF ? "AMNF " : "", 27001333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 27011333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 27026521148cSRobert Hancock #endif 2703c6fd2807SJeff Garzik } 2704c6fd2807SJeff Garzik } 2705c6fd2807SJeff Garzik 27069b1e2658STejun Heo /** 27079b1e2658STejun Heo * ata_eh_report - report error handling to user 27089b1e2658STejun Heo * @ap: ATA port to report EH about 27099b1e2658STejun Heo * 27109b1e2658STejun Heo * Report EH to user. 
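 * Links are walked host-link first, so messages for the host link
 * appear before those of any PMP fan-out links.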
 *
 * LOCKING:
 * None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}

static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}

static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}

int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ?
&slave->eh_context : NULL; 2755c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2756416dc9edSTejun Heo unsigned int lflags = link->flags; 2757c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2758d8af0eb6STejun Heo int max_tries = 0, try = 0; 2759b1c72916STejun Heo struct ata_link *failed_link; 2760f58229f8STejun Heo struct ata_device *dev; 2761416dc9edSTejun Heo unsigned long deadline, now; 2762c6fd2807SJeff Garzik ata_reset_fn_t reset; 2763afaa5c37STejun Heo unsigned long flags; 2764416dc9edSTejun Heo u32 sstatus; 2765b1c72916STejun Heo int nr_unknown, rc; 2766c6fd2807SJeff Garzik 2767932648b0STejun Heo /* 2768932648b0STejun Heo * Prepare to reset 2769932648b0STejun Heo */ 2770d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2771d8af0eb6STejun Heo max_tries++; 2772ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE) 2773ca6d43b0SDan Williams max_tries = 1; 277405944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 277505944bdfSTejun Heo hardreset = NULL; 277605944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 277705944bdfSTejun Heo softreset = NULL; 2778d8af0eb6STejun Heo 277925985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 278019b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 27810a2c0f56STejun Heo now = jiffies; 278219b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 278319b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 278419b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 27850a2c0f56STejun Heo if (time_before(now, deadline)) 27860a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 278719b72321STejun Heo } 27880a2c0f56STejun Heo 2789afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2790afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2791afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2792afaa5c37STejun Heo 2793cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2794c6fd2807SJeff Garzik 27951eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2796cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2797cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2798cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2799cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2800cdeab114STejun Heo * suitable controller mode we should not touch the 2801cdeab114STejun Heo * bus as we may be talking too fast. 2802cdeab114STejun Heo */ 2803cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 28045416912aSAaron Lu dev->dma_mode = 0xff; 2805cdeab114STejun Heo 2806cdeab114STejun Heo /* If the controller has a pio mode setup function 2807cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2808cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2809cdeab114STejun Heo * configuring devices. 
2810cdeab114STejun Heo */ 2811cdeab114STejun Heo if (ap->ops->set_piomode) 2812cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2813cdeab114STejun Heo } 2814cdeab114STejun Heo 2815cf480626STejun Heo /* prefer hardreset */ 2816932648b0STejun Heo reset = NULL; 2817cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2818cf480626STejun Heo if (hardreset) { 2819cf480626STejun Heo reset = hardreset; 2820a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 28214f7faa3fSTejun Heo } else if (softreset) { 2822cf480626STejun Heo reset = softreset; 2823a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2824cf480626STejun Heo } 2825c6fd2807SJeff Garzik 2826c6fd2807SJeff Garzik if (prereset) { 2827b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2828b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2829b1c72916STejun Heo 2830b1c72916STejun Heo if (slave) { 2831b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2832b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2833b1c72916STejun Heo } 2834b1c72916STejun Heo 2835b1c72916STejun Heo rc = prereset(link, deadline); 2836b1c72916STejun Heo 2837b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2838b1c72916STejun Heo * is skipped iff both master and slave links report 2839b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2840b1c72916STejun Heo */ 2841b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2842b1c72916STejun Heo int tmp; 2843b1c72916STejun Heo 2844b1c72916STejun Heo tmp = prereset(slave, deadline); 2845b1c72916STejun Heo if (tmp != -ENOENT) 2846b1c72916STejun Heo rc = tmp; 2847b1c72916STejun Heo 2848b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2849b1c72916STejun Heo } 2850b1c72916STejun Heo 2851c6fd2807SJeff Garzik if (rc) { 2852c961922bSAlan Cox if (rc == -ENOENT) { 2853a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2854cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 28554aa9ab67STejun Heo 28561eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2857f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 28584aa9ab67STejun Heo 28594aa9ab67STejun Heo rc = 0; 2860c961922bSAlan Cox } else 2861a9a79dfeSJoe Perches ata_link_err(link, 2862a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2863a9a79dfeSJoe Perches rc); 2864fccb6ea5STejun Heo goto out; 2865c6fd2807SJeff Garzik } 2866c6fd2807SJeff Garzik 2867932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2868d6515e6fSTejun Heo * bang classes, thaw and return. 2869932648b0STejun Heo */ 2870932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 28711eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2872f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2873d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2874d6515e6fSTejun Heo ata_is_host_link(link)) 2875d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2876fccb6ea5STejun Heo rc = 0; 2877fccb6ea5STejun Heo goto out; 2878c6fd2807SJeff Garzik } 2879932648b0STejun Heo } 2880c6fd2807SJeff Garzik 2881c6fd2807SJeff Garzik retry: 2882932648b0STejun Heo /* 2883932648b0STejun Heo * Perform reset 2884932648b0STejun Heo */ 2885dc98c32cSTejun Heo if (ata_is_host_link(link)) 2886dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2887dc98c32cSTejun Heo 2888341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 288931daabdaSTejun Heo 2890932648b0STejun Heo if (reset) { 2891c6fd2807SJeff Garzik if (verbose) 2892a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2893c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2894c6fd2807SJeff Garzik 2895c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 289619b72321STejun Heo ehc->last_reset = jiffies; 28970d64a233STejun Heo if (reset == hardreset) 28980d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 28990d64a233STejun Heo else 29000d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2901c6fd2807SJeff Garzik 2902b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2903b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2904b1c72916STejun Heo failed_link = link; 29055dbfc9cbSTejun Heo goto fail; 2906b1c72916STejun Heo } 2907c6fd2807SJeff Garzik 2908b1c72916STejun Heo /* hardreset slave link if existent */ 2909b1c72916STejun Heo if (slave && reset == hardreset) { 2910b1c72916STejun Heo int tmp; 2911b1c72916STejun Heo 2912b1c72916STejun Heo if (verbose) 2913a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2914b1c72916STejun Heo 2915b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2916b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2917b1c72916STejun Heo false); 2918b1c72916STejun Heo switch (tmp) { 2919b1c72916STejun Heo case -EAGAIN: 2920b1c72916STejun Heo rc = -EAGAIN; 2921b1c72916STejun Heo case 0: 2922b1c72916STejun Heo break; 2923b1c72916STejun Heo default: 2924b1c72916STejun Heo failed_link = slave; 2925b1c72916STejun Heo rc = tmp; 2926b1c72916STejun Heo goto fail; 2927b1c72916STejun Heo } 2928b1c72916STejun Heo } 2929b1c72916STejun Heo 2930b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2931c6fd2807SJeff Garzik if (reset == hardreset && 2932e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2933c6fd2807SJeff Garzik reset = softreset; 2934c6fd2807SJeff Garzik 2935c6fd2807SJeff Garzik if (!reset) { 2936a9a79dfeSJoe Perches ata_link_err(link, 2937a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2938b1c72916STejun Heo failed_link = link; 2939fccb6ea5STejun Heo rc = -EINVAL; 294008cf69d0STejun Heo goto fail; 2941c6fd2807SJeff Garzik } 2942c6fd2807SJeff Garzik 2943cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2944b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2945fe2c4d01STejun Heo if (rc) { 2946fe2c4d01STejun Heo failed_link = link; 2947fe2c4d01STejun Heo goto fail; 2948fe2c4d01STejun Heo } 2949c6fd2807SJeff Garzik } 2950932648b0STejun Heo } else { 2951932648b0STejun Heo if (verbose) 2952a9a79dfeSJoe Perches ata_link_info(link, 2953a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2954932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2955932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2956932648b0STejun Heo } 2957008a7896STejun Heo 2958932648b0STejun Heo /* 2959932648b0STejun Heo * Post-reset processing 2960932648b0STejun Heo */ 29611eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2962416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2963416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2964416dc9edSTejun Heo * drives from sleeping mode. 
2965c6fd2807SJeff Garzik */ 2966f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2967054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2968c6fd2807SJeff Garzik 29693b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 29703b761d3dSTejun Heo continue; 29713b761d3dSTejun Heo 29724ccd3329STejun Heo /* apply class override */ 2973416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2974ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2975416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2976816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2977ae791c05STejun Heo } 2978ae791c05STejun Heo 2979008a7896STejun Heo /* record current link speed */ 2980936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2981936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2982b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2983b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2984008a7896STejun Heo 2985dc98c32cSTejun Heo /* thaw the port */ 2986dc98c32cSTejun Heo if (ata_is_host_link(link)) 2987dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2988dc98c32cSTejun Heo 2989f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2990f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2991f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2992f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2993f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2994f046519fSTejun Heo * link onlineness and classification result later. 2995f046519fSTejun Heo */ 2996b1c72916STejun Heo if (postreset) { 2997cc0680a5STejun Heo postreset(link, classes); 2998b1c72916STejun Heo if (slave) 2999b1c72916STejun Heo postreset(slave, classes); 3000b1c72916STejun Heo } 3001c6fd2807SJeff Garzik 30021e641060STejun Heo /* 30038c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 30048c56caccSTejun Heo * error conditions during reset. Clear accumulated error 30058c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 30068c56caccSTejun Heo * final recovery action and we cross check link onlineness against 30078c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 30081e641060STejun Heo */ 3009f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 30101e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 3011b1c72916STejun Heo if (slave) 30121e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 30131e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 3014f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 3015f046519fSTejun Heo 30168c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 30178c56caccSTejun Heo ata_eh_thaw_port(ap); 30188c56caccSTejun Heo 30193b761d3dSTejun Heo /* 30203b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 3021f046519fSTejun Heo * Hotplug could have happened during reset and some 3022f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 3023f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 30243b761d3dSTejun Heo * link on/offlineness and classification result, those 30253b761d3dSTejun Heo * conditions can be reliably detected and retried. 
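	 * Concretely: a device that is online but still classified as
	 * UNKNOWN counts as misclassified (and may trigger a retry below),
	 * while offline or unknown links have their classification cleared
	 * to ATA_DEV_NONE.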
3026f046519fSTejun Heo */ 3027b1c72916STejun Heo nr_unknown = 0; 30281eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 30293b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 3030b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 3031a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 3032f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 3033b1c72916STejun Heo nr_unknown++; 3034b1c72916STejun Heo } 30353b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 30363b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 3037a9a79dfeSJoe Perches ata_dev_dbg(dev, 3038a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 30393b761d3dSTejun Heo classes[dev->devno]); 30403b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 30413b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 3042a9a79dfeSJoe Perches ata_dev_dbg(dev, 3043a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 30443b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 30453b761d3dSTejun Heo } 3046f046519fSTejun Heo } 3047f046519fSTejun Heo 3048b1c72916STejun Heo if (classify && nr_unknown) { 3049f046519fSTejun Heo if (try < max_tries) { 3050a9a79dfeSJoe Perches ata_link_warn(link, 3051a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 30523b761d3dSTejun Heo nr_unknown); 3053b1c72916STejun Heo failed_link = link; 3054f046519fSTejun Heo rc = -EAGAIN; 3055f046519fSTejun Heo goto fail; 3056f046519fSTejun Heo } 3057a9a79dfeSJoe Perches ata_link_warn(link, 30583b761d3dSTejun Heo "link online but %d devices misclassified, " 30593b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 3060f046519fSTejun Heo } 3061f046519fSTejun Heo 3062c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 3063cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 3064b1c72916STejun Heo if (slave) 3065b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 306619b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 3067c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 30686b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 3069416dc9edSTejun Heo 3070416dc9edSTejun Heo rc = 0; 3071fccb6ea5STejun Heo out: 3072fccb6ea5STejun Heo /* clear hotplug flag */ 3073fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 3074b1c72916STejun Heo if (slave) 3075b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 3076afaa5c37STejun Heo 3077afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 3078afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 3079afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 3080afaa5c37STejun Heo 3081c6fd2807SJeff Garzik return rc; 3082416dc9edSTejun Heo 3083416dc9edSTejun Heo fail: 30845958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 30855958e302STejun Heo if (!ata_is_host_link(link) && 30865958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 30875958e302STejun Heo rc = -ERESTART; 30885958e302STejun Heo 30897a46c078SGwendal Grignou if (try >= max_tries) { 30908ea7645cSTejun Heo /* 30918ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 30928ea7645cSTejun Heo * can be retried on the next phy event. This risks 30938ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 30948ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
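		 * Thawing re-enables the port's interrupt handling, which
		 * is what allows that next phy event to schedule EH again.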
30958ea7645cSTejun Heo */ 30968ea7645cSTejun Heo if (ata_is_host_link(link)) 30978ea7645cSTejun Heo ata_eh_thaw_port(ap); 3098416dc9edSTejun Heo goto out; 30998ea7645cSTejun Heo } 3100416dc9edSTejun Heo 3101416dc9edSTejun Heo now = jiffies; 3102416dc9edSTejun Heo if (time_before(now, deadline)) { 3103416dc9edSTejun Heo unsigned long delta = deadline - now; 3104416dc9edSTejun Heo 3105a9a79dfeSJoe Perches ata_link_warn(failed_link, 31060a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 31070a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 3108416dc9edSTejun Heo 3109c0c362b6STejun Heo ata_eh_release(ap); 3110416dc9edSTejun Heo while (delta) 3111416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 3112c0c362b6STejun Heo ata_eh_acquire(ap); 3113416dc9edSTejun Heo } 3114416dc9edSTejun Heo 31157a46c078SGwendal Grignou /* 31167a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 31177a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 31187a46c078SGwendal Grignou */ 31197a46c078SGwendal Grignou if (rc == -ERESTART) { 31207a46c078SGwendal Grignou if (ata_is_host_link(link)) 31217a46c078SGwendal Grignou ata_eh_thaw_port(ap); 31227a46c078SGwendal Grignou goto out; 31237a46c078SGwendal Grignou } 31247a46c078SGwendal Grignou 3125b1c72916STejun Heo if (try == max_tries - 1) { 3126a07d499bSTejun Heo sata_down_spd_limit(link, 0); 3127b1c72916STejun Heo if (slave) 3128a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 3129b1c72916STejun Heo } else if (rc == -EPIPE) 3130a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 3131b1c72916STejun Heo 3132416dc9edSTejun Heo if (hardreset) 3133416dc9edSTejun Heo reset = hardreset; 3134416dc9edSTejun Heo goto retry; 3135c6fd2807SJeff Garzik } 3136c6fd2807SJeff Garzik 313745fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 313845fabbb7SElias Oltmanns { 313945fabbb7SElias Oltmanns struct ata_link *link; 314045fabbb7SElias Oltmanns struct ata_device *dev; 314145fabbb7SElias Oltmanns unsigned long flags; 314245fabbb7SElias Oltmanns 314345fabbb7SElias Oltmanns /* 314445fabbb7SElias Oltmanns * This function can be thought of as an extended version of 314545fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 314645fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 314745fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 314845fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 314945fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 315045fabbb7SElias Oltmanns * up park requests to other devices on the same port or 315145fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 315245fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 315345fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 315445fabbb7SElias Oltmanns * 315545fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 315616735d02SWolfram Sang * through reinit_completion() (see below) or complete_all() 315745fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 315845fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 315945fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 316045fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 316145fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 316245fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 316345fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 316445fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 316545fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 316645fabbb7SElias Oltmanns * ata_eh_recover() again. 316745fabbb7SElias Oltmanns */ 316845fabbb7SElias Oltmanns 316945fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 317016735d02SWolfram Sang reinit_completion(&ap->park_req_pending); 31711eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 31721eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 317345fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 317445fabbb7SElias Oltmanns 317545fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 317645fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 317745fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 317845fabbb7SElias Oltmanns } 317945fabbb7SElias Oltmanns } 318045fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 318145fabbb7SElias Oltmanns } 318245fabbb7SElias Oltmanns 318345fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 318445fabbb7SElias Oltmanns { 318545fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 318645fabbb7SElias Oltmanns struct ata_taskfile tf; 318745fabbb7SElias Oltmanns unsigned int err_mask; 318845fabbb7SElias Oltmanns 318945fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 319045fabbb7SElias Oltmanns if (park) { 319145fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 319245fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 319345fabbb7SElias Oltmanns tf.feature = 0x44; 319445fabbb7SElias Oltmanns tf.lbal = 0x4c; 319545fabbb7SElias Oltmanns tf.lbam = 0x4e; 319645fabbb7SElias Oltmanns tf.lbah = 0x55; 319745fabbb7SElias Oltmanns } else { 319845fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 319945fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 320045fabbb7SElias Oltmanns } 320145fabbb7SElias Oltmanns 320245fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 3203bd18bc04SHannes Reinecke tf.protocol = ATA_PROT_NODATA; 320445fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 320545fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 3206a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 320745fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 320845fabbb7SElias Oltmanns } 320945fabbb7SElias Oltmanns } 321045fabbb7SElias Oltmanns 32110260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 3212c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 3213c6fd2807SJeff Garzik { 32140260731fSTejun Heo struct ata_port *ap = link->ap; 32150260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3216c6fd2807SJeff Garzik struct ata_device *dev; 32178c3c52a8STejun Heo unsigned int new_mask = 0; 3218c6fd2807SJeff Garzik unsigned long flags; 3219f58229f8STejun Heo int rc = 0; 3220c6fd2807SJeff Garzik 3221c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3222c6fd2807SJeff Garzik 32238c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY 
must 32248c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 32258c3c52a8STejun Heo * device before the master device is identified. 32268c3c52a8STejun Heo */ 32271eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 3228f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 3229f58229f8STejun Heo unsigned int readid_flags = 0; 3230c6fd2807SJeff Garzik 3231bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 3232bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 3233bff04647STejun Heo 32349666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 3235633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 3236633273a3STejun Heo 3237b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 3238c6fd2807SJeff Garzik rc = -EIO; 32398c3c52a8STejun Heo goto err; 3240c6fd2807SJeff Garzik } 3241c6fd2807SJeff Garzik 32420260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 3243422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 3244422c9daaSTejun Heo readid_flags); 3245c6fd2807SJeff Garzik if (rc) 32468c3c52a8STejun Heo goto err; 3247c6fd2807SJeff Garzik 32480260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 3249c6fd2807SJeff Garzik 3250baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 3251baa1e78aSTejun Heo * transfer mode. 3252baa1e78aSTejun Heo */ 3253baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3254baa1e78aSTejun Heo 3255c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 3256ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 3257c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 3258c6fd2807SJeff Garzik ehc->tries[dev->devno] && 3259c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3260842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3261842faa6cSTejun Heo * permanently set once all configurations are 3262842faa6cSTejun Heo * complete. This is necessary because new 3263842faa6cSTejun Heo * device configuration is done in two 3264842faa6cSTejun Heo * separate loops. 3265842faa6cSTejun Heo */ 3266c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3267c6fd2807SJeff Garzik 3268633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3269633273a3STejun Heo rc = sata_pmp_attach(dev); 3270633273a3STejun Heo else 3271633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3272633273a3STejun Heo readid_flags, dev->id); 3273842faa6cSTejun Heo 3274842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3275842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3276842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3277842faa6cSTejun Heo 32788c3c52a8STejun Heo switch (rc) { 32798c3c52a8STejun Heo case 0: 328099cf610aSTejun Heo /* clear error info accumulated during probe */ 328199cf610aSTejun Heo ata_ering_clear(&dev->ering); 3282f58229f8STejun Heo new_mask |= 1 << dev->devno; 32838c3c52a8STejun Heo break; 32848c3c52a8STejun Heo case -ENOENT: 328555a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 328655a8e2c8STejun Heo * device. No need to reset. Just 3287842faa6cSTejun Heo * thaw and ignore the device. 
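				 * (-ENOENT from IDENTIFY means nothing is
				 * attached at that device number; the device
				 * is simply left out of new_mask rather than
				 * failing the whole recovery.)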
328855a8e2c8STejun Heo */ 328955a8e2c8STejun Heo ata_eh_thaw_port(ap); 3290c6fd2807SJeff Garzik break; 32918c3c52a8STejun Heo default: 32928c3c52a8STejun Heo goto err; 32938c3c52a8STejun Heo } 32948c3c52a8STejun Heo } 3295c6fd2807SJeff Garzik } 3296c6fd2807SJeff Garzik 3297c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 329833267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 329933267325STejun Heo if (ap->ops->cable_detect) 3300c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 330133267325STejun Heo ata_force_cbl(ap); 330233267325STejun Heo } 3303c1c4e8d5STejun Heo 33048c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 33058c3c52a8STejun Heo * device detection messages backwards. 33068c3c52a8STejun Heo */ 33071eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 33084f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 33098c3c52a8STejun Heo continue; 33108c3c52a8STejun Heo 3311842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3312842faa6cSTejun Heo 33134f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 33144f7c2874STejun Heo continue; 33154f7c2874STejun Heo 33168c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 33178c3c52a8STejun Heo rc = ata_dev_configure(dev); 33188c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3319842faa6cSTejun Heo if (rc) { 3320842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 33218c3c52a8STejun Heo goto err; 3322842faa6cSTejun Heo } 33238c3c52a8STejun Heo 3324c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3325c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3326c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3327baa1e78aSTejun Heo 332855a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3329baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3330c6fd2807SJeff Garzik } 3331c6fd2807SJeff Garzik 33328c3c52a8STejun Heo return 0; 33338c3c52a8STejun Heo 33348c3c52a8STejun Heo err: 3335c6fd2807SJeff Garzik *r_failed_dev = dev; 33368c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3337c6fd2807SJeff Garzik return rc; 3338c6fd2807SJeff Garzik } 3339c6fd2807SJeff Garzik 33406f1d1e3aSTejun Heo /** 33416f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 33426f1d1e3aSTejun Heo * @link: link on which timings will be programmed 334398a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 33446f1d1e3aSTejun Heo * 33456f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 33466f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 33476f1d1e3aSTejun Heo * returned in @r_failed_dev. 33486f1d1e3aSTejun Heo * 33496f1d1e3aSTejun Heo * LOCKING: 33506f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
33516f1d1e3aSTejun Heo * 33526f1d1e3aSTejun Heo * RETURNS: 33536f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 33546f1d1e3aSTejun Heo */ 33556f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 33566f1d1e3aSTejun Heo { 33576f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 335800115e0fSTejun Heo struct ata_device *dev; 335900115e0fSTejun Heo int rc; 33606f1d1e3aSTejun Heo 336176326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 33621eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 336376326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 336476326ac1STejun Heo struct ata_ering_entry *ent; 336576326ac1STejun Heo 336676326ac1STejun Heo ent = ata_ering_top(&dev->ering); 336776326ac1STejun Heo if (ent) 336876326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 336976326ac1STejun Heo } 337076326ac1STejun Heo } 337176326ac1STejun Heo 33726f1d1e3aSTejun Heo /* has private set_mode? */ 33736f1d1e3aSTejun Heo if (ap->ops->set_mode) 337400115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 337500115e0fSTejun Heo else 337600115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 337700115e0fSTejun Heo 337800115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 33791eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 338000115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 338100115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 338200115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 338300115e0fSTejun Heo 338400115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 338500115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 338600115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 338700115e0fSTejun Heo } 338800115e0fSTejun Heo 338900115e0fSTejun Heo return rc; 33906f1d1e3aSTejun Heo } 33916f1d1e3aSTejun Heo 339211fc33daSTejun Heo /** 339311fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 339411fc33daSTejun Heo * @dev: ATAPI device to clear UA for 339511fc33daSTejun Heo * 339611fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 339711fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 339811fc33daSTejun Heo * function clears UA. 339911fc33daSTejun Heo * 340011fc33daSTejun Heo * LOCKING: 340111fc33daSTejun Heo * EH context (may sleep). 340211fc33daSTejun Heo * 340311fc33daSTejun Heo * RETURNS: 340411fc33daSTejun Heo * 0 on success, -errno on failure. 
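 *
 * The loop below issues TEST UNIT READY and, whenever the device
 * reports UNIT ATTENTION, a REQUEST SENSE to consume it, giving up
 * after ATA_EH_UA_TRIES attempts.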
340511fc33daSTejun Heo */ 340611fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 340711fc33daSTejun Heo { 340811fc33daSTejun Heo int i; 340911fc33daSTejun Heo 341011fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3411b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 341211fc33daSTejun Heo u8 sense_key = 0; 341311fc33daSTejun Heo unsigned int err_mask; 341411fc33daSTejun Heo 341511fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 341611fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3417a9a79dfeSJoe Perches ata_dev_warn(dev, 3418a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3419a9a79dfeSJoe Perches err_mask); 342011fc33daSTejun Heo return -EIO; 342111fc33daSTejun Heo } 342211fc33daSTejun Heo 342311fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 342411fc33daSTejun Heo return 0; 342511fc33daSTejun Heo 342611fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 342711fc33daSTejun Heo if (err_mask) { 3428a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 342911fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 343011fc33daSTejun Heo return -EIO; 343111fc33daSTejun Heo } 343211fc33daSTejun Heo } 343311fc33daSTejun Heo 3434a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3435a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 343611fc33daSTejun Heo 343711fc33daSTejun Heo return 0; 343811fc33daSTejun Heo } 343911fc33daSTejun Heo 34406013efd8STejun Heo /** 34416013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 34426013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 34436013efd8STejun Heo * 34446013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 34456013efd8STejun Heo * immediately as it means that @dev failed to remap and already 34466013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 34476013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 34486013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 34496013efd8STejun Heo * to be retried. 34506013efd8STejun Heo * 34516013efd8STejun Heo * This function determines whether FLUSH failure retry is 34526013efd8STejun Heo * necessary and performs it if so. 34536013efd8STejun Heo * 34546013efd8STejun Heo * RETURNS: 34556013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 34566013efd8STejun Heo */ 34576013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 34586013efd8STejun Heo { 34596013efd8STejun Heo struct ata_link *link = dev->link; 34606013efd8STejun Heo struct ata_port *ap = link->ap; 34616013efd8STejun Heo struct ata_queued_cmd *qc; 34626013efd8STejun Heo struct ata_taskfile tf; 34636013efd8STejun Heo unsigned int err_mask; 34646013efd8STejun Heo int rc = 0; 34656013efd8STejun Heo 34666013efd8STejun Heo /* did flush fail for this device? 
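	 * Only the command recorded in link->active_tag is considered, and
	 * only if it was FLUSH CACHE (EXT) for this device; failures the
	 * device itself reported (AC_ERR_DEV) are passed straight up
	 * instead of being retried.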
*/ 34676013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 34686013efd8STejun Heo return 0; 34696013efd8STejun Heo 34706013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 34716013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 34726013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 34736013efd8STejun Heo return 0; 34746013efd8STejun Heo 34756013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 34766013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 34776013efd8STejun Heo return 0; 34786013efd8STejun Heo 34796013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 34806013efd8STejun Heo ata_tf_init(dev, &tf); 34816013efd8STejun Heo 34826013efd8STejun Heo tf.command = qc->tf.command; 34836013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 34846013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 34856013efd8STejun Heo 3486a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 34876013efd8STejun Heo tf.command, qc->err_mask); 34886013efd8STejun Heo 34896013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 34906013efd8STejun Heo if (!err_mask) { 34916013efd8STejun Heo /* 34926013efd8STejun Heo * FLUSH is complete but there's no way to 34936013efd8STejun Heo * successfully complete a failed command from EH. 34946013efd8STejun Heo * Making sure retry is allowed at least once and 34956013efd8STejun Heo * retrying it should do the trick - whatever was in 34966013efd8STejun Heo * the cache is already on the platter and this won't 34976013efd8STejun Heo * cause infinite loop. 34986013efd8STejun Heo */ 34996013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 35006013efd8STejun Heo } else { 3501a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 35026013efd8STejun Heo err_mask); 35036013efd8STejun Heo rc = -EIO; 35046013efd8STejun Heo 35056013efd8STejun Heo /* if device failed it, report it to upper layers */ 35066013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 35076013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 35086013efd8STejun Heo qc->result_tf = tf; 35096013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 35106013efd8STejun Heo rc = 0; 35116013efd8STejun Heo } 35126013efd8STejun Heo } 35136013efd8STejun Heo return rc; 35146013efd8STejun Heo } 35156013efd8STejun Heo 35166b7ae954STejun Heo /** 35176b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 35186b7ae954STejun Heo * @link: link to configure power management 35196b7ae954STejun Heo * @policy: the link power management policy 35206b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 35216b7ae954STejun Heo * 35226b7ae954STejun Heo * Enable SATA Interface power management. This will enable 35236b7ae954STejun Heo * Device Interface Power Management (DIPM) for min_power 35246b7ae954STejun Heo * policy, and then call driver specific callbacks for 35256b7ae954STejun Heo * enabling Host Initiated Power management. 35266b7ae954STejun Heo * 35276b7ae954STejun Heo * LOCKING: 35286b7ae954STejun Heo * EH context. 35296b7ae954STejun Heo * 35306b7ae954STejun Heo * RETURNS: 35316b7ae954STejun Heo * 0 on success, -errno on failure. 35326b7ae954STejun Heo */ 35336b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 35346b7ae954STejun Heo struct ata_device **r_failed_dev) 35356b7ae954STejun Heo { 35366c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? 
link->ap : NULL; 35376b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35386b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3539e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 35405f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 35416b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 35426b7ae954STejun Heo unsigned int err_mask; 35436b7ae954STejun Heo int rc; 35446b7ae954STejun Heo 35456b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 35466b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 35476b7ae954STejun Heo return 0; 35486b7ae954STejun Heo 35496b7ae954STejun Heo /* 35506b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 35516b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 35526b7ae954STejun Heo * device and link configurations such that the host always 35536b7ae954STejun Heo * allows DIPM requests. 35546b7ae954STejun Heo */ 35556b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 35566b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3557ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 35586b7ae954STejun Heo 35596b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 35606b7ae954STejun Heo if (!link_dev) 35616b7ae954STejun Heo link_dev = dev; 35626b7ae954STejun Heo 35636b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 35646b7ae954STejun Heo lpm_dev = dev; 35656b7ae954STejun Heo 35666b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 35676b7ae954STejun Heo if (!hipm) 35686b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 35696b7ae954STejun Heo 35706b7ae954STejun Heo /* disable DIPM before changing link config */ 35716b7ae954STejun Heo if (policy != ATA_LPM_MIN_POWER && dipm) { 35726b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 35736b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 35746b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3575a9a79dfeSJoe Perches ata_dev_warn(dev, 35766b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 35776b7ae954STejun Heo err_mask); 35786b7ae954STejun Heo rc = -EIO; 35796b7ae954STejun Heo goto fail; 35806b7ae954STejun Heo } 35816b7ae954STejun Heo } 35826b7ae954STejun Heo } 35836b7ae954STejun Heo 35846c8ea89cSTejun Heo if (ap) { 35856b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 35866b7ae954STejun Heo if (!rc && ap->slave_link) 35876b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 35886c8ea89cSTejun Heo } else 35896c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 35906b7ae954STejun Heo 35916b7ae954STejun Heo /* 35926b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 35936b7ae954STejun Heo * device on the link. 35946b7ae954STejun Heo */ 35956b7ae954STejun Heo if (rc) { 35966b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 35976b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 35986b7ae954STejun Heo return 0; 35996b7ae954STejun Heo } 36006b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 36016b7ae954STejun Heo goto fail; 36026b7ae954STejun Heo } 36036b7ae954STejun Heo 3604e5005b15STejun Heo /* 3605e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3606e5005b15STejun Heo * with the new policy set. 
3607e5005b15STejun Heo */ 3608e5005b15STejun Heo link->lpm_policy = policy; 3609e5005b15STejun Heo if (ap && ap->slave_link) 3610e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3611e5005b15STejun Heo 36126b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 36136b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3614ae01b249STejun Heo if (policy == ATA_LPM_MIN_POWER && !no_dipm && 3615ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 36166b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 36176b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 36186b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3619a9a79dfeSJoe Perches ata_dev_warn(dev, 36206b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 36216b7ae954STejun Heo err_mask); 36226b7ae954STejun Heo rc = -EIO; 36236b7ae954STejun Heo goto fail; 36246b7ae954STejun Heo } 36256b7ae954STejun Heo } 36266b7ae954STejun Heo } 36276b7ae954STejun Heo 362809c5b480SGabriele Mazzotta link->last_lpm_change = jiffies; 362909c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED; 363009c5b480SGabriele Mazzotta 36316b7ae954STejun Heo return 0; 36326b7ae954STejun Heo 36336b7ae954STejun Heo fail: 3634e5005b15STejun Heo /* restore the old policy */ 3635e5005b15STejun Heo link->lpm_policy = old_policy; 3636e5005b15STejun Heo if (ap && ap->slave_link) 3637e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3638e5005b15STejun Heo 36396b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 36406b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3641a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 36426b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 36436b7ae954STejun Heo } 36446b7ae954STejun Heo if (r_failed_dev) 36456b7ae954STejun Heo *r_failed_dev = dev; 36466b7ae954STejun Heo return rc; 36476b7ae954STejun Heo } 36486b7ae954STejun Heo 36498a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3650c6fd2807SJeff Garzik { 3651f58229f8STejun Heo struct ata_device *dev; 3652f58229f8STejun Heo int cnt = 0; 3653c6fd2807SJeff Garzik 36541eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3655c6fd2807SJeff Garzik cnt++; 3656c6fd2807SJeff Garzik return cnt; 3657c6fd2807SJeff Garzik } 3658c6fd2807SJeff Garzik 36590260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3660c6fd2807SJeff Garzik { 3661f58229f8STejun Heo struct ata_device *dev; 3662f58229f8STejun Heo int cnt = 0; 3663c6fd2807SJeff Garzik 36641eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3665f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3666c6fd2807SJeff Garzik cnt++; 3667c6fd2807SJeff Garzik return cnt; 3668c6fd2807SJeff Garzik } 3669c6fd2807SJeff Garzik 36700260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3671c6fd2807SJeff Garzik { 3672672b2d65STejun Heo struct ata_port *ap = link->ap; 36730260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3674f58229f8STejun Heo struct ata_device *dev; 3675c6fd2807SJeff Garzik 3676f9df58cbSTejun Heo /* skip disabled links */ 3677f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3678f9df58cbSTejun Heo return 1; 3679f9df58cbSTejun Heo 3680e2f3d75fSTejun Heo /* skip if explicitly requested */ 3681e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3682e2f3d75fSTejun Heo return 1; 3683e2f3d75fSTejun Heo 3684672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3685672b2d65STejun Heo if ((ap->pflags & 
ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3686672b2d65STejun Heo return 0; 3687672b2d65STejun Heo 3688672b2d65STejun Heo /* reset at least once if reset is requested */ 3689672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3690672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3691c6fd2807SJeff Garzik return 0; 3692c6fd2807SJeff Garzik 3693c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 36941eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3695c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3696c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3697c6fd2807SJeff Garzik return 0; 3698c6fd2807SJeff Garzik } 3699c6fd2807SJeff Garzik 3700c6fd2807SJeff Garzik return 1; 3701c6fd2807SJeff Garzik } 3702c6fd2807SJeff Garzik 3703c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3704c2c7a89cSTejun Heo { 3705c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3706c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3707c2c7a89cSTejun Heo int *trials = void_arg; 3708c2c7a89cSTejun Heo 37096868225eSLin Ming if ((ent->eflags & ATA_EFLAG_OLD_ER) || 37106868225eSLin Ming (ent->timestamp < now - min(now, interval))) 3711c2c7a89cSTejun Heo return -1; 3712c2c7a89cSTejun Heo 3713c2c7a89cSTejun Heo (*trials)++; 3714c2c7a89cSTejun Heo return 0; 3715c2c7a89cSTejun Heo } 3716c2c7a89cSTejun Heo 371702c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 371802c05a27STejun Heo { 371902c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3720c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3721c2c7a89cSTejun Heo int trials = 0; 372202c05a27STejun Heo 372302c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 372402c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 372502c05a27STejun Heo return 0; 372602c05a27STejun Heo 372702c05a27STejun Heo ata_eh_detach_dev(dev); 372802c05a27STejun Heo ata_dev_init(dev); 372902c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3730cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 373100115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 373200115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 373302c05a27STejun Heo 37346b7ae954STejun Heo /* the link may be in a deep sleep, wake it up */ 37356c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 37366c8ea89cSTejun Heo if (ata_is_host_link(link)) 37376c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 37386c8ea89cSTejun Heo ATA_LPM_EMPTY); 37396c8ea89cSTejun Heo else 37406c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 37416c8ea89cSTejun Heo ATA_LPM_EMPTY); 37426c8ea89cSTejun Heo } 37436b7ae954STejun Heo 3744c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3745c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3746c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3747c2c7a89cSTejun Heo * there are consecutive failed probes. 3748c2c7a89cSTejun Heo * 3749c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3750c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3751c2c7a89cSTejun Heo * forced to 1.5Gbps.
3752c2c7a89cSTejun Heo * 3753c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3754c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3755c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3756c2c7a89cSTejun Heo */ 3757c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3758c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3759c2c7a89cSTejun Heo 3760c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3761c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3762c2c7a89cSTejun Heo 376302c05a27STejun Heo return 1; 376402c05a27STejun Heo } 376502c05a27STejun Heo 37669b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3767fee7ca72STejun Heo { 37689af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3769fee7ca72STejun Heo 3770cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3771cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3772cf9a590aSTejun Heo */ 3773cf9a590aSTejun Heo if (err != -EAGAIN) 3774fee7ca72STejun Heo ehc->tries[dev->devno]--; 3775fee7ca72STejun Heo 3776fee7ca72STejun Heo switch (err) { 3777fee7ca72STejun Heo case -ENODEV: 3778fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3779fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3780fee7ca72STejun Heo case -EINVAL: 3781fee7ca72STejun Heo /* give it just one more chance */ 3782fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3783fee7ca72STejun Heo case -EIO: 3784d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3785fee7ca72STejun Heo /* This is the last chance, better to slow 3786fee7ca72STejun Heo * down than lose it. 
3787fee7ca72STejun Heo */ 3788a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3789d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3790fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3791fee7ca72STejun Heo } 3792fee7ca72STejun Heo } 3793fee7ca72STejun Heo 3794fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3795fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3796fee7ca72STejun Heo ata_dev_disable(dev); 3797fee7ca72STejun Heo 3798fee7ca72STejun Heo /* detach if offline */ 3799b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3800fee7ca72STejun Heo ata_eh_detach_dev(dev); 3801fee7ca72STejun Heo 380202c05a27STejun Heo /* schedule probe if necessary */ 380387fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3804fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 380587fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 380687fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 380787fbc5a0STejun Heo } 38089b1e2658STejun Heo 38099b1e2658STejun Heo return 1; 3810fee7ca72STejun Heo } else { 3811cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 38129b1e2658STejun Heo return 0; 3813fee7ca72STejun Heo } 3814fee7ca72STejun Heo } 3815fee7ca72STejun Heo 3816c6fd2807SJeff Garzik /** 3817c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3818c6fd2807SJeff Garzik * @ap: host port to recover 3819c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3820c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3821c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3822c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 38239b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3824c6fd2807SJeff Garzik * 3825c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3826c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 38279b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 38289b1e2658STejun Heo * link's eh_context. This function executes all the operations 38299b1e2658STejun Heo * with appropriate retries and fallbacks to resurrect failed 3830c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3831c6fd2807SJeff Garzik * 3832c6fd2807SJeff Garzik * LOCKING: 3833c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3834c6fd2807SJeff Garzik * 3835c6fd2807SJeff Garzik * RETURNS: 3836c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3837c6fd2807SJeff Garzik */ 3838fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3839c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 38409b1e2658STejun Heo ata_postreset_fn_t postreset, 38419b1e2658STejun Heo struct ata_link **r_failed_link) 3842c6fd2807SJeff Garzik { 38439b1e2658STejun Heo struct ata_link *link; 3844c6fd2807SJeff Garzik struct ata_device *dev; 38456b7ae954STejun Heo int rc, nr_fails; 384645fabbb7SElias Oltmanns unsigned long flags, deadline; 3847c6fd2807SJeff Garzik 3848c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3849c6fd2807SJeff Garzik 3850c6fd2807SJeff Garzik /* prep for recovery */ 38511eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38529b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 38539b1e2658STejun Heo 3854f9df58cbSTejun Heo /* re-enable link?
*/ 3855f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3856f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3857f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3858f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3859f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3860f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3861f9df58cbSTejun Heo } 3862f9df58cbSTejun Heo 38631eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3864fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3865fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3866fd995f70STejun Heo else 3867c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3868c6fd2807SJeff Garzik 386979a55b72STejun Heo /* collect port action mask recorded in dev actions */ 38709b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 38719b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3872f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 387379a55b72STejun Heo 3874c6fd2807SJeff Garzik /* process hotplug request */ 3875c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3876c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3877c6fd2807SJeff Garzik 387802c05a27STejun Heo /* schedule probe if necessary */ 387902c05a27STejun Heo if (!ata_dev_enabled(dev)) 388002c05a27STejun Heo ata_eh_schedule_probe(dev); 3881c6fd2807SJeff Garzik } 38829b1e2658STejun Heo } 3883c6fd2807SJeff Garzik 3884c6fd2807SJeff Garzik retry: 3885c6fd2807SJeff Garzik rc = 0; 3886c6fd2807SJeff Garzik 3887c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3888c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3889c6fd2807SJeff Garzik goto out; 3890c6fd2807SJeff Garzik 38919b1e2658STejun Heo /* prep for EH */ 38921eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38939b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 38949b1e2658STejun Heo 3895c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 38960260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3897c6fd2807SJeff Garzik ehc->i.action = 0; 3898c6fd2807SJeff Garzik 38991eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3900f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 39019b1e2658STejun Heo } 3902c6fd2807SJeff Garzik 3903c6fd2807SJeff Garzik /* reset */ 39041eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 39059b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 39069b1e2658STejun Heo 3907cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 39089b1e2658STejun Heo continue; 39099b1e2658STejun Heo 39109b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3911dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3912c6fd2807SJeff Garzik if (rc) { 3913a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3914c6fd2807SJeff Garzik goto out; 3915c6fd2807SJeff Garzik } 39169b1e2658STejun Heo } 3917c6fd2807SJeff Garzik 391845fabbb7SElias Oltmanns do { 391945fabbb7SElias Oltmanns unsigned long now; 392045fabbb7SElias Oltmanns 392145fabbb7SElias Oltmanns /* 392245fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 392345fabbb7SElias Oltmanns * ap->park_req_pending 392445fabbb7SElias Oltmanns */ 392545fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 392645fabbb7SElias Oltmanns 392745fabbb7SElias Oltmanns deadline = jiffies; 39281eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 39291eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 393045fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 393145fabbb7SElias Oltmanns unsigned long tmp; 393245fabbb7SElias Oltmanns 39339162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 39349162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 393545fabbb7SElias Oltmanns continue; 393645fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 393745fabbb7SElias Oltmanns ATA_EH_PARK)) 393845fabbb7SElias Oltmanns continue; 393945fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 394045fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 394145fabbb7SElias Oltmanns deadline = tmp; 394245fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 394345fabbb7SElias Oltmanns continue; 394445fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 394545fabbb7SElias Oltmanns continue; 394645fabbb7SElias Oltmanns 394745fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 394845fabbb7SElias Oltmanns } 394945fabbb7SElias Oltmanns } 395045fabbb7SElias Oltmanns 395145fabbb7SElias Oltmanns now = jiffies; 395245fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 395345fabbb7SElias Oltmanns break; 395445fabbb7SElias Oltmanns 3955c0c362b6STejun Heo ata_eh_release(ap); 395645fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 395745fabbb7SElias Oltmanns deadline - now); 3958c0c362b6STejun Heo ata_eh_acquire(ap); 395945fabbb7SElias Oltmanns } while (deadline); 39601eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 39611eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 396245fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 396345fabbb7SElias Oltmanns (1 << dev->devno))) 396445fabbb7SElias Oltmanns continue; 396545fabbb7SElias Oltmanns 396645fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 396745fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 396845fabbb7SElias Oltmanns } 396945fabbb7SElias Oltmanns } 397045fabbb7SElias Oltmanns 39719b1e2658STejun Heo /* the rest */ 39726b7ae954STejun Heo nr_fails = 0; 39736b7ae954STejun 
Heo ata_for_each_link(link, ap, PMP_FIRST) { 39749b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 39759b1e2658STejun Heo 39766b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 39776b7ae954STejun Heo goto config_lpm; 39786b7ae954STejun Heo 3979c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 39800260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3981c6fd2807SJeff Garzik if (rc) 39826b7ae954STejun Heo goto rest_fail; 3983c6fd2807SJeff Garzik 3984633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3985633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3986633273a3STejun Heo ehc->i.action = 0; 3987633273a3STejun Heo return 0; 3988633273a3STejun Heo } 3989633273a3STejun Heo 3990baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3991baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 39920260731fSTejun Heo rc = ata_set_mode(link, &dev); 39934ae72a1eSTejun Heo if (rc) 39946b7ae954STejun Heo goto rest_fail; 3995baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3996c6fd2807SJeff Garzik } 3997c6fd2807SJeff Garzik 399811fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 399911fc33daSTejun Heo * disrupting the current users of the device. 400011fc33daSTejun Heo */ 400111fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 40021eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 400311fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 400411fc33daSTejun Heo continue; 400511fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 400611fc33daSTejun Heo if (rc) 40076b7ae954STejun Heo goto rest_fail; 400821334205SAaron Lu if (zpodd_dev_enabled(dev)) 400921334205SAaron Lu zpodd_post_poweron(dev); 401011fc33daSTejun Heo } 401111fc33daSTejun Heo } 401211fc33daSTejun Heo 40136013efd8STejun Heo /* retry flush if necessary */ 40146013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 40159162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 40169162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 40176013efd8STejun Heo continue; 40186013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 40196013efd8STejun Heo if (rc) 40206b7ae954STejun Heo goto rest_fail; 40216013efd8STejun Heo } 40226013efd8STejun Heo 40236b7ae954STejun Heo config_lpm: 402411fc33daSTejun Heo /* configure link power saving */ 40256b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 40266b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 40276b7ae954STejun Heo if (rc) 40286b7ae954STejun Heo goto rest_fail; 40296b7ae954STejun Heo } 4030ca77329fSKristen Carlson Accardi 40319b1e2658STejun Heo /* this link is okay now */ 40329b1e2658STejun Heo ehc->i.flags = 0; 40339b1e2658STejun Heo continue; 4034c6fd2807SJeff Garzik 40356b7ae954STejun Heo rest_fail: 40366b7ae954STejun Heo nr_fails++; 40376b7ae954STejun Heo if (dev) 40380a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 4039c6fd2807SJeff Garzik 4040b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 4041b06ce3e5STejun Heo /* PMP reset requires working host port. 4042b06ce3e5STejun Heo * Can't retry if it's frozen. 
4043b06ce3e5STejun Heo */ 4044071f44b1STejun Heo if (sata_pmp_attached(ap)) 4045b06ce3e5STejun Heo goto out; 40469b1e2658STejun Heo break; 40479b1e2658STejun Heo } 4048b06ce3e5STejun Heo } 40499b1e2658STejun Heo 40506b7ae954STejun Heo if (nr_fails) 4051c6fd2807SJeff Garzik goto retry; 4052c6fd2807SJeff Garzik 4053c6fd2807SJeff Garzik out: 40549b1e2658STejun Heo if (rc && r_failed_link) 40559b1e2658STejun Heo *r_failed_link = link; 4056c6fd2807SJeff Garzik 4057c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 4058c6fd2807SJeff Garzik return rc; 4059c6fd2807SJeff Garzik } 4060c6fd2807SJeff Garzik 4061c6fd2807SJeff Garzik /** 4062c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 4063c6fd2807SJeff Garzik * @ap: host port to finish EH for 4064c6fd2807SJeff Garzik * 4065c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 4066c6fd2807SJeff Garzik * failed qcs. 4067c6fd2807SJeff Garzik * 4068c6fd2807SJeff Garzik * LOCKING: 4069c6fd2807SJeff Garzik * None. 4070c6fd2807SJeff Garzik */ 4071fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 4072c6fd2807SJeff Garzik { 4073c6fd2807SJeff Garzik int tag; 4074c6fd2807SJeff Garzik 4075c6fd2807SJeff Garzik /* retry or finish qcs */ 4076c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 4077c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 4078c6fd2807SJeff Garzik 4079c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 4080c6fd2807SJeff Garzik continue; 4081c6fd2807SJeff Garzik 4082c6fd2807SJeff Garzik if (qc->err_mask) { 4083c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 4084c6fd2807SJeff Garzik * generate sense data in this function, 4085c6fd2807SJeff Garzik * considering both err_mask and tf. 4086c6fd2807SJeff Garzik */ 408703faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 4088c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 408903faab78STejun Heo else 409003faab78STejun Heo ata_eh_qc_complete(qc); 4091c6fd2807SJeff Garzik } else { 4092c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 4093c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 4094c6fd2807SJeff Garzik } else { 4095c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 4096c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 4097c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 4098c6fd2807SJeff Garzik } 4099c6fd2807SJeff Garzik } 4100c6fd2807SJeff Garzik } 4101da917d69STejun Heo 4102da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 4103da917d69STejun Heo WARN_ON(ap->nr_active_links); 4104da917d69STejun Heo ap->nr_active_links = 0; 4105c6fd2807SJeff Garzik } 4106c6fd2807SJeff Garzik 4107c6fd2807SJeff Garzik /** 4108c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 4109c6fd2807SJeff Garzik * @ap: host port to handle error for 4110a1efdabaSTejun Heo * 4111c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 4112c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 4113c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 4114c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 4115c6fd2807SJeff Garzik * 4116c6fd2807SJeff Garzik * Perform standard error handling sequence. 4117c6fd2807SJeff Garzik * 4118c6fd2807SJeff Garzik * LOCKING: 4119c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
4120c6fd2807SJeff Garzik */ 4121c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 4122c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 4123c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 4124c6fd2807SJeff Garzik { 41259b1e2658STejun Heo struct ata_device *dev; 41269b1e2658STejun Heo int rc; 41279b1e2658STejun Heo 41289b1e2658STejun Heo ata_eh_autopsy(ap); 41299b1e2658STejun Heo ata_eh_report(ap); 41309b1e2658STejun Heo 41319b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 41329b1e2658STejun Heo NULL); 41339b1e2658STejun Heo if (rc) { 41341eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 41359b1e2658STejun Heo ata_dev_disable(dev); 41369b1e2658STejun Heo } 41379b1e2658STejun Heo 4138c6fd2807SJeff Garzik ata_eh_finish(ap); 4139c6fd2807SJeff Garzik } 4140c6fd2807SJeff Garzik 4141a1efdabaSTejun Heo /** 4142a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 4143a1efdabaSTejun Heo * @ap: host port to handle error for 4144a1efdabaSTejun Heo * 4145a1efdabaSTejun Heo * Standard error handler 4146a1efdabaSTejun Heo * 4147a1efdabaSTejun Heo * LOCKING: 4148a1efdabaSTejun Heo * Kernel thread context (may sleep). 4149a1efdabaSTejun Heo */ 4150a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 4151a1efdabaSTejun Heo { 4152a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 4153a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 4154a1efdabaSTejun Heo 415557c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 4156fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 4157a1efdabaSTejun Heo hardreset = NULL; 4158a1efdabaSTejun Heo 4159a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 4160a1efdabaSTejun Heo } 4161a1efdabaSTejun Heo 41626ffa01d8STejun Heo #ifdef CONFIG_PM 4163c6fd2807SJeff Garzik /** 4164c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 4165c6fd2807SJeff Garzik * @ap: port to suspend 4166c6fd2807SJeff Garzik * 4167c6fd2807SJeff Garzik * Suspend @ap. 4168c6fd2807SJeff Garzik * 4169c6fd2807SJeff Garzik * LOCKING: 4170c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4171c6fd2807SJeff Garzik */ 4172c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 4173c6fd2807SJeff Garzik { 4174c6fd2807SJeff Garzik unsigned long flags; 4175c6fd2807SJeff Garzik int rc = 0; 41763dc67440SAaron Lu struct ata_device *dev; 4177c6fd2807SJeff Garzik 4178c6fd2807SJeff Garzik /* are we suspending? */ 4179c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4180c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4181a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) { 4182c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4183c6fd2807SJeff Garzik return; 4184c6fd2807SJeff Garzik } 4185c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4186c6fd2807SJeff Garzik 4187c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 4188c6fd2807SJeff Garzik 41893dc67440SAaron Lu /* 41903dc67440SAaron Lu * If we have a ZPODD attached, check its zero 41913dc67440SAaron Lu * power ready status before the port is frozen. 4192a7ff60dbSAaron Lu * Only needed for runtime suspend. 
41933dc67440SAaron Lu */ 4194a7ff60dbSAaron Lu if (PMSG_IS_AUTO(ap->pm_mesg)) { 41953dc67440SAaron Lu ata_for_each_dev(dev, &ap->link, ENABLED) { 41963dc67440SAaron Lu if (zpodd_dev_enabled(dev)) 41973dc67440SAaron Lu zpodd_on_suspend(dev); 41983dc67440SAaron Lu } 4199a7ff60dbSAaron Lu } 42003dc67440SAaron Lu 420164578a3dSTejun Heo /* tell ACPI we're suspending */ 420264578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 420364578a3dSTejun Heo if (rc) 420464578a3dSTejun Heo goto out; 420564578a3dSTejun Heo 4206c6fd2807SJeff Garzik /* suspend */ 4207c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 4208c6fd2807SJeff Garzik 4209c6fd2807SJeff Garzik if (ap->ops->port_suspend) 4210c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 4211c6fd2807SJeff Garzik 4212a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 421364578a3dSTejun Heo out: 4214bc6e7c4bSDan Williams /* update the flags */ 4215c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4216c6fd2807SJeff Garzik 4217c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 4218c6fd2807SJeff Garzik if (rc == 0) 4219c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 422064578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 4221c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 4222c6fd2807SJeff Garzik 4223c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4224c6fd2807SJeff Garzik 4225c6fd2807SJeff Garzik return; 4226c6fd2807SJeff Garzik } 4227c6fd2807SJeff Garzik 4228c6fd2807SJeff Garzik /** 4229c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 4230c6fd2807SJeff Garzik * @ap: port to resume 4231c6fd2807SJeff Garzik * 4232c6fd2807SJeff Garzik * Resume @ap. 4233c6fd2807SJeff Garzik * 4234c6fd2807SJeff Garzik * LOCKING: 4235c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4236c6fd2807SJeff Garzik */ 4237c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 4238c6fd2807SJeff Garzik { 42396f9c1ea2STejun Heo struct ata_link *link; 42406f9c1ea2STejun Heo struct ata_device *dev; 4241c6fd2807SJeff Garzik unsigned long flags; 42429666f400STejun Heo int rc = 0; 4243c6fd2807SJeff Garzik 4244c6fd2807SJeff Garzik /* are we resuming? */ 4245c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4246c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4247a7ff60dbSAaron Lu !(ap->pm_mesg.event & PM_EVENT_RESUME)) { 4248c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4249c6fd2807SJeff Garzik return; 4250c6fd2807SJeff Garzik } 4251c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4252c6fd2807SJeff Garzik 42539666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 4254c6fd2807SJeff Garzik 42556f9c1ea2STejun Heo /* 42566f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while 42576f9c1ea2STejun Heo * suspended and PHY events during resume isn't too uncommon. 42586f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed 42596f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly. 42606f9c1ea2STejun Heo * Clear error history. 
42616f9c1ea2STejun Heo */ 42626f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 42636f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 42646f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 42656f9c1ea2STejun Heo 4266a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 4267bd3adca5SShaohua Li 4268c6fd2807SJeff Garzik if (ap->ops->port_resume) 4269c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 4270c6fd2807SJeff Garzik 42716746544cSTejun Heo /* tell ACPI that we're resuming */ 42726746544cSTejun Heo ata_acpi_on_resume(ap); 42736746544cSTejun Heo 4274bc6e7c4bSDan Williams /* update the flags */ 4275c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4276c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 4277c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4278c6fd2807SJeff Garzik } 42796ffa01d8STejun Heo #endif /* CONFIG_PM */ 4280
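
/*
 * Illustrative sketch (editor's addition, not part of libata): how a low
 * level driver normally reaches the EH machinery above.  Most drivers never
 * call ata_eh_recover() or ata_do_eh() directly; they inherit from
 * sata_port_ops (which in turn inherits ata_base_port_ops, whose
 * ->error_handler is ata_std_error_handler()) and only override the reset
 * methods they need.  The "foo_" names below are hypothetical and the
 * snippet is intentionally excluded from compilation.
 */
#if 0	/* illustration only */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	/* controller-specific PHY quirks would be handled here before
	 * falling back to the stock SATA hardreset sequence
	 */
	return sata_std_hardreset(link, class, deadline);
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,	/* pulls in ata_std_error_handler */
	.hardreset	= foo_hardreset,
};
#endif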