/*
 * libata-eh.c - libata error handling
 *
 * Maintained by: Tejun Heo <tj@kernel.org>
 *		  Please ALWAYS copy linux-ide@vger.kernel.org
 *		  on emails.
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>
#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF = (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO = (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
	ATA_EFLAG_OLD_ER = (1 << 31),

	/* error categories */
	ATA_ECAT_NONE = 0,
	ATA_ECAT_ATA_BUS = 1,
	ATA_ECAT_TOUT_HSM = 2,
	ATA_ECAT_UNK_DEV = 3,
	ATA_ECAT_DUBIOUS_NONE = 4,
	ATA_ECAT_DUBIOUS_ATA_BUS = 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
	ATA_ECAT_DUBIOUS_UNK_DEV = 7,
	ATA_ECAT_NR = 8,

	ATA_EH_CMD_DFL_TIMEOUT = 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN = 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT = 10000,
	ATA_EH_FASTDRAIN_INTERVAL = 3000,

	ATA_EH_UA_TRIES = 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL = 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS = 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};
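/*
 * Illustrative walk-through of the table above (added for readability,
 * and ignoring the time each reset attempt itself consumes): a link
 * that keeps failing its resets is given successive per-try timeouts of
 * 10s, 10s, 35s and 5s - roughly 10s, 20s, 55s and 60s cumulative -
 * with at least ATA_EH_RESET_COOL_DOWN (5s) enforced between attempts,
 * before EH finally gives up at the ULONG_MAX entry.
 */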
static const unsigned long ata_eh_identify_timeouts[] = {
	5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8 *commands;
	const unsigned long *timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class: it lists the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 * __ata_ehi_push_desc - push error description without adding separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 * ata_ehi_push_desc - push error description with separator
 * @ehi: target EHI
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to @ehi->desc.
 * If @ehi->desc is not empty, ", " is added in-between.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 * ata_ehi_clear_desc - clear error description
 * @ehi: target EHI
 *
 * Clear @ehi->desc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
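/*
 * Usage sketch (illustrative, not taken from a specific driver): an
 * interrupt handler that hits an unexpected condition might build up a
 * description with the helpers above before scheduling EH, e.g.
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "connection status changed");
 *
 * which would yield "irq_stat 0x00400040, connection status changed"
 * in the EH report, with ata_ehi_push_desc() adding the ", " separator
 * automatically.
 */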
/**
 * ata_port_desc - append port description
 * @ap: target ATA port
 * @fmt: printf format string
 *
 * Format string according to @fmt and append it to port
 * description.  If port description is not empty, " " is added
 * in-between.  This function is to be used while initializing
 * ata_host.  The description is printed on host registration.
 *
 * LOCKING:
 * None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 * ata_port_pbar_desc - append PCI BAR description
 * @ap: target ATA port
 * @bar: target PCI BAR
 * @offset: offset into PCI BAR
 * @name: name of the area
 *
 * If @offset is negative, this function formats a string which
 * contains the name, address, size and type of the BAR and
 * appends it to the port description.  If @offset is zero or
 * positive, only the name and the offset address are appended.
 *
 * LOCKING:
 * None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */
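/*
 * Example of the resulting description (all values illustrative): a
 * driver calling ata_port_pbar_desc(ap, 5, -1, "abar") followed by
 * ata_port_pbar_desc(ap, 5, 0x100, "port") on a 2048-byte memory BAR
 * at 0xf7d36000 would end up with a port description such as
 * "abar m2048@0xf7d36000 port 0xf7d36100", printed when the host is
 * registered.
 */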
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 * ata_internal_cmd_timeout - determine timeout for an internal command
 * @dev: target device
 * @cmd: internal command to be issued
 *
 * Determine timeout for internal command @cmd for @dev.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 * ata_internal_cmd_timed_out - notification for internal command timeout
 * @dev: target device
 * @cmd: internal command which timed out
 *
 * Notify EH that internal command @cmd for @dev timed out.  This
 * function should be called only for commands whose timeouts are
 * determined using ata_internal_cmd_timeout().
 *
 * LOCKING:
 * EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
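/*
 * How the two helpers above fit together (a sketch, not literal code
 * from the issue path): a caller issuing an EH-internal command is
 * expected to pair them roughly like
 *
 *	timeout = ata_internal_cmd_timeout(dev, cmd);
 *	err_mask = issue_internal_cmd(dev, cmd, timeout);  // hypothetical helper
 *	if (err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, cmd);
 *
 * so that, for example, an IDENTIFY that times out at 5000ms is retried
 * with 10000ms and then 30000ms, while commands outside the table
 * always get ATA_EH_CMD_DFL_TIMEOUT.
 */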
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
/**
 * ata_eh_acquire - acquire EH ownership
 * @ap: ATA port to acquire EH ownership for
 *
 * Acquire EH ownership for @ap.  This is the basic exclusion
 * mechanism for ports sharing a host.  Only one port hanging off
 * the same host can claim the ownership of EH.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 * ata_eh_release - release EH ownership
 * @ap: ATA port to release EH ownership for
 *
 * Release EH ownership for @ap.  The caller must have acquired
 * EH ownership using ata_eh_acquire() previously.
 *
 * LOCKING:
 * EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
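/*
 * Usage sketch (an assumption about typical callers, not a requirement
 * stated here): code running in EH context that needs to sleep for a
 * long time without starving sibling ports on the same host can
 * temporarily drop and re-take ownership, e.g.
 *
 *	ata_eh_release(ap);
 *	msleep(interval);
 *	ata_eh_acquire(ap);
 *
 * Ownership is per-host (ap->host->eh_mutex), so only one port of a
 * multi-port host runs its error handler at a time.
 */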
/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we raced normal completion and there is nothing to
	   recover (nr_timedout == 0), why exactly are we doing error
	   recovery here? */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host: scsi host containing the port
 * @ap: ATA port within the host
 * @eh_work_q: list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler.

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race. */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host: SCSI host containing the port
 * @ap: the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
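/*
 * Fast drain in practice (an illustrative timeline, not additional
 * behaviour): suppose EH is marked pending while three qcs are still
 * in flight.  ata_eh_set_pending() records fastdrain_cnt = 3 and arms
 * the timer for ATA_EH_FASTDRAIN_INTERVAL (3s).  If, when the timer
 * fires, all three are still outstanding, ata_eh_fastdrain_timerfn()
 * marks them AC_ERR_TIMEOUT and freezes the port so EH can start; if
 * some (but not all) have completed, it simply re-arms the timer with
 * the new count and gives the rest another interval to drain.
 */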
/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 * LOCKING: inherited from ata_port_schedule_eh
 * spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock; in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);


/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all qc's active on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all qc's active on @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
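/*
 * Summary of the decision above (derived from the code, added for
 * readability):
 *
 *	PMP attached?	SNTF readable?	action
 *	no		-		notify ATAPI AN on the link device, return 0
 *	yes		no		assume PHY status change, schedule EH, return 1
 *	yes		yes		notify ATAPI AN per notified port; schedule EH
 *					only if the PMP control port bit is set
 */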
1244c6fd2807SJeff Garzik */ 1245c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1246c6fd2807SJeff Garzik { 1247c6fd2807SJeff Garzik unsigned long flags; 1248c6fd2807SJeff Garzik 1249c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1250c6fd2807SJeff Garzik return; 1251c6fd2807SJeff Garzik 1252c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1253c6fd2807SJeff Garzik __ata_port_freeze(ap); 1254c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1255c6fd2807SJeff Garzik } 1256c6fd2807SJeff Garzik 1257c6fd2807SJeff Garzik /** 1258c6fd2807SJeff Garzik * ata_port_thaw_port - EH helper to thaw port 1259c6fd2807SJeff Garzik * @ap: ATA port to thaw 1260c6fd2807SJeff Garzik * 1261c6fd2807SJeff Garzik * Thaw frozen port @ap. 1262c6fd2807SJeff Garzik * 1263c6fd2807SJeff Garzik * LOCKING: 1264c6fd2807SJeff Garzik * None. 1265c6fd2807SJeff Garzik */ 1266c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1267c6fd2807SJeff Garzik { 1268c6fd2807SJeff Garzik unsigned long flags; 1269c6fd2807SJeff Garzik 1270c6fd2807SJeff Garzik if (!ap->ops->error_handler) 1271c6fd2807SJeff Garzik return; 1272c6fd2807SJeff Garzik 1273c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1274c6fd2807SJeff Garzik 1275c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1276c6fd2807SJeff Garzik 1277c6fd2807SJeff Garzik if (ap->ops->thaw) 1278c6fd2807SJeff Garzik ap->ops->thaw(ap); 1279c6fd2807SJeff Garzik 1280c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1281c6fd2807SJeff Garzik 128244877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id); 1283c6fd2807SJeff Garzik } 1284c6fd2807SJeff Garzik 1285c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1286c6fd2807SJeff Garzik { 1287c6fd2807SJeff Garzik /* nada */ 1288c6fd2807SJeff Garzik } 1289c6fd2807SJeff Garzik 1290c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1291c6fd2807SJeff Garzik { 1292c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1293c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1294c6fd2807SJeff Garzik unsigned long flags; 1295c6fd2807SJeff Garzik 1296c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1297c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1298c6fd2807SJeff Garzik __ata_qc_complete(qc); 1299c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1300c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1301c6fd2807SJeff Garzik 1302c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1303c6fd2807SJeff Garzik } 1304c6fd2807SJeff Garzik 1305c6fd2807SJeff Garzik /** 1306c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1307c6fd2807SJeff Garzik * @qc: Command to complete 1308c6fd2807SJeff Garzik * 1309c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1310c6fd2807SJeff Garzik * completed. To be used from EH. 
1311c6fd2807SJeff Garzik */ 1312c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1313c6fd2807SJeff Garzik { 1314c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1315c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1316c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1317c6fd2807SJeff Garzik } 1318c6fd2807SJeff Garzik 1319c6fd2807SJeff Garzik /** 1320c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1321c6fd2807SJeff Garzik * @qc: Command to retry 1322c6fd2807SJeff Garzik * 1323c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1324c6fd2807SJeff Garzik * should be retried. To be used from EH. 1325c6fd2807SJeff Garzik * 1326c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1327f13e2201SGwendal Grignou * scmd->allowed is incremented for commands which get retried 1328c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1329c6fd2807SJeff Garzik */ 1330c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1331c6fd2807SJeff Garzik { 1332c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1333f13e2201SGwendal Grignou if (!qc->err_mask) 1334f13e2201SGwendal Grignou scmd->allowed++; 1335c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1336c6fd2807SJeff Garzik } 1337c6fd2807SJeff Garzik 1338c6fd2807SJeff Garzik /** 1339678afac6STejun Heo * ata_dev_disable - disable ATA device 1340678afac6STejun Heo * @dev: ATA device to disable 1341678afac6STejun Heo * 1342678afac6STejun Heo * Disable @dev. 1343678afac6STejun Heo * 1344678afac6STejun Heo * Locking: 1345678afac6STejun Heo * EH context. 1346678afac6STejun Heo */ 1347678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1348678afac6STejun Heo { 1349678afac6STejun Heo if (!ata_dev_enabled(dev)) 1350678afac6STejun Heo return; 1351678afac6STejun Heo 1352678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1353a9a79dfeSJoe Perches ata_dev_warn(dev, "disabled\n"); 1354678afac6STejun Heo ata_acpi_on_disable(dev); 1355678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1356678afac6STejun Heo dev->class++; 135799cf610aSTejun Heo 135899cf610aSTejun Heo /* From now till the next successful probe, ering is used to 135999cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 136099cf610aSTejun Heo */ 136199cf610aSTejun Heo ata_ering_clear(&dev->ering); 1362678afac6STejun Heo } 1363678afac6STejun Heo 1364678afac6STejun Heo /** 1365c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1366c6fd2807SJeff Garzik * @dev: ATA device to detach 1367c6fd2807SJeff Garzik * 1368c6fd2807SJeff Garzik * Detach @dev. 1369c6fd2807SJeff Garzik * 1370c6fd2807SJeff Garzik * LOCKING: 1371c6fd2807SJeff Garzik * None. 
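/*
 * Illustrative standalone sketch, not part of libata-eh.c: it models the
 * retry accounting of ata_eh_qc_retry() above.  When a command is retried
 * for reasons unrelated to itself (err_mask == 0), the allowed-retries
 * budget is bumped so the innocent command does not lose a retry.  The
 * struct and names are invented for the example.
 */
#include <stdio.h>

struct example_cmd {
	unsigned int err_mask;	/* 0: failed through no fault of its own */
	int retries;
	int allowed;
};

static void example_retry(struct example_cmd *cmd)
{
	if (!cmd->err_mask)
		cmd->allowed++;	/* same idea as scmd->allowed++ above */
	cmd->retries++;
}

int main(void)
{
	struct example_cmd innocent = { .err_mask = 0,   .allowed = 3 };
	struct example_cmd guilty   = { .err_mask = 0x4, .allowed = 3 };

	example_retry(&innocent);	/* budget grows with the retry */
	example_retry(&guilty);		/* budget stays fixed */
	printf("innocent %d/%d, guilty %d/%d\n",
	       innocent.retries, innocent.allowed,
	       guilty.retries, guilty.allowed);
	return 0;
}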
1372c6fd2807SJeff Garzik */ 1373fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1374c6fd2807SJeff Garzik { 1375f58229f8STejun Heo struct ata_link *link = dev->link; 1376f58229f8STejun Heo struct ata_port *ap = link->ap; 137790484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1378c6fd2807SJeff Garzik unsigned long flags; 1379c6fd2807SJeff Garzik 1380c6fd2807SJeff Garzik ata_dev_disable(dev); 1381c6fd2807SJeff Garzik 1382c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1383c6fd2807SJeff Garzik 1384c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1385c6fd2807SJeff Garzik 1386c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1387c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1388c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1389c6fd2807SJeff Garzik } 1390c6fd2807SJeff Garzik 139190484ebfSTejun Heo /* clear per-dev EH info */ 1392f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1393f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 139490484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 139590484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1396c6fd2807SJeff Garzik 1397c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1398c6fd2807SJeff Garzik } 1399c6fd2807SJeff Garzik 1400c6fd2807SJeff Garzik /** 1401c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1402955e57dfSTejun Heo * @link: target ATA link 1403c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1404c6fd2807SJeff Garzik * @action: action about to be performed 1405c6fd2807SJeff Garzik * 1406c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1407955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1408955e57dfSTejun Heo * repeated. 1409c6fd2807SJeff Garzik * 1410c6fd2807SJeff Garzik * LOCKING: 1411c6fd2807SJeff Garzik * None. 1412c6fd2807SJeff Garzik */ 1413fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1414c6fd2807SJeff Garzik unsigned int action) 1415c6fd2807SJeff Garzik { 1416955e57dfSTejun Heo struct ata_port *ap = link->ap; 1417955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1418955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1419c6fd2807SJeff Garzik unsigned long flags; 1420c6fd2807SJeff Garzik 1421c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1422c6fd2807SJeff Garzik 1423955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1424c6fd2807SJeff Garzik 1425a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1426a568d1d2STejun Heo * slave links as master will do them again. 1427a568d1d2STejun Heo */ 1428a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1429c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1430c6fd2807SJeff Garzik 1431c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1432c6fd2807SJeff Garzik } 1433c6fd2807SJeff Garzik 1434c6fd2807SJeff Garzik /** 1435c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1436c6fd2807SJeff Garzik * @ap: target ATA port 1437c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1438c6fd2807SJeff Garzik * @action: action just completed 1439c6fd2807SJeff Garzik * 1440c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1441955e57dfSTejun Heo * in @link->eh_context. 
1442c6fd2807SJeff Garzik * 1443c6fd2807SJeff Garzik * LOCKING: 1444c6fd2807SJeff Garzik * None. 1445c6fd2807SJeff Garzik */ 1446fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1447c6fd2807SJeff Garzik unsigned int action) 1448c6fd2807SJeff Garzik { 1449955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 14509af5c9c9STejun Heo 1451955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1452c6fd2807SJeff Garzik } 1453c6fd2807SJeff Garzik 1454c6fd2807SJeff Garzik /** 1455c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1456c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1457c6fd2807SJeff Garzik * 1458c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1459c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1460c6fd2807SJeff Garzik * error is reported. 1461c6fd2807SJeff Garzik * 1462c6fd2807SJeff Garzik * LOCKING: 1463c6fd2807SJeff Garzik * None. 1464c6fd2807SJeff Garzik * 1465c6fd2807SJeff Garzik * RETURNS: 1466c6fd2807SJeff Garzik * Descriptive string for @err_mask 1467c6fd2807SJeff Garzik */ 1468c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1469c6fd2807SJeff Garzik { 1470c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1471c6fd2807SJeff Garzik return "host bus error"; 1472c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1473c6fd2807SJeff Garzik return "ATA bus error"; 1474c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1475c6fd2807SJeff Garzik return "timeout"; 1476c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1477c6fd2807SJeff Garzik return "HSM violation"; 1478c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1479c6fd2807SJeff Garzik return "internal error"; 1480c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1481c6fd2807SJeff Garzik return "media error"; 1482c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1483c6fd2807SJeff Garzik return "invalid argument"; 1484c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1485c6fd2807SJeff Garzik return "device error"; 1486c6fd2807SJeff Garzik return "unknown error"; 1487c6fd2807SJeff Garzik } 1488c6fd2807SJeff Garzik 1489c6fd2807SJeff Garzik /** 1490c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1491c6fd2807SJeff Garzik * @dev: target device 149265fe1f0fSShane Huang * @log: log to read 1493c6fd2807SJeff Garzik * @page: page to read 1494c6fd2807SJeff Garzik * @buf: buffer to store read page 1495c6fd2807SJeff Garzik * @sectors: number of sectors to read 1496c6fd2807SJeff Garzik * 1497c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1498c6fd2807SJeff Garzik * 1499c6fd2807SJeff Garzik * LOCKING: 1500c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1501c6fd2807SJeff Garzik * 1502c6fd2807SJeff Garzik * RETURNS: 1503c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 1504c6fd2807SJeff Garzik */ 150565fe1f0fSShane Huang unsigned int ata_read_log_page(struct ata_device *dev, u8 log, 1506c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1507c6fd2807SJeff Garzik { 1508ea013a9bSAndreas Werner unsigned long ap_flags = dev->link->ap->flags; 1509c6fd2807SJeff Garzik struct ata_taskfile tf; 1510c6fd2807SJeff Garzik unsigned int err_mask; 15115d3abf8fSMartin K. 
Petersen bool dma = false; 1512c6fd2807SJeff Garzik 151365fe1f0fSShane Huang DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); 1514c6fd2807SJeff Garzik 1515ea013a9bSAndreas Werner /* 1516ea013a9bSAndreas Werner * Return error without actually issuing the command on controllers 1517ea013a9bSAndreas Werner * which e.g. lockup on a read log page. 1518ea013a9bSAndreas Werner */ 1519ea013a9bSAndreas Werner if (ap_flags & ATA_FLAG_NO_LOG_PAGE) 1520ea013a9bSAndreas Werner return AC_ERR_DEV; 1521ea013a9bSAndreas Werner 15225d3abf8fSMartin K. Petersen retry: 1523c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 15245d3abf8fSMartin K. Petersen if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && 15255d3abf8fSMartin K. Petersen !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) { 15269faa6438SHannes Reinecke tf.command = ATA_CMD_READ_LOG_DMA_EXT; 15279faa6438SHannes Reinecke tf.protocol = ATA_PROT_DMA; 15285d3abf8fSMartin K. Petersen dma = true; 15299faa6438SHannes Reinecke } else { 1530c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 15319faa6438SHannes Reinecke tf.protocol = ATA_PROT_PIO; 1532eab6ee1cSMartin K. Petersen dma = false; 15339faa6438SHannes Reinecke } 153465fe1f0fSShane Huang tf.lbal = log; 153565fe1f0fSShane Huang tf.lbam = page; 1536c6fd2807SJeff Garzik tf.nsect = sectors; 1537c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1538c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1539c6fd2807SJeff Garzik 1540c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 15412b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1542c6fd2807SJeff Garzik 15435d3abf8fSMartin K. Petersen if (err_mask && dma) { 15445d3abf8fSMartin K. Petersen dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG; 15455d3abf8fSMartin K. Petersen ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n"); 15465d3abf8fSMartin K. Petersen goto retry; 15475d3abf8fSMartin K. Petersen } 15485d3abf8fSMartin K. Petersen 1549c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1550c6fd2807SJeff Garzik return err_mask; 1551c6fd2807SJeff Garzik } 1552c6fd2807SJeff Garzik 1553c6fd2807SJeff Garzik /** 1554c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1555c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1556c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1557c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1558c6fd2807SJeff Garzik * 1559c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1560c6fd2807SJeff Garzik * condition. 1561c6fd2807SJeff Garzik * 1562c6fd2807SJeff Garzik * LOCKING: 1563c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1564c6fd2807SJeff Garzik * 1565c6fd2807SJeff Garzik * RETURNS: 1566c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
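/*
 * Illustrative standalone sketch, not part of libata-eh.c: it parses a
 * 512-byte copy of the NCQ error log (page 10h) the same way
 * ata_eh_read_log_10h() below does -- sum the sector to verify the 8-bit
 * checksum, reject the page when the NQ bit (bit 7 of byte 0) is set, then
 * pull the failed tag and the shadow taskfile out of fixed byte offsets.
 * The real code only warns about a bad checksum; this sketch is stricter.
 * Struct and function names are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SECT_SIZE 512

struct example_tf {
	uint8_t command, feature, device;
	uint8_t lbal, lbam, lbah;
	uint8_t hob_lbal, hob_lbam, hob_lbah;
	uint8_t nsect, hob_nsect;
};

/* returns 0 on success, -1 when the page carries no usable NCQ error */
static int example_parse_log_10h(const uint8_t *buf, int *tag,
				 struct example_tf *tf)
{
	uint8_t csum = 0;

	for (int i = 0; i < EXAMPLE_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		return -1;
	if (buf[0] & 0x80)		/* NQ: no NCQ error outstanding */
		return -1;

	*tag = buf[0] & 0x1f;
	tf->command  = buf[2];  tf->feature   = buf[3];
	tf->lbal     = buf[4];  tf->lbam      = buf[5];  tf->lbah = buf[6];
	tf->device   = buf[7];
	tf->hob_lbal = buf[8];  tf->hob_lbam  = buf[9];  tf->hob_lbah = buf[10];
	tf->nsect    = buf[12]; tf->hob_nsect = buf[13];
	return 0;
}

int main(void)
{
	uint8_t page[EXAMPLE_SECT_SIZE] = { 0x05 };	/* tag 5, NQ clear */
	struct example_tf tf;
	int tag = -1;
	int rc;

	page[EXAMPLE_SECT_SIZE - 1] = (uint8_t)-0x05;	/* byte sum -> 0 */
	rc = example_parse_log_10h(page, &tag, &tf);
	printf("rc=%d tag=%d\n", rc, tag);		/* rc=0 tag=5 */
	return 0;
}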
1567c6fd2807SJeff Garzik */ 1568c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1569c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1570c6fd2807SJeff Garzik { 15719af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1572c6fd2807SJeff Garzik unsigned int err_mask; 1573c6fd2807SJeff Garzik u8 csum; 1574c6fd2807SJeff Garzik int i; 1575c6fd2807SJeff Garzik 157665fe1f0fSShane Huang err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1); 1577c6fd2807SJeff Garzik if (err_mask) 1578c6fd2807SJeff Garzik return -EIO; 1579c6fd2807SJeff Garzik 1580c6fd2807SJeff Garzik csum = 0; 1581c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1582c6fd2807SJeff Garzik csum += buf[i]; 1583c6fd2807SJeff Garzik if (csum) 1584a9a79dfeSJoe Perches ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", 1585a9a79dfeSJoe Perches csum); 1586c6fd2807SJeff Garzik 1587c6fd2807SJeff Garzik if (buf[0] & 0x80) 1588c6fd2807SJeff Garzik return -ENOENT; 1589c6fd2807SJeff Garzik 1590c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1591c6fd2807SJeff Garzik 1592c6fd2807SJeff Garzik tf->command = buf[2]; 1593c6fd2807SJeff Garzik tf->feature = buf[3]; 1594c6fd2807SJeff Garzik tf->lbal = buf[4]; 1595c6fd2807SJeff Garzik tf->lbam = buf[5]; 1596c6fd2807SJeff Garzik tf->lbah = buf[6]; 1597c6fd2807SJeff Garzik tf->device = buf[7]; 1598c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1599c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1600c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1601c6fd2807SJeff Garzik tf->nsect = buf[12]; 1602c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1603c6fd2807SJeff Garzik 1604c6fd2807SJeff Garzik return 0; 1605c6fd2807SJeff Garzik } 1606c6fd2807SJeff Garzik 1607c6fd2807SJeff Garzik /** 160811fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 160911fc33daSTejun Heo * @dev: target ATAPI device 161011fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 161111fc33daSTejun Heo * 161211fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 161311fc33daSTejun Heo * 161411fc33daSTejun Heo * LOCKING: 161511fc33daSTejun Heo * EH context (may sleep). 161611fc33daSTejun Heo * 161711fc33daSTejun Heo * RETURNS: 161811fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
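/*
 * Illustrative standalone sketch, not part of libata-eh.c: the sense-key
 * shortcut atapi_eh_tur() below relies on.  After an ATAPI CHECK CONDITION
 * the top nibble of the error (feature) register already carries the sense
 * key, so a cheap TEST UNIT READY hands EH a sense key without a full
 * REQUEST SENSE round trip.  Names are invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t example_sense_key_from_error_reg(uint8_t error_reg)
{
	return error_reg >> 4;		/* same shift as "tf.feature >> 4" below */
}

int main(void)
{
	/* 0x24 -> sense key 0x2 (NOT READY) */
	printf("sense key %#x\n", example_sense_key_from_error_reg(0x24));
	return 0;
}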
161911fc33daSTejun Heo */ 16203dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 162111fc33daSTejun Heo { 162211fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 162311fc33daSTejun Heo struct ata_taskfile tf; 162411fc33daSTejun Heo unsigned int err_mask; 162511fc33daSTejun Heo 162611fc33daSTejun Heo ata_tf_init(dev, &tf); 162711fc33daSTejun Heo 162811fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 162911fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 163011fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 163111fc33daSTejun Heo 163211fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 163311fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 163411fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 163511fc33daSTejun Heo return err_mask; 163611fc33daSTejun Heo } 163711fc33daSTejun Heo 163811fc33daSTejun Heo /** 1639c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1640c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1641c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 16423eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1643c6fd2807SJeff Garzik * 1644c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1645c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1646c6fd2807SJeff Garzik * 1647c6fd2807SJeff Garzik * LOCKING: 1648c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1649c6fd2807SJeff Garzik * 1650c6fd2807SJeff Garzik * RETURNS: 1651c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1652c6fd2807SJeff Garzik */ 16533dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev, 16543eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1655c6fd2807SJeff Garzik { 16563eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 16573eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 16589af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1659c6fd2807SJeff Garzik struct ata_taskfile tf; 1660c6fd2807SJeff Garzik 1661c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1662c6fd2807SJeff Garzik 1663c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1664c6fd2807SJeff Garzik 166556287768SAlbert Lee /* initialize sense_buf with the error register, 166656287768SAlbert Lee * for the case where they are -not- overwritten 166756287768SAlbert Lee */ 1668c6fd2807SJeff Garzik sense_buf[0] = 0x70; 16693eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 167056287768SAlbert Lee 167156287768SAlbert Lee /* some devices time out if garbage left in tf */ 167256287768SAlbert Lee ata_tf_init(dev, &tf); 1673c6fd2807SJeff Garzik 1674c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1675c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1676c6fd2807SJeff Garzik 1677c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1678c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 16790dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1680c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1681c6fd2807SJeff Garzik } else { 16820dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1683f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1684f2dfc1a1STejun Heo tf.lbah = 0; 1685c6fd2807SJeff Garzik } 1686c6fd2807SJeff Garzik 1687c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 16882b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1689c6fd2807SJeff Garzik } 1690c6fd2807SJeff Garzik 1691c6fd2807SJeff Garzik /** 1692c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 16930260731fSTejun Heo * @link: ATA link to analyze SError for 1694c6fd2807SJeff Garzik * 1695c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1696c6fd2807SJeff Garzik * failure. 1697c6fd2807SJeff Garzik * 1698c6fd2807SJeff Garzik * LOCKING: 1699c6fd2807SJeff Garzik * None. 1700c6fd2807SJeff Garzik */ 17010260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1702c6fd2807SJeff Garzik { 17030260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1704c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1705c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1706f9df58cbSTejun Heo u32 hotplug_mask; 1707c6fd2807SJeff Garzik 1708e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1709c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1710cf480626STejun Heo action |= ATA_EH_RESET; 1711c6fd2807SJeff Garzik } 1712c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1713c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1714cf480626STejun Heo action |= ATA_EH_RESET; 1715c6fd2807SJeff Garzik } 1716c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1717c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1718cf480626STejun Heo action |= ATA_EH_RESET; 1719c6fd2807SJeff Garzik } 1720f9df58cbSTejun Heo 1721f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1722f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1723f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1724f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1725f9df58cbSTejun Heo */ 1726eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 17276b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 17286b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1729f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1730f9df58cbSTejun Heo else 1731f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1732f9df58cbSTejun Heo 1733f9df58cbSTejun Heo if (serror & hotplug_mask) 1734c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1735c6fd2807SJeff Garzik 1736c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1737c6fd2807SJeff Garzik ehc->i.action |= action; 1738c6fd2807SJeff Garzik } 1739c6fd2807SJeff Garzik 1740c6fd2807SJeff Garzik /** 1741c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 17420260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1743c6fd2807SJeff Garzik * 1744c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1745c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1746c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1747c6fd2807SJeff Garzik * care of the rest. 
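/*
 * Illustrative standalone sketch, not part of libata-eh.c: a condensed
 * version of the SError classification done by ata_eh_analyze_serror()
 * above.  The EX_* masks are invented stand-ins for the kernel's SERR_*
 * and AC_ERR_* constants; in the real function every branch also requests
 * ATA_EH_RESET, so any nonzero result here implies a reset.
 */
#include <stdint.h>
#include <stdio.h>

enum { EX_ERR_ATA_BUS = 1, EX_ERR_HSM = 2, EX_ERR_SYSTEM = 4 };
enum { EX_SERR_DATA = 1 << 8, EX_SERR_PERSISTENT = 1 << 9,
       EX_SERR_PROTOCOL = 1 << 10, EX_SERR_INTERNAL = 1 << 11 };

static unsigned int example_classify_serror(uint32_t serror)
{
	unsigned int err_mask = 0;

	if (serror & (EX_SERR_PERSISTENT | EX_SERR_DATA))
		err_mask |= EX_ERR_ATA_BUS;	/* bad data on the wire */
	if (serror & EX_SERR_PROTOCOL)
		err_mask |= EX_ERR_HSM;		/* link protocol violation */
	if (serror & EX_SERR_INTERNAL)
		err_mask |= EX_ERR_SYSTEM;	/* host controller trouble */
	return err_mask;
}

int main(void)
{
	/* persistent data errors on the wire classify as an ATA bus error */
	printf("err mask %#x\n",
	       example_classify_serror(EX_SERR_DATA | EX_SERR_PERSISTENT));
	return 0;
}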
1748c6fd2807SJeff Garzik * 1749c6fd2807SJeff Garzik * LOCKING: 1750c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1751c6fd2807SJeff Garzik */ 175210acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1753c6fd2807SJeff Garzik { 17540260731fSTejun Heo struct ata_port *ap = link->ap; 17550260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 17560260731fSTejun Heo struct ata_device *dev = link->device; 1757c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1758c6fd2807SJeff Garzik struct ata_taskfile tf; 1759c6fd2807SJeff Garzik int tag, rc; 1760c6fd2807SJeff Garzik 1761c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1762c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1763c6fd2807SJeff Garzik return; 1764c6fd2807SJeff Garzik 1765c6fd2807SJeff Garzik /* is it NCQ device error? */ 17660260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1767c6fd2807SJeff Garzik return; 1768c6fd2807SJeff Garzik 1769c6fd2807SJeff Garzik /* has LLDD analyzed already? */ 1770c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1771c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1772c6fd2807SJeff Garzik 1773c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1774c6fd2807SJeff Garzik continue; 1775c6fd2807SJeff Garzik 1776c6fd2807SJeff Garzik if (qc->err_mask) 1777c6fd2807SJeff Garzik return; 1778c6fd2807SJeff Garzik } 1779c6fd2807SJeff Garzik 1780c6fd2807SJeff Garzik /* okay, this error is ours */ 1781a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf)); 1782c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1783c6fd2807SJeff Garzik if (rc) { 1784a9a79dfeSJoe Perches ata_link_err(link, "failed to read log page 10h (errno=%d)\n", 1785a9a79dfeSJoe Perches rc); 1786c6fd2807SJeff Garzik return; 1787c6fd2807SJeff Garzik } 1788c6fd2807SJeff Garzik 17890260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 1790a9a79dfeSJoe Perches ata_link_err(link, "log page 10h reported inactive tag %d\n", 1791a9a79dfeSJoe Perches tag); 1792c6fd2807SJeff Garzik return; 1793c6fd2807SJeff Garzik } 1794c6fd2807SJeff Garzik 1795c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1796c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1797c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1798a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 17995335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1800c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1801c6fd2807SJeff Garzik } 1802c6fd2807SJeff Garzik 1803c6fd2807SJeff Garzik /** 1804c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1805c6fd2807SJeff Garzik * @qc: qc to analyze 1806c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1807c6fd2807SJeff Garzik * 1808c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1809c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 181025985edcSLucas De Marchi * available. 1811c6fd2807SJeff Garzik * 1812c6fd2807SJeff Garzik * LOCKING: 1813c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
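/*
 * Illustrative standalone sketch, not part of libata-eh.c: the status and
 * error register decode performed by ata_eh_analyze_tf() below, reduced to
 * a pure function (the ATAPI request-sense path is left out).  The register
 * bit values mirror the ATA layout (BSY 0x80, DRDY 0x40, DRQ 0x08, DF 0x20,
 * ERR 0x01 in the status byte; ICRC 0x80, UNC 0x40, IDNF 0x10, AMNF 0x01 in
 * the error byte); the EX_* result flags are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

enum { EX_ERR_HSM = 1, EX_ERR_DEV = 2, EX_ERR_ATA_BUS = 4,
       EX_ERR_MEDIA = 8, EX_ERR_INVALID = 16 };

static unsigned int example_decode_tf(uint8_t status, uint8_t error)
{
	unsigned int err_mask = 0;

	/* anything but a clean DRDY is a host state machine problem */
	if ((status & (0x80 | 0x08 | 0x40)) != 0x40)
		return EX_ERR_HSM;
	if (!(status & (0x01 | 0x20)))		/* neither ERR nor DF */
		return 0;

	err_mask |= EX_ERR_DEV;
	if (error & 0x80)			/* ICRC: interface CRC error */
		err_mask |= EX_ERR_ATA_BUS;
	if (error & (0x40 | 0x01))		/* UNC or AMNF: media trouble */
		err_mask |= EX_ERR_MEDIA;
	if (error & 0x10)			/* IDNF: address not found */
		err_mask |= EX_ERR_INVALID;
	return err_mask;
}

int main(void)
{
	/* DRDY|ERR with UNC set -> device error plus media error */
	printf("err mask %#x\n", example_decode_tf(0x41, 0x40));
	return 0;
}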
1814c6fd2807SJeff Garzik * 1815c6fd2807SJeff Garzik * RETURNS: 1816c6fd2807SJeff Garzik * Determined recovery action 1817c6fd2807SJeff Garzik */ 1818c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1819c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1820c6fd2807SJeff Garzik { 1821c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1822c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1823c6fd2807SJeff Garzik 1824c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1825c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1826cf480626STejun Heo return ATA_EH_RESET; 1827c6fd2807SJeff Garzik } 1828c6fd2807SJeff Garzik 1829a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1830a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1831a51d644aSTejun Heo else 1832c6fd2807SJeff Garzik return 0; 1833c6fd2807SJeff Garzik 1834c6fd2807SJeff Garzik switch (qc->dev->class) { 1835c6fd2807SJeff Garzik case ATA_DEV_ATA: 18369162c657SHannes Reinecke case ATA_DEV_ZAC: 1837c6fd2807SJeff Garzik if (err & ATA_ICRC) 1838c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1839eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF)) 1840c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1841c6fd2807SJeff Garzik if (err & ATA_IDNF) 1842c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1843c6fd2807SJeff Garzik break; 1844c6fd2807SJeff Garzik 1845c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1846a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 18473eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 18483eabddb8STejun Heo qc->scsicmd->sense_buffer, 18493eabddb8STejun Heo qc->result_tf.feature >> 4); 1850c6fd2807SJeff Garzik if (!tmp) { 1851a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1852a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1853a569a30dSTejun Heo * data is already valid. 1854c6fd2807SJeff Garzik * 1855c6fd2807SJeff Garzik * TODO: interpret sense data and set 1856c6fd2807SJeff Garzik * appropriate err_mask. 
1857c6fd2807SJeff Garzik */ 1858c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1859c6fd2807SJeff Garzik } else 1860c6fd2807SJeff Garzik qc->err_mask |= tmp; 1861c6fd2807SJeff Garzik } 1862a569a30dSTejun Heo } 1863c6fd2807SJeff Garzik 1864c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1865cf480626STejun Heo action |= ATA_EH_RESET; 1866c6fd2807SJeff Garzik 1867c6fd2807SJeff Garzik return action; 1868c6fd2807SJeff Garzik } 1869c6fd2807SJeff Garzik 187076326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 187176326ac1STejun Heo int *xfer_ok) 1872c6fd2807SJeff Garzik { 187376326ac1STejun Heo int base = 0; 187476326ac1STejun Heo 187576326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 187676326ac1STejun Heo *xfer_ok = 1; 187776326ac1STejun Heo 187876326ac1STejun Heo if (!*xfer_ok) 187975f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 188076326ac1STejun Heo 18817d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 188276326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1883c6fd2807SJeff Garzik 18847d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 188576326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 18867d47e8d4STejun Heo 18873884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 18887d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 188976326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 18907d47e8d4STejun Heo if ((err_mask & 18917d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 189276326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1893c6fd2807SJeff Garzik } 1894c6fd2807SJeff Garzik 1895c6fd2807SJeff Garzik return 0; 1896c6fd2807SJeff Garzik } 1897c6fd2807SJeff Garzik 18987d47e8d4STejun Heo struct speed_down_verdict_arg { 1899c6fd2807SJeff Garzik u64 since; 190076326ac1STejun Heo int xfer_ok; 19013884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1902c6fd2807SJeff Garzik }; 1903c6fd2807SJeff Garzik 19047d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1905c6fd2807SJeff Garzik { 19067d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 190776326ac1STejun Heo int cat; 1908c6fd2807SJeff Garzik 1909d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1910c6fd2807SJeff Garzik return -1; 1911c6fd2807SJeff Garzik 191276326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 191376326ac1STejun Heo &arg->xfer_ok); 19147d47e8d4STejun Heo arg->nr_errors[cat]++; 191576326ac1STejun Heo 1916c6fd2807SJeff Garzik return 0; 1917c6fd2807SJeff Garzik } 1918c6fd2807SJeff Garzik 1919c6fd2807SJeff Garzik /** 19207d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1921c6fd2807SJeff Garzik * @dev: Device of interest 1922c6fd2807SJeff Garzik * 1923c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 19247d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 19257d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
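/*
 * Illustrative standalone sketch, not part of libata-eh.c: the bucketing
 * rule of ata_eh_categorize_error() above.  Errors recorded before any
 * transfer has been verified fall into "dubious" copies of the same
 * categories (a fixed base offset), which lets the verdict logic below
 * reach a speed-down decision quickly right after a device is configured.
 * Flag and category values are invented.
 */
#include <stdio.h>

enum { EX_IS_IO = 1, EX_DUBIOUS_XFER = 2 };		/* eflags */
enum { EX_CAT_NONE, EX_CAT_ATA_BUS, EX_CAT_TOUT_HSM, EX_CAT_UNK_DEV,
       EX_CAT_DUBIOUS_BASE };				/* categories */
enum { EX_ERR_DEV = 1, EX_ERR_HSM = 2, EX_ERR_TIMEOUT = 4,
       EX_ERR_MEDIA = 8, EX_ERR_INVALID = 16, EX_ERR_ATA_BUS = 32 };

static int example_categorize(unsigned int eflags, unsigned int err_mask,
			      int *xfer_ok)
{
	int base = 0;

	if (!(eflags & EX_DUBIOUS_XFER))
		*xfer_ok = 1;			/* a verified transfer was seen */
	if (!*xfer_ok)
		base = EX_CAT_DUBIOUS_BASE;

	if (err_mask & EX_ERR_ATA_BUS)
		return base + EX_CAT_ATA_BUS;
	if (err_mask & EX_ERR_TIMEOUT)
		return base + EX_CAT_TOUT_HSM;
	if (eflags & EX_IS_IO) {
		if (err_mask & EX_ERR_HSM)
			return base + EX_CAT_TOUT_HSM;
		if ((err_mask & (EX_ERR_DEV | EX_ERR_MEDIA | EX_ERR_INVALID))
		    == EX_ERR_DEV)
			return base + EX_CAT_UNK_DEV;
	}
	return EX_CAT_NONE;
}

int main(void)
{
	int xfer_ok = 0;

	/* an I/O timeout before any verified transfer -> dubious bucket */
	printf("category %d\n",
	       example_categorize(EX_IS_IO | EX_DUBIOUS_XFER,
				  EX_ERR_TIMEOUT, &xfer_ok));
	return 0;
}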
1926c6fd2807SJeff Garzik * 19273884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1928c6fd2807SJeff Garzik * 19293884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 19303884f7b0STejun Heo * IO commands 19317d47e8d4STejun Heo * 19323884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1933c6fd2807SJeff Garzik * 193476326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 193576326ac1STejun Heo * data transfer hasn't been verified. 193676326ac1STejun Heo * 19373884f7b0STejun Heo * Verdicts are 19387d47e8d4STejun Heo * 19393884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 19407d47e8d4STejun Heo * 19413884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 19423884f7b0STejun Heo * to PIO. 19433884f7b0STejun Heo * 19443884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 19453884f7b0STejun Heo * 19463884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 194776326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 194876326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 194976326ac1STejun Heo * This is to expedite speed down decisions right after device is 195076326ac1STejun Heo * initially configured. 19513884f7b0STejun Heo * 195276326ac1STejun Heo * The followings are speed down rules. #1 and #2 deal with 195376326ac1STejun Heo * DUBIOUS errors. 195476326ac1STejun Heo * 195576326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 195676326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 195776326ac1STejun Heo * 195876326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 195976326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 196076326ac1STejun Heo * 196176326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 196225985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO 19633884f7b0STejun Heo * 196476326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 19653884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 19663884f7b0STejun Heo * 196776326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 19683884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 19697d47e8d4STejun Heo * 1970c6fd2807SJeff Garzik * LOCKING: 1971c6fd2807SJeff Garzik * Inherited from caller. 1972c6fd2807SJeff Garzik * 1973c6fd2807SJeff Garzik * RETURNS: 19747d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 
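/*
 * Illustrative standalone sketch, not part of libata-eh.c: the threshold
 * rules spelled out above, with the numbers taken from the
 * ata_eh_speed_down_verdict() body below (note the body uses "> 6" for
 * rule #3 where the text says more than 8).  It assumes the per-category
 * error counts for the 5- and 10-minute windows have already been
 * collected; all names are invented.
 */
#include <stdio.h>

enum { EX_SPDN_NCQ_OFF = 1, EX_SPDN_SPEED_DOWN = 2,
       EX_SPDN_FALLBACK_TO_PIO = 4, EX_SPDN_KEEP_ERRORS = 8 };

struct example_counts {		/* per-category error counts in one window */
	int ata_bus, tout_hsm, unk_dev;
	int dub_ata_bus, dub_tout_hsm, dub_unk_dev;
};

static unsigned int example_verdict(const struct example_counts *last5m,
				    const struct example_counts *last10m)
{
	unsigned int v = 0;

	if (last5m->dub_ata_bus + last5m->dub_tout_hsm > 1)
		v |= EX_SPDN_SPEED_DOWN | EX_SPDN_FALLBACK_TO_PIO |
		     EX_SPDN_KEEP_ERRORS;
	if (last5m->dub_tout_hsm + last5m->dub_unk_dev > 1)
		v |= EX_SPDN_NCQ_OFF | EX_SPDN_KEEP_ERRORS;
	if (last5m->ata_bus + last5m->tout_hsm + last5m->unk_dev > 6)
		v |= EX_SPDN_FALLBACK_TO_PIO;

	if (last10m->tout_hsm + last10m->unk_dev > 3)
		v |= EX_SPDN_NCQ_OFF;
	if (last10m->ata_bus + last10m->tout_hsm > 3 || last10m->unk_dev > 6)
		v |= EX_SPDN_SPEED_DOWN;

	return v;
}

int main(void)
{
	struct example_counts recent = { 0 };
	struct example_counts tenmin = { .tout_hsm = 2, .unk_dev = 2 };

	/* four timeout/unknown-device errors in 10 minutes -> turn NCQ off */
	printf("verdict %#x\n", example_verdict(&recent, &tenmin));
	return 0;
}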
1975c6fd2807SJeff Garzik */ 19767d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1977c6fd2807SJeff Garzik { 19787d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 19797d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 19807d47e8d4STejun Heo struct speed_down_verdict_arg arg; 19817d47e8d4STejun Heo unsigned int verdict = 0; 1982c6fd2807SJeff Garzik 19833884f7b0STejun Heo /* scan past 5 mins of error history */ 19843884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 19853884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 19863884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 19873884f7b0STejun Heo 198876326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 198976326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 199076326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 199176326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 199276326ac1STejun Heo 199376326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 199476326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 199576326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 199676326ac1STejun Heo 19973884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 19983884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1999663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 20003884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 20013884f7b0STejun Heo 20027d47e8d4STejun Heo /* scan past 10 mins of error history */ 2003c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 20047d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 20057d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 2006c6fd2807SJeff Garzik 20073884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 20083884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 20097d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 20103884f7b0STejun Heo 20113884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 20123884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 2013663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 20147d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 2015c6fd2807SJeff Garzik 20167d47e8d4STejun Heo return verdict; 2017c6fd2807SJeff Garzik } 2018c6fd2807SJeff Garzik 2019c6fd2807SJeff Garzik /** 2020c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 2021c6fd2807SJeff Garzik * @dev: Failed device 20223884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 2023c6fd2807SJeff Garzik * @err_mask: err_mask of the error 2024c6fd2807SJeff Garzik * 2025c6fd2807SJeff Garzik * Record error and examine error history to determine whether 2026c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 2027c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 2028c6fd2807SJeff Garzik * necessary. 2029c6fd2807SJeff Garzik * 2030c6fd2807SJeff Garzik * LOCKING: 2031c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2032c6fd2807SJeff Garzik * 2033c6fd2807SJeff Garzik * RETURNS: 20347d47e8d4STejun Heo * Determined recovery action. 
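/*
 * Illustrative standalone sketch, not part of libata-eh.c: the order in
 * which ata_eh_speed_down() below consumes a verdict -- at most one remedy
 * per error, tried from least to most drastic.  The sketch assumes each
 * remedy can be applied; the real code falls through to the next step when
 * one cannot (e.g. the link speed is already at its floor).  Names and the
 * ncq_enabled shorthand are invented.
 */
#include <stdio.h>

enum { EX_SPDN2_NCQ_OFF = 1, EX_SPDN2_SPEED_DOWN = 2,
       EX_SPDN2_FALLBACK_TO_PIO = 4 };
enum example_remedy { EX_NONE, EX_NCQ_OFF, EX_SPEED_DOWN, EX_PIO_FALLBACK };

static enum example_remedy example_apply_verdict(unsigned int verdict,
						 int ncq_enabled, int spdn_cnt)
{
	if ((verdict & EX_SPDN2_NCQ_OFF) && ncq_enabled)
		return EX_NCQ_OFF;
	if (verdict & EX_SPDN2_SPEED_DOWN)
		return EX_SPEED_DOWN;
	if ((verdict & EX_SPDN2_FALLBACK_TO_PIO) && spdn_cnt >= 2)
		return EX_PIO_FALLBACK;
	return EX_NONE;
}

int main(void)
{
	/* NCQ off and speed down both requested; NCQ off is tried first */
	printf("remedy %d\n",
	       example_apply_verdict(EX_SPDN2_NCQ_OFF | EX_SPDN2_SPEED_DOWN, 1, 0));
	return 0;
}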
2035c6fd2807SJeff Garzik */ 20363884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 20373884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 2038c6fd2807SJeff Garzik { 2039b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 204076326ac1STejun Heo int xfer_ok = 0; 20417d47e8d4STejun Heo unsigned int verdict; 20427d47e8d4STejun Heo unsigned int action = 0; 20437d47e8d4STejun Heo 20447d47e8d4STejun Heo /* don't bother if Cat-0 error */ 204576326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 2046c6fd2807SJeff Garzik return 0; 2047c6fd2807SJeff Garzik 2048c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 20493884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 20507d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 2051c6fd2807SJeff Garzik 20527d47e8d4STejun Heo /* turn off NCQ? */ 20537d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 20547d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 20557d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 20567d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 2057a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 20587d47e8d4STejun Heo goto done; 20597d47e8d4STejun Heo } 2060c6fd2807SJeff Garzik 20617d47e8d4STejun Heo /* speed down? */ 20627d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 2063c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 2064a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 2065cf480626STejun Heo action |= ATA_EH_RESET; 20667d47e8d4STejun Heo goto done; 20677d47e8d4STejun Heo } 2068c6fd2807SJeff Garzik 2069c6fd2807SJeff Garzik /* lower transfer mode */ 20707d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 20717d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 20727d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 20737d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 20747d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 20757d47e8d4STejun Heo int sel; 2076c6fd2807SJeff Garzik 20777d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 20787d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 20797d47e8d4STejun Heo else 20807d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 20817d47e8d4STejun Heo 20827d47e8d4STejun Heo dev->spdn_cnt++; 20837d47e8d4STejun Heo 20847d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 2085cf480626STejun Heo action |= ATA_EH_RESET; 20867d47e8d4STejun Heo goto done; 20877d47e8d4STejun Heo } 20887d47e8d4STejun Heo } 20897d47e8d4STejun Heo } 20907d47e8d4STejun Heo 20917d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 2092663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
20937d47e8d4STejun Heo */ 20947d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 2095663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 20967d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 20977d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 20987d47e8d4STejun Heo dev->spdn_cnt = 0; 2099cf480626STejun Heo action |= ATA_EH_RESET; 21007d47e8d4STejun Heo goto done; 21017d47e8d4STejun Heo } 21027d47e8d4STejun Heo } 21037d47e8d4STejun Heo 2104c6fd2807SJeff Garzik return 0; 21057d47e8d4STejun Heo done: 21067d47e8d4STejun Heo /* device has been slowed down, blow error history */ 210776326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 21087d47e8d4STejun Heo ata_ering_clear(&dev->ering); 21097d47e8d4STejun Heo return action; 2110c6fd2807SJeff Garzik } 2111c6fd2807SJeff Garzik 2112c6fd2807SJeff Garzik /** 21138d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry 21148d899e70SMark Lord * @qc: qc to possibly retry 21158d899e70SMark Lord * 21168d899e70SMark Lord * Look at the cause of the error and decide if a retry 21178d899e70SMark Lord * might be useful or not. We don't want to retry media errors 21188d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds 21198d899e70SMark Lord * doing its own internal retries before reporting the failure. 21208d899e70SMark Lord */ 21218d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 21228d899e70SMark Lord { 21231eaca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA) 21248d899e70SMark Lord return 0; /* don't retry media errors */ 21258d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO) 21268d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */ 21278d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID) 21288d899e70SMark Lord return 0; /* don't retry these */ 21298d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ 21308d899e70SMark Lord } 21318d899e70SMark Lord 21328d899e70SMark Lord /** 21339b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 21349b1e2658STejun Heo * @link: host link to perform autopsy on 2135c6fd2807SJeff Garzik * 21360260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 21370260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 21380260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 2139c6fd2807SJeff Garzik * 2140c6fd2807SJeff Garzik * LOCKING: 2141c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
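/*
 * Illustrative standalone sketch, not part of libata-eh.c: the decision of
 * ata_eh_worth_retry() above as a pure function.  The ordering of the
 * tests is the point -- media errors are never retried, filesystem I/O is
 * otherwise always retried, and everything else earns a retry only when
 * the failure was not purely a device error.  Flag values are invented.
 */
#include <stdbool.h>
#include <stdio.h>

enum { EX_RETRY_ERR_DEV = 1, EX_RETRY_ERR_MEDIA = 8, EX_RETRY_ERR_INVALID = 16 };

static bool example_worth_retry(unsigned int err_mask, bool is_fs_io)
{
	if (err_mask & EX_RETRY_ERR_MEDIA)
		return false;		/* the drive already retried internally */
	if (is_fs_io)
		return true;
	if (err_mask & EX_RETRY_ERR_INVALID)
		return false;
	return err_mask != EX_RETRY_ERR_DEV;	/* retry unless purely a device error */
}

int main(void)
{
	/* a device error on fs I/O is retried; the same error elsewhere is not */
	printf("%d %d\n",
	       example_worth_retry(EX_RETRY_ERR_DEV, true),
	       example_worth_retry(EX_RETRY_ERR_DEV, false));
	return 0;
}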
2142c6fd2807SJeff Garzik */ 21439b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 2144c6fd2807SJeff Garzik { 21450260731fSTejun Heo struct ata_port *ap = link->ap; 2146936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2147dfcc173dSTejun Heo struct ata_device *dev; 21483884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 21493884f7b0STejun Heo int tag; 2150c6fd2807SJeff Garzik u32 serror; 2151c6fd2807SJeff Garzik int rc; 2152c6fd2807SJeff Garzik 2153c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2154c6fd2807SJeff Garzik 2155c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2156c6fd2807SJeff Garzik return; 2157c6fd2807SJeff Garzik 2158c6fd2807SJeff Garzik /* obtain and analyze SError */ 2159936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2160c6fd2807SJeff Garzik if (rc == 0) { 2161c6fd2807SJeff Garzik ehc->i.serror |= serror; 21620260731fSTejun Heo ata_eh_analyze_serror(link); 21634e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2164cf480626STejun Heo /* SError read failed, force reset and probing */ 2165b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2166cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 21674e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 21684e57c517STejun Heo } 2169c6fd2807SJeff Garzik 2170c6fd2807SJeff Garzik /* analyze NCQ failure */ 21710260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2172c6fd2807SJeff Garzik 2173c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2174c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2175c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2176c6fd2807SJeff Garzik 2177c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2178c6fd2807SJeff Garzik 2179c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2180c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2181c6fd2807SJeff Garzik 2182b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2183b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2184c6fd2807SJeff Garzik continue; 2185c6fd2807SJeff Garzik 2186c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2187c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2188c6fd2807SJeff Garzik 2189c6fd2807SJeff Garzik /* analyze TF */ 2190c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2191c6fd2807SJeff Garzik 2192c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2193c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2194c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2195c6fd2807SJeff Garzik AC_ERR_INVALID); 2196c6fd2807SJeff Garzik 2197c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2198c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2199c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2200c6fd2807SJeff Garzik 2201c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2202f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2203c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2204c6fd2807SJeff Garzik 220503faab78STejun Heo /* determine whether the command is worth retrying */ 22068d899e70SMark Lord if (ata_eh_worth_retry(qc)) 220703faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 220803faab78STejun Heo 2209c6fd2807SJeff Garzik /* accumulate error info */ 2210c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2211c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2212c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 22133884f7b0STejun 
Heo eflags |= ATA_EFLAG_IS_IO; 2214255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc); 2215c6fd2807SJeff Garzik } 2216c6fd2807SJeff Garzik 2217c6fd2807SJeff Garzik /* enforce default EH actions */ 2218c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2219c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2220cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 22213884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 22223884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2223c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2224c6fd2807SJeff Garzik 2225dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2226dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2227dfcc173dSTejun Heo */ 2228c6fd2807SJeff Garzik if (ehc->i.dev) { 2229c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2230c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2231c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2232c6fd2807SJeff Garzik } 2233c6fd2807SJeff Garzik 22342695e366STejun Heo /* propagate timeout to host link */ 22352695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 22362695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 22372695e366STejun Heo 22382695e366STejun Heo /* record error and consider speeding down */ 2239dfcc173dSTejun Heo dev = ehc->i.dev; 22402695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 22412695e366STejun Heo ata_dev_enabled(link->device)))) 2242dfcc173dSTejun Heo dev = link->device; 2243dfcc173dSTejun Heo 224476326ac1STejun Heo if (dev) { 224576326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 224676326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 22473884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 224876326ac1STejun Heo } 2249255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2250c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2251c6fd2807SJeff Garzik } 2252c6fd2807SJeff Garzik 2253c6fd2807SJeff Garzik /** 22549b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 22559b1e2658STejun Heo * @ap: host port to perform autopsy on 22569b1e2658STejun Heo * 22579b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 22589b1e2658STejun Heo * which recovery actions are needed. 22599b1e2658STejun Heo * 22609b1e2658STejun Heo * LOCKING: 22619b1e2658STejun Heo * Kernel thread context (may sleep). 22629b1e2658STejun Heo */ 2263fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 22649b1e2658STejun Heo { 22659b1e2658STejun Heo struct ata_link *link; 22669b1e2658STejun Heo 22671eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 22689b1e2658STejun Heo ata_eh_link_autopsy(link); 22692695e366STejun Heo 2270b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2271b1c72916STejun Heo * but actions and flags are transferred over to the master 2272b1c72916STejun Heo * link and handled from there. 
2273b1c72916STejun Heo */ 2274b1c72916STejun Heo if (ap->slave_link) { 2275b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2276b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2277b1c72916STejun Heo 2278848e4c68STejun Heo /* transfer control flags from master to slave */ 2279848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2280848e4c68STejun Heo 2281848e4c68STejun Heo /* perform autopsy on the slave link */ 2282b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2283b1c72916STejun Heo 2284848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2285b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2286b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2287b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2288b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2289b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2290b1c72916STejun Heo } 2291b1c72916STejun Heo 22922695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 22932695e366STejun Heo * Perform host link autopsy last. 22942695e366STejun Heo */ 2295071f44b1STejun Heo if (sata_pmp_attached(ap)) 22962695e366STejun Heo ata_eh_link_autopsy(&ap->link); 22979b1e2658STejun Heo } 22989b1e2658STejun Heo 22999b1e2658STejun Heo /** 23006521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 23016521148cSRobert Hancock * @command: ATA command code to get description for 23026521148cSRobert Hancock * 23036521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 23046521148cSRobert Hancock * command is not known. 23056521148cSRobert Hancock * 23066521148cSRobert Hancock * LOCKING: 23076521148cSRobert Hancock * None 23086521148cSRobert Hancock */ 23096521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 23106521148cSRobert Hancock { 23116521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23126521148cSRobert Hancock static const struct 23136521148cSRobert Hancock { 23146521148cSRobert Hancock u8 command; 23156521148cSRobert Hancock const char *text; 23166521148cSRobert Hancock } cmd_descr[] = { 23176521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 23186521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 23196521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 23206521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 23216521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 23226521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 23233915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 23246521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 23256521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 23266521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 23276521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 23286521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 23296521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 23306521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 23316521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 23326521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 23336521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 23346521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 23356521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 
23366521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 23376521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 23386521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 23396521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 23406521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 23416521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 23426521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 23436521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 23443915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, 23453915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, 23466521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 23476521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 23486521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 23496521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 23506521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 23516521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 23526521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 23536521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 23546521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 23556521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 23566521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 23576521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 23586521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 23596521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 23606521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 23616521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 23626521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 23636521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 23646521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 23656521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 23666521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 23676521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 23686521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 23696521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 23706521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 23716521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 23723915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 23736521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 23746521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 23756521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 23766521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 23776521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 23783915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 23796521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 23803915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, 23816521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 23826521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET 
PASSWORD" }, 23836521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 23846521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 23856521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 23866521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 23876521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 23886521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 23896521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 23906521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 23916521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2392acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 23936521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 23946521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 23956521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 23966521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 23976521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 23986521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 23993915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 24003915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 24016521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 24026521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 24036521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 24046521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 24056521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 24066521148cSRobert Hancock { 0, NULL } /* terminate list */ 24076521148cSRobert Hancock }; 24086521148cSRobert Hancock 24096521148cSRobert Hancock unsigned int i; 24106521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 24116521148cSRobert Hancock if (cmd_descr[i].command == command) 24126521148cSRobert Hancock return cmd_descr[i].text; 24136521148cSRobert Hancock #endif 24146521148cSRobert Hancock 24156521148cSRobert Hancock return NULL; 24166521148cSRobert Hancock } 241736aae28eSAndy Shevchenko EXPORT_SYMBOL_GPL(ata_get_cmd_descript); 24186521148cSRobert Hancock 24196521148cSRobert Hancock /** 24209b1e2658STejun Heo * ata_eh_link_report - report error handling to user 24210260731fSTejun Heo * @link: ATA link EH is going on 2422c6fd2807SJeff Garzik * 2423c6fd2807SJeff Garzik * Report EH to user. 2424c6fd2807SJeff Garzik * 2425c6fd2807SJeff Garzik * LOCKING: 2426c6fd2807SJeff Garzik * None. 
2427c6fd2807SJeff Garzik */ 24289b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2429c6fd2807SJeff Garzik { 24300260731fSTejun Heo struct ata_port *ap = link->ap; 24310260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2432c6fd2807SJeff Garzik const char *frozen, *desc; 2433462098b0SLevente Kurusa char tries_buf[6] = ""; 2434c6fd2807SJeff Garzik int tag, nr_failed = 0; 2435c6fd2807SJeff Garzik 243694ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 243794ff3d54STejun Heo return; 243894ff3d54STejun Heo 2439c6fd2807SJeff Garzik desc = NULL; 2440c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2441c6fd2807SJeff Garzik desc = ehc->i.desc; 2442c6fd2807SJeff Garzik 2443c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2444c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2445c6fd2807SJeff Garzik 2446b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2447b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2448e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2449e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2450c6fd2807SJeff Garzik continue; 2451c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2452c6fd2807SJeff Garzik continue; 2453c6fd2807SJeff Garzik 2454c6fd2807SJeff Garzik nr_failed++; 2455c6fd2807SJeff Garzik } 2456c6fd2807SJeff Garzik 2457c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2458c6fd2807SJeff Garzik return; 2459c6fd2807SJeff Garzik 2460c6fd2807SJeff Garzik frozen = ""; 2461c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2462c6fd2807SJeff Garzik frozen = " frozen"; 2463c6fd2807SJeff Garzik 2464a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2465462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d", 2466a1e10f7eSTejun Heo ap->eh_tries); 2467a1e10f7eSTejun Heo 2468c6fd2807SJeff Garzik if (ehc->i.dev) { 2469a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2470a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2471a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2472a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2473c6fd2807SJeff Garzik if (desc) 2474a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2475c6fd2807SJeff Garzik } else { 2476a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2477a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2478a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2479a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2480c6fd2807SJeff Garzik if (desc) 2481a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2482c6fd2807SJeff Garzik } 2483c6fd2807SJeff Garzik 24846521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 24851333e194SRobert Hancock if (ehc->i.serror) 2486a9a79dfeSJoe Perches ata_link_err(link, 24871333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 24881333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 24891333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 24901333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 24911333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 24921333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 24931333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 24941333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? 
"PHYRdyChg " : "", 24951333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 24961333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 24971333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 24981333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 24991333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 25001333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 25011333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 25021333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 25031333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 25041333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 25056521148cSRobert Hancock #endif 25061333e194SRobert Hancock 2507c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2508c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 25098a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2510abb6a889STejun Heo char data_buf[20] = ""; 2511abb6a889STejun Heo char cdb_buf[70] = ""; 2512c6fd2807SJeff Garzik 25130260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2514b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2515c6fd2807SJeff Garzik continue; 2516c6fd2807SJeff Garzik 2517abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2518abb6a889STejun Heo static const char *dma_str[] = { 2519abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2520abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2521abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2522abb6a889STejun Heo }; 2523abb6a889STejun Heo static const char *prot_str[] = { 2524abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2525abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2526abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 25270dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 25280dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2529abb6a889STejun Heo }; 2530abb6a889STejun Heo 2531abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2532abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2533abb6a889STejun Heo dma_str[qc->dma_dir]); 2534abb6a889STejun Heo } 2535abb6a889STejun Heo 25366521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2537a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb; 2538a13b0c9dSHannes Reinecke size_t cdb_len = qc->dev->cdb_len; 2539a13b0c9dSHannes Reinecke 2540cbba5b0eSHannes Reinecke if (qc->scsicmd) { 2541cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd; 2542cbba5b0eSHannes Reinecke cdb_len = qc->scsicmd->cmd_len; 2543cbba5b0eSHannes Reinecke } 2544cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf), 2545cbba5b0eSHannes Reinecke cdb, cdb_len); 25466521148cSRobert Hancock } else { 25476521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 25486521148cSRobert Hancock if (descr) 2549a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2550a9a79dfeSJoe Perches descr); 25516521148cSRobert Hancock } 2552abb6a889STejun Heo 2553a9a79dfeSJoe Perches ata_dev_err(qc->dev, 25548a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2555abb6a889STejun Heo "tag %d%s\n %s" 25568a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 25575335b729STejun Heo "Emask 0x%x (%s)%s\n", 25588a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 25598a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 
25608a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 25618a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2562abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 25638a937581STejun Heo res->command, res->feature, res->nsect, 25648a937581STejun Heo res->lbal, res->lbam, res->lbah, 25658a937581STejun Heo res->hob_feature, res->hob_nsect, 25668a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 25675335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 25685335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 25691333e194SRobert Hancock 25706521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 25711333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 257284ded2f8STejun Heo ATA_ERR)) { 25731333e194SRobert Hancock if (res->command & ATA_BUSY) 2574a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 25751333e194SRobert Hancock else 257684ded2f8STejun Heo ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", 25771333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 25781333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 25791333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 25801333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 25811333e194SRobert Hancock } 25821333e194SRobert Hancock 25831333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 2584eec7e1c1SAlexey Asemov (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | 2585eec7e1c1SAlexey Asemov ATA_IDNF | ATA_ABORTED))) 2586eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", 25871333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 25881333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 2589eec7e1c1SAlexey Asemov res->feature & ATA_AMNF ? "AMNF " : "", 25901333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 25911333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 25926521148cSRobert Hancock #endif 2593c6fd2807SJeff Garzik } 2594c6fd2807SJeff Garzik } 2595c6fd2807SJeff Garzik 25969b1e2658STejun Heo /** 25979b1e2658STejun Heo * ata_eh_report - report error handling to user 25989b1e2658STejun Heo * @ap: ATA port to report EH about 25999b1e2658STejun Heo * 26009b1e2658STejun Heo * Report EH to user. 26019b1e2658STejun Heo * 26029b1e2658STejun Heo * LOCKING: 26039b1e2658STejun Heo * None. 
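 *
 * Call-path sketch (the helpers named here are defined elsewhere in
 * libata, not in this excerpt): a low-level driver's ->error_handler
 * usually reaches this via ata_std_error_handler() -> ata_do_eh(),
 * which, roughly speaking, runs ata_eh_autopsy(ap), then
 * ata_eh_report(ap), then ata_eh_recover() with the driver's
 * prereset/softreset/hardreset/postreset methods.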
26049b1e2658STejun Heo */ 2605fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 26069b1e2658STejun Heo { 26079b1e2658STejun Heo struct ata_link *link; 26089b1e2658STejun Heo 26091eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 26109b1e2658STejun Heo ata_eh_link_report(link); 26119b1e2658STejun Heo } 26129b1e2658STejun Heo 2613cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2614b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2615b1c72916STejun Heo bool clear_classes) 2616c6fd2807SJeff Garzik { 2617f58229f8STejun Heo struct ata_device *dev; 2618c6fd2807SJeff Garzik 2619b1c72916STejun Heo if (clear_classes) 26201eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2621f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2622c6fd2807SJeff Garzik 2623f046519fSTejun Heo return reset(link, classes, deadline); 2624c6fd2807SJeff Garzik } 2625c6fd2807SJeff Garzik 2626e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2627c6fd2807SJeff Garzik { 262845db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2629ae791c05STejun Heo return 0; 26305dbfc9cbSTejun Heo if (rc == -EAGAIN) 2631c6fd2807SJeff Garzik return 1; 2632071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 26333495de73STejun Heo return 1; 2634c6fd2807SJeff Garzik return 0; 2635c6fd2807SJeff Garzik } 2636c6fd2807SJeff Garzik 2637fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2638c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2639c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2640c6fd2807SJeff Garzik { 2641afaa5c37STejun Heo struct ata_port *ap = link->ap; 2642b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2643936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2644705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2645c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2646416dc9edSTejun Heo unsigned int lflags = link->flags; 2647c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2648d8af0eb6STejun Heo int max_tries = 0, try = 0; 2649b1c72916STejun Heo struct ata_link *failed_link; 2650f58229f8STejun Heo struct ata_device *dev; 2651416dc9edSTejun Heo unsigned long deadline, now; 2652c6fd2807SJeff Garzik ata_reset_fn_t reset; 2653afaa5c37STejun Heo unsigned long flags; 2654416dc9edSTejun Heo u32 sstatus; 2655b1c72916STejun Heo int nr_unknown, rc; 2656c6fd2807SJeff Garzik 2657932648b0STejun Heo /* 2658932648b0STejun Heo * Prepare to reset 2659932648b0STejun Heo */ 2660d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2661d8af0eb6STejun Heo max_tries++; 2662ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE) 2663ca6d43b0SDan Williams max_tries = 1; 266405944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 266505944bdfSTejun Heo hardreset = NULL; 266605944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 266705944bdfSTejun Heo softreset = NULL; 2668d8af0eb6STejun Heo 266925985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 267019b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 26710a2c0f56STejun Heo now = jiffies; 267219b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 267319b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 267419b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 26750a2c0f56STejun Heo if (time_before(now, deadline)) 26760a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 267719b72321STejun Heo } 26780a2c0f56STejun Heo 2679afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2680afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2681afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2682afaa5c37STejun Heo 2683cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2684c6fd2807SJeff Garzik 26851eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2686cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2687cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2688cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2689cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2690cdeab114STejun Heo * suitable controller mode we should not touch the 2691cdeab114STejun Heo * bus as we may be talking too fast. 2692cdeab114STejun Heo */ 2693cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 26945416912aSAaron Lu dev->dma_mode = 0xff; 2695cdeab114STejun Heo 2696cdeab114STejun Heo /* If the controller has a pio mode setup function 2697cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2698cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2699cdeab114STejun Heo * configuring devices. 
2700cdeab114STejun Heo */ 2701cdeab114STejun Heo if (ap->ops->set_piomode) 2702cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2703cdeab114STejun Heo } 2704cdeab114STejun Heo 2705cf480626STejun Heo /* prefer hardreset */ 2706932648b0STejun Heo reset = NULL; 2707cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2708cf480626STejun Heo if (hardreset) { 2709cf480626STejun Heo reset = hardreset; 2710a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 27114f7faa3fSTejun Heo } else if (softreset) { 2712cf480626STejun Heo reset = softreset; 2713a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2714cf480626STejun Heo } 2715c6fd2807SJeff Garzik 2716c6fd2807SJeff Garzik if (prereset) { 2717b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2718b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2719b1c72916STejun Heo 2720b1c72916STejun Heo if (slave) { 2721b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2722b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2723b1c72916STejun Heo } 2724b1c72916STejun Heo 2725b1c72916STejun Heo rc = prereset(link, deadline); 2726b1c72916STejun Heo 2727b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2728b1c72916STejun Heo * is skipped iff both master and slave links report 2729b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2730b1c72916STejun Heo */ 2731b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2732b1c72916STejun Heo int tmp; 2733b1c72916STejun Heo 2734b1c72916STejun Heo tmp = prereset(slave, deadline); 2735b1c72916STejun Heo if (tmp != -ENOENT) 2736b1c72916STejun Heo rc = tmp; 2737b1c72916STejun Heo 2738b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2739b1c72916STejun Heo } 2740b1c72916STejun Heo 2741c6fd2807SJeff Garzik if (rc) { 2742c961922bSAlan Cox if (rc == -ENOENT) { 2743a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2744cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 27454aa9ab67STejun Heo 27461eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2747f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 27484aa9ab67STejun Heo 27494aa9ab67STejun Heo rc = 0; 2750c961922bSAlan Cox } else 2751a9a79dfeSJoe Perches ata_link_err(link, 2752a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2753a9a79dfeSJoe Perches rc); 2754fccb6ea5STejun Heo goto out; 2755c6fd2807SJeff Garzik } 2756c6fd2807SJeff Garzik 2757932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2758d6515e6fSTejun Heo * bang classes, thaw and return. 2759932648b0STejun Heo */ 2760932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 27611eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2762f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2763d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2764d6515e6fSTejun Heo ata_is_host_link(link)) 2765d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2766fccb6ea5STejun Heo rc = 0; 2767fccb6ea5STejun Heo goto out; 2768c6fd2807SJeff Garzik } 2769932648b0STejun Heo } 2770c6fd2807SJeff Garzik 2771c6fd2807SJeff Garzik retry: 2772932648b0STejun Heo /* 2773932648b0STejun Heo * Perform reset 2774932648b0STejun Heo */ 2775dc98c32cSTejun Heo if (ata_is_host_link(link)) 2776dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2777dc98c32cSTejun Heo 2778341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 277931daabdaSTejun Heo 2780932648b0STejun Heo if (reset) { 2781c6fd2807SJeff Garzik if (verbose) 2782a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2783c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2784c6fd2807SJeff Garzik 2785c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 278619b72321STejun Heo ehc->last_reset = jiffies; 27870d64a233STejun Heo if (reset == hardreset) 27880d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 27890d64a233STejun Heo else 27900d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2791c6fd2807SJeff Garzik 2792b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2793b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2794b1c72916STejun Heo failed_link = link; 27955dbfc9cbSTejun Heo goto fail; 2796b1c72916STejun Heo } 2797c6fd2807SJeff Garzik 2798b1c72916STejun Heo /* hardreset slave link if existent */ 2799b1c72916STejun Heo if (slave && reset == hardreset) { 2800b1c72916STejun Heo int tmp; 2801b1c72916STejun Heo 2802b1c72916STejun Heo if (verbose) 2803a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2804b1c72916STejun Heo 2805b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2806b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2807b1c72916STejun Heo false); 2808b1c72916STejun Heo switch (tmp) { 2809b1c72916STejun Heo case -EAGAIN: 2810b1c72916STejun Heo rc = -EAGAIN; 2811b1c72916STejun Heo case 0: 2812b1c72916STejun Heo break; 2813b1c72916STejun Heo default: 2814b1c72916STejun Heo failed_link = slave; 2815b1c72916STejun Heo rc = tmp; 2816b1c72916STejun Heo goto fail; 2817b1c72916STejun Heo } 2818b1c72916STejun Heo } 2819b1c72916STejun Heo 2820b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2821c6fd2807SJeff Garzik if (reset == hardreset && 2822e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2823c6fd2807SJeff Garzik reset = softreset; 2824c6fd2807SJeff Garzik 2825c6fd2807SJeff Garzik if (!reset) { 2826a9a79dfeSJoe Perches ata_link_err(link, 2827a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2828b1c72916STejun Heo failed_link = link; 2829fccb6ea5STejun Heo rc = -EINVAL; 283008cf69d0STejun Heo goto fail; 2831c6fd2807SJeff Garzik } 2832c6fd2807SJeff Garzik 2833cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2834b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2835fe2c4d01STejun Heo if (rc) { 2836fe2c4d01STejun Heo failed_link = link; 2837fe2c4d01STejun Heo goto fail; 2838fe2c4d01STejun Heo } 2839c6fd2807SJeff Garzik } 2840932648b0STejun Heo } else { 2841932648b0STejun Heo if (verbose) 2842a9a79dfeSJoe Perches ata_link_info(link, 2843a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2844932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2845932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2846932648b0STejun Heo } 2847008a7896STejun Heo 2848932648b0STejun Heo /* 2849932648b0STejun Heo * Post-reset processing 2850932648b0STejun Heo */ 28511eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2852416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2853416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2854416dc9edSTejun Heo * drives from sleeping mode. 
2855c6fd2807SJeff Garzik */ 2856f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2857054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2858c6fd2807SJeff Garzik 28593b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 28603b761d3dSTejun Heo continue; 28613b761d3dSTejun Heo 28624ccd3329STejun Heo /* apply class override */ 2863416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2864ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2865416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2866816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2867ae791c05STejun Heo } 2868ae791c05STejun Heo 2869008a7896STejun Heo /* record current link speed */ 2870936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2871936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2872b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2873b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2874008a7896STejun Heo 2875dc98c32cSTejun Heo /* thaw the port */ 2876dc98c32cSTejun Heo if (ata_is_host_link(link)) 2877dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2878dc98c32cSTejun Heo 2879f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2880f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2881f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2882f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2883f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2884f046519fSTejun Heo * link onlineness and classification result later. 2885f046519fSTejun Heo */ 2886b1c72916STejun Heo if (postreset) { 2887cc0680a5STejun Heo postreset(link, classes); 2888b1c72916STejun Heo if (slave) 2889b1c72916STejun Heo postreset(slave, classes); 2890b1c72916STejun Heo } 2891c6fd2807SJeff Garzik 28921e641060STejun Heo /* 28938c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 28948c56caccSTejun Heo * error conditions during reset. Clear accumulated error 28958c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 28968c56caccSTejun Heo * final recovery action and we cross check link onlineness against 28978c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 28981e641060STejun Heo */ 2899f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 29001e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2901b1c72916STejun Heo if (slave) 29021e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 29031e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2904f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2905f046519fSTejun Heo 29068c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 29078c56caccSTejun Heo ata_eh_thaw_port(ap); 29088c56caccSTejun Heo 29093b761d3dSTejun Heo /* 29103b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2911f046519fSTejun Heo * Hotplug could have happened during reset and some 2912f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2913f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 29143b761d3dSTejun Heo * link on/offlineness and classification result, those 29153b761d3dSTejun Heo * conditions can be reliably detected and retried. 
2916f046519fSTejun Heo */ 2917b1c72916STejun Heo nr_unknown = 0; 29181eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 29193b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2920b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2921a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2922f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2923b1c72916STejun Heo nr_unknown++; 2924b1c72916STejun Heo } 29253b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 29263b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2927a9a79dfeSJoe Perches ata_dev_dbg(dev, 2928a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 29293b761d3dSTejun Heo classes[dev->devno]); 29303b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 29313b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2932a9a79dfeSJoe Perches ata_dev_dbg(dev, 2933a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 29343b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 29353b761d3dSTejun Heo } 2936f046519fSTejun Heo } 2937f046519fSTejun Heo 2938b1c72916STejun Heo if (classify && nr_unknown) { 2939f046519fSTejun Heo if (try < max_tries) { 2940a9a79dfeSJoe Perches ata_link_warn(link, 2941a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 29423b761d3dSTejun Heo nr_unknown); 2943b1c72916STejun Heo failed_link = link; 2944f046519fSTejun Heo rc = -EAGAIN; 2945f046519fSTejun Heo goto fail; 2946f046519fSTejun Heo } 2947a9a79dfeSJoe Perches ata_link_warn(link, 29483b761d3dSTejun Heo "link online but %d devices misclassified, " 29493b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2950f046519fSTejun Heo } 2951f046519fSTejun Heo 2952c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2953cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2954b1c72916STejun Heo if (slave) 2955b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 295619b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2957c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 29586b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2959416dc9edSTejun Heo 2960416dc9edSTejun Heo rc = 0; 2961fccb6ea5STejun Heo out: 2962fccb6ea5STejun Heo /* clear hotplug flag */ 2963fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2964b1c72916STejun Heo if (slave) 2965b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2966afaa5c37STejun Heo 2967afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2968afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2969afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2970afaa5c37STejun Heo 2971c6fd2807SJeff Garzik return rc; 2972416dc9edSTejun Heo 2973416dc9edSTejun Heo fail: 29745958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 29755958e302STejun Heo if (!ata_is_host_link(link) && 29765958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 29775958e302STejun Heo rc = -ERESTART; 29785958e302STejun Heo 29797a46c078SGwendal Grignou if (try >= max_tries) { 29808ea7645cSTejun Heo /* 29818ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 29828ea7645cSTejun Heo * can be retried on the next phy event. This risks 29838ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 29848ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
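 *
 * When tries do remain, the code after this block instead sleeps out
 * whatever is left of the current per-try deadline before jumping
 * back to retry; the ata_eh_release()/ata_eh_acquire() pair around
 * that sleep lets EH on sibling ports make progress in the meantime.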
29858ea7645cSTejun Heo */ 29868ea7645cSTejun Heo if (ata_is_host_link(link)) 29878ea7645cSTejun Heo ata_eh_thaw_port(ap); 2988416dc9edSTejun Heo goto out; 29898ea7645cSTejun Heo } 2990416dc9edSTejun Heo 2991416dc9edSTejun Heo now = jiffies; 2992416dc9edSTejun Heo if (time_before(now, deadline)) { 2993416dc9edSTejun Heo unsigned long delta = deadline - now; 2994416dc9edSTejun Heo 2995a9a79dfeSJoe Perches ata_link_warn(failed_link, 29960a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 29970a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2998416dc9edSTejun Heo 2999c0c362b6STejun Heo ata_eh_release(ap); 3000416dc9edSTejun Heo while (delta) 3001416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 3002c0c362b6STejun Heo ata_eh_acquire(ap); 3003416dc9edSTejun Heo } 3004416dc9edSTejun Heo 30057a46c078SGwendal Grignou /* 30067a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 30077a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 30087a46c078SGwendal Grignou */ 30097a46c078SGwendal Grignou if (rc == -ERESTART) { 30107a46c078SGwendal Grignou if (ata_is_host_link(link)) 30117a46c078SGwendal Grignou ata_eh_thaw_port(ap); 30127a46c078SGwendal Grignou goto out; 30137a46c078SGwendal Grignou } 30147a46c078SGwendal Grignou 3015b1c72916STejun Heo if (try == max_tries - 1) { 3016a07d499bSTejun Heo sata_down_spd_limit(link, 0); 3017b1c72916STejun Heo if (slave) 3018a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 3019b1c72916STejun Heo } else if (rc == -EPIPE) 3020a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 3021b1c72916STejun Heo 3022416dc9edSTejun Heo if (hardreset) 3023416dc9edSTejun Heo reset = hardreset; 3024416dc9edSTejun Heo goto retry; 3025c6fd2807SJeff Garzik } 3026c6fd2807SJeff Garzik 302745fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 302845fabbb7SElias Oltmanns { 302945fabbb7SElias Oltmanns struct ata_link *link; 303045fabbb7SElias Oltmanns struct ata_device *dev; 303145fabbb7SElias Oltmanns unsigned long flags; 303245fabbb7SElias Oltmanns 303345fabbb7SElias Oltmanns /* 303445fabbb7SElias Oltmanns * This function can be thought of as an extended version of 303545fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 303645fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 303745fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 303845fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 303945fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 304045fabbb7SElias Oltmanns * up park requests to other devices on the same port or 304145fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 304245fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 304345fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 304445fabbb7SElias Oltmanns * 304545fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 304616735d02SWolfram Sang * through reinit_completion() (see below) or complete_all() 304745fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 304845fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 304945fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 305045fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 305145fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 305245fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 305345fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 305445fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 305545fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 305645fabbb7SElias Oltmanns * ata_eh_recover() again. 305745fabbb7SElias Oltmanns */ 305845fabbb7SElias Oltmanns 305945fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 306016735d02SWolfram Sang reinit_completion(&ap->park_req_pending); 30611eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 30621eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 306345fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 306445fabbb7SElias Oltmanns 306545fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 306645fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 306745fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 306845fabbb7SElias Oltmanns } 306945fabbb7SElias Oltmanns } 307045fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 307145fabbb7SElias Oltmanns } 307245fabbb7SElias Oltmanns 307345fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 307445fabbb7SElias Oltmanns { 307545fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 307645fabbb7SElias Oltmanns struct ata_taskfile tf; 307745fabbb7SElias Oltmanns unsigned int err_mask; 307845fabbb7SElias Oltmanns 307945fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 308045fabbb7SElias Oltmanns if (park) { 308145fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 308245fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 308345fabbb7SElias Oltmanns tf.feature = 0x44; 308445fabbb7SElias Oltmanns tf.lbal = 0x4c; 308545fabbb7SElias Oltmanns tf.lbam = 0x4e; 308645fabbb7SElias Oltmanns tf.lbah = 0x55; 308745fabbb7SElias Oltmanns } else { 308845fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 308945fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 309045fabbb7SElias Oltmanns } 309145fabbb7SElias Oltmanns 309245fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 309345fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 309445fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 309545fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 3096a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 309745fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 309845fabbb7SElias Oltmanns } 309945fabbb7SElias Oltmanns } 310045fabbb7SElias Oltmanns 31010260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 3102c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 3103c6fd2807SJeff Garzik { 31040260731fSTejun Heo struct ata_port *ap = link->ap; 31050260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3106c6fd2807SJeff Garzik struct ata_device *dev; 31078c3c52a8STejun Heo unsigned int new_mask = 0; 3108c6fd2807SJeff Garzik unsigned long flags; 3109f58229f8STejun Heo int rc = 0; 3110c6fd2807SJeff Garzik 3111c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3112c6fd2807SJeff Garzik 31138c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY 
must 31148c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 31158c3c52a8STejun Heo * device before the master device is identified. 31168c3c52a8STejun Heo */ 31171eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 3118f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 3119f58229f8STejun Heo unsigned int readid_flags = 0; 3120c6fd2807SJeff Garzik 3121bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 3122bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 3123bff04647STejun Heo 31249666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 3125633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 3126633273a3STejun Heo 3127b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 3128c6fd2807SJeff Garzik rc = -EIO; 31298c3c52a8STejun Heo goto err; 3130c6fd2807SJeff Garzik } 3131c6fd2807SJeff Garzik 31320260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 3133422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 3134422c9daaSTejun Heo readid_flags); 3135c6fd2807SJeff Garzik if (rc) 31368c3c52a8STejun Heo goto err; 3137c6fd2807SJeff Garzik 31380260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 3139c6fd2807SJeff Garzik 3140baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 3141baa1e78aSTejun Heo * transfer mode. 3142baa1e78aSTejun Heo */ 3143baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3144baa1e78aSTejun Heo 3145c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 3146ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 3147c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 3148c6fd2807SJeff Garzik ehc->tries[dev->devno] && 3149c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3150842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3151842faa6cSTejun Heo * permanently set once all configurations are 3152842faa6cSTejun Heo * complete. This is necessary because new 3153842faa6cSTejun Heo * device configuration is done in two 3154842faa6cSTejun Heo * separate loops. 3155842faa6cSTejun Heo */ 3156c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3157c6fd2807SJeff Garzik 3158633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3159633273a3STejun Heo rc = sata_pmp_attach(dev); 3160633273a3STejun Heo else 3161633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3162633273a3STejun Heo readid_flags, dev->id); 3163842faa6cSTejun Heo 3164842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3165842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3166842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3167842faa6cSTejun Heo 31688c3c52a8STejun Heo switch (rc) { 31698c3c52a8STejun Heo case 0: 317099cf610aSTejun Heo /* clear error info accumulated during probe */ 317199cf610aSTejun Heo ata_ering_clear(&dev->ering); 3172f58229f8STejun Heo new_mask |= 1 << dev->devno; 31738c3c52a8STejun Heo break; 31748c3c52a8STejun Heo case -ENOENT: 317555a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 317655a8e2c8STejun Heo * device. No need to reset. Just 3177842faa6cSTejun Heo * thaw and ignore the device. 
317855a8e2c8STejun Heo */ 317955a8e2c8STejun Heo ata_eh_thaw_port(ap); 3180c6fd2807SJeff Garzik break; 31818c3c52a8STejun Heo default: 31828c3c52a8STejun Heo goto err; 31838c3c52a8STejun Heo } 31848c3c52a8STejun Heo } 3185c6fd2807SJeff Garzik } 3186c6fd2807SJeff Garzik 3187c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 318833267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 318933267325STejun Heo if (ap->ops->cable_detect) 3190c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 319133267325STejun Heo ata_force_cbl(ap); 319233267325STejun Heo } 3193c1c4e8d5STejun Heo 31948c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 31958c3c52a8STejun Heo * device detection messages backwards. 31968c3c52a8STejun Heo */ 31971eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 31984f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 31998c3c52a8STejun Heo continue; 32008c3c52a8STejun Heo 3201842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3202842faa6cSTejun Heo 32034f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 32044f7c2874STejun Heo continue; 32054f7c2874STejun Heo 32068c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 32078c3c52a8STejun Heo rc = ata_dev_configure(dev); 32088c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3209842faa6cSTejun Heo if (rc) { 3210842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 32118c3c52a8STejun Heo goto err; 3212842faa6cSTejun Heo } 32138c3c52a8STejun Heo 3214c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3215c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3216c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3217baa1e78aSTejun Heo 321855a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3219baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3220c6fd2807SJeff Garzik } 3221c6fd2807SJeff Garzik 32228c3c52a8STejun Heo return 0; 32238c3c52a8STejun Heo 32248c3c52a8STejun Heo err: 3225c6fd2807SJeff Garzik *r_failed_dev = dev; 32268c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3227c6fd2807SJeff Garzik return rc; 3228c6fd2807SJeff Garzik } 3229c6fd2807SJeff Garzik 32306f1d1e3aSTejun Heo /** 32316f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 32326f1d1e3aSTejun Heo * @link: link on which timings will be programmed 323398a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 32346f1d1e3aSTejun Heo * 32356f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 32366f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 32376f1d1e3aSTejun Heo * returned in @r_failed_dev. 32386f1d1e3aSTejun Heo * 32396f1d1e3aSTejun Heo * LOCKING: 32406f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
32416f1d1e3aSTejun Heo * 32426f1d1e3aSTejun Heo * RETURNS: 32436f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 32446f1d1e3aSTejun Heo */ 32456f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 32466f1d1e3aSTejun Heo { 32476f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 324800115e0fSTejun Heo struct ata_device *dev; 324900115e0fSTejun Heo int rc; 32506f1d1e3aSTejun Heo 325176326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 32521eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 325376326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 325476326ac1STejun Heo struct ata_ering_entry *ent; 325576326ac1STejun Heo 325676326ac1STejun Heo ent = ata_ering_top(&dev->ering); 325776326ac1STejun Heo if (ent) 325876326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 325976326ac1STejun Heo } 326076326ac1STejun Heo } 326176326ac1STejun Heo 32626f1d1e3aSTejun Heo /* has private set_mode? */ 32636f1d1e3aSTejun Heo if (ap->ops->set_mode) 326400115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 326500115e0fSTejun Heo else 326600115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 326700115e0fSTejun Heo 326800115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 32691eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 327000115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 327100115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 327200115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 327300115e0fSTejun Heo 327400115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 327500115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 327600115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 327700115e0fSTejun Heo } 327800115e0fSTejun Heo 327900115e0fSTejun Heo return rc; 32806f1d1e3aSTejun Heo } 32816f1d1e3aSTejun Heo 328211fc33daSTejun Heo /** 328311fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 328411fc33daSTejun Heo * @dev: ATAPI device to clear UA for 328511fc33daSTejun Heo * 328611fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 328711fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 328811fc33daSTejun Heo * function clears UA. 328911fc33daSTejun Heo * 329011fc33daSTejun Heo * LOCKING: 329111fc33daSTejun Heo * EH context (may sleep). 329211fc33daSTejun Heo * 329311fc33daSTejun Heo * RETURNS: 329411fc33daSTejun Heo * 0 on success, -errno on failure. 
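 *
 * Background: after a reset an ATAPI device typically fails the next
 * command with CHECK CONDITION and a UNIT ATTENTION sense key
 * (commonly ASC 0x29, "power on, reset, or bus device reset
 * occurred") until sense data has been fetched.  The loop below
 * alternates TEST UNIT READY and REQUEST SENSE up to ATA_EH_UA_TRIES
 * times to acknowledge and clear that condition.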
329511fc33daSTejun Heo */ 329611fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 329711fc33daSTejun Heo { 329811fc33daSTejun Heo int i; 329911fc33daSTejun Heo 330011fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3301b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 330211fc33daSTejun Heo u8 sense_key = 0; 330311fc33daSTejun Heo unsigned int err_mask; 330411fc33daSTejun Heo 330511fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 330611fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3307a9a79dfeSJoe Perches ata_dev_warn(dev, 3308a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3309a9a79dfeSJoe Perches err_mask); 331011fc33daSTejun Heo return -EIO; 331111fc33daSTejun Heo } 331211fc33daSTejun Heo 331311fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 331411fc33daSTejun Heo return 0; 331511fc33daSTejun Heo 331611fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 331711fc33daSTejun Heo if (err_mask) { 3318a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 331911fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 332011fc33daSTejun Heo return -EIO; 332111fc33daSTejun Heo } 332211fc33daSTejun Heo } 332311fc33daSTejun Heo 3324a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3325a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 332611fc33daSTejun Heo 332711fc33daSTejun Heo return 0; 332811fc33daSTejun Heo } 332911fc33daSTejun Heo 33306013efd8STejun Heo /** 33316013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 33326013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 33336013efd8STejun Heo * 33346013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 33356013efd8STejun Heo * immediately as it means that @dev failed to remap and already 33366013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 33376013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 33386013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 33396013efd8STejun Heo * to be retried. 33406013efd8STejun Heo * 33416013efd8STejun Heo * This function determines whether FLUSH failure retry is 33426013efd8STejun Heo * necessary and performs it if so. 33436013efd8STejun Heo * 33446013efd8STejun Heo * RETURNS: 33456013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 33466013efd8STejun Heo */ 33476013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 33486013efd8STejun Heo { 33496013efd8STejun Heo struct ata_link *link = dev->link; 33506013efd8STejun Heo struct ata_port *ap = link->ap; 33516013efd8STejun Heo struct ata_queued_cmd *qc; 33526013efd8STejun Heo struct ata_taskfile tf; 33536013efd8STejun Heo unsigned int err_mask; 33546013efd8STejun Heo int rc = 0; 33556013efd8STejun Heo 33566013efd8STejun Heo /* did flush fail for this device? 
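 * Only the command recorded as link->active_tag needs checking here:
 * FLUSH CACHE / FLUSH CACHE EXT are non-NCQ commands, so a failed
 * flush shows up as the single active non-queued command on the link.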
*/ 33576013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 33586013efd8STejun Heo return 0; 33596013efd8STejun Heo 33606013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 33616013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 33626013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 33636013efd8STejun Heo return 0; 33646013efd8STejun Heo 33656013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 33666013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 33676013efd8STejun Heo return 0; 33686013efd8STejun Heo 33696013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 33706013efd8STejun Heo ata_tf_init(dev, &tf); 33716013efd8STejun Heo 33726013efd8STejun Heo tf.command = qc->tf.command; 33736013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 33746013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 33756013efd8STejun Heo 3376a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 33776013efd8STejun Heo tf.command, qc->err_mask); 33786013efd8STejun Heo 33796013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 33806013efd8STejun Heo if (!err_mask) { 33816013efd8STejun Heo /* 33826013efd8STejun Heo * FLUSH is complete but there's no way to 33836013efd8STejun Heo * successfully complete a failed command from EH. 33846013efd8STejun Heo * Making sure retry is allowed at least once and 33856013efd8STejun Heo * retrying it should do the trick - whatever was in 33866013efd8STejun Heo * the cache is already on the platter and this won't 33876013efd8STejun Heo * cause infinite loop. 33886013efd8STejun Heo */ 33896013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 33906013efd8STejun Heo } else { 3391a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 33926013efd8STejun Heo err_mask); 33936013efd8STejun Heo rc = -EIO; 33946013efd8STejun Heo 33956013efd8STejun Heo /* if device failed it, report it to upper layers */ 33966013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 33976013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 33986013efd8STejun Heo qc->result_tf = tf; 33996013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 34006013efd8STejun Heo rc = 0; 34016013efd8STejun Heo } 34026013efd8STejun Heo } 34036013efd8STejun Heo return rc; 34046013efd8STejun Heo } 34056013efd8STejun Heo 34066b7ae954STejun Heo /** 34076b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 34086b7ae954STejun Heo * @link: link to configure power management 34096b7ae954STejun Heo * @policy: the link power management policy 34106b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 34116b7ae954STejun Heo * 34126b7ae954STejun Heo * Enable SATA Interface power management. This will enable 34136b7ae954STejun Heo * Device Interface Power Management (DIPM) for min_power 34146b7ae954STejun Heo * policy, and then call driver specific callbacks for 34156b7ae954STejun Heo * enabling Host Initiated Power management. 34166b7ae954STejun Heo * 34176b7ae954STejun Heo * LOCKING: 34186b7ae954STejun Heo * EH context. 34196b7ae954STejun Heo * 34206b7ae954STejun Heo * RETURNS: 34216b7ae954STejun Heo * 0 on success, -errno on failure. 34226b7ae954STejun Heo */ 34236b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 34246b7ae954STejun Heo struct ata_device **r_failed_dev) 34256b7ae954STejun Heo { 34266c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? 
link->ap : NULL; 34276b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 34286b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3429e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 34305f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 34316b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 34326b7ae954STejun Heo unsigned int err_mask; 34336b7ae954STejun Heo int rc; 34346b7ae954STejun Heo 34356b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 34366b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 34376b7ae954STejun Heo return 0; 34386b7ae954STejun Heo 34396b7ae954STejun Heo /* 34406b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 34416b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 34426b7ae954STejun Heo * device and link configurations such that the host always 34436b7ae954STejun Heo * allows DIPM requests. 34446b7ae954STejun Heo */ 34456b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 34466b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3447ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 34486b7ae954STejun Heo 34496b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 34506b7ae954STejun Heo if (!link_dev) 34516b7ae954STejun Heo link_dev = dev; 34526b7ae954STejun Heo 34536b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 34546b7ae954STejun Heo lpm_dev = dev; 34556b7ae954STejun Heo 34566b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 34576b7ae954STejun Heo if (!hipm) 34586b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 34596b7ae954STejun Heo 34606b7ae954STejun Heo /* disable DIPM before changing link config */ 34616b7ae954STejun Heo if (policy != ATA_LPM_MIN_POWER && dipm) { 34626b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 34636b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 34646b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3465a9a79dfeSJoe Perches ata_dev_warn(dev, 34666b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 34676b7ae954STejun Heo err_mask); 34686b7ae954STejun Heo rc = -EIO; 34696b7ae954STejun Heo goto fail; 34706b7ae954STejun Heo } 34716b7ae954STejun Heo } 34726b7ae954STejun Heo } 34736b7ae954STejun Heo 34746c8ea89cSTejun Heo if (ap) { 34756b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 34766b7ae954STejun Heo if (!rc && ap->slave_link) 34776b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 34786c8ea89cSTejun Heo } else 34796c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 34806b7ae954STejun Heo 34816b7ae954STejun Heo /* 34826b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 34836b7ae954STejun Heo * device on the link. 34846b7ae954STejun Heo */ 34856b7ae954STejun Heo if (rc) { 34866b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 34876b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 34886b7ae954STejun Heo return 0; 34896b7ae954STejun Heo } 34906b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 34916b7ae954STejun Heo goto fail; 34926b7ae954STejun Heo } 34936b7ae954STejun Heo 3494e5005b15STejun Heo /* 3495e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3496e5005b15STejun Heo * with the new policy set. 
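 *
 * Only the MIN_POWER policy re-enables DIPM in the loop below,
 * mirroring the disable pass above; for any other policy the devices
 * are left with DIPM off so that link power transitions stay under
 * host control.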
3497e5005b15STejun Heo */ 3498e5005b15STejun Heo link->lpm_policy = policy; 3499e5005b15STejun Heo if (ap && ap->slave_link) 3500e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3501e5005b15STejun Heo 35026b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 35036b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3504ae01b249STejun Heo if (policy == ATA_LPM_MIN_POWER && !no_dipm && 3505ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 35066b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 35076b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 35086b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3509a9a79dfeSJoe Perches ata_dev_warn(dev, 35106b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 35116b7ae954STejun Heo err_mask); 35126b7ae954STejun Heo rc = -EIO; 35136b7ae954STejun Heo goto fail; 35146b7ae954STejun Heo } 35156b7ae954STejun Heo } 35166b7ae954STejun Heo } 35176b7ae954STejun Heo 351809c5b480SGabriele Mazzotta link->last_lpm_change = jiffies; 351909c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED; 352009c5b480SGabriele Mazzotta 35216b7ae954STejun Heo return 0; 35226b7ae954STejun Heo 35236b7ae954STejun Heo fail: 3524e5005b15STejun Heo /* restore the old policy */ 3525e5005b15STejun Heo link->lpm_policy = old_policy; 3526e5005b15STejun Heo if (ap && ap->slave_link) 3527e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3528e5005b15STejun Heo 35296b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 35306b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3531a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 35326b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 35336b7ae954STejun Heo } 35346b7ae954STejun Heo if (r_failed_dev) 35356b7ae954STejun Heo *r_failed_dev = dev; 35366b7ae954STejun Heo return rc; 35376b7ae954STejun Heo } 35386b7ae954STejun Heo 35398a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3540c6fd2807SJeff Garzik { 3541f58229f8STejun Heo struct ata_device *dev; 3542f58229f8STejun Heo int cnt = 0; 3543c6fd2807SJeff Garzik 35441eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3545c6fd2807SJeff Garzik cnt++; 3546c6fd2807SJeff Garzik return cnt; 3547c6fd2807SJeff Garzik } 3548c6fd2807SJeff Garzik 35490260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3550c6fd2807SJeff Garzik { 3551f58229f8STejun Heo struct ata_device *dev; 3552f58229f8STejun Heo int cnt = 0; 3553c6fd2807SJeff Garzik 35541eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3555f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3556c6fd2807SJeff Garzik cnt++; 3557c6fd2807SJeff Garzik return cnt; 3558c6fd2807SJeff Garzik } 3559c6fd2807SJeff Garzik 35600260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3561c6fd2807SJeff Garzik { 3562672b2d65STejun Heo struct ata_port *ap = link->ap; 35630260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3564f58229f8STejun Heo struct ata_device *dev; 3565c6fd2807SJeff Garzik 3566f9df58cbSTejun Heo /* skip disabled links */ 3567f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3568f9df58cbSTejun Heo return 1; 3569f9df58cbSTejun Heo 3570e2f3d75fSTejun Heo /* skip if explicitly requested */ 3571e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3572e2f3d75fSTejun Heo return 1; 3573e2f3d75fSTejun Heo 3574672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3575672b2d65STejun Heo if ((ap->pflags & 
ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3576672b2d65STejun Heo return 0;
3577672b2d65STejun Heo 
3578672b2d65STejun Heo /* reset at least once if reset is requested */
3579672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) &&
3580672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET))
3581c6fd2807SJeff Garzik return 0;
3582c6fd2807SJeff Garzik 
3583c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */
35841eca4365STejun Heo ata_for_each_dev(dev, link, ALL) {
3585c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN &&
3586c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE)
3587c6fd2807SJeff Garzik return 0;
3588c6fd2807SJeff Garzik }
3589c6fd2807SJeff Garzik 
3590c6fd2807SJeff Garzik return 1;
3591c6fd2807SJeff Garzik }
3592c6fd2807SJeff Garzik 
3593c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3594c2c7a89cSTejun Heo {
3595c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3596c2c7a89cSTejun Heo u64 now = get_jiffies_64();
3597c2c7a89cSTejun Heo int *trials = void_arg;
3598c2c7a89cSTejun Heo 
35996868225eSLin Ming if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
36006868225eSLin Ming (ent->timestamp < now - min(now, interval)))
3601c2c7a89cSTejun Heo return -1;
3602c2c7a89cSTejun Heo 
3603c2c7a89cSTejun Heo (*trials)++;
3604c2c7a89cSTejun Heo return 0;
3605c2c7a89cSTejun Heo }
3606c2c7a89cSTejun Heo 
360702c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
360802c05a27STejun Heo {
360902c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context;
3610c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev);
3611c2c7a89cSTejun Heo int trials = 0;
361202c05a27STejun Heo 
361302c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
361402c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno)))
361502c05a27STejun Heo return 0;
361602c05a27STejun Heo 
361702c05a27STejun Heo ata_eh_detach_dev(dev);
361802c05a27STejun Heo ata_dev_init(dev);
361902c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno);
3620cf480626STejun Heo ehc->i.action |= ATA_EH_RESET;
362100115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0;
362200115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno);
362302c05a27STejun Heo 
36246b7ae954STejun Heo /* the link may be in a deep sleep, wake it up */
36256c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) {
36266c8ea89cSTejun Heo if (ata_is_host_link(link))
36276c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
36286c8ea89cSTejun Heo ATA_LPM_EMPTY);
36296c8ea89cSTejun Heo else
36306c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
36316c8ea89cSTejun Heo ATA_LPM_EMPTY);
36326c8ea89cSTejun Heo }
36336b7ae954STejun Heo 
3634c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific
3635c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device
3636c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if
3637c2c7a89cSTejun Heo * there are consecutive failed probes.
3638c2c7a89cSTejun Heo *
3639c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3640c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3641c2c7a89cSTejun Heo * forced to 1.5Gbps.
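 *
 * (The ata_ering_record() call below logs the current probe attempt
 * before ata_ering_map() counts entries, so 'trials' already includes
 * this attempt; hence the "trials > ATA_EH_PROBE_TRIALS" test below.)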
3642c2c7a89cSTejun Heo *
3643c2c7a89cSTejun Heo * This is to work around cases where failed link speed
3644c2c7a89cSTejun Heo * negotiation results in device misdetection leading to
3645c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events.
3646c2c7a89cSTejun Heo */
3647c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3648c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3649c2c7a89cSTejun Heo 
3650c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS)
3651c2c7a89cSTejun Heo sata_down_spd_limit(link, 1);
3652c2c7a89cSTejun Heo 
365302c05a27STejun Heo return 1;
365402c05a27STejun Heo }
365502c05a27STejun Heo 
36569b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3657fee7ca72STejun Heo {
36589af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context;
3659fee7ca72STejun Heo 
3660cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice.
3661cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress.
3662cf9a590aSTejun Heo */
3663cf9a590aSTejun Heo if (err != -EAGAIN)
3664fee7ca72STejun Heo ehc->tries[dev->devno]--;
3665fee7ca72STejun Heo 
3666fee7ca72STejun Heo switch (err) {
3667fee7ca72STejun Heo case -ENODEV:
3668fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */
3669fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno);
/* fall through */
3670fee7ca72STejun Heo case -EINVAL:
3671fee7ca72STejun Heo /* give it just one more chance */
3672fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
/* fall through */
3673fee7ca72STejun Heo case -EIO:
3674d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) {
3675fee7ca72STejun Heo /* This is the last chance, better to slow
3676fee7ca72STejun Heo * down than lose it.
3677fee7ca72STejun Heo */
3678a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3679d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0)
3680fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3681fee7ca72STejun Heo }
3682fee7ca72STejun Heo }
3683fee7ca72STejun Heo 
3684fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3685fee7ca72STejun Heo /* disable device if it has used up all its chances */
3686fee7ca72STejun Heo ata_dev_disable(dev);
3687fee7ca72STejun Heo 
3688fee7ca72STejun Heo /* detach if offline */
3689b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3690fee7ca72STejun Heo ata_eh_detach_dev(dev);
3691fee7ca72STejun Heo 
369202c05a27STejun Heo /* schedule probe if necessary */
369387fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) {
3694fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
369587fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0,
369687fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno]));
369787fbc5a0STejun Heo }
36989b1e2658STejun Heo 
36999b1e2658STejun Heo return 1;
3700fee7ca72STejun Heo } else {
3701cf480626STejun Heo ehc->i.action |= ATA_EH_RESET;
37029b1e2658STejun Heo return 0;
3703fee7ca72STejun Heo }
3704fee7ca72STejun Heo }
3705fee7ca72STejun Heo 
3706c6fd2807SJeff Garzik /**
3707c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error
3708c6fd2807SJeff Garzik * @ap: host port to recover
3709c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL)
3710c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL)
3711c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL)
3712c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL)
37139b1e2658STejun Heo * @r_failed_link: out parameter for failed link
3714c6fd2807SJeff Garzik *
3715c6fd2807SJeff Garzik * This is the alpha and omega, yin and yang, heart and soul of
3716c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to
37179b1e2658STejun Heo * recover each link and hotplug requests are recorded in the
37189b1e2658STejun Heo * link's eh_context. This function executes all the operations
37199b1e2658STejun Heo * with appropriate retries and fallbacks to resurrect failed
3720c6fd2807SJeff Garzik * devices, detach goners and greet newcomers.
3721c6fd2807SJeff Garzik *
3722c6fd2807SJeff Garzik * LOCKING:
3723c6fd2807SJeff Garzik * Kernel thread context (may sleep).
3724c6fd2807SJeff Garzik *
3725c6fd2807SJeff Garzik * RETURNS:
3726c6fd2807SJeff Garzik * 0 on success, -errno on failure.
3727c6fd2807SJeff Garzik */
3728fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3729c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
37309b1e2658STejun Heo ata_postreset_fn_t postreset,
37319b1e2658STejun Heo struct ata_link **r_failed_link)
3732c6fd2807SJeff Garzik {
37339b1e2658STejun Heo struct ata_link *link;
3734c6fd2807SJeff Garzik struct ata_device *dev;
37356b7ae954STejun Heo int rc, nr_fails;
373645fabbb7SElias Oltmanns unsigned long flags, deadline;
3737c6fd2807SJeff Garzik 
3738c6fd2807SJeff Garzik DPRINTK("ENTER\n");
3739c6fd2807SJeff Garzik 
3740c6fd2807SJeff Garzik /* prep for recovery */
37411eca4365STejun Heo ata_for_each_link(link, ap, EDGE) {
37429b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context;
37439b1e2658STejun Heo 
3744f9df58cbSTejun Heo /* re-enable link? 
*/ 3745f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3746f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3747f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3748f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3749f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3750f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3751f9df58cbSTejun Heo } 3752f9df58cbSTejun Heo 37531eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3754fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3755fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3756fd995f70STejun Heo else 3757c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3758c6fd2807SJeff Garzik 375979a55b72STejun Heo /* collect port action mask recorded in dev actions */ 37609b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 37619b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3762f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 376379a55b72STejun Heo 3764c6fd2807SJeff Garzik /* process hotplug request */ 3765c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3766c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3767c6fd2807SJeff Garzik 376802c05a27STejun Heo /* schedule probe if necessary */ 376902c05a27STejun Heo if (!ata_dev_enabled(dev)) 377002c05a27STejun Heo ata_eh_schedule_probe(dev); 3771c6fd2807SJeff Garzik } 37729b1e2658STejun Heo } 3773c6fd2807SJeff Garzik 3774c6fd2807SJeff Garzik retry: 3775c6fd2807SJeff Garzik rc = 0; 3776c6fd2807SJeff Garzik 3777c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3778c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3779c6fd2807SJeff Garzik goto out; 3780c6fd2807SJeff Garzik 37819b1e2658STejun Heo /* prep for EH */ 37821eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37839b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37849b1e2658STejun Heo 3785c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 37860260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3787c6fd2807SJeff Garzik ehc->i.action = 0; 3788c6fd2807SJeff Garzik 37891eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3790f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 37919b1e2658STejun Heo } 3792c6fd2807SJeff Garzik 3793c6fd2807SJeff Garzik /* reset */ 37941eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37959b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37969b1e2658STejun Heo 3797cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 37989b1e2658STejun Heo continue; 37999b1e2658STejun Heo 38009b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3801dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3802c6fd2807SJeff Garzik if (rc) { 3803a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3804c6fd2807SJeff Garzik goto out; 3805c6fd2807SJeff Garzik } 38069b1e2658STejun Heo } 3807c6fd2807SJeff Garzik 380845fabbb7SElias Oltmanns do { 380945fabbb7SElias Oltmanns unsigned long now; 381045fabbb7SElias Oltmanns 381145fabbb7SElias Oltmanns /* 381245fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 381345fabbb7SElias Oltmanns * ap->park_req_pending 381445fabbb7SElias Oltmanns */ 381545fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 381645fabbb7SElias Oltmanns 381745fabbb7SElias Oltmanns deadline = jiffies; 38181eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38191eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 382045fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 382145fabbb7SElias Oltmanns unsigned long tmp; 382245fabbb7SElias Oltmanns 38239162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 38249162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 382545fabbb7SElias Oltmanns continue; 382645fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 382745fabbb7SElias Oltmanns ATA_EH_PARK)) 382845fabbb7SElias Oltmanns continue; 382945fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 383045fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 383145fabbb7SElias Oltmanns deadline = tmp; 383245fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 383345fabbb7SElias Oltmanns continue; 383445fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 383545fabbb7SElias Oltmanns continue; 383645fabbb7SElias Oltmanns 383745fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 383845fabbb7SElias Oltmanns } 383945fabbb7SElias Oltmanns } 384045fabbb7SElias Oltmanns 384145fabbb7SElias Oltmanns now = jiffies; 384245fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 384345fabbb7SElias Oltmanns break; 384445fabbb7SElias Oltmanns 3845c0c362b6STejun Heo ata_eh_release(ap); 384645fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 384745fabbb7SElias Oltmanns deadline - now); 3848c0c362b6STejun Heo ata_eh_acquire(ap); 384945fabbb7SElias Oltmanns } while (deadline); 38501eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38511eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 385245fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 385345fabbb7SElias Oltmanns (1 << dev->devno))) 385445fabbb7SElias Oltmanns continue; 385545fabbb7SElias Oltmanns 385645fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 385745fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 385845fabbb7SElias Oltmanns } 385945fabbb7SElias Oltmanns } 386045fabbb7SElias Oltmanns 38619b1e2658STejun Heo /* the rest */ 38626b7ae954STejun Heo nr_fails = 0; 38636b7ae954STejun 
Heo ata_for_each_link(link, ap, PMP_FIRST) { 38649b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 38659b1e2658STejun Heo 38666b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 38676b7ae954STejun Heo goto config_lpm; 38686b7ae954STejun Heo 3869c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 38700260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3871c6fd2807SJeff Garzik if (rc) 38726b7ae954STejun Heo goto rest_fail; 3873c6fd2807SJeff Garzik 3874633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3875633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3876633273a3STejun Heo ehc->i.action = 0; 3877633273a3STejun Heo return 0; 3878633273a3STejun Heo } 3879633273a3STejun Heo 3880baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3881baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 38820260731fSTejun Heo rc = ata_set_mode(link, &dev); 38834ae72a1eSTejun Heo if (rc) 38846b7ae954STejun Heo goto rest_fail; 3885baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3886c6fd2807SJeff Garzik } 3887c6fd2807SJeff Garzik 388811fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 388911fc33daSTejun Heo * disrupting the current users of the device. 389011fc33daSTejun Heo */ 389111fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 38921eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 389311fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 389411fc33daSTejun Heo continue; 389511fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 389611fc33daSTejun Heo if (rc) 38976b7ae954STejun Heo goto rest_fail; 389821334205SAaron Lu if (zpodd_dev_enabled(dev)) 389921334205SAaron Lu zpodd_post_poweron(dev); 390011fc33daSTejun Heo } 390111fc33daSTejun Heo } 390211fc33daSTejun Heo 39036013efd8STejun Heo /* retry flush if necessary */ 39046013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 39059162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 39069162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 39076013efd8STejun Heo continue; 39086013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 39096013efd8STejun Heo if (rc) 39106b7ae954STejun Heo goto rest_fail; 39116013efd8STejun Heo } 39126013efd8STejun Heo 39136b7ae954STejun Heo config_lpm: 391411fc33daSTejun Heo /* configure link power saving */ 39156b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 39166b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 39176b7ae954STejun Heo if (rc) 39186b7ae954STejun Heo goto rest_fail; 39196b7ae954STejun Heo } 3920ca77329fSKristen Carlson Accardi 39219b1e2658STejun Heo /* this link is okay now */ 39229b1e2658STejun Heo ehc->i.flags = 0; 39239b1e2658STejun Heo continue; 3924c6fd2807SJeff Garzik 39256b7ae954STejun Heo rest_fail: 39266b7ae954STejun Heo nr_fails++; 39276b7ae954STejun Heo if (dev) 39280a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3929c6fd2807SJeff Garzik 3930b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3931b06ce3e5STejun Heo /* PMP reset requires working host port. 3932b06ce3e5STejun Heo * Can't retry if it's frozen. 
3933b06ce3e5STejun Heo */ 3934071f44b1STejun Heo if (sata_pmp_attached(ap)) 3935b06ce3e5STejun Heo goto out; 39369b1e2658STejun Heo break; 39379b1e2658STejun Heo } 3938b06ce3e5STejun Heo } 39399b1e2658STejun Heo 39406b7ae954STejun Heo if (nr_fails) 3941c6fd2807SJeff Garzik goto retry; 3942c6fd2807SJeff Garzik 3943c6fd2807SJeff Garzik out: 39449b1e2658STejun Heo if (rc && r_failed_link) 39459b1e2658STejun Heo *r_failed_link = link; 3946c6fd2807SJeff Garzik 3947c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3948c6fd2807SJeff Garzik return rc; 3949c6fd2807SJeff Garzik } 3950c6fd2807SJeff Garzik 3951c6fd2807SJeff Garzik /** 3952c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3953c6fd2807SJeff Garzik * @ap: host port to finish EH for 3954c6fd2807SJeff Garzik * 3955c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3956c6fd2807SJeff Garzik * failed qcs. 3957c6fd2807SJeff Garzik * 3958c6fd2807SJeff Garzik * LOCKING: 3959c6fd2807SJeff Garzik * None. 3960c6fd2807SJeff Garzik */ 3961fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3962c6fd2807SJeff Garzik { 3963c6fd2807SJeff Garzik int tag; 3964c6fd2807SJeff Garzik 3965c6fd2807SJeff Garzik /* retry or finish qcs */ 3966c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3967c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3968c6fd2807SJeff Garzik 3969c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3970c6fd2807SJeff Garzik continue; 3971c6fd2807SJeff Garzik 3972c6fd2807SJeff Garzik if (qc->err_mask) { 3973c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3974c6fd2807SJeff Garzik * generate sense data in this function, 3975c6fd2807SJeff Garzik * considering both err_mask and tf. 3976c6fd2807SJeff Garzik */ 397703faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3978c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 397903faab78STejun Heo else 398003faab78STejun Heo ata_eh_qc_complete(qc); 3981c6fd2807SJeff Garzik } else { 3982c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3983c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3984c6fd2807SJeff Garzik } else { 3985c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3986c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3987c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3988c6fd2807SJeff Garzik } 3989c6fd2807SJeff Garzik } 3990c6fd2807SJeff Garzik } 3991da917d69STejun Heo 3992da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3993da917d69STejun Heo WARN_ON(ap->nr_active_links); 3994da917d69STejun Heo ap->nr_active_links = 0; 3995c6fd2807SJeff Garzik } 3996c6fd2807SJeff Garzik 3997c6fd2807SJeff Garzik /** 3998c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3999c6fd2807SJeff Garzik * @ap: host port to handle error for 4000a1efdabaSTejun Heo * 4001c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 4002c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 4003c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 4004c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 4005c6fd2807SJeff Garzik * 4006c6fd2807SJeff Garzik * Perform standard error handling sequence. 4007c6fd2807SJeff Garzik * 4008c6fd2807SJeff Garzik * LOCKING: 4009c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
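 *
 * Illustrative only: a driver's ->error_handler() built on this helper
 * is typically a thin wrapper along these lines (my_prereset and
 * my_postreset are hypothetical driver callbacks, not libata symbols):
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, my_prereset, NULL, sata_std_hardreset,
 *			  my_postreset);
 *	}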
4010c6fd2807SJeff Garzik */ 4011c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 4012c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 4013c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 4014c6fd2807SJeff Garzik { 40159b1e2658STejun Heo struct ata_device *dev; 40169b1e2658STejun Heo int rc; 40179b1e2658STejun Heo 40189b1e2658STejun Heo ata_eh_autopsy(ap); 40199b1e2658STejun Heo ata_eh_report(ap); 40209b1e2658STejun Heo 40219b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 40229b1e2658STejun Heo NULL); 40239b1e2658STejun Heo if (rc) { 40241eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 40259b1e2658STejun Heo ata_dev_disable(dev); 40269b1e2658STejun Heo } 40279b1e2658STejun Heo 4028c6fd2807SJeff Garzik ata_eh_finish(ap); 4029c6fd2807SJeff Garzik } 4030c6fd2807SJeff Garzik 4031a1efdabaSTejun Heo /** 4032a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 4033a1efdabaSTejun Heo * @ap: host port to handle error for 4034a1efdabaSTejun Heo * 4035a1efdabaSTejun Heo * Standard error handler 4036a1efdabaSTejun Heo * 4037a1efdabaSTejun Heo * LOCKING: 4038a1efdabaSTejun Heo * Kernel thread context (may sleep). 4039a1efdabaSTejun Heo */ 4040a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 4041a1efdabaSTejun Heo { 4042a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 4043a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 4044a1efdabaSTejun Heo 404557c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 4046fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 4047a1efdabaSTejun Heo hardreset = NULL; 4048a1efdabaSTejun Heo 4049a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 4050a1efdabaSTejun Heo } 4051a1efdabaSTejun Heo 40526ffa01d8STejun Heo #ifdef CONFIG_PM 4053c6fd2807SJeff Garzik /** 4054c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 4055c6fd2807SJeff Garzik * @ap: port to suspend 4056c6fd2807SJeff Garzik * 4057c6fd2807SJeff Garzik * Suspend @ap. 4058c6fd2807SJeff Garzik * 4059c6fd2807SJeff Garzik * LOCKING: 4060c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4061c6fd2807SJeff Garzik */ 4062c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 4063c6fd2807SJeff Garzik { 4064c6fd2807SJeff Garzik unsigned long flags; 4065c6fd2807SJeff Garzik int rc = 0; 40663dc67440SAaron Lu struct ata_device *dev; 4067c6fd2807SJeff Garzik 4068c6fd2807SJeff Garzik /* are we suspending? */ 4069c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4070c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4071a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) { 4072c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4073c6fd2807SJeff Garzik return; 4074c6fd2807SJeff Garzik } 4075c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4076c6fd2807SJeff Garzik 4077c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 4078c6fd2807SJeff Garzik 40793dc67440SAaron Lu /* 40803dc67440SAaron Lu * If we have a ZPODD attached, check its zero 40813dc67440SAaron Lu * power ready status before the port is frozen. 4082a7ff60dbSAaron Lu * Only needed for runtime suspend. 
40833dc67440SAaron Lu */ 4084a7ff60dbSAaron Lu if (PMSG_IS_AUTO(ap->pm_mesg)) { 40853dc67440SAaron Lu ata_for_each_dev(dev, &ap->link, ENABLED) { 40863dc67440SAaron Lu if (zpodd_dev_enabled(dev)) 40873dc67440SAaron Lu zpodd_on_suspend(dev); 40883dc67440SAaron Lu } 4089a7ff60dbSAaron Lu } 40903dc67440SAaron Lu 409164578a3dSTejun Heo /* tell ACPI we're suspending */ 409264578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 409364578a3dSTejun Heo if (rc) 409464578a3dSTejun Heo goto out; 409564578a3dSTejun Heo 4096c6fd2807SJeff Garzik /* suspend */ 4097c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 4098c6fd2807SJeff Garzik 4099c6fd2807SJeff Garzik if (ap->ops->port_suspend) 4100c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 4101c6fd2807SJeff Garzik 4102a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 410364578a3dSTejun Heo out: 4104bc6e7c4bSDan Williams /* update the flags */ 4105c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4106c6fd2807SJeff Garzik 4107c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 4108c6fd2807SJeff Garzik if (rc == 0) 4109c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 411064578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 4111c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 4112c6fd2807SJeff Garzik 4113c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4114c6fd2807SJeff Garzik 4115c6fd2807SJeff Garzik return; 4116c6fd2807SJeff Garzik } 4117c6fd2807SJeff Garzik 4118c6fd2807SJeff Garzik /** 4119c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 4120c6fd2807SJeff Garzik * @ap: port to resume 4121c6fd2807SJeff Garzik * 4122c6fd2807SJeff Garzik * Resume @ap. 4123c6fd2807SJeff Garzik * 4124c6fd2807SJeff Garzik * LOCKING: 4125c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4126c6fd2807SJeff Garzik */ 4127c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 4128c6fd2807SJeff Garzik { 41296f9c1ea2STejun Heo struct ata_link *link; 41306f9c1ea2STejun Heo struct ata_device *dev; 4131c6fd2807SJeff Garzik unsigned long flags; 41329666f400STejun Heo int rc = 0; 4133c6fd2807SJeff Garzik 4134c6fd2807SJeff Garzik /* are we resuming? */ 4135c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4136c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4137a7ff60dbSAaron Lu !(ap->pm_mesg.event & PM_EVENT_RESUME)) { 4138c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4139c6fd2807SJeff Garzik return; 4140c6fd2807SJeff Garzik } 4141c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4142c6fd2807SJeff Garzik 41439666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 4144c6fd2807SJeff Garzik 41456f9c1ea2STejun Heo /* 41466f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while 41476f9c1ea2STejun Heo * suspended and PHY events during resume isn't too uncommon. 41486f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed 41496f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly. 41506f9c1ea2STejun Heo * Clear error history. 
41516f9c1ea2STejun Heo */ 41526f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 41536f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 41546f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 41556f9c1ea2STejun Heo 4156a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 4157bd3adca5SShaohua Li 4158c6fd2807SJeff Garzik if (ap->ops->port_resume) 4159c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 4160c6fd2807SJeff Garzik 41616746544cSTejun Heo /* tell ACPI that we're resuming */ 41626746544cSTejun Heo ata_acpi_on_resume(ap); 41636746544cSTejun Heo 4164bc6e7c4bSDan Williams /* update the flags */ 4165c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4166c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 4167c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4168c6fd2807SJeff Garzik } 41696ffa01d8STejun Heo #endif /* CONFIG_PM */ 4170
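/*
 * Illustrative sketch only (not part of the original file): how a low
 * level driver typically hooks into the standard EH path implemented
 * above.  "example_port_ops" is a hypothetical name; real drivers
 * inherit from sata_port_ops (or one of its descendants), which already
 * provides ->error_handler = ata_std_error_handler via inheritance.
 * Kept under #if 0 so it has no effect on the build.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	.inherits	= &sata_port_ops,
	/* ata_std_error_handler() picks prereset/softreset/hardreset/
	 * postreset from these ops and runs ata_do_eh() above.
	 */
	.error_handler	= ata_std_error_handler,
};
#endif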