/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
        /* speed down verdicts */
        ATA_EH_SPDN_NCQ_OFF             = (1 << 0),
        ATA_EH_SPDN_SPEED_DOWN          = (1 << 1),
        ATA_EH_SPDN_FALLBACK_TO_PIO     = (1 << 2),
        ATA_EH_SPDN_KEEP_ERRORS         = (1 << 3),

        /* error flags */
        ATA_EFLAG_IS_IO                 = (1 << 0),
        ATA_EFLAG_DUBIOUS_XFER          = (1 << 1),
        ATA_EFLAG_OLD_ER                = (1 << 31),

        /* error categories */
        ATA_ECAT_NONE                   = 0,
        ATA_ECAT_ATA_BUS                = 1,
        ATA_ECAT_TOUT_HSM               = 2,
        ATA_ECAT_UNK_DEV                = 3,
        ATA_ECAT_DUBIOUS_NONE           = 4,
        ATA_ECAT_DUBIOUS_ATA_BUS        = 5,
        ATA_ECAT_DUBIOUS_TOUT_HSM       = 6,
        ATA_ECAT_DUBIOUS_UNK_DEV        = 7,
        ATA_ECAT_NR                     = 8,

        ATA_EH_CMD_DFL_TIMEOUT          =  5000,

        /* always put at least this amount of time between resets */
        ATA_EH_RESET_COOL_DOWN          =  5000,

        /* Waiting in ->prereset can never be reliable.  It's
         * sometimes nice to wait there but it can't be depended upon;
         * otherwise, we wouldn't be resetting.  Just give it enough
         * time for most drives to spin up.
         */
        ATA_EH_PRERESET_TIMEOUT         = 10000,
        ATA_EH_FASTDRAIN_INTERVAL       =  3000,

        ATA_EH_UA_TRIES                 = 5,

        /* probe speed down parameters, see ata_eh_schedule_probe() */
        ATA_EH_PROBE_TRIAL_INTERVAL     = 60000,        /* 1 min */
        ATA_EH_PROBE_TRIALS             = 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
        10000,  /* most drives spin up by 10sec */
        10000,  /* > 99% working drives spin up before 20sec */
        35000,  /* give > 30 secs of idleness for retarded devices */
         5000,  /* and sweet one last chance */
        ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
         5000,  /* covers > 99% of successes and not too boring on failures */
        10000,  /* combined time till here is enough even for media access */
        30000,  /* for true idiots */
        ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
        15000,  /* be generous with flush */
        15000,  /* ditto */
        30000,  /* and even more generous */
        ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
         5000,  /* same rationale as identify timeout */
        10000,  /* ditto */
        /* but no merciful 30sec for other commands, it just isn't worth it */
        ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
        const u8                *commands;
        const unsigned long     *timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)   (const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
        { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
          .timeouts = ata_eh_identify_timeouts, },
        { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_SET_FEATURES),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
          .timeouts = ata_eh_other_timeouts, },
        { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
          .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
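
/*
 * Illustrative walk-through of the tables above: ATA_CMD_ID_ATA falls
 * into the first class, so successive retries of IDENTIFY get 5s, 10s
 * and then 30s (the trailing ULONG_MAX acts as a terminator, so any
 * further retries keep the last real value).  Commands with no table
 * entry always get ATA_EH_CMD_DFL_TIMEOUT.
 */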

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
                                 va_list args)
{
        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                    ATA_EH_DESC_LEN - ehi->desc_len,
                                    fmt, args);
}

/**
 *      __ata_ehi_push_desc - push error description without adding separator
 *      @ehi: target EHI
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to @ehi->desc.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

/**
 *      ata_ehi_push_desc - push error description with separator
 *      @ehi: target EHI
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to @ehi->desc.
 *      If @ehi->desc is not empty, ", " is added in-between.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
        va_list args;

        if (ehi->desc_len)
                __ata_ehi_push_desc(ehi, ", ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(ehi, fmt, args);
        va_end(args);
}

/**
 *      ata_ehi_clear_desc - clean error description
 *      @ehi: target EHI
 *
 *      Clear @ehi->desc.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
        ehi->desc[0] = '\0';
        ehi->desc_len = 0;
}
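
/*
 * Usage sketch (illustrative; the irq_stat value is made up): interrupt
 * handlers and EH hooks typically build the description up piece by
 * piece while holding the host lock:
 *
 *      struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *      ata_ehi_clear_desc(ehi);
 *      ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *      ata_ehi_push_desc(ehi, "connection status changed");
 *
 * which shows up as "irq_stat 0x..., connection status changed" in the
 * EH report.
 */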

/**
 *      ata_port_desc - append port description
 *      @ap: target ATA port
 *      @fmt: printf format string
 *
 *      Format string according to @fmt and append it to port
 *      description.  If port description is not empty, " " is added
 *      in-between.  This function is to be used while initializing
 *      ata_host.  The description is printed on host registration.
 *
 *      LOCKING:
 *      None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
        va_list args;

        WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

        if (ap->link.eh_info.desc_len)
                __ata_ehi_push_desc(&ap->link.eh_info, " ");

        va_start(args, fmt);
        __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
        va_end(args);
}

#ifdef CONFIG_PCI

/**
 *      ata_port_pbar_desc - append PCI BAR description
 *      @ap: target ATA port
 *      @bar: target PCI BAR
 *      @offset: offset into PCI BAR
 *      @name: name of the area
 *
 *      If @offset is negative, this function formats a string which
 *      contains the name, address, size and type of the BAR and
 *      appends it to the port description.  If @offset is zero or
 *      positive, only the name and the offset address are appended.
 *
 *      LOCKING:
 *      None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
                        const char *name)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        char *type = "";
        unsigned long long start, len;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                type = "m";
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                type = "i";

        start = (unsigned long long)pci_resource_start(pdev, bar);
        len = (unsigned long long)pci_resource_len(pdev, bar);

        if (offset < 0)
                ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
        else
                ata_port_desc(ap, "%s 0x%llx", name,
                                start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */
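
/*
 * Usage sketch (illustrative; the BAR number and offset variable are
 * made up): host drivers record their resources with these helpers
 * while setting up the ata_host, e.g. during PCI probe:
 *
 *      ata_port_desc(ap, "irq %d", pdev->irq);
 *      ata_port_pbar_desc(ap, 5, -1, "abar");
 *      ata_port_pbar_desc(ap, 5, port_offset, "port");
 *
 * The accumulated description is then printed when the host is
 * registered.
 */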

static int ata_lookup_timeout_table(u8 cmd)
{
        int i;

        for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
                const u8 *cur;

                for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
                        if (*cur == cmd)
                                return i;
        }

        return -1;
}

/**
 *      ata_internal_cmd_timeout - determine timeout for an internal command
 *      @dev: target device
 *      @cmd: internal command to be issued
 *
 *      Determine timeout for internal command @cmd for @dev.
 *
 *      LOCKING:
 *      EH context.
 *
 *      RETURNS:
 *      Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return ATA_EH_CMD_DFL_TIMEOUT;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *      ata_internal_cmd_timed_out - notification for internal command timeout
 *      @dev: target device
 *      @cmd: internal command which timed out
 *
 *      Notify EH that internal command @cmd for @dev timed out.  This
 *      function should be called only for commands whose timeouts are
 *      determined using ata_internal_cmd_timeout().
 *
 *      LOCKING:
 *      EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int ent = ata_lookup_timeout_table(cmd);
        int idx;

        if (ent < 0)
                return;

        idx = ehc->cmd_timeout_idx[dev->devno][ent];
        if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
                ehc->cmd_timeout_idx[dev->devno][ent]++;
}
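
/*
 * Sketch of how the two helpers above pair up for an issuer of internal
 * commands (simplified; the real caller is the internal command
 * machinery in libata-core.c):
 *
 *      timeout = ata_internal_cmd_timeout(dev, cmd);
 *      err_mask = <issue the internal command with @timeout>;
 *      if (err_mask & AC_ERR_TIMEOUT)
 *              ata_internal_cmd_timed_out(dev, cmd);
 *
 * so the next attempt of the same command class automatically gets the
 * next, longer timeout from its table.
 */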

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
                             unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        WARN_ON(!err_mask);

        ering->cursor++;
        ering->cursor %= ATA_ERING_SIZE;

        ent = &ering->ring[ering->cursor];
        ent->eflags = eflags;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
        struct ata_ering_entry *ent = &ering->ring[ering->cursor];

        if (ent->err_mask)
                return ent;
        return NULL;
}

int ata_ering_map(struct ata_ering *ering,
                  int (*map_fn)(struct ata_ering_entry *, void *),
                  void *arg)
{
        int idx, rc = 0;
        struct ata_ering_entry *ent;

        idx = ering->cursor;
        do {
                ent = &ering->ring[idx];
                if (!ent->err_mask)
                        break;
                rc = map_fn(ent, arg);
                if (rc)
                        break;
                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
        } while (idx != ering->cursor);

        return rc;
}

int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
        ent->eflags |= ATA_EFLAG_OLD_ER;
        return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
        ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;

        return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
        struct ata_device *tdev;

        if (!dev) {
                ehi->action &= ~action;
                ata_for_each_dev(tdev, link, ALL)
                        ehi->dev_action[tdev->devno] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
                WARN_ON(!(action & ATA_EH_PERDEV_MASK));

                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
                        ata_for_each_dev(tdev, link, ALL)
                                ehi->dev_action[tdev->devno] |=
                                        ehi->action & action;
                        ehi->action &= ~action;
                }

                /* turn off the specified per-dev action */
                ehi->dev_action[dev->devno] &= ~action;
        }
}

/**
 *      ata_eh_acquire - acquire EH ownership
 *      @ap: ATA port to acquire EH ownership for
 *
 *      Acquire EH ownership for @ap.  This is the basic exclusion
 *      mechanism for ports sharing a host.  Only one port hanging off
 *      the same host can claim the ownership of EH.
 *
 *      LOCKING:
 *      EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
        mutex_lock(&ap->host->eh_mutex);
        WARN_ON_ONCE(ap->host->eh_owner);
        ap->host->eh_owner = current;
}

/**
 *      ata_eh_release - release EH ownership
 *      @ap: ATA port to release EH ownership for
 *
 *      Release EH ownership for @ap.  The caller must have acquired
 *      EH ownership using ata_eh_acquire() previously.
 *
 *      LOCKING:
 *      EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
        WARN_ON_ONCE(ap->host->eh_owner != current);
        ap->host->eh_owner = NULL;
        mutex_unlock(&ap->host->eh_mutex);
}
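
/*
 * Sketch: EH code that has to wait for a long time is expected to drop
 * the ownership over the wait so that EH of sibling ports on the same
 * host can make progress, roughly:
 *
 *      ata_eh_release(ap);
 *      msleep(interval);
 *      ata_eh_acquire(ap);
 *
 * (helpers such as ata_msleep() do this on behalf of EH paths).
 */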

/**
 *      ata_scsi_timed_out - SCSI layer time out callback
 *      @cmd: timed out SCSI command
 *
 *      Handles SCSI layer timeout.  We race with normal completion of
 *      the qc for @cmd.  If the qc is already gone, we lose and let
 *      the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *      timed out and EH should be invoked.  Prevent ata_qc_complete()
 *      from finishing it by setting EH_SCHEDULED and return
 *      EH_NOT_HANDLED.
 *
 *      TODO: kill this function once old EH is gone.
 *
 *      LOCKING:
 *      Called from timer context
 *
 *      RETURNS:
 *      EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
        enum blk_eh_timer_return ret;

        DPRINTK("ENTER\n");

        if (ap->ops->error_handler) {
                ret = BLK_EH_NOT_HANDLED;
                goto out;
        }

        ret = BLK_EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = BLK_EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);

 out:
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        /* Restore SControl IPM and SPD for the next driver and
         * disable attached devices.
         */
        ata_for_each_link(link, ap, PMP_FIRST) {
                sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
                ata_for_each_dev(dev, link, ALL)
                        ata_dev_disable(dev);
        }

        /* freeze and set UNLOADED */
        spin_lock_irqsave(ap->lock, flags);

        ata_port_freeze(ap);                    /* won't be thawed */
        ap->pflags &= ~ATA_PFLAG_EH_PENDING;    /* clear pending from freeze */
        ap->pflags |= ATA_PFLAG_UNLOADED;

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_scsi_error - SCSI layer error handler callback
 *      @host: SCSI host on which error occurred
 *
 *      Handles SCSI-layer-thrown error events.
 *
 *      LOCKING:
 *      Inherited from SCSI layer (none, can sleep)
 *
 *      RETURNS:
 *      Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        LIST_HEAD(eh_work_q);

        DPRINTK("ENTER\n");

        spin_lock_irqsave(host->host_lock, flags);
        list_splice_init(&host->eh_cmd_q, &eh_work_q);
        spin_unlock_irqrestore(host->host_lock, flags);

        ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

        /* If we timed out while racing normal completion and there is
         * nothing to recover (nr_timedout == 0), why exactly are we
         * doing error recovery here?
         */
        ata_scsi_port_error_handler(host, ap);

        /* finish or retry handled scmd's and clean up */
        WARN_ON(host->host_failed || !list_empty(&eh_work_q));

        DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:       scsi host containing the port
 * @ap:         ATA port within the host
 * @eh_work_q:  list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
                                struct list_head *eh_work_q)
{
        int i;
        unsigned long flags;

        /* make sure sff pio task is not running */
        ata_sff_flush_pio_task(ap);

        /* synchronize with host lock and sort out timeouts */

        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
         * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
         * When SCSI timeout wins, things are a bit more complex.
         * Normal or error completion can occur after the timeout but
         * before this point.  In such cases, both types of
         * completions are honored.  A scmd is determined to have
         * timed out iff its associated qc is active and not failed.
         */
        if (ap->ops->error_handler) {
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;

                spin_lock_irqsave(ap->lock, flags);

                /* This must occur under the ap->lock as we don't want
                   a polled recovery to race the real interrupt handler

                   The lost_interrupt handler checks for any completed but
                   non-notified command and completes much like an IRQ handler.

                   We then fall into the error recovery code which will treat
                   this as if normal completion won the race */

                if (ap->ops->lost_interrupt)
                        ap->ops->lost_interrupt(ap);

                list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
                        struct ata_queued_cmd *qc;

                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                                qc = __ata_qc_from_tag(ap, i);
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
                        }

                        if (i < ATA_MAX_QUEUE) {
                                /* the scmd has an associated qc */
                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                                        /* which hasn't failed yet, timeout */
                                        qc->err_mask |= AC_ERR_TIMEOUT;
                                        qc->flags |= ATA_QCFLAG_FAILED;
                                        nr_timedout++;
                                }
                        } else {
                                /* Normal completion occurred after
                                 * SCSI timeout but before this point.
                                 * Successfully complete it.
                                 */
                                scmd->retries = scmd->allowed;
                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
                        }
                }

                /* If we have timed out qcs, they belong to EH from
                 * this point but the state of the controller is
                 * unknown.  Freeze the port to make sure the IRQ
                 * handler doesn't diddle with those qcs.  This must
                 * be done atomically w.r.t. setting QCFLAG_FAILED.
                 */
                if (nr_timedout)
                        __ata_port_freeze(ap);

                spin_unlock_irqrestore(ap->lock, flags);

                /* initialize eh_tries */
                ap->eh_tries = ATA_EH_MAX_TRIES;
        } else
                spin_unlock_wait(ap->lock);

}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:       SCSI host containing the port
 * @ap:         the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
        unsigned long flags;

        /* invoke error handler */
        if (ap->ops->error_handler) {
                struct ata_link *link;

                /* acquire EH ownership */
                ata_eh_acquire(ap);
 repeat:
                /* kill fast drain timer */
                del_timer_sync(&ap->fastdrain_timer);

                /* process port resume request */
                ata_eh_handle_port_resume(ap);

                /* fetch & clear EH info */
                spin_lock_irqsave(ap->lock, flags);

                ata_for_each_link(link, ap, HOST_FIRST) {
                        struct ata_eh_context *ehc = &link->eh_context;
                        struct ata_device *dev;

                        memset(&link->eh_context, 0, sizeof(link->eh_context));
                        link->eh_context.i = link->eh_info;
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                        ata_for_each_dev(dev, link, ENABLED) {
                                int devno = dev->devno;

                                ehc->saved_xfer_mode[devno] = dev->xfer_mode;
                                if (ata_ncq_enabled(dev))
                                        ehc->saved_ncq_enabled |= 1 << devno;
                        }
                }

                ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
                ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                ap->excl_link = NULL;   /* don't maintain exclusion over EH */

                spin_unlock_irqrestore(ap->lock, flags);

                /* invoke EH, skip if unloading or suspended */
                if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
                        ap->ops->error_handler(ap);
                else {
                        /* if unloading, commence suicide */
                        if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
                            !(ap->pflags & ATA_PFLAG_UNLOADED))
                                ata_eh_unload(ap);
                        ata_eh_finish(ap);
                }

                /* process port suspend request */
                ata_eh_handle_port_suspend(ap);

                /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
                spin_lock_irqsave(ap->lock, flags);

                if (ap->pflags & ATA_PFLAG_EH_PENDING) {
                        if (--ap->eh_tries) {
                                spin_unlock_irqrestore(ap->lock, flags);
                                goto repeat;
                        }
                        ata_port_err(ap,
                                     "EH pending after %d tries, giving up\n",
                                     ATA_EH_MAX_TRIES);
                        ap->pflags &= ~ATA_PFLAG_EH_PENDING;
                }

                /* this run is complete, make sure EH info is clear */
                ata_for_each_link(link, ap, HOST_FIRST)
                        memset(&link->eh_info, 0, sizeof(link->eh_info));

                /* end eh (clear host_eh_scheduled) while holding
                 * ap->lock such that if exception occurs after this
                 * point but before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                ap->ops->end_eh(ap);

                spin_unlock_irqrestore(ap->lock, flags);
                ata_eh_release(ap);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }

        scsi_eh_flush_done_q(&ap->eh_done_q);

        /* clean up */
        spin_lock_irqsave(ap->lock, flags);

        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
                schedule_delayed_work(&ap->hotplug_task, 0);

        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_info(ap, "EH complete\n");

        ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

        /* tell wait_eh that we're done */
        ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);

        spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
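
/*
 * Sketch: ata_scsi_error() above is the whole story for ordinary libata
 * hosts.  A driver that drives SCSI EH itself (the reason the two
 * halves are exported separately) would call them around its own
 * recovery, roughly (locking as in ata_scsi_error() above):
 *
 *      list_splice_init(&shost->eh_cmd_q, &eh_work_q);
 *      ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);
 *      ... transport specific recovery ...
 *      ata_scsi_port_error_handler(shost, ap);
 */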

/**
 *      ata_port_wait_eh - Wait for the currently pending EH to complete
 *      @ap: Port to wait EH for
 *
 *      Wait until the currently pending EH is complete.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
        unsigned long flags;
        DEFINE_WAIT(wait);

 retry:
        spin_lock_irqsave(ap->lock, flags);

        while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(ap->lock, flags);
                schedule();
                spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);

        spin_unlock_irqrestore(ap->lock, flags);

        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->scsi_host)) {
                ata_msleep(ap, 10);
                goto retry;
        }
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
        unsigned int tag;
        int nr = 0;

        /* count only non-internal commands */
        for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
                if (ata_qc_from_tag(ap, tag))
                        nr++;

        return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
        struct ata_port *ap = (void *)arg;
        unsigned long flags;
        int cnt;

        spin_lock_irqsave(ap->lock, flags);

        cnt = ata_eh_nr_in_flight(ap);

        /* are we done? */
        if (!cnt)
                goto out_unlock;

        if (cnt == ap->fastdrain_cnt) {
                unsigned int tag;

                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
                for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
                        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }

                ata_port_freeze(ap);
        } else {
                /* some qcs have finished, give it another chance */
                ap->fastdrain_cnt = cnt;
                ap->fastdrain_timer.expires =
                        ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
                add_timer(&ap->fastdrain_timer);
        }

 out_unlock:
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *      ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *      @ap: target ATA port
 *      @fastdrain: activate fast drain
 *
 *      Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *      is non-zero and EH wasn't pending before.  Fast drain ensures
 *      that EH kicks in in a timely manner.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
        int cnt;

        /* already scheduled? */
        if (ap->pflags & ATA_PFLAG_EH_PENDING)
                return;

        ap->pflags |= ATA_PFLAG_EH_PENDING;

        if (!fastdrain)
                return;

        /* do we have in-flight qcs? */
        cnt = ata_eh_nr_in_flight(ap);
        if (!cnt)
                return;

        /* activate fast drain */
        ap->fastdrain_cnt = cnt;
        ap->fastdrain_timer.expires =
                ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
        add_timer(&ap->fastdrain_timer);
}

/**
 *      ata_qc_schedule_eh - schedule qc for error handling
 *      @qc: command to schedule error handling for
 *
 *      Schedule error handling for @qc.  EH will kick in as soon as
 *      other commands are drained.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct request_queue *q = qc->scsicmd->device->request_queue;
        unsigned long flags;

        WARN_ON(!ap->ops->error_handler);

        qc->flags |= ATA_QCFLAG_FAILED;
        ata_eh_set_pending(ap, 1);

        /* The following will fail if timeout has already expired.
         * ata_scsi_error() takes care of such scmds on EH entry.
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_abort_request(qc->scsicmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 * LOCKING: inherited from ata_port_schedule_eh
 *      spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->pflags & ATA_PFLAG_INITIALIZING)
                return;

        ata_eh_set_pending(ap, 1);
        scsi_schedule_eh(ap->scsi_host);

        DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
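
/*
 * Sketch (hypothetical ops table): drivers normally pick up
 * ata_std_sched_eh()/ata_std_end_eh() through their inherited port
 * operations; a driver whose EH is driven by another transport (the
 * libsas case mentioned below) can override the two hooks instead:
 *
 *      static struct ata_port_operations foo_sata_ops = {
 *              .inherits       = &sata_port_ops,
 *              .sched_eh       = foo_sched_eh,
 *              .end_eh         = foo_end_eh,
 *      };
 */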

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock; in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
        struct Scsi_Host *host = ap->scsi_host;

        host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);


/**
 *      ata_port_schedule_eh - schedule error handling without a qc
 *      @ap: ATA port to schedule EH for
 *
 *      Schedule error handling for @ap.  EH will kick in as soon as
 *      all commands are drained.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
        /* see: ata_std_sched_eh, unless you know better */
        ap->ops->sched_eh(ap);
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
        int tag, nr_aborted = 0;

        WARN_ON(!ap->ops->error_handler);

        /* we're gonna abort all commands, no need for fast drain */
        ata_eh_set_pending(ap, 0);

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
                        nr_aborted++;
                }
        }

        if (!nr_aborted)
                ata_port_schedule_eh(ap);

        return nr_aborted;
}

/**
 *      ata_link_abort - abort all qc's on the link
 *      @link: ATA link to abort qc's for
 *
 *      Abort all active qc's on @link and schedule EH.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
        return ata_do_link_abort(link->ap, link);
}
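
/*
 * Usage sketch (illustrative): an interrupt handler that has narrowed a
 * device error down to one PMP link can confine the abort to that link:
 *
 *      struct ata_eh_info *ehi = &link->eh_info;
 *
 *      ata_ehi_push_desc(ehi, "device error");
 *      ehi->err_mask |= AC_ERR_DEV;
 *      ata_link_abort(link);
 */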

/**
 *      ata_port_abort - abort all qc's on the port
 *      @ap: ATA port to abort qc's for
 *
 *      Abort all active qc's of @ap and schedule EH.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
        return ata_do_link_abort(ap, NULL);
}

/**
 *      __ata_port_freeze - freeze port
 *      @ap: ATA port to freeze
 *
 *      This function is called when HSM violation or some other
 *      condition disrupts normal operation of the port.  Frozen port
 *      is not allowed to perform any operation until the port is
 *      thawed, which usually follows a successful reset.
 *
 *      ap->ops->freeze() callback can be used for freezing the port
 *      hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *      port cannot be frozen hardware-wise, the interrupt handler
 *      must ack and clear interrupts unconditionally while the port
 *      is frozen.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->ops->freeze)
                ap->ops->freeze(ap);

        ap->pflags |= ATA_PFLAG_FROZEN;

        DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *      ata_port_freeze - abort & freeze port
 *      @ap: ATA port to freeze
 *
 *      Abort and freeze @ap.  The freeze operation must be called
 *      first, because some hardware requires special operations
 *      before the taskfile registers are accessible.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
        int nr_aborted;

        WARN_ON(!ap->ops->error_handler);

        __ata_port_freeze(ap);
        nr_aborted = ata_port_abort(ap);

        return nr_aborted;
}
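
/*
 * Usage sketch (illustrative; irq_stat is a made-up register snapshot):
 * a host-level error interrupt that leaves the controller state unknown
 * typically records what happened and freezes the whole port:
 *
 *      struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *      ata_ehi_push_desc(ehi, "host bus error, irq_stat 0x%08x", irq_stat);
 *      ehi->err_mask |= AC_ERR_HOST_BUS;
 *      ata_port_freeze(ap);
 *
 * ata_port_freeze() aborts every in-flight qc and leaves the port
 * frozen until EH thaws it.
 */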

/**
 *      sata_async_notification - SATA async notification handler
 *      @ap: ATA port where async notification is received
 *
 *      Handler to be called when async notification via SDB FIS is
 *      received.  This function schedules EH if necessary.
 *
 *      LOCKING:
 *      spin_lock_irqsave(host lock)
 *
 *      RETURNS:
 *      1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
        u32 sntf;
        int rc;

        if (!(ap->flags & ATA_FLAG_AN))
                return 0;

        rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
        if (rc == 0)
                sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

        if (!sata_pmp_attached(ap) || rc) {
                /* PMP is not attached or SNTF is not available */
                if (!sata_pmp_attached(ap)) {
                        /* PMP is not attached.  Check whether ATAPI
                         * AN is configured.  If so, notify media
                         * change.
                         */
                        struct ata_device *dev = ap->link.device;

                        if ((dev->class == ATA_DEV_ATAPI) &&
                            (dev->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(dev);
                        return 0;
                } else {
                        /* PMP is attached but SNTF is not available.
                         * ATAPI async media change notification is
                         * not used.  The PMP must be reporting PHY
                         * status change, schedule EH.
                         */
                        ata_port_schedule_eh(ap);
                        return 1;
                }
        } else {
                /* PMP is attached and SNTF is available */
                struct ata_link *link;

                /* check and notify ATAPI AN */
                ata_for_each_link(link, ap, EDGE) {
                        if (!(sntf & (1 << link->pmp)))
                                continue;

                        if ((link->device->class == ATA_DEV_ATAPI) &&
                            (link->device->flags & ATA_DFLAG_AN))
                                ata_scsi_media_change_notify(link->device);
                }

                /* If PMP is reporting that PHY status of some
                 * downstream ports has changed, schedule EH.
                 */
                if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
                        ata_port_schedule_eh(ap);
                        return 1;
                }

                return 0;
        }
}
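
/*
 * Usage sketch (FOO_IRQ_SDB_NOTIFY is a made-up driver flag): a driver
 * that raises an interrupt when a Set Device Bits FIS with the
 * Notification bit arrives simply forwards the event here:
 *
 *      if (irq_stat & FOO_IRQ_SDB_NOTIFY)
 *              sata_async_notification(ap);
 */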
1242c6fd2807SJeff Garzik */
1243c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap)
1244c6fd2807SJeff Garzik {
1245c6fd2807SJeff Garzik unsigned long flags;
1246c6fd2807SJeff Garzik
1247c6fd2807SJeff Garzik if (!ap->ops->error_handler)
1248c6fd2807SJeff Garzik return;
1249c6fd2807SJeff Garzik
1250c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1251c6fd2807SJeff Garzik __ata_port_freeze(ap);
1252c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1253c6fd2807SJeff Garzik }
1254c6fd2807SJeff Garzik
1255c6fd2807SJeff Garzik /**
1256c6fd2807SJeff Garzik * ata_eh_thaw_port - EH helper to thaw port
1257c6fd2807SJeff Garzik * @ap: ATA port to thaw
1258c6fd2807SJeff Garzik *
1259c6fd2807SJeff Garzik * Thaw frozen port @ap.
1260c6fd2807SJeff Garzik *
1261c6fd2807SJeff Garzik * LOCKING:
1262c6fd2807SJeff Garzik * None.
1263c6fd2807SJeff Garzik */
1264c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap)
1265c6fd2807SJeff Garzik {
1266c6fd2807SJeff Garzik unsigned long flags;
1267c6fd2807SJeff Garzik
1268c6fd2807SJeff Garzik if (!ap->ops->error_handler)
1269c6fd2807SJeff Garzik return;
1270c6fd2807SJeff Garzik
1271c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1272c6fd2807SJeff Garzik
1273c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN;
1274c6fd2807SJeff Garzik
1275c6fd2807SJeff Garzik if (ap->ops->thaw)
1276c6fd2807SJeff Garzik ap->ops->thaw(ap);
1277c6fd2807SJeff Garzik
1278c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1279c6fd2807SJeff Garzik
128044877b4eSTejun Heo DPRINTK("ata%u port thawed\n", ap->print_id);
1281c6fd2807SJeff Garzik }
1282c6fd2807SJeff Garzik
1283c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1284c6fd2807SJeff Garzik {
1285c6fd2807SJeff Garzik /* nada */
1286c6fd2807SJeff Garzik }
1287c6fd2807SJeff Garzik
1288c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1289c6fd2807SJeff Garzik {
1290c6fd2807SJeff Garzik struct ata_port *ap = qc->ap;
1291c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd;
1292c6fd2807SJeff Garzik unsigned long flags;
1293c6fd2807SJeff Garzik
1294c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1295c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone;
1296c6fd2807SJeff Garzik __ata_qc_complete(qc);
1297c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag));
1298c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1299c6fd2807SJeff Garzik
1300c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1301c6fd2807SJeff Garzik }
1302c6fd2807SJeff Garzik
1303c6fd2807SJeff Garzik /**
1304c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH
1305c6fd2807SJeff Garzik * @qc: Command to complete
1306c6fd2807SJeff Garzik *
1307c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has
1308c6fd2807SJeff Garzik * completed. To be used from EH.
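 *
 * Simplified sketch of how EH code can dispose of a failed command with
 * this helper or ata_eh_qc_retry() below once recovery has run; the real
 * policy in the EH finish path is more involved and is only approximated
 * here:
 *
 *	if (qc->flags & ATA_QCFLAG_FAILED) {
 *		if (!qc->err_mask)
 *			ata_eh_qc_retry(qc);
 *		else
 *			ata_eh_qc_complete(qc);
 *	}
 *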
1309c6fd2807SJeff Garzik */ 1310c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1311c6fd2807SJeff Garzik { 1312c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1313c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1314c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1315c6fd2807SJeff Garzik } 1316c6fd2807SJeff Garzik 1317c6fd2807SJeff Garzik /** 1318c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1319c6fd2807SJeff Garzik * @qc: Command to retry 1320c6fd2807SJeff Garzik * 1321c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1322c6fd2807SJeff Garzik * should be retried. To be used from EH. 1323c6fd2807SJeff Garzik * 1324c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1325c6fd2807SJeff Garzik * scmd->retries is decremented for commands which get retried 1326c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1327c6fd2807SJeff Garzik */ 1328c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1329c6fd2807SJeff Garzik { 1330c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1331c6fd2807SJeff Garzik if (!qc->err_mask && scmd->retries) 1332c6fd2807SJeff Garzik scmd->retries--; 1333c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1334c6fd2807SJeff Garzik } 1335c6fd2807SJeff Garzik 1336c6fd2807SJeff Garzik /** 1337678afac6STejun Heo * ata_dev_disable - disable ATA device 1338678afac6STejun Heo * @dev: ATA device to disable 1339678afac6STejun Heo * 1340678afac6STejun Heo * Disable @dev. 1341678afac6STejun Heo * 1342678afac6STejun Heo * Locking: 1343678afac6STejun Heo * EH context. 1344678afac6STejun Heo */ 1345678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1346678afac6STejun Heo { 1347678afac6STejun Heo if (!ata_dev_enabled(dev)) 1348678afac6STejun Heo return; 1349678afac6STejun Heo 1350678afac6STejun Heo if (ata_msg_drv(dev->link->ap)) 1351a9a79dfeSJoe Perches ata_dev_warn(dev, "disabled\n"); 1352678afac6STejun Heo ata_acpi_on_disable(dev); 1353678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1354678afac6STejun Heo dev->class++; 135599cf610aSTejun Heo 135699cf610aSTejun Heo /* From now till the next successful probe, ering is used to 135799cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 135899cf610aSTejun Heo */ 135999cf610aSTejun Heo ata_ering_clear(&dev->ering); 1360678afac6STejun Heo } 1361678afac6STejun Heo 1362678afac6STejun Heo /** 1363c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1364c6fd2807SJeff Garzik * @dev: ATA device to detach 1365c6fd2807SJeff Garzik * 1366c6fd2807SJeff Garzik * Detach @dev. 1367c6fd2807SJeff Garzik * 1368c6fd2807SJeff Garzik * LOCKING: 1369c6fd2807SJeff Garzik * None. 
1370c6fd2807SJeff Garzik */
1371fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev)
1372c6fd2807SJeff Garzik {
1373f58229f8STejun Heo struct ata_link *link = dev->link;
1374f58229f8STejun Heo struct ata_port *ap = link->ap;
137590484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
1376c6fd2807SJeff Garzik unsigned long flags;
1377c6fd2807SJeff Garzik
1378c6fd2807SJeff Garzik ata_dev_disable(dev);
1379c6fd2807SJeff Garzik
1380c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1381c6fd2807SJeff Garzik
1382c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH;
1383c6fd2807SJeff Garzik
1384c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) {
1385c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED;
1386c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1387c6fd2807SJeff Garzik }
1388c6fd2807SJeff Garzik
138990484ebfSTejun Heo /* clear per-dev EH info */
1390f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1391f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
139290484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0;
139390484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1394c6fd2807SJeff Garzik
1395c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1396c6fd2807SJeff Garzik }
1397c6fd2807SJeff Garzik
1398c6fd2807SJeff Garzik /**
1399c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action
1400955e57dfSTejun Heo * @link: target ATA link
1401c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL)
1402c6fd2807SJeff Garzik * @action: action about to be performed
1403c6fd2807SJeff Garzik *
1404c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits
1405955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily
1406955e57dfSTejun Heo * repeated.
1407c6fd2807SJeff Garzik *
1408c6fd2807SJeff Garzik * LOCKING:
1409c6fd2807SJeff Garzik * None.
1410c6fd2807SJeff Garzik */
1411fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1412c6fd2807SJeff Garzik unsigned int action)
1413c6fd2807SJeff Garzik {
1414955e57dfSTejun Heo struct ata_port *ap = link->ap;
1415955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info;
1416955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context;
1417c6fd2807SJeff Garzik unsigned long flags;
1418c6fd2807SJeff Garzik
1419c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1420c6fd2807SJeff Garzik
1421955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action);
1422c6fd2807SJeff Garzik
1423a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on
1424a568d1d2STejun Heo * slave links as master will do them again.
1425a568d1d2STejun Heo */
1426a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1427c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED;
1428c6fd2807SJeff Garzik
1429c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1430c6fd2807SJeff Garzik }
1431c6fd2807SJeff Garzik
1432c6fd2807SJeff Garzik /**
1433c6fd2807SJeff Garzik * ata_eh_done - EH action complete
1434c6fd2807SJeff Garzik * @link: target ATA link
1435c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL)
1436c6fd2807SJeff Garzik * @action: action just completed
1437c6fd2807SJeff Garzik *
1438c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits
1439955e57dfSTejun Heo * in @link->eh_context.
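 *
 * Together with ata_eh_about_to_do() above, this brackets each recovery
 * step.  A minimal sketch, where do_reset() is a hypothetical stand-in
 * for the actual work:
 *
 *	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 *	rc = do_reset(link);
 *	if (rc == 0)
 *		ata_eh_done(link, NULL, ATA_EH_RESET);
 *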
1440c6fd2807SJeff Garzik * 1441c6fd2807SJeff Garzik * LOCKING: 1442c6fd2807SJeff Garzik * None. 1443c6fd2807SJeff Garzik */ 1444fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1445c6fd2807SJeff Garzik unsigned int action) 1446c6fd2807SJeff Garzik { 1447955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 14489af5c9c9STejun Heo 1449955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1450c6fd2807SJeff Garzik } 1451c6fd2807SJeff Garzik 1452c6fd2807SJeff Garzik /** 1453c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1454c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1455c6fd2807SJeff Garzik * 1456c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1457c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1458c6fd2807SJeff Garzik * error is reported. 1459c6fd2807SJeff Garzik * 1460c6fd2807SJeff Garzik * LOCKING: 1461c6fd2807SJeff Garzik * None. 1462c6fd2807SJeff Garzik * 1463c6fd2807SJeff Garzik * RETURNS: 1464c6fd2807SJeff Garzik * Descriptive string for @err_mask 1465c6fd2807SJeff Garzik */ 1466c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1467c6fd2807SJeff Garzik { 1468c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1469c6fd2807SJeff Garzik return "host bus error"; 1470c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1471c6fd2807SJeff Garzik return "ATA bus error"; 1472c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1473c6fd2807SJeff Garzik return "timeout"; 1474c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1475c6fd2807SJeff Garzik return "HSM violation"; 1476c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1477c6fd2807SJeff Garzik return "internal error"; 1478c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1479c6fd2807SJeff Garzik return "media error"; 1480c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1481c6fd2807SJeff Garzik return "invalid argument"; 1482c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1483c6fd2807SJeff Garzik return "device error"; 1484c6fd2807SJeff Garzik return "unknown error"; 1485c6fd2807SJeff Garzik } 1486c6fd2807SJeff Garzik 1487c6fd2807SJeff Garzik /** 1488c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1489c6fd2807SJeff Garzik * @dev: target device 1490c6fd2807SJeff Garzik * @page: page to read 1491c6fd2807SJeff Garzik * @buf: buffer to store read page 1492c6fd2807SJeff Garzik * @sectors: number of sectors to read 1493c6fd2807SJeff Garzik * 1494c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1495c6fd2807SJeff Garzik * 1496c6fd2807SJeff Garzik * LOCKING: 1497c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1498c6fd2807SJeff Garzik * 1499c6fd2807SJeff Garzik * RETURNS: 1500c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 
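 *
 * Example, mirroring the NCQ error handling further down in this file:
 * reading one sector of log page 10h into the port's scratch buffer:
 *
 *	u8 *buf = dev->link->ap->sector_buf;
 *	unsigned int err_mask;
 *
 *	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
 *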
1501c6fd2807SJeff Garzik */ 1502c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1503c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1504c6fd2807SJeff Garzik { 1505c6fd2807SJeff Garzik struct ata_taskfile tf; 1506c6fd2807SJeff Garzik unsigned int err_mask; 1507c6fd2807SJeff Garzik 1508c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1509c6fd2807SJeff Garzik 1510c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1511c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1512c6fd2807SJeff Garzik tf.lbal = page; 1513c6fd2807SJeff Garzik tf.nsect = sectors; 1514c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1515c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1516c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1517c6fd2807SJeff Garzik 1518c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 15192b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1520c6fd2807SJeff Garzik 1521c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1522c6fd2807SJeff Garzik return err_mask; 1523c6fd2807SJeff Garzik } 1524c6fd2807SJeff Garzik 1525c6fd2807SJeff Garzik /** 1526c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1527c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1528c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1529c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1530c6fd2807SJeff Garzik * 1531c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1532c6fd2807SJeff Garzik * condition. 1533c6fd2807SJeff Garzik * 1534c6fd2807SJeff Garzik * LOCKING: 1535c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1536c6fd2807SJeff Garzik * 1537c6fd2807SJeff Garzik * RETURNS: 1538c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 
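 *
 * Typical use, as in ata_eh_analyze_ncq_error() below: on success @tag
 * identifies the failed NCQ command and @tf holds its status, so the
 * caller can locate and condemn the offending qc:
 *
 *	rc = ata_eh_read_log_10h(dev, &tag, &tf);
 *	if (rc == 0)
 *		qc = __ata_qc_from_tag(ap, tag);
 *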
1539c6fd2807SJeff Garzik */ 1540c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1541c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1542c6fd2807SJeff Garzik { 15439af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1544c6fd2807SJeff Garzik unsigned int err_mask; 1545c6fd2807SJeff Garzik u8 csum; 1546c6fd2807SJeff Garzik int i; 1547c6fd2807SJeff Garzik 1548c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1549c6fd2807SJeff Garzik if (err_mask) 1550c6fd2807SJeff Garzik return -EIO; 1551c6fd2807SJeff Garzik 1552c6fd2807SJeff Garzik csum = 0; 1553c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1554c6fd2807SJeff Garzik csum += buf[i]; 1555c6fd2807SJeff Garzik if (csum) 1556a9a79dfeSJoe Perches ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n", 1557a9a79dfeSJoe Perches csum); 1558c6fd2807SJeff Garzik 1559c6fd2807SJeff Garzik if (buf[0] & 0x80) 1560c6fd2807SJeff Garzik return -ENOENT; 1561c6fd2807SJeff Garzik 1562c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1563c6fd2807SJeff Garzik 1564c6fd2807SJeff Garzik tf->command = buf[2]; 1565c6fd2807SJeff Garzik tf->feature = buf[3]; 1566c6fd2807SJeff Garzik tf->lbal = buf[4]; 1567c6fd2807SJeff Garzik tf->lbam = buf[5]; 1568c6fd2807SJeff Garzik tf->lbah = buf[6]; 1569c6fd2807SJeff Garzik tf->device = buf[7]; 1570c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1571c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1572c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1573c6fd2807SJeff Garzik tf->nsect = buf[12]; 1574c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1575c6fd2807SJeff Garzik 1576c6fd2807SJeff Garzik return 0; 1577c6fd2807SJeff Garzik } 1578c6fd2807SJeff Garzik 1579c6fd2807SJeff Garzik /** 158011fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 158111fc33daSTejun Heo * @dev: target ATAPI device 158211fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 158311fc33daSTejun Heo * 158411fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 158511fc33daSTejun Heo * 158611fc33daSTejun Heo * LOCKING: 158711fc33daSTejun Heo * EH context (may sleep). 158811fc33daSTejun Heo * 158911fc33daSTejun Heo * RETURNS: 159011fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 
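 *
 * Illustrative sketch of consuming the sense key, e.g. to detect a
 * pending UNIT ATTENTION condition; clear_ua() is a hypothetical
 * stand-in for issuing REQUEST SENSE and retrying, the surrounding
 * policy is assumed rather than copied from this file:
 *
 *	u8 sense_key = 0;
 *	unsigned int err_mask = atapi_eh_tur(dev, &sense_key);
 *
 *	if (err_mask == AC_ERR_DEV && sense_key == UNIT_ATTENTION)
 *		clear_ua(dev);
 *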
159111fc33daSTejun Heo */ 159211fc33daSTejun Heo static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 159311fc33daSTejun Heo { 159411fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 159511fc33daSTejun Heo struct ata_taskfile tf; 159611fc33daSTejun Heo unsigned int err_mask; 159711fc33daSTejun Heo 159811fc33daSTejun Heo ata_tf_init(dev, &tf); 159911fc33daSTejun Heo 160011fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 160111fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 160211fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 160311fc33daSTejun Heo 160411fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 160511fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 160611fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 160711fc33daSTejun Heo return err_mask; 160811fc33daSTejun Heo } 160911fc33daSTejun Heo 161011fc33daSTejun Heo /** 1611c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1612c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1613c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 16143eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1615c6fd2807SJeff Garzik * 1616c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1617c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1618c6fd2807SJeff Garzik * 1619c6fd2807SJeff Garzik * LOCKING: 1620c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1621c6fd2807SJeff Garzik * 1622c6fd2807SJeff Garzik * RETURNS: 1623c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1624c6fd2807SJeff Garzik */ 16253eabddb8STejun Heo static unsigned int atapi_eh_request_sense(struct ata_device *dev, 16263eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1627c6fd2807SJeff Garzik { 16283eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 16293eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 16309af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1631c6fd2807SJeff Garzik struct ata_taskfile tf; 1632c6fd2807SJeff Garzik 1633c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1634c6fd2807SJeff Garzik 1635c6fd2807SJeff Garzik /* FIXME: is this needed? */ 1636c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1637c6fd2807SJeff Garzik 163856287768SAlbert Lee /* initialize sense_buf with the error register, 163956287768SAlbert Lee * for the case where they are -not- overwritten 164056287768SAlbert Lee */ 1641c6fd2807SJeff Garzik sense_buf[0] = 0x70; 16423eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 164356287768SAlbert Lee 164456287768SAlbert Lee /* some devices time out if garbage left in tf */ 164556287768SAlbert Lee ata_tf_init(dev, &tf); 1646c6fd2807SJeff Garzik 1647c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1648c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1649c6fd2807SJeff Garzik 1650c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? 
*/ 1651c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 16520dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1653c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1654c6fd2807SJeff Garzik } else { 16550dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1656f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1657f2dfc1a1STejun Heo tf.lbah = 0; 1658c6fd2807SJeff Garzik } 1659c6fd2807SJeff Garzik 1660c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 16612b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1662c6fd2807SJeff Garzik } 1663c6fd2807SJeff Garzik 1664c6fd2807SJeff Garzik /** 1665c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 16660260731fSTejun Heo * @link: ATA link to analyze SError for 1667c6fd2807SJeff Garzik * 1668c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1669c6fd2807SJeff Garzik * failure. 1670c6fd2807SJeff Garzik * 1671c6fd2807SJeff Garzik * LOCKING: 1672c6fd2807SJeff Garzik * None. 1673c6fd2807SJeff Garzik */ 16740260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1675c6fd2807SJeff Garzik { 16760260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1677c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1678c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1679f9df58cbSTejun Heo u32 hotplug_mask; 1680c6fd2807SJeff Garzik 1681e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1682c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1683cf480626STejun Heo action |= ATA_EH_RESET; 1684c6fd2807SJeff Garzik } 1685c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1686c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1687cf480626STejun Heo action |= ATA_EH_RESET; 1688c6fd2807SJeff Garzik } 1689c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1690c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1691cf480626STejun Heo action |= ATA_EH_RESET; 1692c6fd2807SJeff Garzik } 1693f9df58cbSTejun Heo 1694f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1695f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1696f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1697f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1698f9df58cbSTejun Heo */ 1699eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 17006b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 17016b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1702f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1703f9df58cbSTejun Heo else 1704f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1705f9df58cbSTejun Heo 1706f9df58cbSTejun Heo if (serror & hotplug_mask) 1707c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1708c6fd2807SJeff Garzik 1709c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1710c6fd2807SJeff Garzik ehc->i.action |= action; 1711c6fd2807SJeff Garzik } 1712c6fd2807SJeff Garzik 1713c6fd2807SJeff Garzik /** 1714c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 17150260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1716c6fd2807SJeff Garzik * 1717c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1718c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1719c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1720c6fd2807SJeff Garzik * care of the rest. 
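 *
 * A minimal LLDD-side sketch for an NCQ device error, per the paragraph
 * above; the interrupt-context details are omitted and only assumed:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ehi->err_mask |= AC_ERR_DEV;
 *	ata_port_abort(ap);
 *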
1721c6fd2807SJeff Garzik * 1722c6fd2807SJeff Garzik * LOCKING: 1723c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1724c6fd2807SJeff Garzik */ 172510acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1726c6fd2807SJeff Garzik { 17270260731fSTejun Heo struct ata_port *ap = link->ap; 17280260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 17290260731fSTejun Heo struct ata_device *dev = link->device; 1730c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1731c6fd2807SJeff Garzik struct ata_taskfile tf; 1732c6fd2807SJeff Garzik int tag, rc; 1733c6fd2807SJeff Garzik 1734c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1735c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1736c6fd2807SJeff Garzik return; 1737c6fd2807SJeff Garzik 1738c6fd2807SJeff Garzik /* is it NCQ device error? */ 17390260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1740c6fd2807SJeff Garzik return; 1741c6fd2807SJeff Garzik 1742c6fd2807SJeff Garzik /* has LLDD analyzed already? */ 1743c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1744c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1745c6fd2807SJeff Garzik 1746c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1747c6fd2807SJeff Garzik continue; 1748c6fd2807SJeff Garzik 1749c6fd2807SJeff Garzik if (qc->err_mask) 1750c6fd2807SJeff Garzik return; 1751c6fd2807SJeff Garzik } 1752c6fd2807SJeff Garzik 1753c6fd2807SJeff Garzik /* okay, this error is ours */ 1754a09bf4cdSJeff Garzik memset(&tf, 0, sizeof(tf)); 1755c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1756c6fd2807SJeff Garzik if (rc) { 1757a9a79dfeSJoe Perches ata_link_err(link, "failed to read log page 10h (errno=%d)\n", 1758a9a79dfeSJoe Perches rc); 1759c6fd2807SJeff Garzik return; 1760c6fd2807SJeff Garzik } 1761c6fd2807SJeff Garzik 17620260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 1763a9a79dfeSJoe Perches ata_link_err(link, "log page 10h reported inactive tag %d\n", 1764a9a79dfeSJoe Perches tag); 1765c6fd2807SJeff Garzik return; 1766c6fd2807SJeff Garzik } 1767c6fd2807SJeff Garzik 1768c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1769c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1770c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1771a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 17725335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1773c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1774c6fd2807SJeff Garzik } 1775c6fd2807SJeff Garzik 1776c6fd2807SJeff Garzik /** 1777c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1778c6fd2807SJeff Garzik * @qc: qc to analyze 1779c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1780c6fd2807SJeff Garzik * 1781c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1782c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 178325985edcSLucas De Marchi * available. 1784c6fd2807SJeff Garzik * 1785c6fd2807SJeff Garzik * LOCKING: 1786c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
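 *
 * Worked example (illustrative): a command completing with status 0x51
 * (DRDY|DSC|ERR) and error 0x40 (UNC) gets AC_ERR_MEDIA added to
 * qc->err_mask and does not request a reset, while any status with BSY
 * or DRQ still set is flagged as an HSM violation and returns
 * ATA_EH_RESET.
 *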
1787c6fd2807SJeff Garzik * 1788c6fd2807SJeff Garzik * RETURNS: 1789c6fd2807SJeff Garzik * Determined recovery action 1790c6fd2807SJeff Garzik */ 1791c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1792c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1793c6fd2807SJeff Garzik { 1794c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1795c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1796c6fd2807SJeff Garzik 1797c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1798c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1799cf480626STejun Heo return ATA_EH_RESET; 1800c6fd2807SJeff Garzik } 1801c6fd2807SJeff Garzik 1802a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1803a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1804a51d644aSTejun Heo else 1805c6fd2807SJeff Garzik return 0; 1806c6fd2807SJeff Garzik 1807c6fd2807SJeff Garzik switch (qc->dev->class) { 1808c6fd2807SJeff Garzik case ATA_DEV_ATA: 1809c6fd2807SJeff Garzik if (err & ATA_ICRC) 1810c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1811c6fd2807SJeff Garzik if (err & ATA_UNC) 1812c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1813c6fd2807SJeff Garzik if (err & ATA_IDNF) 1814c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1815c6fd2807SJeff Garzik break; 1816c6fd2807SJeff Garzik 1817c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1818a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 18193eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 18203eabddb8STejun Heo qc->scsicmd->sense_buffer, 18213eabddb8STejun Heo qc->result_tf.feature >> 4); 1822c6fd2807SJeff Garzik if (!tmp) { 1823a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1824a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1825a569a30dSTejun Heo * data is already valid. 1826c6fd2807SJeff Garzik * 1827c6fd2807SJeff Garzik * TODO: interpret sense data and set 1828c6fd2807SJeff Garzik * appropriate err_mask. 
1829c6fd2807SJeff Garzik */ 1830c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1831c6fd2807SJeff Garzik } else 1832c6fd2807SJeff Garzik qc->err_mask |= tmp; 1833c6fd2807SJeff Garzik } 1834a569a30dSTejun Heo } 1835c6fd2807SJeff Garzik 1836c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1837cf480626STejun Heo action |= ATA_EH_RESET; 1838c6fd2807SJeff Garzik 1839c6fd2807SJeff Garzik return action; 1840c6fd2807SJeff Garzik } 1841c6fd2807SJeff Garzik 184276326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 184376326ac1STejun Heo int *xfer_ok) 1844c6fd2807SJeff Garzik { 184576326ac1STejun Heo int base = 0; 184676326ac1STejun Heo 184776326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 184876326ac1STejun Heo *xfer_ok = 1; 184976326ac1STejun Heo 185076326ac1STejun Heo if (!*xfer_ok) 185175f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 185276326ac1STejun Heo 18537d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 185476326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1855c6fd2807SJeff Garzik 18567d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 185776326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 18587d47e8d4STejun Heo 18593884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 18607d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 186176326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 18627d47e8d4STejun Heo if ((err_mask & 18637d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 186476326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1865c6fd2807SJeff Garzik } 1866c6fd2807SJeff Garzik 1867c6fd2807SJeff Garzik return 0; 1868c6fd2807SJeff Garzik } 1869c6fd2807SJeff Garzik 18707d47e8d4STejun Heo struct speed_down_verdict_arg { 1871c6fd2807SJeff Garzik u64 since; 187276326ac1STejun Heo int xfer_ok; 18733884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1874c6fd2807SJeff Garzik }; 1875c6fd2807SJeff Garzik 18767d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1877c6fd2807SJeff Garzik { 18787d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 187976326ac1STejun Heo int cat; 1880c6fd2807SJeff Garzik 1881d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1882c6fd2807SJeff Garzik return -1; 1883c6fd2807SJeff Garzik 188476326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 188576326ac1STejun Heo &arg->xfer_ok); 18867d47e8d4STejun Heo arg->nr_errors[cat]++; 188776326ac1STejun Heo 1888c6fd2807SJeff Garzik return 0; 1889c6fd2807SJeff Garzik } 1890c6fd2807SJeff Garzik 1891c6fd2807SJeff Garzik /** 18927d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1893c6fd2807SJeff Garzik * @dev: Device of interest 1894c6fd2807SJeff Garzik * 1895c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 18967d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 18977d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
1898c6fd2807SJeff Garzik * 18993884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1900c6fd2807SJeff Garzik * 19013884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 19023884f7b0STejun Heo * IO commands 19037d47e8d4STejun Heo * 19043884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1905c6fd2807SJeff Garzik * 190676326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 190776326ac1STejun Heo * data transfer hasn't been verified. 190876326ac1STejun Heo * 19093884f7b0STejun Heo * Verdicts are 19107d47e8d4STejun Heo * 19113884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 19127d47e8d4STejun Heo * 19133884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 19143884f7b0STejun Heo * to PIO. 19153884f7b0STejun Heo * 19163884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 19173884f7b0STejun Heo * 19183884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 191976326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 192076326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 192176326ac1STejun Heo * This is to expedite speed down decisions right after device is 192276326ac1STejun Heo * initially configured. 19233884f7b0STejun Heo * 192476326ac1STejun Heo * The followings are speed down rules. #1 and #2 deal with 192576326ac1STejun Heo * DUBIOUS errors. 192676326ac1STejun Heo * 192776326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 192876326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 192976326ac1STejun Heo * 193076326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 193176326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 193276326ac1STejun Heo * 193376326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 193425985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO 19353884f7b0STejun Heo * 193676326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 19373884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 19383884f7b0STejun Heo * 193976326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 19403884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 19417d47e8d4STejun Heo * 1942c6fd2807SJeff Garzik * LOCKING: 1943c6fd2807SJeff Garzik * Inherited from caller. 1944c6fd2807SJeff Garzik * 1945c6fd2807SJeff Garzik * RETURNS: 19467d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 
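 *
 * Worked example (illustrative): two DUBIOUS_TOUT_HSM errors within the
 * last 5 minutes match rules #1 and #2 above, so the verdict is
 * SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF with KEEP_ERRORS set: the
 * device is slowed down immediately but its error ring is preserved.
 *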
1947c6fd2807SJeff Garzik */ 19487d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1949c6fd2807SJeff Garzik { 19507d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 19517d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 19527d47e8d4STejun Heo struct speed_down_verdict_arg arg; 19537d47e8d4STejun Heo unsigned int verdict = 0; 1954c6fd2807SJeff Garzik 19553884f7b0STejun Heo /* scan past 5 mins of error history */ 19563884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 19573884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 19583884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 19593884f7b0STejun Heo 196076326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 196176326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 196276326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 196376326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 196476326ac1STejun Heo 196576326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 196676326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 196776326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 196876326ac1STejun Heo 19693884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 19703884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1971663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 19723884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 19733884f7b0STejun Heo 19747d47e8d4STejun Heo /* scan past 10 mins of error history */ 1975c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 19767d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 19777d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1978c6fd2807SJeff Garzik 19793884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 19803884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 19817d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 19823884f7b0STejun Heo 19833884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 19843884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1985663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 19867d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1987c6fd2807SJeff Garzik 19887d47e8d4STejun Heo return verdict; 1989c6fd2807SJeff Garzik } 1990c6fd2807SJeff Garzik 1991c6fd2807SJeff Garzik /** 1992c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1993c6fd2807SJeff Garzik * @dev: Failed device 19943884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1995c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1996c6fd2807SJeff Garzik * 1997c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1998c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1999c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 2000c6fd2807SJeff Garzik * necessary. 2001c6fd2807SJeff Garzik * 2002c6fd2807SJeff Garzik * LOCKING: 2003c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2004c6fd2807SJeff Garzik * 2005c6fd2807SJeff Garzik * RETURNS: 20067d47e8d4STejun Heo * Determined recovery action. 
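 *
 * The caller merges the result into the pending EH action, as the link
 * autopsy code below does:
 *
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
 *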
2007c6fd2807SJeff Garzik */ 20083884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 20093884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 2010c6fd2807SJeff Garzik { 2011b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 201276326ac1STejun Heo int xfer_ok = 0; 20137d47e8d4STejun Heo unsigned int verdict; 20147d47e8d4STejun Heo unsigned int action = 0; 20157d47e8d4STejun Heo 20167d47e8d4STejun Heo /* don't bother if Cat-0 error */ 201776326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 2018c6fd2807SJeff Garzik return 0; 2019c6fd2807SJeff Garzik 2020c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 20213884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 20227d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 2023c6fd2807SJeff Garzik 20247d47e8d4STejun Heo /* turn off NCQ? */ 20257d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 20267d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 20277d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 20287d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 2029a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 20307d47e8d4STejun Heo goto done; 20317d47e8d4STejun Heo } 2032c6fd2807SJeff Garzik 20337d47e8d4STejun Heo /* speed down? */ 20347d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 2035c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 2036a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 2037cf480626STejun Heo action |= ATA_EH_RESET; 20387d47e8d4STejun Heo goto done; 20397d47e8d4STejun Heo } 2040c6fd2807SJeff Garzik 2041c6fd2807SJeff Garzik /* lower transfer mode */ 20427d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 20437d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 20447d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 20457d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 20467d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 20477d47e8d4STejun Heo int sel; 2048c6fd2807SJeff Garzik 20497d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 20507d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 20517d47e8d4STejun Heo else 20527d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 20537d47e8d4STejun Heo 20547d47e8d4STejun Heo dev->spdn_cnt++; 20557d47e8d4STejun Heo 20567d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 2057cf480626STejun Heo action |= ATA_EH_RESET; 20587d47e8d4STejun Heo goto done; 20597d47e8d4STejun Heo } 20607d47e8d4STejun Heo } 20617d47e8d4STejun Heo } 20627d47e8d4STejun Heo 20637d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 2064663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
20657d47e8d4STejun Heo */
20667d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2067663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
20687d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) {
20697d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
20707d47e8d4STejun Heo dev->spdn_cnt = 0;
2071cf480626STejun Heo action |= ATA_EH_RESET;
20727d47e8d4STejun Heo goto done;
20737d47e8d4STejun Heo }
20747d47e8d4STejun Heo }
20757d47e8d4STejun Heo
2076c6fd2807SJeff Garzik return 0;
20777d47e8d4STejun Heo done:
20787d47e8d4STejun Heo /* device has been slowed down, blow error history */
207976326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
20807d47e8d4STejun Heo ata_ering_clear(&dev->ering);
20817d47e8d4STejun Heo return action;
2082c6fd2807SJeff Garzik }
2083c6fd2807SJeff Garzik
2084c6fd2807SJeff Garzik /**
20858d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry
20868d899e70SMark Lord * @qc: qc to possibly retry
20878d899e70SMark Lord *
20888d899e70SMark Lord * Look at the cause of the error and decide if a retry
20898d899e70SMark Lord * might be useful or not. We don't want to retry media errors
20908d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds
20918d899e70SMark Lord * doing its own internal retries before reporting the failure.
20928d899e70SMark Lord */
20938d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
20948d899e70SMark Lord {
20958d899e70SMark Lord if (qc->err_mask & AC_ERR_MEDIA)
20968d899e70SMark Lord return 0; /* don't retry media errors */
20978d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO)
20988d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */
20998d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID)
21008d899e70SMark Lord return 0; /* don't retry these */
21018d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
21028d899e70SMark Lord }
21038d899e70SMark Lord
21048d899e70SMark Lord /**
21059b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action
21069b1e2658STejun Heo * @link: host link to perform autopsy on
2107c6fd2807SJeff Garzik *
21080260731fSTejun Heo * Analyze why @link failed and determine which recovery actions
21090260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_*
21100260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE.
2111c6fd2807SJeff Garzik *
2112c6fd2807SJeff Garzik * LOCKING:
2113c6fd2807SJeff Garzik * Kernel thread context (may sleep).
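 *
 * Worked example (illustrative): a failed NCQ read that hit a media
 * error picks up AC_ERR_DEV, AC_ERR_NCQ and AC_ERR_MEDIA in
 * qc->err_mask, is not marked ATA_QCFLAG_RETRY (media errors are not
 * worth retrying, see ata_eh_worth_retry() above), and leaves
 * ATA_EH_REVALIDATE set in ehc->i.action because it was an I/O command.
 *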
2114c6fd2807SJeff Garzik */ 21159b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 2116c6fd2807SJeff Garzik { 21170260731fSTejun Heo struct ata_port *ap = link->ap; 2118936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2119dfcc173dSTejun Heo struct ata_device *dev; 21203884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 21213884f7b0STejun Heo int tag; 2122c6fd2807SJeff Garzik u32 serror; 2123c6fd2807SJeff Garzik int rc; 2124c6fd2807SJeff Garzik 2125c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2126c6fd2807SJeff Garzik 2127c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2128c6fd2807SJeff Garzik return; 2129c6fd2807SJeff Garzik 2130c6fd2807SJeff Garzik /* obtain and analyze SError */ 2131936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2132c6fd2807SJeff Garzik if (rc == 0) { 2133c6fd2807SJeff Garzik ehc->i.serror |= serror; 21340260731fSTejun Heo ata_eh_analyze_serror(link); 21354e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2136cf480626STejun Heo /* SError read failed, force reset and probing */ 2137b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2138cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 21394e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 21404e57c517STejun Heo } 2141c6fd2807SJeff Garzik 2142c6fd2807SJeff Garzik /* analyze NCQ failure */ 21430260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2144c6fd2807SJeff Garzik 2145c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2146c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2147c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2148c6fd2807SJeff Garzik 2149c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2150c6fd2807SJeff Garzik 2151c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2152c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2153c6fd2807SJeff Garzik 2154b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2155b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2156c6fd2807SJeff Garzik continue; 2157c6fd2807SJeff Garzik 2158c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2159c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2160c6fd2807SJeff Garzik 2161c6fd2807SJeff Garzik /* analyze TF */ 2162c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2163c6fd2807SJeff Garzik 2164c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2165c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2166c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2167c6fd2807SJeff Garzik AC_ERR_INVALID); 2168c6fd2807SJeff Garzik 2169c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2170c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2171c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2172c6fd2807SJeff Garzik 2173c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 2174f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2175c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2176c6fd2807SJeff Garzik 217703faab78STejun Heo /* determine whether the command is worth retrying */ 21788d899e70SMark Lord if (ata_eh_worth_retry(qc)) 217903faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 218003faab78STejun Heo 2181c6fd2807SJeff Garzik /* accumulate error info */ 2182c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2183c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2184c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 21853884f7b0STejun 
Heo eflags |= ATA_EFLAG_IS_IO; 2186c6fd2807SJeff Garzik } 2187c6fd2807SJeff Garzik 2188c6fd2807SJeff Garzik /* enforce default EH actions */ 2189c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2190c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2191cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 21923884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 21933884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2194c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2195c6fd2807SJeff Garzik 2196dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2197dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2198dfcc173dSTejun Heo */ 2199c6fd2807SJeff Garzik if (ehc->i.dev) { 2200c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2201c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2202c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2203c6fd2807SJeff Garzik } 2204c6fd2807SJeff Garzik 22052695e366STejun Heo /* propagate timeout to host link */ 22062695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 22072695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 22082695e366STejun Heo 22092695e366STejun Heo /* record error and consider speeding down */ 2210dfcc173dSTejun Heo dev = ehc->i.dev; 22112695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 22122695e366STejun Heo ata_dev_enabled(link->device)))) 2213dfcc173dSTejun Heo dev = link->device; 2214dfcc173dSTejun Heo 221576326ac1STejun Heo if (dev) { 221676326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 221776326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 22183884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 221976326ac1STejun Heo } 2220dfcc173dSTejun Heo 2221c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2222c6fd2807SJeff Garzik } 2223c6fd2807SJeff Garzik 2224c6fd2807SJeff Garzik /** 22259b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 22269b1e2658STejun Heo * @ap: host port to perform autopsy on 22279b1e2658STejun Heo * 22289b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 22299b1e2658STejun Heo * which recovery actions are needed. 22309b1e2658STejun Heo * 22319b1e2658STejun Heo * LOCKING: 22329b1e2658STejun Heo * Kernel thread context (may sleep). 22339b1e2658STejun Heo */ 2234fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 22359b1e2658STejun Heo { 22369b1e2658STejun Heo struct ata_link *link; 22379b1e2658STejun Heo 22381eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 22399b1e2658STejun Heo ata_eh_link_autopsy(link); 22402695e366STejun Heo 2241b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2242b1c72916STejun Heo * but actions and flags are transferred over to the master 2243b1c72916STejun Heo * link and handled from there. 
2244b1c72916STejun Heo */ 2245b1c72916STejun Heo if (ap->slave_link) { 2246b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2247b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2248b1c72916STejun Heo 2249848e4c68STejun Heo /* transfer control flags from master to slave */ 2250848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2251848e4c68STejun Heo 2252848e4c68STejun Heo /* perform autopsy on the slave link */ 2253b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2254b1c72916STejun Heo 2255848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2256b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2257b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2258b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2259b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2260b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2261b1c72916STejun Heo } 2262b1c72916STejun Heo 22632695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 22642695e366STejun Heo * Perform host link autopsy last. 22652695e366STejun Heo */ 2266071f44b1STejun Heo if (sata_pmp_attached(ap)) 22672695e366STejun Heo ata_eh_link_autopsy(&ap->link); 22689b1e2658STejun Heo } 22699b1e2658STejun Heo 22709b1e2658STejun Heo /** 22716521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 22726521148cSRobert Hancock * @command: ATA command code to get description for 22736521148cSRobert Hancock * 22746521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 22756521148cSRobert Hancock * command is not known. 22766521148cSRobert Hancock * 22776521148cSRobert Hancock * LOCKING: 22786521148cSRobert Hancock * None 22796521148cSRobert Hancock */ 22806521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 22816521148cSRobert Hancock { 22826521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 22836521148cSRobert Hancock static const struct 22846521148cSRobert Hancock { 22856521148cSRobert Hancock u8 command; 22866521148cSRobert Hancock const char *text; 22876521148cSRobert Hancock } cmd_descr[] = { 22886521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 22896521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 22906521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 22916521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 22926521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 22936521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 22946521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 22956521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 22966521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 22976521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 22986521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 22996521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 23006521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 23016521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 23026521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 23036521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 23046521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 23056521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 23066521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 23076521148cSRobert Hancock { 
ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 23086521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 23096521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 23106521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 23116521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 23126521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 23136521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 23146521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 23156521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 23166521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 23176521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 23186521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 23196521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 23206521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 23216521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 23226521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 23236521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 23246521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 23256521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 23266521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 23276521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 23286521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 23296521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 23306521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 23316521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 23326521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 23336521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 23346521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 23356521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 23366521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 23376521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 23386521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 23396521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 23406521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 23416521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 23426521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 23436521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 23446521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 23456521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 23466521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 23476521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 23486521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 23496521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 23506521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 23516521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 23526521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 23536521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE 
STREAM" }, 23546521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 23556521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 23566521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2357acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 23586521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 23596521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 23606521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 23616521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 23626521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 23636521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 23646521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 23656521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 23666521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 23676521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 23686521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 23696521148cSRobert Hancock { 0, NULL } /* terminate list */ 23706521148cSRobert Hancock }; 23716521148cSRobert Hancock 23726521148cSRobert Hancock unsigned int i; 23736521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 23746521148cSRobert Hancock if (cmd_descr[i].command == command) 23756521148cSRobert Hancock return cmd_descr[i].text; 23766521148cSRobert Hancock #endif 23776521148cSRobert Hancock 23786521148cSRobert Hancock return NULL; 23796521148cSRobert Hancock } 23806521148cSRobert Hancock 23816521148cSRobert Hancock /** 23829b1e2658STejun Heo * ata_eh_link_report - report error handling to user 23830260731fSTejun Heo * @link: ATA link EH is going on 2384c6fd2807SJeff Garzik * 2385c6fd2807SJeff Garzik * Report EH to user. 2386c6fd2807SJeff Garzik * 2387c6fd2807SJeff Garzik * LOCKING: 2388c6fd2807SJeff Garzik * None. 
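 *
 * For ATA commands the per-command output uses ata_get_cmd_descript()
 * above, e.g. a failed READ FPDMA QUEUED is reported as
 * "failed command: READ FPDMA QUEUED" followed by the cmd/res taskfile
 * dump; without CONFIG_ATA_VERBOSE_ERROR the descriptive line is
 * omitted.
 *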
2389c6fd2807SJeff Garzik */ 23909b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2391c6fd2807SJeff Garzik { 23920260731fSTejun Heo struct ata_port *ap = link->ap; 23930260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2394c6fd2807SJeff Garzik const char *frozen, *desc; 2395a1e10f7eSTejun Heo char tries_buf[6]; 2396c6fd2807SJeff Garzik int tag, nr_failed = 0; 2397c6fd2807SJeff Garzik 239894ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 239994ff3d54STejun Heo return; 240094ff3d54STejun Heo 2401c6fd2807SJeff Garzik desc = NULL; 2402c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2403c6fd2807SJeff Garzik desc = ehc->i.desc; 2404c6fd2807SJeff Garzik 2405c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2406c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2407c6fd2807SJeff Garzik 2408b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2409b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2410e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2411e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2412c6fd2807SJeff Garzik continue; 2413c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2414c6fd2807SJeff Garzik continue; 2415c6fd2807SJeff Garzik 2416c6fd2807SJeff Garzik nr_failed++; 2417c6fd2807SJeff Garzik } 2418c6fd2807SJeff Garzik 2419c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2420c6fd2807SJeff Garzik return; 2421c6fd2807SJeff Garzik 2422c6fd2807SJeff Garzik frozen = ""; 2423c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2424c6fd2807SJeff Garzik frozen = " frozen"; 2425c6fd2807SJeff Garzik 2426a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 2427a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2428a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2429a1e10f7eSTejun Heo ap->eh_tries); 2430a1e10f7eSTejun Heo 2431c6fd2807SJeff Garzik if (ehc->i.dev) { 2432a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2433a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2434a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2435a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2436c6fd2807SJeff Garzik if (desc) 2437a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2438c6fd2807SJeff Garzik } else { 2439a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2440a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2441a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2442a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2443c6fd2807SJeff Garzik if (desc) 2444a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2445c6fd2807SJeff Garzik } 2446c6fd2807SJeff Garzik 24476521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 24481333e194SRobert Hancock if (ehc->i.serror) 2449a9a79dfeSJoe Perches ata_link_err(link, 24501333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 24511333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 24521333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 24531333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 24541333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 24551333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 24561333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 24571333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? 
"PHYRdyChg " : "", 24581333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 24591333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 24601333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 24611333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 24621333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 24631333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 24641333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 24651333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 24661333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 24671333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 24686521148cSRobert Hancock #endif 24691333e194SRobert Hancock 2470c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2471c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 24728a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2473abb6a889STejun Heo const u8 *cdb = qc->cdb; 2474abb6a889STejun Heo char data_buf[20] = ""; 2475abb6a889STejun Heo char cdb_buf[70] = ""; 2476c6fd2807SJeff Garzik 24770260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2478b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2479c6fd2807SJeff Garzik continue; 2480c6fd2807SJeff Garzik 2481abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2482abb6a889STejun Heo static const char *dma_str[] = { 2483abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2484abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2485abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2486abb6a889STejun Heo }; 2487abb6a889STejun Heo static const char *prot_str[] = { 2488abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 2489abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 2490abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 24910dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 24920dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 2493abb6a889STejun Heo }; 2494abb6a889STejun Heo 2495abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2496abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 2497abb6a889STejun Heo dma_str[qc->dma_dir]); 2498abb6a889STejun Heo } 2499abb6a889STejun Heo 25006521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 25016521148cSRobert Hancock if (qc->scsicmd) 25026521148cSRobert Hancock scsi_print_command(qc->scsicmd); 25036521148cSRobert Hancock else 2504abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 2505abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2506abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2507abb6a889STejun Heo cdb[0], cdb[1], cdb[2], cdb[3], 2508abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 2509abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 2510abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 25116521148cSRobert Hancock } else { 25126521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 25136521148cSRobert Hancock if (descr) 2514a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2515a9a79dfeSJoe Perches descr); 25166521148cSRobert Hancock } 2517abb6a889STejun Heo 2518a9a79dfeSJoe Perches ata_dev_err(qc->dev, 25198a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2520abb6a889STejun Heo "tag %d%s\n %s" 25218a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 25225335b729STejun 
Heo "Emask 0x%x (%s)%s\n", 25238a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 25248a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 25258a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 25268a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2527abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 25288a937581STejun Heo res->command, res->feature, res->nsect, 25298a937581STejun Heo res->lbal, res->lbam, res->lbah, 25308a937581STejun Heo res->hob_feature, res->hob_nsect, 25318a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 25325335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 25335335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 25341333e194SRobert Hancock 25356521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 25361333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 25371333e194SRobert Hancock ATA_ERR)) { 25381333e194SRobert Hancock if (res->command & ATA_BUSY) 2539a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 25401333e194SRobert Hancock else 2541a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { %s%s%s%s}\n", 25421333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 25431333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 25441333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 25451333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 25461333e194SRobert Hancock } 25471333e194SRobert Hancock 25481333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 25491333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 25501333e194SRobert Hancock ATA_ABORTED))) 2551a9a79dfeSJoe Perches ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", 25521333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 25531333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 25541333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 25551333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 25566521148cSRobert Hancock #endif 2557c6fd2807SJeff Garzik } 2558c6fd2807SJeff Garzik } 2559c6fd2807SJeff Garzik 25609b1e2658STejun Heo /** 25619b1e2658STejun Heo * ata_eh_report - report error handling to user 25629b1e2658STejun Heo * @ap: ATA port to report EH about 25639b1e2658STejun Heo * 25649b1e2658STejun Heo * Report EH to user. 25659b1e2658STejun Heo * 25669b1e2658STejun Heo * LOCKING: 25679b1e2658STejun Heo * None. 
25689b1e2658STejun Heo */ 2569fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 25709b1e2658STejun Heo { 25719b1e2658STejun Heo struct ata_link *link; 25729b1e2658STejun Heo 25731eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 25749b1e2658STejun Heo ata_eh_link_report(link); 25759b1e2658STejun Heo } 25769b1e2658STejun Heo 2577cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2578b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2579b1c72916STejun Heo bool clear_classes) 2580c6fd2807SJeff Garzik { 2581f58229f8STejun Heo struct ata_device *dev; 2582c6fd2807SJeff Garzik 2583b1c72916STejun Heo if (clear_classes) 25841eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2585f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2586c6fd2807SJeff Garzik 2587f046519fSTejun Heo return reset(link, classes, deadline); 2588c6fd2807SJeff Garzik } 2589c6fd2807SJeff Garzik 2590e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2591c6fd2807SJeff Garzik { 259245db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2593ae791c05STejun Heo return 0; 25945dbfc9cbSTejun Heo if (rc == -EAGAIN) 2595c6fd2807SJeff Garzik return 1; 2596071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 25973495de73STejun Heo return 1; 2598c6fd2807SJeff Garzik return 0; 2599c6fd2807SJeff Garzik } 2600c6fd2807SJeff Garzik 2601fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2602c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2603c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2604c6fd2807SJeff Garzik { 2605afaa5c37STejun Heo struct ata_port *ap = link->ap; 2606b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2607936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2608705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2609c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2610416dc9edSTejun Heo unsigned int lflags = link->flags; 2611c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2612d8af0eb6STejun Heo int max_tries = 0, try = 0; 2613b1c72916STejun Heo struct ata_link *failed_link; 2614f58229f8STejun Heo struct ata_device *dev; 2615416dc9edSTejun Heo unsigned long deadline, now; 2616c6fd2807SJeff Garzik ata_reset_fn_t reset; 2617afaa5c37STejun Heo unsigned long flags; 2618416dc9edSTejun Heo u32 sstatus; 2619b1c72916STejun Heo int nr_unknown, rc; 2620c6fd2807SJeff Garzik 2621932648b0STejun Heo /* 2622932648b0STejun Heo * Prepare to reset 2623932648b0STejun Heo */ 2624d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2625d8af0eb6STejun Heo max_tries++; 262605944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 262705944bdfSTejun Heo hardreset = NULL; 262805944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 262905944bdfSTejun Heo softreset = NULL; 2630d8af0eb6STejun Heo 263125985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 263219b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 26330a2c0f56STejun Heo now = jiffies; 263419b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 263519b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 263619b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 26370a2c0f56STejun Heo if (time_before(now, deadline)) 26380a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 263919b72321STejun Heo } 26400a2c0f56STejun Heo 2641afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2642afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2643afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2644afaa5c37STejun Heo 2645cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2646c6fd2807SJeff Garzik 26471eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2648cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2649cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2650cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2651cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2652cdeab114STejun Heo * suitable controller mode we should not touch the 2653cdeab114STejun Heo * bus as we may be talking too fast. 2654cdeab114STejun Heo */ 2655cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2656cdeab114STejun Heo 2657cdeab114STejun Heo /* If the controller has a pio mode setup function 2658cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2659cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2660cdeab114STejun Heo * configuring devices. 
2661cdeab114STejun Heo */ 2662cdeab114STejun Heo if (ap->ops->set_piomode) 2663cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2664cdeab114STejun Heo } 2665cdeab114STejun Heo 2666cf480626STejun Heo /* prefer hardreset */ 2667932648b0STejun Heo reset = NULL; 2668cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2669cf480626STejun Heo if (hardreset) { 2670cf480626STejun Heo reset = hardreset; 2671a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 26724f7faa3fSTejun Heo } else if (softreset) { 2673cf480626STejun Heo reset = softreset; 2674a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2675cf480626STejun Heo } 2676c6fd2807SJeff Garzik 2677c6fd2807SJeff Garzik if (prereset) { 2678b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2679b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2680b1c72916STejun Heo 2681b1c72916STejun Heo if (slave) { 2682b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2683b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2684b1c72916STejun Heo } 2685b1c72916STejun Heo 2686b1c72916STejun Heo rc = prereset(link, deadline); 2687b1c72916STejun Heo 2688b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2689b1c72916STejun Heo * is skipped iff both master and slave links report 2690b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2691b1c72916STejun Heo */ 2692b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2693b1c72916STejun Heo int tmp; 2694b1c72916STejun Heo 2695b1c72916STejun Heo tmp = prereset(slave, deadline); 2696b1c72916STejun Heo if (tmp != -ENOENT) 2697b1c72916STejun Heo rc = tmp; 2698b1c72916STejun Heo 2699b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2700b1c72916STejun Heo } 2701b1c72916STejun Heo 2702c6fd2807SJeff Garzik if (rc) { 2703c961922bSAlan Cox if (rc == -ENOENT) { 2704a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2705cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 27064aa9ab67STejun Heo 27071eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2708f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 27094aa9ab67STejun Heo 27104aa9ab67STejun Heo rc = 0; 2711c961922bSAlan Cox } else 2712a9a79dfeSJoe Perches ata_link_err(link, 2713a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2714a9a79dfeSJoe Perches rc); 2715fccb6ea5STejun Heo goto out; 2716c6fd2807SJeff Garzik } 2717c6fd2807SJeff Garzik 2718932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2719d6515e6fSTejun Heo * bang classes, thaw and return. 2720932648b0STejun Heo */ 2721932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 27221eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2723f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2724d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2725d6515e6fSTejun Heo ata_is_host_link(link)) 2726d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2727fccb6ea5STejun Heo rc = 0; 2728fccb6ea5STejun Heo goto out; 2729c6fd2807SJeff Garzik } 2730932648b0STejun Heo } 2731c6fd2807SJeff Garzik 2732c6fd2807SJeff Garzik retry: 2733932648b0STejun Heo /* 2734932648b0STejun Heo * Perform reset 2735932648b0STejun Heo */ 2736dc98c32cSTejun Heo if (ata_is_host_link(link)) 2737dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2738dc98c32cSTejun Heo 2739341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 274031daabdaSTejun Heo 2741932648b0STejun Heo if (reset) { 2742c6fd2807SJeff Garzik if (verbose) 2743a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2744c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2745c6fd2807SJeff Garzik 2746c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 274719b72321STejun Heo ehc->last_reset = jiffies; 27480d64a233STejun Heo if (reset == hardreset) 27490d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 27500d64a233STejun Heo else 27510d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2752c6fd2807SJeff Garzik 2753b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2754b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2755b1c72916STejun Heo failed_link = link; 27565dbfc9cbSTejun Heo goto fail; 2757b1c72916STejun Heo } 2758c6fd2807SJeff Garzik 2759b1c72916STejun Heo /* hardreset slave link if existent */ 2760b1c72916STejun Heo if (slave && reset == hardreset) { 2761b1c72916STejun Heo int tmp; 2762b1c72916STejun Heo 2763b1c72916STejun Heo if (verbose) 2764a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2765b1c72916STejun Heo 2766b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2767b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2768b1c72916STejun Heo false); 2769b1c72916STejun Heo switch (tmp) { 2770b1c72916STejun Heo case -EAGAIN: 2771b1c72916STejun Heo rc = -EAGAIN; 2772b1c72916STejun Heo case 0: 2773b1c72916STejun Heo break; 2774b1c72916STejun Heo default: 2775b1c72916STejun Heo failed_link = slave; 2776b1c72916STejun Heo rc = tmp; 2777b1c72916STejun Heo goto fail; 2778b1c72916STejun Heo } 2779b1c72916STejun Heo } 2780b1c72916STejun Heo 2781b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2782c6fd2807SJeff Garzik if (reset == hardreset && 2783e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2784c6fd2807SJeff Garzik reset = softreset; 2785c6fd2807SJeff Garzik 2786c6fd2807SJeff Garzik if (!reset) { 2787a9a79dfeSJoe Perches ata_link_err(link, 2788a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2789b1c72916STejun Heo failed_link = link; 2790fccb6ea5STejun Heo rc = -EINVAL; 279108cf69d0STejun Heo goto fail; 2792c6fd2807SJeff Garzik } 2793c6fd2807SJeff Garzik 2794cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2795b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2796fe2c4d01STejun Heo if (rc) { 2797fe2c4d01STejun Heo failed_link = link; 2798fe2c4d01STejun Heo goto fail; 2799fe2c4d01STejun Heo } 2800c6fd2807SJeff Garzik } 2801932648b0STejun Heo } else { 2802932648b0STejun Heo if (verbose) 2803a9a79dfeSJoe Perches ata_link_info(link, 2804a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2805932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2806932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2807932648b0STejun Heo } 2808008a7896STejun Heo 2809932648b0STejun Heo /* 2810932648b0STejun Heo * Post-reset processing 2811932648b0STejun Heo */ 28121eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2813416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2814416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2815416dc9edSTejun Heo * drives from sleeping mode. 
2816c6fd2807SJeff Garzik */ 2817f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2818054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2819c6fd2807SJeff Garzik 28203b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 28213b761d3dSTejun Heo continue; 28223b761d3dSTejun Heo 28234ccd3329STejun Heo /* apply class override */ 2824416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2825ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2826416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2827816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2828ae791c05STejun Heo } 2829ae791c05STejun Heo 2830008a7896STejun Heo /* record current link speed */ 2831936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2832936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2833b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2834b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2835008a7896STejun Heo 2836dc98c32cSTejun Heo /* thaw the port */ 2837dc98c32cSTejun Heo if (ata_is_host_link(link)) 2838dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2839dc98c32cSTejun Heo 2840f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2841f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2842f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2843f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2844f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2845f046519fSTejun Heo * link onlineness and classification result later. 2846f046519fSTejun Heo */ 2847b1c72916STejun Heo if (postreset) { 2848cc0680a5STejun Heo postreset(link, classes); 2849b1c72916STejun Heo if (slave) 2850b1c72916STejun Heo postreset(slave, classes); 2851b1c72916STejun Heo } 2852c6fd2807SJeff Garzik 28531e641060STejun Heo /* 28548c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 28558c56caccSTejun Heo * error conditions during reset. Clear accumulated error 28568c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 28578c56caccSTejun Heo * final recovery action and we cross check link onlineness against 28588c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 28591e641060STejun Heo */ 2860f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 28611e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2862b1c72916STejun Heo if (slave) 28631e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 28641e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2865f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2866f046519fSTejun Heo 28678c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 28688c56caccSTejun Heo ata_eh_thaw_port(ap); 28698c56caccSTejun Heo 28703b761d3dSTejun Heo /* 28713b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2872f046519fSTejun Heo * Hotplug could have happened during reset and some 2873f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2874f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 28753b761d3dSTejun Heo * link on/offlineness and classification result, those 28763b761d3dSTejun Heo * conditions can be reliably detected and retried. 
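 *
 * The checks below amount to this small decision table:
 *
 *	link online,  class UNKNOWN -> force class to NONE, count unknown
 *	link offline, class enabled -> clear class to NONE
 *	link unknown, class UNKNOWN -> clear class to NONE
 *
 * and, if classification was requested, any "unknown" hit makes the
 * reset be retried until the retries run out.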
2877f046519fSTejun Heo */ 2878b1c72916STejun Heo nr_unknown = 0; 28791eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 28803b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2881b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2882a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2883f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2884b1c72916STejun Heo nr_unknown++; 2885b1c72916STejun Heo } 28863b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 28873b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2888a9a79dfeSJoe Perches ata_dev_dbg(dev, 2889a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 28903b761d3dSTejun Heo classes[dev->devno]); 28913b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28923b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2893a9a79dfeSJoe Perches ata_dev_dbg(dev, 2894a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 28953b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28963b761d3dSTejun Heo } 2897f046519fSTejun Heo } 2898f046519fSTejun Heo 2899b1c72916STejun Heo if (classify && nr_unknown) { 2900f046519fSTejun Heo if (try < max_tries) { 2901a9a79dfeSJoe Perches ata_link_warn(link, 2902a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 29033b761d3dSTejun Heo nr_unknown); 2904b1c72916STejun Heo failed_link = link; 2905f046519fSTejun Heo rc = -EAGAIN; 2906f046519fSTejun Heo goto fail; 2907f046519fSTejun Heo } 2908a9a79dfeSJoe Perches ata_link_warn(link, 29093b761d3dSTejun Heo "link online but %d devices misclassified, " 29103b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2911f046519fSTejun Heo } 2912f046519fSTejun Heo 2913c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2914cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2915b1c72916STejun Heo if (slave) 2916b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 291719b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2918c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 29196b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2920416dc9edSTejun Heo 2921416dc9edSTejun Heo rc = 0; 2922fccb6ea5STejun Heo out: 2923fccb6ea5STejun Heo /* clear hotplug flag */ 2924fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2925b1c72916STejun Heo if (slave) 2926b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2927afaa5c37STejun Heo 2928afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2929afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2930afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2931afaa5c37STejun Heo 2932c6fd2807SJeff Garzik return rc; 2933416dc9edSTejun Heo 2934416dc9edSTejun Heo fail: 29355958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 29365958e302STejun Heo if (!ata_is_host_link(link) && 29375958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 29385958e302STejun Heo rc = -ERESTART; 29395958e302STejun Heo 29407a46c078SGwendal Grignou if (try >= max_tries) { 29418ea7645cSTejun Heo /* 29428ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 29438ea7645cSTejun Heo * can be retried on the next phy event. This risks 29448ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 29458ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
29468ea7645cSTejun Heo */ 29478ea7645cSTejun Heo if (ata_is_host_link(link)) 29488ea7645cSTejun Heo ata_eh_thaw_port(ap); 2949416dc9edSTejun Heo goto out; 29508ea7645cSTejun Heo } 2951416dc9edSTejun Heo 2952416dc9edSTejun Heo now = jiffies; 2953416dc9edSTejun Heo if (time_before(now, deadline)) { 2954416dc9edSTejun Heo unsigned long delta = deadline - now; 2955416dc9edSTejun Heo 2956a9a79dfeSJoe Perches ata_link_warn(failed_link, 29570a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 29580a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2959416dc9edSTejun Heo 2960c0c362b6STejun Heo ata_eh_release(ap); 2961416dc9edSTejun Heo while (delta) 2962416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2963c0c362b6STejun Heo ata_eh_acquire(ap); 2964416dc9edSTejun Heo } 2965416dc9edSTejun Heo 29667a46c078SGwendal Grignou /* 29677a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 29687a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 29697a46c078SGwendal Grignou */ 29707a46c078SGwendal Grignou if (rc == -ERESTART) { 29717a46c078SGwendal Grignou if (ata_is_host_link(link)) 29727a46c078SGwendal Grignou ata_eh_thaw_port(ap); 29737a46c078SGwendal Grignou goto out; 29747a46c078SGwendal Grignou } 29757a46c078SGwendal Grignou 2976b1c72916STejun Heo if (try == max_tries - 1) { 2977a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2978b1c72916STejun Heo if (slave) 2979a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2980b1c72916STejun Heo } else if (rc == -EPIPE) 2981a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2982b1c72916STejun Heo 2983416dc9edSTejun Heo if (hardreset) 2984416dc9edSTejun Heo reset = hardreset; 2985416dc9edSTejun Heo goto retry; 2986c6fd2807SJeff Garzik } 2987c6fd2807SJeff Garzik 298845fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 298945fabbb7SElias Oltmanns { 299045fabbb7SElias Oltmanns struct ata_link *link; 299145fabbb7SElias Oltmanns struct ata_device *dev; 299245fabbb7SElias Oltmanns unsigned long flags; 299345fabbb7SElias Oltmanns 299445fabbb7SElias Oltmanns /* 299545fabbb7SElias Oltmanns * This function can be thought of as an extended version of 299645fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 299745fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 299845fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 299945fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 300045fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 300145fabbb7SElias Oltmanns * up park requests to other devices on the same port or 300245fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 300345fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 300445fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 300545fabbb7SElias Oltmanns * 300645fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 300745fabbb7SElias Oltmanns * through INIT_COMPLETION() (see below) or complete_all() 300845fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 300945fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 301045fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 301145fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 301245fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 301345fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 301445fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 301545fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 301645fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 301745fabbb7SElias Oltmanns * ata_eh_recover() again. 301845fabbb7SElias Oltmanns */ 301945fabbb7SElias Oltmanns 302045fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 302145fabbb7SElias Oltmanns INIT_COMPLETION(ap->park_req_pending); 30221eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 30231eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 302445fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 302545fabbb7SElias Oltmanns 302645fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 302745fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 302845fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 302945fabbb7SElias Oltmanns } 303045fabbb7SElias Oltmanns } 303145fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 303245fabbb7SElias Oltmanns } 303345fabbb7SElias Oltmanns 303445fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 303545fabbb7SElias Oltmanns { 303645fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 303745fabbb7SElias Oltmanns struct ata_taskfile tf; 303845fabbb7SElias Oltmanns unsigned int err_mask; 303945fabbb7SElias Oltmanns 304045fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 304145fabbb7SElias Oltmanns if (park) { 304245fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 304345fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 304445fabbb7SElias Oltmanns tf.feature = 0x44; 304545fabbb7SElias Oltmanns tf.lbal = 0x4c; 304645fabbb7SElias Oltmanns tf.lbam = 0x4e; 304745fabbb7SElias Oltmanns tf.lbah = 0x55; 304845fabbb7SElias Oltmanns } else { 304945fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 305045fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 305145fabbb7SElias Oltmanns } 305245fabbb7SElias Oltmanns 305345fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 305445fabbb7SElias Oltmanns tf.protocol |= ATA_PROT_NODATA; 305545fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 305645fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 3057a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 305845fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 305945fabbb7SElias Oltmanns } 306045fabbb7SElias Oltmanns } 306145fabbb7SElias Oltmanns 30620260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 3063c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 3064c6fd2807SJeff Garzik { 30650260731fSTejun Heo struct ata_port *ap = link->ap; 30660260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3067c6fd2807SJeff Garzik struct ata_device *dev; 30688c3c52a8STejun Heo unsigned int new_mask = 0; 3069c6fd2807SJeff Garzik unsigned long flags; 3070f58229f8STejun Heo int rc = 0; 3071c6fd2807SJeff Garzik 3072c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3073c6fd2807SJeff Garzik 30748c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY 
must 30758c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 30768c3c52a8STejun Heo * device before the master device is identified. 30778c3c52a8STejun Heo */ 30781eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 3079f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 3080f58229f8STejun Heo unsigned int readid_flags = 0; 3081c6fd2807SJeff Garzik 3082bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 3083bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 3084bff04647STejun Heo 30859666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 3086633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 3087633273a3STejun Heo 3088b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 3089c6fd2807SJeff Garzik rc = -EIO; 30908c3c52a8STejun Heo goto err; 3091c6fd2807SJeff Garzik } 3092c6fd2807SJeff Garzik 30930260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 3094422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 3095422c9daaSTejun Heo readid_flags); 3096c6fd2807SJeff Garzik if (rc) 30978c3c52a8STejun Heo goto err; 3098c6fd2807SJeff Garzik 30990260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 3100c6fd2807SJeff Garzik 3101baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 3102baa1e78aSTejun Heo * transfer mode. 3103baa1e78aSTejun Heo */ 3104baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3105baa1e78aSTejun Heo 3106c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 3107ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 3108c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 3109c6fd2807SJeff Garzik ehc->tries[dev->devno] && 3110c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3111842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3112842faa6cSTejun Heo * permanently set once all configurations are 3113842faa6cSTejun Heo * complete. This is necessary because new 3114842faa6cSTejun Heo * device configuration is done in two 3115842faa6cSTejun Heo * separate loops. 3116842faa6cSTejun Heo */ 3117c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3118c6fd2807SJeff Garzik 3119633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3120633273a3STejun Heo rc = sata_pmp_attach(dev); 3121633273a3STejun Heo else 3122633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3123633273a3STejun Heo readid_flags, dev->id); 3124842faa6cSTejun Heo 3125842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3126842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3127842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3128842faa6cSTejun Heo 31298c3c52a8STejun Heo switch (rc) { 31308c3c52a8STejun Heo case 0: 313199cf610aSTejun Heo /* clear error info accumulated during probe */ 313299cf610aSTejun Heo ata_ering_clear(&dev->ering); 3133f58229f8STejun Heo new_mask |= 1 << dev->devno; 31348c3c52a8STejun Heo break; 31358c3c52a8STejun Heo case -ENOENT: 313655a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 313755a8e2c8STejun Heo * device. No need to reset. Just 3138842faa6cSTejun Heo * thaw and ignore the device. 
313955a8e2c8STejun Heo */ 314055a8e2c8STejun Heo ata_eh_thaw_port(ap); 3141c6fd2807SJeff Garzik break; 31428c3c52a8STejun Heo default: 31438c3c52a8STejun Heo goto err; 31448c3c52a8STejun Heo } 31458c3c52a8STejun Heo } 3146c6fd2807SJeff Garzik } 3147c6fd2807SJeff Garzik 3148c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 314933267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 315033267325STejun Heo if (ap->ops->cable_detect) 3151c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 315233267325STejun Heo ata_force_cbl(ap); 315333267325STejun Heo } 3154c1c4e8d5STejun Heo 31558c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 31568c3c52a8STejun Heo * device detection messages backwards. 31578c3c52a8STejun Heo */ 31581eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 31594f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 31608c3c52a8STejun Heo continue; 31618c3c52a8STejun Heo 3162842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3163842faa6cSTejun Heo 31644f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 31654f7c2874STejun Heo continue; 31664f7c2874STejun Heo 31678c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 31688c3c52a8STejun Heo rc = ata_dev_configure(dev); 31698c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3170842faa6cSTejun Heo if (rc) { 3171842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 31728c3c52a8STejun Heo goto err; 3173842faa6cSTejun Heo } 31748c3c52a8STejun Heo 3175c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3176c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3177c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3178baa1e78aSTejun Heo 317955a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3180baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3181c6fd2807SJeff Garzik } 3182c6fd2807SJeff Garzik 31838c3c52a8STejun Heo return 0; 31848c3c52a8STejun Heo 31858c3c52a8STejun Heo err: 3186c6fd2807SJeff Garzik *r_failed_dev = dev; 31878c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3188c6fd2807SJeff Garzik return rc; 3189c6fd2807SJeff Garzik } 3190c6fd2807SJeff Garzik 31916f1d1e3aSTejun Heo /** 31926f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 31936f1d1e3aSTejun Heo * @link: link on which timings will be programmed 319498a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 31956f1d1e3aSTejun Heo * 31966f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 31976f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 31986f1d1e3aSTejun Heo * returned in @r_failed_dev. 31996f1d1e3aSTejun Heo * 32006f1d1e3aSTejun Heo * LOCKING: 32016f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
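 *
 * A rough usage sketch from EH context (local names are illustrative):
 *
 *	struct ata_device *failed_dev;
 *
 *	if (ata_set_mode(link, &failed_dev))
 *		handle_failure(failed_dev);	// e.g. speed down and retry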
32026f1d1e3aSTejun Heo * 32036f1d1e3aSTejun Heo * RETURNS: 32046f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 32056f1d1e3aSTejun Heo */ 32066f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 32076f1d1e3aSTejun Heo { 32086f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 320900115e0fSTejun Heo struct ata_device *dev; 321000115e0fSTejun Heo int rc; 32116f1d1e3aSTejun Heo 321276326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 32131eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 321476326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 321576326ac1STejun Heo struct ata_ering_entry *ent; 321676326ac1STejun Heo 321776326ac1STejun Heo ent = ata_ering_top(&dev->ering); 321876326ac1STejun Heo if (ent) 321976326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 322076326ac1STejun Heo } 322176326ac1STejun Heo } 322276326ac1STejun Heo 32236f1d1e3aSTejun Heo /* has private set_mode? */ 32246f1d1e3aSTejun Heo if (ap->ops->set_mode) 322500115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 322600115e0fSTejun Heo else 322700115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 322800115e0fSTejun Heo 322900115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 32301eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 323100115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 323200115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 323300115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 323400115e0fSTejun Heo 323500115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 323600115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 323700115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 323800115e0fSTejun Heo } 323900115e0fSTejun Heo 324000115e0fSTejun Heo return rc; 32416f1d1e3aSTejun Heo } 32426f1d1e3aSTejun Heo 324311fc33daSTejun Heo /** 324411fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 324511fc33daSTejun Heo * @dev: ATAPI device to clear UA for 324611fc33daSTejun Heo * 324711fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 324811fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 324911fc33daSTejun Heo * function clears UA. 325011fc33daSTejun Heo * 325111fc33daSTejun Heo * LOCKING: 325211fc33daSTejun Heo * EH context (may sleep). 325311fc33daSTejun Heo * 325411fc33daSTejun Heo * RETURNS: 325511fc33daSTejun Heo * 0 on success, -errno on failure. 
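 *
 * In outline, the loop below does, for up to ATA_EH_UA_TRIES attempts:
 *
 *	issue TEST UNIT READY (atapi_eh_tur)
 *	- non-device error              -> give up with -EIO
 *	- success, or sense key not UA  -> done, return 0
 *	- otherwise issue REQUEST SENSE to consume the UNIT ATTENTION
 *	  and try again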
325611fc33daSTejun Heo */ 325711fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 325811fc33daSTejun Heo { 325911fc33daSTejun Heo int i; 326011fc33daSTejun Heo 326111fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3262b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 326311fc33daSTejun Heo u8 sense_key = 0; 326411fc33daSTejun Heo unsigned int err_mask; 326511fc33daSTejun Heo 326611fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 326711fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3268a9a79dfeSJoe Perches ata_dev_warn(dev, 3269a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3270a9a79dfeSJoe Perches err_mask); 327111fc33daSTejun Heo return -EIO; 327211fc33daSTejun Heo } 327311fc33daSTejun Heo 327411fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 327511fc33daSTejun Heo return 0; 327611fc33daSTejun Heo 327711fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 327811fc33daSTejun Heo if (err_mask) { 3279a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 328011fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 328111fc33daSTejun Heo return -EIO; 328211fc33daSTejun Heo } 328311fc33daSTejun Heo } 328411fc33daSTejun Heo 3285a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3286a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 328711fc33daSTejun Heo 328811fc33daSTejun Heo return 0; 328911fc33daSTejun Heo } 329011fc33daSTejun Heo 32916013efd8STejun Heo /** 32926013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 32936013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 32946013efd8STejun Heo * 32956013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 32966013efd8STejun Heo * immediately as it means that @dev failed to remap and already 32976013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 32986013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 32996013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 33006013efd8STejun Heo * to be retried. 33016013efd8STejun Heo * 33026013efd8STejun Heo * This function determines whether FLUSH failure retry is 33036013efd8STejun Heo * necessary and performs it if so. 33046013efd8STejun Heo * 33056013efd8STejun Heo * RETURNS: 33066013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 33076013efd8STejun Heo */ 33086013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 33096013efd8STejun Heo { 33106013efd8STejun Heo struct ata_link *link = dev->link; 33116013efd8STejun Heo struct ata_port *ap = link->ap; 33126013efd8STejun Heo struct ata_queued_cmd *qc; 33136013efd8STejun Heo struct ata_taskfile tf; 33146013efd8STejun Heo unsigned int err_mask; 33156013efd8STejun Heo int rc = 0; 33166013efd8STejun Heo 33176013efd8STejun Heo /* did flush fail for this device? 
*/ 33186013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 33196013efd8STejun Heo return 0; 33206013efd8STejun Heo 33216013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 33226013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 33236013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 33246013efd8STejun Heo return 0; 33256013efd8STejun Heo 33266013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 33276013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 33286013efd8STejun Heo return 0; 33296013efd8STejun Heo 33306013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 33316013efd8STejun Heo ata_tf_init(dev, &tf); 33326013efd8STejun Heo 33336013efd8STejun Heo tf.command = qc->tf.command; 33346013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 33356013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 33366013efd8STejun Heo 3337a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 33386013efd8STejun Heo tf.command, qc->err_mask); 33396013efd8STejun Heo 33406013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 33416013efd8STejun Heo if (!err_mask) { 33426013efd8STejun Heo /* 33436013efd8STejun Heo * FLUSH is complete but there's no way to 33446013efd8STejun Heo * successfully complete a failed command from EH. 33456013efd8STejun Heo * Making sure retry is allowed at least once and 33466013efd8STejun Heo * retrying it should do the trick - whatever was in 33476013efd8STejun Heo * the cache is already on the platter and this won't 33486013efd8STejun Heo * cause infinite loop. 33496013efd8STejun Heo */ 33506013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 33516013efd8STejun Heo } else { 3352a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 33536013efd8STejun Heo err_mask); 33546013efd8STejun Heo rc = -EIO; 33556013efd8STejun Heo 33566013efd8STejun Heo /* if device failed it, report it to upper layers */ 33576013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 33586013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 33596013efd8STejun Heo qc->result_tf = tf; 33606013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 33616013efd8STejun Heo rc = 0; 33626013efd8STejun Heo } 33636013efd8STejun Heo } 33646013efd8STejun Heo return rc; 33656013efd8STejun Heo } 33666013efd8STejun Heo 33676b7ae954STejun Heo /** 33686b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 33696b7ae954STejun Heo * @link: link to configure power management 33706b7ae954STejun Heo * @policy: the link power management policy 33716b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 33726b7ae954STejun Heo * 33736b7ae954STejun Heo * Enable SATA Interface power management. This will enable 33746b7ae954STejun Heo * Device Interface Power Management (DIPM) for min_power 33756b7ae954STejun Heo * policy, and then call driver specific callbacks for 33766b7ae954STejun Heo * enabling Host Initiated Power management. 33776b7ae954STejun Heo * 33786b7ae954STejun Heo * LOCKING: 33796b7ae954STejun Heo * EH context. 33806b7ae954STejun Heo * 33816b7ae954STejun Heo * RETURNS: 33826b7ae954STejun Heo * 0 on success, -errno on failure. 33836b7ae954STejun Heo */ 33846b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 33856b7ae954STejun Heo struct ata_device **r_failed_dev) 33866b7ae954STejun Heo { 33876c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? 
link->ap : NULL; 33886b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 33896b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3390e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 33915f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 33926b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 33936b7ae954STejun Heo unsigned int err_mask; 33946b7ae954STejun Heo int rc; 33956b7ae954STejun Heo 33966b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 33976b7ae954STejun Heo if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 33986b7ae954STejun Heo return 0; 33996b7ae954STejun Heo 34006b7ae954STejun Heo /* 34016b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 34026b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 34036b7ae954STejun Heo * device and link configurations such that the host always 34046b7ae954STejun Heo * allows DIPM requests. 34056b7ae954STejun Heo */ 34066b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 34076b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3408ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 34096b7ae954STejun Heo 34106b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 34116b7ae954STejun Heo if (!link_dev) 34126b7ae954STejun Heo link_dev = dev; 34136b7ae954STejun Heo 34146b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 34156b7ae954STejun Heo lpm_dev = dev; 34166b7ae954STejun Heo 34176b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 34186b7ae954STejun Heo if (!hipm) 34196b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 34206b7ae954STejun Heo 34216b7ae954STejun Heo /* disable DIPM before changing link config */ 34226b7ae954STejun Heo if (policy != ATA_LPM_MIN_POWER && dipm) { 34236b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 34246b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 34256b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3426a9a79dfeSJoe Perches ata_dev_warn(dev, 34276b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 34286b7ae954STejun Heo err_mask); 34296b7ae954STejun Heo rc = -EIO; 34306b7ae954STejun Heo goto fail; 34316b7ae954STejun Heo } 34326b7ae954STejun Heo } 34336b7ae954STejun Heo } 34346b7ae954STejun Heo 34356c8ea89cSTejun Heo if (ap) { 34366b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 34376b7ae954STejun Heo if (!rc && ap->slave_link) 34386b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 34396c8ea89cSTejun Heo } else 34406c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 34416b7ae954STejun Heo 34426b7ae954STejun Heo /* 34436b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 34446b7ae954STejun Heo * device on the link. 34456b7ae954STejun Heo */ 34466b7ae954STejun Heo if (rc) { 34476b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 34486b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 34496b7ae954STejun Heo return 0; 34506b7ae954STejun Heo } 34516b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 34526b7ae954STejun Heo goto fail; 34536b7ae954STejun Heo } 34546b7ae954STejun Heo 3455e5005b15STejun Heo /* 3456e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3457e5005b15STejun Heo * with the new policy set. 
3458e5005b15STejun Heo */ 3459e5005b15STejun Heo link->lpm_policy = policy; 3460e5005b15STejun Heo if (ap && ap->slave_link) 3461e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3462e5005b15STejun Heo 34636b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 34646b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3465ae01b249STejun Heo if (policy == ATA_LPM_MIN_POWER && !no_dipm && 3466ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 34676b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 34686b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 34696b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3470a9a79dfeSJoe Perches ata_dev_warn(dev, 34716b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 34726b7ae954STejun Heo err_mask); 34736b7ae954STejun Heo rc = -EIO; 34746b7ae954STejun Heo goto fail; 34756b7ae954STejun Heo } 34766b7ae954STejun Heo } 34776b7ae954STejun Heo } 34786b7ae954STejun Heo 34796b7ae954STejun Heo return 0; 34806b7ae954STejun Heo 34816b7ae954STejun Heo fail: 3482e5005b15STejun Heo /* restore the old policy */ 3483e5005b15STejun Heo link->lpm_policy = old_policy; 3484e5005b15STejun Heo if (ap && ap->slave_link) 3485e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3486e5005b15STejun Heo 34876b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 34886b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3489a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 34906b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 34916b7ae954STejun Heo } 34926b7ae954STejun Heo if (r_failed_dev) 34936b7ae954STejun Heo *r_failed_dev = dev; 34946b7ae954STejun Heo return rc; 34956b7ae954STejun Heo } 34966b7ae954STejun Heo 34978a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3498c6fd2807SJeff Garzik { 3499f58229f8STejun Heo struct ata_device *dev; 3500f58229f8STejun Heo int cnt = 0; 3501c6fd2807SJeff Garzik 35021eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3503c6fd2807SJeff Garzik cnt++; 3504c6fd2807SJeff Garzik return cnt; 3505c6fd2807SJeff Garzik } 3506c6fd2807SJeff Garzik 35070260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3508c6fd2807SJeff Garzik { 3509f58229f8STejun Heo struct ata_device *dev; 3510f58229f8STejun Heo int cnt = 0; 3511c6fd2807SJeff Garzik 35121eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3513f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3514c6fd2807SJeff Garzik cnt++; 3515c6fd2807SJeff Garzik return cnt; 3516c6fd2807SJeff Garzik } 3517c6fd2807SJeff Garzik 35180260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3519c6fd2807SJeff Garzik { 3520672b2d65STejun Heo struct ata_port *ap = link->ap; 35210260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3522f58229f8STejun Heo struct ata_device *dev; 3523c6fd2807SJeff Garzik 3524f9df58cbSTejun Heo /* skip disabled links */ 3525f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3526f9df58cbSTejun Heo return 1; 3527f9df58cbSTejun Heo 3528e2f3d75fSTejun Heo /* skip if explicitly requested */ 3529e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3530e2f3d75fSTejun Heo return 1; 3531e2f3d75fSTejun Heo 3532672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3533672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3534672b2d65STejun Heo return 0; 3535672b2d65STejun Heo 3536672b2d65STejun Heo /* reset at least once if reset is requested 
*/ 3537672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3538672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3539c6fd2807SJeff Garzik return 0; 3540c6fd2807SJeff Garzik 3541c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 35421eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3543c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3544c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3545c6fd2807SJeff Garzik return 0; 3546c6fd2807SJeff Garzik } 3547c6fd2807SJeff Garzik 3548c6fd2807SJeff Garzik return 1; 3549c6fd2807SJeff Garzik } 3550c6fd2807SJeff Garzik 3551c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3552c2c7a89cSTejun Heo { 3553c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3554c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3555c2c7a89cSTejun Heo int *trials = void_arg; 3556c2c7a89cSTejun Heo 35576868225eSLin Ming if ((ent->eflags & ATA_EFLAG_OLD_ER) || 35586868225eSLin Ming (ent->timestamp < now - min(now, interval))) 3559c2c7a89cSTejun Heo return -1; 3560c2c7a89cSTejun Heo 3561c2c7a89cSTejun Heo (*trials)++; 3562c2c7a89cSTejun Heo return 0; 3563c2c7a89cSTejun Heo } 3564c2c7a89cSTejun Heo 356502c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 356602c05a27STejun Heo { 356702c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3568c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3569c2c7a89cSTejun Heo int trials = 0; 357002c05a27STejun Heo 357102c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 357202c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 357302c05a27STejun Heo return 0; 357402c05a27STejun Heo 357502c05a27STejun Heo ata_eh_detach_dev(dev); 357602c05a27STejun Heo ata_dev_init(dev); 357702c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3578cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 357900115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 358000115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 358102c05a27STejun Heo 35826b7ae954STejun Heo /* the link maybe in a deep sleep, wake it up */ 35836c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 35846c8ea89cSTejun Heo if (ata_is_host_link(link)) 35856c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 35866c8ea89cSTejun Heo ATA_LPM_EMPTY); 35876c8ea89cSTejun Heo else 35886c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 35896c8ea89cSTejun Heo ATA_LPM_EMPTY); 35906c8ea89cSTejun Heo } 35916b7ae954STejun Heo 3592c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3593c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3594c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3595c2c7a89cSTejun Heo * there are consecutive failed probes. 3596c2c7a89cSTejun Heo * 3597c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3598c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3599c2c7a89cSTejun Heo * forced to 1.5Gbps. 3600c2c7a89cSTejun Heo * 3601c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3602c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3603c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 
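 * (ata_count_probe_trials_cb() above stops counting at entries that
 * are marked ATA_EFLAG_OLD_ER or are older than one trial interval,
 * so only recent consecutive failures are added to trials.)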
3604c2c7a89cSTejun Heo */ 3605c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3606c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3607c2c7a89cSTejun Heo 3608c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3609c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3610c2c7a89cSTejun Heo 361102c05a27STejun Heo return 1; 361202c05a27STejun Heo } 361302c05a27STejun Heo 36149b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3615fee7ca72STejun Heo { 36169af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3617fee7ca72STejun Heo 3618cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3619cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3620cf9a590aSTejun Heo */ 3621cf9a590aSTejun Heo if (err != -EAGAIN) 3622fee7ca72STejun Heo ehc->tries[dev->devno]--; 3623fee7ca72STejun Heo 3624fee7ca72STejun Heo switch (err) { 3625fee7ca72STejun Heo case -ENODEV: 3626fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3627fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3628fee7ca72STejun Heo case -EINVAL: 3629fee7ca72STejun Heo /* give it just one more chance */ 3630fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3631fee7ca72STejun Heo case -EIO: 3632d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3633fee7ca72STejun Heo /* This is the last chance, better to slow 3634fee7ca72STejun Heo * down than lose it. 3635fee7ca72STejun Heo */ 3636a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3637d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3638fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3639fee7ca72STejun Heo } 3640fee7ca72STejun Heo } 3641fee7ca72STejun Heo 3642fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3643fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3644fee7ca72STejun Heo ata_dev_disable(dev); 3645fee7ca72STejun Heo 3646fee7ca72STejun Heo /* detach if offline */ 3647b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3648fee7ca72STejun Heo ata_eh_detach_dev(dev); 3649fee7ca72STejun Heo 365002c05a27STejun Heo /* schedule probe if necessary */ 365187fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3652fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 365387fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 365487fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 365587fbc5a0STejun Heo } 36569b1e2658STejun Heo 36579b1e2658STejun Heo return 1; 3658fee7ca72STejun Heo } else { 3659cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 36609b1e2658STejun Heo return 0; 3661fee7ca72STejun Heo } 3662fee7ca72STejun Heo } 3663fee7ca72STejun Heo 3664c6fd2807SJeff Garzik /** 3665c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3666c6fd2807SJeff Garzik * @ap: host port to recover 3667c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3668c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3669c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3670c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 36719b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3672c6fd2807SJeff Garzik * 3673c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3674c6fd2807SJeff Garzik * libata exception handling. 
On entry, actions required to 36759b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 36769b1e2658STejun Heo * link's eh_context. This function executes all the operations 36779b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3678c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3679c6fd2807SJeff Garzik * 3680c6fd2807SJeff Garzik * LOCKING: 3681c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3682c6fd2807SJeff Garzik * 3683c6fd2807SJeff Garzik * RETURNS: 3684c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3685c6fd2807SJeff Garzik */ 3686fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3687c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 36889b1e2658STejun Heo ata_postreset_fn_t postreset, 36899b1e2658STejun Heo struct ata_link **r_failed_link) 3690c6fd2807SJeff Garzik { 36919b1e2658STejun Heo struct ata_link *link; 3692c6fd2807SJeff Garzik struct ata_device *dev; 36936b7ae954STejun Heo int rc, nr_fails; 369445fabbb7SElias Oltmanns unsigned long flags, deadline; 3695c6fd2807SJeff Garzik 3696c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3697c6fd2807SJeff Garzik 3698c6fd2807SJeff Garzik /* prep for recovery */ 36991eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37009b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37019b1e2658STejun Heo 3702f9df58cbSTejun Heo /* re-enable link? */ 3703f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3704f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3705f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3706f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3707f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3708f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3709f9df58cbSTejun Heo } 3710f9df58cbSTejun Heo 37111eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3712fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3713fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3714fd995f70STejun Heo else 3715c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3716c6fd2807SJeff Garzik 371779a55b72STejun Heo /* collect port action mask recorded in dev actions */ 37189b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 37199b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3720f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 372179a55b72STejun Heo 3722c6fd2807SJeff Garzik /* process hotplug request */ 3723c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3724c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3725c6fd2807SJeff Garzik 372602c05a27STejun Heo /* schedule probe if necessary */ 372702c05a27STejun Heo if (!ata_dev_enabled(dev)) 372802c05a27STejun Heo ata_eh_schedule_probe(dev); 3729c6fd2807SJeff Garzik } 37309b1e2658STejun Heo } 3731c6fd2807SJeff Garzik 3732c6fd2807SJeff Garzik retry: 3733c6fd2807SJeff Garzik rc = 0; 3734c6fd2807SJeff Garzik 3735c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3736c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3737c6fd2807SJeff Garzik goto out; 3738c6fd2807SJeff Garzik 37399b1e2658STejun Heo /* prep for EH */ 37401eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37419b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37429b1e2658STejun Heo 3743c6fd2807SJeff Garzik /* skip EH if possible. 
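 * ata_eh_skip_recovery() returns 1 for disabled links, for links with
 * ATA_EHI_NO_RECOVERY set, and for links that have no enabled device,
 * are not frozen, have no outstanding reset request and whose vacant
 * slots are all classed ATA_DEV_NONE.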
*/ 37440260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3745c6fd2807SJeff Garzik ehc->i.action = 0; 3746c6fd2807SJeff Garzik 37471eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3748f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 37499b1e2658STejun Heo } 3750c6fd2807SJeff Garzik 3751c6fd2807SJeff Garzik /* reset */ 37521eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37539b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37549b1e2658STejun Heo 3755cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 37569b1e2658STejun Heo continue; 37579b1e2658STejun Heo 37589b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3759dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3760c6fd2807SJeff Garzik if (rc) { 3761a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3762c6fd2807SJeff Garzik goto out; 3763c6fd2807SJeff Garzik } 37649b1e2658STejun Heo } 3765c6fd2807SJeff Garzik 376645fabbb7SElias Oltmanns do { 376745fabbb7SElias Oltmanns unsigned long now; 376845fabbb7SElias Oltmanns 376945fabbb7SElias Oltmanns /* 377045fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 377145fabbb7SElias Oltmanns * ap->park_req_pending 377245fabbb7SElias Oltmanns */ 377345fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 377445fabbb7SElias Oltmanns 377545fabbb7SElias Oltmanns deadline = jiffies; 37761eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37771eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 377845fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 377945fabbb7SElias Oltmanns unsigned long tmp; 378045fabbb7SElias Oltmanns 378145fabbb7SElias Oltmanns if (dev->class != ATA_DEV_ATA) 378245fabbb7SElias Oltmanns continue; 378345fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 378445fabbb7SElias Oltmanns ATA_EH_PARK)) 378545fabbb7SElias Oltmanns continue; 378645fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 378745fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 378845fabbb7SElias Oltmanns deadline = tmp; 378945fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 379045fabbb7SElias Oltmanns continue; 379145fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 379245fabbb7SElias Oltmanns continue; 379345fabbb7SElias Oltmanns 379445fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 379545fabbb7SElias Oltmanns } 379645fabbb7SElias Oltmanns } 379745fabbb7SElias Oltmanns 379845fabbb7SElias Oltmanns now = jiffies; 379945fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 380045fabbb7SElias Oltmanns break; 380145fabbb7SElias Oltmanns 3802c0c362b6STejun Heo ata_eh_release(ap); 380345fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 380445fabbb7SElias Oltmanns deadline - now); 3805c0c362b6STejun Heo ata_eh_acquire(ap); 380645fabbb7SElias Oltmanns } while (deadline); 38071eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 38081eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 380945fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 381045fabbb7SElias Oltmanns (1 << dev->devno))) 381145fabbb7SElias Oltmanns continue; 381245fabbb7SElias Oltmanns 381345fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 381445fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 381545fabbb7SElias Oltmanns } 381645fabbb7SElias Oltmanns } 381745fabbb7SElias Oltmanns 38189b1e2658STejun Heo /* the rest */ 38196b7ae954STejun Heo nr_fails = 0; 38206b7ae954STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 
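		/* PMP links are visited before the host link.  When a port
		 * multiplier is attached, the host link skips revalidation
		 * and transfer mode setup and only gets its LPM policy
		 * configured at config_lpm below.
		 */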
38219b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 38229b1e2658STejun Heo 38236b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 38246b7ae954STejun Heo goto config_lpm; 38256b7ae954STejun Heo 3826c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 38270260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3828c6fd2807SJeff Garzik if (rc) 38296b7ae954STejun Heo goto rest_fail; 3830c6fd2807SJeff Garzik 3831633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3832633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3833633273a3STejun Heo ehc->i.action = 0; 3834633273a3STejun Heo return 0; 3835633273a3STejun Heo } 3836633273a3STejun Heo 3837baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3838baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 38390260731fSTejun Heo rc = ata_set_mode(link, &dev); 38404ae72a1eSTejun Heo if (rc) 38416b7ae954STejun Heo goto rest_fail; 3842baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3843c6fd2807SJeff Garzik } 3844c6fd2807SJeff Garzik 384511fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 384611fc33daSTejun Heo * disrupting the current users of the device. 384711fc33daSTejun Heo */ 384811fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 38491eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 385011fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 385111fc33daSTejun Heo continue; 385211fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 385311fc33daSTejun Heo if (rc) 38546b7ae954STejun Heo goto rest_fail; 385511fc33daSTejun Heo } 385611fc33daSTejun Heo } 385711fc33daSTejun Heo 38586013efd8STejun Heo /* retry flush if necessary */ 38596013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 38606013efd8STejun Heo if (dev->class != ATA_DEV_ATA) 38616013efd8STejun Heo continue; 38626013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 38636013efd8STejun Heo if (rc) 38646b7ae954STejun Heo goto rest_fail; 38656013efd8STejun Heo } 38666013efd8STejun Heo 38676b7ae954STejun Heo config_lpm: 386811fc33daSTejun Heo /* configure link power saving */ 38696b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 38706b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 38716b7ae954STejun Heo if (rc) 38726b7ae954STejun Heo goto rest_fail; 38736b7ae954STejun Heo } 3874ca77329fSKristen Carlson Accardi 38759b1e2658STejun Heo /* this link is okay now */ 38769b1e2658STejun Heo ehc->i.flags = 0; 38779b1e2658STejun Heo continue; 3878c6fd2807SJeff Garzik 38796b7ae954STejun Heo rest_fail: 38806b7ae954STejun Heo nr_fails++; 38816b7ae954STejun Heo if (dev) 38820a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3883c6fd2807SJeff Garzik 3884b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3885b06ce3e5STejun Heo /* PMP reset requires working host port. 3886b06ce3e5STejun Heo * Can't retry if it's frozen. 
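 * With a PMP attached we give up and return; otherwise we just stop
 * walking the remaining links and let the nr_fails check below decide
 * whether to retry.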
3887b06ce3e5STejun Heo */ 3888071f44b1STejun Heo if (sata_pmp_attached(ap)) 3889b06ce3e5STejun Heo goto out; 38909b1e2658STejun Heo break; 38919b1e2658STejun Heo } 3892b06ce3e5STejun Heo } 38939b1e2658STejun Heo 38946b7ae954STejun Heo if (nr_fails) 3895c6fd2807SJeff Garzik goto retry; 3896c6fd2807SJeff Garzik 3897c6fd2807SJeff Garzik out: 38989b1e2658STejun Heo if (rc && r_failed_link) 38999b1e2658STejun Heo *r_failed_link = link; 3900c6fd2807SJeff Garzik 3901c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3902c6fd2807SJeff Garzik return rc; 3903c6fd2807SJeff Garzik } 3904c6fd2807SJeff Garzik 3905c6fd2807SJeff Garzik /** 3906c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3907c6fd2807SJeff Garzik * @ap: host port to finish EH for 3908c6fd2807SJeff Garzik * 3909c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3910c6fd2807SJeff Garzik * failed qcs. 3911c6fd2807SJeff Garzik * 3912c6fd2807SJeff Garzik * LOCKING: 3913c6fd2807SJeff Garzik * None. 3914c6fd2807SJeff Garzik */ 3915fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3916c6fd2807SJeff Garzik { 3917c6fd2807SJeff Garzik int tag; 3918c6fd2807SJeff Garzik 3919c6fd2807SJeff Garzik /* retry or finish qcs */ 3920c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3921c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3922c6fd2807SJeff Garzik 3923c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3924c6fd2807SJeff Garzik continue; 3925c6fd2807SJeff Garzik 3926c6fd2807SJeff Garzik if (qc->err_mask) { 3927c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3928c6fd2807SJeff Garzik * generate sense data in this function, 3929c6fd2807SJeff Garzik * considering both err_mask and tf. 3930c6fd2807SJeff Garzik */ 393103faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3932c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 393303faab78STejun Heo else 393403faab78STejun Heo ata_eh_qc_complete(qc); 3935c6fd2807SJeff Garzik } else { 3936c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3937c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3938c6fd2807SJeff Garzik } else { 3939c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3940c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3941c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3942c6fd2807SJeff Garzik } 3943c6fd2807SJeff Garzik } 3944c6fd2807SJeff Garzik } 3945da917d69STejun Heo 3946da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3947da917d69STejun Heo WARN_ON(ap->nr_active_links); 3948da917d69STejun Heo ap->nr_active_links = 0; 3949c6fd2807SJeff Garzik } 3950c6fd2807SJeff Garzik 3951c6fd2807SJeff Garzik /** 3952c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3953c6fd2807SJeff Garzik * @ap: host port to handle error for 3954a1efdabaSTejun Heo * 3955c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3956c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3957c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3958c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3959c6fd2807SJeff Garzik * 3960c6fd2807SJeff Garzik * Perform standard error handling sequence. 3961c6fd2807SJeff Garzik * 3962c6fd2807SJeff Garzik * LOCKING: 3963c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
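 * If ata_eh_recover() fails, every device on the host link is
 * disabled before ata_eh_finish() retries or completes the failed
 * commands.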
3964c6fd2807SJeff Garzik */ 3965c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3966c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3967c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3968c6fd2807SJeff Garzik { 39699b1e2658STejun Heo struct ata_device *dev; 39709b1e2658STejun Heo int rc; 39719b1e2658STejun Heo 39729b1e2658STejun Heo ata_eh_autopsy(ap); 39739b1e2658STejun Heo ata_eh_report(ap); 39749b1e2658STejun Heo 39759b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 39769b1e2658STejun Heo NULL); 39779b1e2658STejun Heo if (rc) { 39781eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 39799b1e2658STejun Heo ata_dev_disable(dev); 39809b1e2658STejun Heo } 39819b1e2658STejun Heo 3982c6fd2807SJeff Garzik ata_eh_finish(ap); 3983c6fd2807SJeff Garzik } 3984c6fd2807SJeff Garzik 3985a1efdabaSTejun Heo /** 3986a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3987a1efdabaSTejun Heo * @ap: host port to handle error for 3988a1efdabaSTejun Heo * 3989a1efdabaSTejun Heo * Standard error handler 3990a1efdabaSTejun Heo * 3991a1efdabaSTejun Heo * LOCKING: 3992a1efdabaSTejun Heo * Kernel thread context (may sleep). 3993a1efdabaSTejun Heo */ 3994a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3995a1efdabaSTejun Heo { 3996a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3997a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3998a1efdabaSTejun Heo 399957c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 4000fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 4001a1efdabaSTejun Heo hardreset = NULL; 4002a1efdabaSTejun Heo 4003a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 4004a1efdabaSTejun Heo } 4005a1efdabaSTejun Heo 40066ffa01d8STejun Heo #ifdef CONFIG_PM 4007c6fd2807SJeff Garzik /** 4008c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 4009c6fd2807SJeff Garzik * @ap: port to suspend 4010c6fd2807SJeff Garzik * 4011c6fd2807SJeff Garzik * Suspend @ap. 4012c6fd2807SJeff Garzik * 4013c6fd2807SJeff Garzik * LOCKING: 4014c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4015c6fd2807SJeff Garzik */ 4016c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 4017c6fd2807SJeff Garzik { 4018c6fd2807SJeff Garzik unsigned long flags; 4019c6fd2807SJeff Garzik int rc = 0; 4020c6fd2807SJeff Garzik 4021c6fd2807SJeff Garzik /* are we suspending? 
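 * (suspend proceeds only if a PM request is pending and the target
 * state is not PM_EVENT_ON)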
*/ 4022c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4023c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4024c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) { 4025c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4026c6fd2807SJeff Garzik return; 4027c6fd2807SJeff Garzik } 4028c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4029c6fd2807SJeff Garzik 4030c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 4031c6fd2807SJeff Garzik 403264578a3dSTejun Heo /* tell ACPI we're suspending */ 403364578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 403464578a3dSTejun Heo if (rc) 403564578a3dSTejun Heo goto out; 403664578a3dSTejun Heo 4037c6fd2807SJeff Garzik /* suspend */ 4038c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 4039c6fd2807SJeff Garzik 4040c6fd2807SJeff Garzik if (ap->ops->port_suspend) 4041c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 4042c6fd2807SJeff Garzik 4043bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_SUSPEND); 404464578a3dSTejun Heo out: 4045c6fd2807SJeff Garzik /* report result */ 4046c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4047c6fd2807SJeff Garzik 4048c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 4049c6fd2807SJeff Garzik if (rc == 0) 4050c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 405164578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 4052c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 4053c6fd2807SJeff Garzik 4054c6fd2807SJeff Garzik if (ap->pm_result) { 4055c6fd2807SJeff Garzik *ap->pm_result = rc; 4056c6fd2807SJeff Garzik ap->pm_result = NULL; 4057c6fd2807SJeff Garzik } 4058c6fd2807SJeff Garzik 4059c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4060c6fd2807SJeff Garzik 4061c6fd2807SJeff Garzik return; 4062c6fd2807SJeff Garzik } 4063c6fd2807SJeff Garzik 4064c6fd2807SJeff Garzik /** 4065c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 4066c6fd2807SJeff Garzik * @ap: port to resume 4067c6fd2807SJeff Garzik * 4068c6fd2807SJeff Garzik * Resume @ap. 4069c6fd2807SJeff Garzik * 4070c6fd2807SJeff Garzik * LOCKING: 4071c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4072c6fd2807SJeff Garzik */ 4073c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 4074c6fd2807SJeff Garzik { 40756f9c1ea2STejun Heo struct ata_link *link; 40766f9c1ea2STejun Heo struct ata_device *dev; 4077c6fd2807SJeff Garzik unsigned long flags; 40789666f400STejun Heo int rc = 0; 4079c6fd2807SJeff Garzik 4080c6fd2807SJeff Garzik /* are we resuming? */ 4081c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4082c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4083c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) { 4084c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4085c6fd2807SJeff Garzik return; 4086c6fd2807SJeff Garzik } 4087c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4088c6fd2807SJeff Garzik 40899666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 4090c6fd2807SJeff Garzik 40916f9c1ea2STejun Heo /* 40926f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while 40936f9c1ea2STejun Heo * suspended and PHY events during resume isn't too uncommon. 40946f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed 40956f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly. 40966f9c1ea2STejun Heo * Clear error history. 
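 * ata_ering_clear() below wipes each device's error ring so that the
 * stale entries cannot be counted against the link later on.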
40976f9c1ea2STejun Heo 	 */
40986f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
40996f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
41006f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
41016f9c1ea2STejun Heo 
4102bd3adca5SShaohua Li 	ata_acpi_set_state(ap, PMSG_ON);
4103bd3adca5SShaohua Li 
4104c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
4105c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
4106c6fd2807SJeff Garzik 
41076746544cSTejun Heo 	/* tell ACPI that we're resuming */
41086746544cSTejun Heo 	ata_acpi_on_resume(ap);
41096746544cSTejun Heo 
41109666f400STejun Heo 	/* report result */
4111c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4112c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4113c6fd2807SJeff Garzik 	if (ap->pm_result) {
4114c6fd2807SJeff Garzik 		*ap->pm_result = rc;
4115c6fd2807SJeff Garzik 		ap->pm_result = NULL;
4116c6fd2807SJeff Garzik 	}
4117c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4118c6fd2807SJeff Garzik }
41196ffa01d8STejun Heo #endif /* CONFIG_PM */
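/*
 * Illustrative sketch, not part of this file: a low-level driver normally
 * reaches the EH core above through its ata_port_operations.  The names
 * foo_hardreset and foo_port_ops below are hypothetical; the ops fields,
 * sata_port_ops, sata_std_hardreset() and ata_std_error_handler() are the
 * stock libata symbols used elsewhere in the tree.
 *
 *	static int foo_hardreset(struct ata_link *link, unsigned int *class,
 *				 unsigned long deadline)
 *	{
 *		return sata_std_hardreset(link, class, deadline);
 *	}
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= foo_hardreset,
 *		.error_handler	= ata_std_error_handler,
 *	};
 *
 * Any controller-specific PHY quirk would go in foo_hardreset() before the
 * call to sata_std_hardreset().  ata_std_error_handler() in turn runs
 * ata_do_eh() with these reset methods, which drives ata_eh_recover()
 * defined above.
 */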