// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-eh.c - libata error handling
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>
#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
				 const char *fmt, va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_port_desc);
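
/*
 * Usage example (illustrative sketch only, not called from this file):
 * a low-level driver typically builds the port description while setting
 * up its ata_host, before registration prints it.  The values below are
 * made-up placeholders.
 *
 *	ata_port_desc(ap, "irq %d", 14);
 *	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_addr, ctl_addr);
 *
 * Both strings end up concatenated, separated by " ", in the message
 * printed at host registration time.
 */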

#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only the name and the offset address are appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}
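
/*
 * Illustrative sketch of how the escalating timeouts above are meant to be
 * consumed when issuing an EH internal command (simplified; the real logic
 * lives in ata_exec_internal() in libata-core, and issue_internal_cmd()
 * below is a hypothetical helper):
 *
 *	unsigned long timeout = ata_internal_cmd_timeout(dev, tf->command);
 *	unsigned int err_mask = issue_internal_cmd(dev, tf, timeout);
 *
 *	if (err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, tf->command);
 *
 * On the next try of the same command class the lookup then returns the
 * next, larger value from the class's timeout table.
 */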

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
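
/*
 * Illustrative sketch: ports sharing a host serialize their EH runs with
 * the pair above.  ata_scsi_port_error_handler() below is the main user;
 * the pattern, reduced to its core, is simply:
 *
 *	ata_eh_acquire(ap);
 *	... run recovery for this port, may sleep ...
 *	ata_eh_release(ap);
 *
 * Long sleeps inside EH (e.g. ata_msleep()) may temporarily release and
 * re-acquire ownership so that sibling ports can make progress.
 */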

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	DPRINTK("ENTER\n");

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we raced with normal completion and there is nothing to
	 * recover (nr_timedout == 0), why exactly are we doing error
	 * recovery here?
	 */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));

	DPRINTK("EXIT\n");
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			ata_qc_for_each_raw(ap, qc, i) {
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting ATA_QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		 !(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
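
/*
 * Illustrative sketch: the two exported halves above allow hosts that
 * cannot use ata_scsi_error() directly (e.g. libsas-based drivers) to run
 * the same recovery in two steps from their own SCSI error handler:
 *
 *	ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);
 *	ata_scsi_port_error_handler(shost, ap);
 *
 * ata_scsi_error() itself is essentially these two calls plus splicing
 * host->eh_cmd_q onto a local list.
 */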

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			nr++;
	}

	return nr;
}

void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		struct ata_queued_cmd *qc;
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		ata_qc_for_each(ap, qc, tag) {
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(qc->scsicmd->request);
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock.  In
 * the libsas case we need to hold a lock at the ha level to coordinate
 * these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	struct ata_queued_cmd *qc;
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	/* include internal tag in iteration */
	ata_qc_for_each_with_internal(ap, qc, tag) {
		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
EXPORT_SYMBOL_GPL(ata_link_abort);

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
EXPORT_SYMBOL_GPL(ata_port_abort);
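
/*
 * Illustrative sketch: a typical user of the abort helpers above is a
 * driver interrupt handler reporting a fatal error condition (the SError
 * value below is just an example placeholder):
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
 *	ehi->err_mask |= AC_ERR_HSM;
 *	ehi->action |= ATA_EH_RESET;
 *	ata_port_freeze(ap);	// or ata_port_abort() / ata_link_abort()
 *
 * Freezing both aborts the outstanding qcs and keeps the port quiet until
 * EH thaws it, usually after a successful reset.
 */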

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
EXPORT_SYMBOL_GPL(ata_port_freeze);

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->allowed is incremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask)
		scmd->allowed++;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_warn(dev, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
EXPORT_SYMBOL_GPL(ata_dev_disable);
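
/*
 * Illustrative sketch: once recovery has finished, EH hands the failed
 * commands back through ata_eh_qc_complete()/ata_eh_qc_retry() above,
 * roughly along these lines (mirroring what ata_eh_finish() does):
 *
 *	if (qc->err_mask)
 *		ata_eh_qc_complete(qc);		// report the failure upwards
 *	else
 *		ata_eh_qc_retry(qc);		// innocent victim, retry it
 */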
1233c6fd2807SJeff Garzik */ 1234fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1235c6fd2807SJeff Garzik { 1236f58229f8STejun Heo struct ata_link *link = dev->link; 1237f58229f8STejun Heo struct ata_port *ap = link->ap; 123890484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1239c6fd2807SJeff Garzik unsigned long flags; 1240c6fd2807SJeff Garzik 1241c6fd2807SJeff Garzik ata_dev_disable(dev); 1242c6fd2807SJeff Garzik 1243c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1244c6fd2807SJeff Garzik 1245c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1246c6fd2807SJeff Garzik 1247c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1248c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1249c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1250c6fd2807SJeff Garzik } 1251c6fd2807SJeff Garzik 125290484ebfSTejun Heo /* clear per-dev EH info */ 1253f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1254f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 125590484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 125690484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1257c6fd2807SJeff Garzik 1258c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1259c6fd2807SJeff Garzik } 1260c6fd2807SJeff Garzik 1261c6fd2807SJeff Garzik /** 1262c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1263955e57dfSTejun Heo * @link: target ATA link 1264c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1265c6fd2807SJeff Garzik * @action: action about to be performed 1266c6fd2807SJeff Garzik * 1267c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1268955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1269955e57dfSTejun Heo * repeated. 1270c6fd2807SJeff Garzik * 1271c6fd2807SJeff Garzik * LOCKING: 1272c6fd2807SJeff Garzik * None. 1273c6fd2807SJeff Garzik */ 1274fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1275c6fd2807SJeff Garzik unsigned int action) 1276c6fd2807SJeff Garzik { 1277955e57dfSTejun Heo struct ata_port *ap = link->ap; 1278955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1279955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1280c6fd2807SJeff Garzik unsigned long flags; 1281c6fd2807SJeff Garzik 1282c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1283c6fd2807SJeff Garzik 1284955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1285c6fd2807SJeff Garzik 1286a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1287a568d1d2STejun Heo * slave links as master will do them again. 
1288a568d1d2STejun Heo */ 1289a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1290c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1291c6fd2807SJeff Garzik 1292c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1293c6fd2807SJeff Garzik } 1294c6fd2807SJeff Garzik 1295c6fd2807SJeff Garzik /** 1296c6fd2807SJeff Garzik * ata_eh_done - EH action complete 12972f60e1abSJonathan Corbet * @link: ATA link for which EH actions are complete 1298c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1299c6fd2807SJeff Garzik * @action: action just completed 1300c6fd2807SJeff Garzik * 1301c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1302955e57dfSTejun Heo * in @link->eh_context. 1303c6fd2807SJeff Garzik * 1304c6fd2807SJeff Garzik * LOCKING: 1305c6fd2807SJeff Garzik * None. 1306c6fd2807SJeff Garzik */ 1307fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1308c6fd2807SJeff Garzik unsigned int action) 1309c6fd2807SJeff Garzik { 1310955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13119af5c9c9STejun Heo 1312955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1313c6fd2807SJeff Garzik } 1314c6fd2807SJeff Garzik 1315c6fd2807SJeff Garzik /** 1316c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1317c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1318c6fd2807SJeff Garzik * 1319c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1320c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1321c6fd2807SJeff Garzik * error is reported. 1322c6fd2807SJeff Garzik * 1323c6fd2807SJeff Garzik * LOCKING: 1324c6fd2807SJeff Garzik * None. 1325c6fd2807SJeff Garzik * 1326c6fd2807SJeff Garzik * RETURNS: 1327c6fd2807SJeff Garzik * Descriptive string for @err_mask 1328c6fd2807SJeff Garzik */ 1329c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1330c6fd2807SJeff Garzik { 1331c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1332c6fd2807SJeff Garzik return "host bus error"; 1333c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1334c6fd2807SJeff Garzik return "ATA bus error"; 1335c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1336c6fd2807SJeff Garzik return "timeout"; 1337c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1338c6fd2807SJeff Garzik return "HSM violation"; 1339c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1340c6fd2807SJeff Garzik return "internal error"; 1341c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1342c6fd2807SJeff Garzik return "media error"; 1343c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1344c6fd2807SJeff Garzik return "invalid argument"; 1345c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1346c6fd2807SJeff Garzik return "device error"; 134754fb131bSDamien Le Moal if (err_mask & AC_ERR_NCQ) 134854fb131bSDamien Le Moal return "NCQ error"; 134954fb131bSDamien Le Moal if (err_mask & AC_ERR_NODEV_HINT) 135054fb131bSDamien Le Moal return "Polling detection error"; 1351c6fd2807SJeff Garzik return "unknown error"; 1352c6fd2807SJeff Garzik } 1353c6fd2807SJeff Garzik 1354c6fd2807SJeff Garzik /** 135511fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 135611fc33daSTejun Heo * @dev: target ATAPI device 135711fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 135811fc33daSTejun Heo * 135911fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 
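 *
 * Illustrative caller sketch (hypothetical, for clarity only); the
 * sense key is filled in only when the return value is exactly
 * AC_ERR_DEV, i.e. the device itself rejected the command:
 *
 *	u8 sense_key = 0;
 *	unsigned int err_mask = atapi_eh_tur(dev, &sense_key);
 *
 *	if (err_mask == AC_ERR_DEV)
 *		ata_dev_warn(dev, "TUR failed, sense key 0x%x\n", sense_key);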
136011fc33daSTejun Heo * 136111fc33daSTejun Heo * LOCKING: 136211fc33daSTejun Heo * EH context (may sleep). 136311fc33daSTejun Heo * 136411fc33daSTejun Heo * RETURNS: 136511fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 136611fc33daSTejun Heo */ 13673dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 136811fc33daSTejun Heo { 136911fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 137011fc33daSTejun Heo struct ata_taskfile tf; 137111fc33daSTejun Heo unsigned int err_mask; 137211fc33daSTejun Heo 137311fc33daSTejun Heo ata_tf_init(dev, &tf); 137411fc33daSTejun Heo 137511fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 137611fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 137711fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 137811fc33daSTejun Heo 137911fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 138011fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 138111fc33daSTejun Heo *r_sense_key = tf.feature >> 4; 138211fc33daSTejun Heo return err_mask; 138311fc33daSTejun Heo } 138411fc33daSTejun Heo 138511fc33daSTejun Heo /** 1386e87fd28cSHannes Reinecke * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT 13872f60e1abSJonathan Corbet * @qc: qc to perform REQUEST_SENSE_DATA_EXT to 1388e87fd28cSHannes Reinecke * @cmd: scsi command for which the sense code should be set 1389e87fd28cSHannes Reinecke * 1390e87fd28cSHannes Reinecke * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK 1391e87fd28cSHannes Reinecke * SENSE. This function is an EH helper. 1392e87fd28cSHannes Reinecke * 1393e87fd28cSHannes Reinecke * LOCKING: 1394e87fd28cSHannes Reinecke * Kernel thread context (may sleep). 1395e87fd28cSHannes Reinecke */ 1396e87fd28cSHannes Reinecke static void ata_eh_request_sense(struct ata_queued_cmd *qc, 1397e87fd28cSHannes Reinecke struct scsi_cmnd *cmd) 1398e87fd28cSHannes Reinecke { 1399e87fd28cSHannes Reinecke struct ata_device *dev = qc->dev; 1400e87fd28cSHannes Reinecke struct ata_taskfile tf; 1401e87fd28cSHannes Reinecke unsigned int err_mask; 1402e87fd28cSHannes Reinecke 1403e87fd28cSHannes Reinecke if (qc->ap->pflags & ATA_PFLAG_FROZEN) { 1404e87fd28cSHannes Reinecke ata_dev_warn(dev, "sense data available but port frozen\n"); 1405e87fd28cSHannes Reinecke return; 1406e87fd28cSHannes Reinecke } 1407e87fd28cSHannes Reinecke 1408d238ffd5SHannes Reinecke if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID) 1409e87fd28cSHannes Reinecke return; 1410e87fd28cSHannes Reinecke 1411e87fd28cSHannes Reinecke if (!ata_id_sense_reporting_enabled(dev->id)) { 1412e87fd28cSHannes Reinecke ata_dev_warn(qc->dev, "sense data reporting disabled\n"); 1413e87fd28cSHannes Reinecke return; 1414e87fd28cSHannes Reinecke } 1415e87fd28cSHannes Reinecke 1416e87fd28cSHannes Reinecke DPRINTK("ATA request sense\n"); 1417e87fd28cSHannes Reinecke 1418e87fd28cSHannes Reinecke ata_tf_init(dev, &tf); 1419e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1420e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1421e87fd28cSHannes Reinecke tf.command = ATA_CMD_REQ_SENSE_DATA; 1422e87fd28cSHannes Reinecke tf.protocol = ATA_PROT_NODATA; 1423e87fd28cSHannes Reinecke 1424e87fd28cSHannes Reinecke err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1425e87fd28cSHannes Reinecke /* Ignore err_mask; ATA_ERR might be set */ 1426e87fd28cSHannes Reinecke if (tf.command & ATA_SENSE) { 142706dbde5fSHannes Reinecke ata_scsi_set_sense(dev, cmd, tf.lbah,
tf.lbam, tf.lbal); 1428e87fd28cSHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 1429e87fd28cSHannes Reinecke } else { 1430e87fd28cSHannes Reinecke ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", 1431e87fd28cSHannes Reinecke tf.command, err_mask); 1432e87fd28cSHannes Reinecke } 1433e87fd28cSHannes Reinecke } 1434e87fd28cSHannes Reinecke 1435e87fd28cSHannes Reinecke /** 1436c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1437c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1438c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14393eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1440c6fd2807SJeff Garzik * 1441c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1442c6fd2807SJeff Garzik * SENSE. This function is an EH helper. 1443c6fd2807SJeff Garzik * 1444c6fd2807SJeff Garzik * LOCKING: 1445c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1446c6fd2807SJeff Garzik * 1447c6fd2807SJeff Garzik * RETURNS: 1448c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1449c6fd2807SJeff Garzik */ 14503dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev, 14513eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1452c6fd2807SJeff Garzik { 14533eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 14543eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 14559af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1456c6fd2807SJeff Garzik struct ata_taskfile tf; 1457c6fd2807SJeff Garzik 1458c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1459c6fd2807SJeff Garzik 1460c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1461c6fd2807SJeff Garzik 146256287768SAlbert Lee /* initialize sense_buf with the error register, 146356287768SAlbert Lee * for the case where they are -not- overwritten 146456287768SAlbert Lee */ 1465c6fd2807SJeff Garzik sense_buf[0] = 0x70; 14663eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 146756287768SAlbert Lee 146856287768SAlbert Lee /* some devices time out if garbage left in tf */ 146956287768SAlbert Lee ata_tf_init(dev, &tf); 1470c6fd2807SJeff Garzik 1471c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1472c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1473c6fd2807SJeff Garzik 1474c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1475c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 14760dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1477c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1478c6fd2807SJeff Garzik } else { 14790dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1480f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1481f2dfc1a1STejun Heo tf.lbah = 0; 1482c6fd2807SJeff Garzik } 1483c6fd2807SJeff Garzik 1484c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 14852b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1486c6fd2807SJeff Garzik } 1487c6fd2807SJeff Garzik 1488c6fd2807SJeff Garzik /** 1489c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 14900260731fSTejun Heo * @link: ATA link to analyze SError for 1491c6fd2807SJeff Garzik * 1492c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1493c6fd2807SJeff Garzik * failure. 1494c6fd2807SJeff Garzik * 1495c6fd2807SJeff Garzik * LOCKING: 1496c6fd2807SJeff Garzik * None.
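 *
 * Worked example of the mapping below (illustrative only, assuming an
 * enabled host link with lpm_policy not above ATA_LPM_MAX_POWER): an
 * SError with SERR_PROTOCOL and SERR_PHYRDY_CHG set adds AC_ERR_HSM to
 * the error mask, requests ATA_EH_RESET, and is additionally treated
 * as a hotplug event via ata_ehi_hotplugged().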
1497c6fd2807SJeff Garzik */ 14980260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1499c6fd2807SJeff Garzik { 15000260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1501c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1502c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1503f9df58cbSTejun Heo u32 hotplug_mask; 1504c6fd2807SJeff Garzik 1505e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1506c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1507cf480626STejun Heo action |= ATA_EH_RESET; 1508c6fd2807SJeff Garzik } 1509c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1510c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1511cf480626STejun Heo action |= ATA_EH_RESET; 1512c6fd2807SJeff Garzik } 1513c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1514c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1515cf480626STejun Heo action |= ATA_EH_RESET; 1516c6fd2807SJeff Garzik } 1517f9df58cbSTejun Heo 1518f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1519f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1520f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1521f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1522f9df58cbSTejun Heo */ 1523eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 15246b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 15256b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1526f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1527f9df58cbSTejun Heo else 1528f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1529f9df58cbSTejun Heo 1530f9df58cbSTejun Heo if (serror & hotplug_mask) 1531c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1532c6fd2807SJeff Garzik 1533c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1534c6fd2807SJeff Garzik ehc->i.action |= action; 1535c6fd2807SJeff Garzik } 1536c6fd2807SJeff Garzik 1537c6fd2807SJeff Garzik /** 1538c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1539c6fd2807SJeff Garzik * @qc: qc to analyze 1540c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1541c6fd2807SJeff Garzik * 1542c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1543c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 154425985edcSLucas De Marchi * available. 1545c6fd2807SJeff Garzik * 1546c6fd2807SJeff Garzik * LOCKING: 1547c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1548c6fd2807SJeff Garzik * 1549c6fd2807SJeff Garzik * RETURNS: 1550c6fd2807SJeff Garzik * Determined recovery action 1551c6fd2807SJeff Garzik */ 1552c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1553c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1554c6fd2807SJeff Garzik { 1555c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1556c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1557c6fd2807SJeff Garzik 1558c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1559c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1560cf480626STejun Heo return ATA_EH_RESET; 1561c6fd2807SJeff Garzik } 1562c6fd2807SJeff Garzik 1563e87fd28cSHannes Reinecke if (stat & (ATA_ERR | ATA_DF)) { 1564a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1565e87fd28cSHannes Reinecke /* 1566e87fd28cSHannes Reinecke * Sense data reporting does not work if the 1567e87fd28cSHannes Reinecke * device fault bit is set. 
1568e87fd28cSHannes Reinecke */ 1569e87fd28cSHannes Reinecke if (stat & ATA_DF) 1570e87fd28cSHannes Reinecke stat &= ~ATA_SENSE; 1571e87fd28cSHannes Reinecke } else { 1572c6fd2807SJeff Garzik return 0; 1573e87fd28cSHannes Reinecke } 1574c6fd2807SJeff Garzik 1575c6fd2807SJeff Garzik switch (qc->dev->class) { 15769162c657SHannes Reinecke case ATA_DEV_ZAC: 1577e87fd28cSHannes Reinecke if (stat & ATA_SENSE) 1578e87fd28cSHannes Reinecke ata_eh_request_sense(qc, qc->scsicmd); 1579df561f66SGustavo A. R. Silva fallthrough; 1580ca156e00STejun Heo case ATA_DEV_ATA: 1581c6fd2807SJeff Garzik if (err & ATA_ICRC) 1582c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1583eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF)) 1584c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1585c6fd2807SJeff Garzik if (err & ATA_IDNF) 1586c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1587c6fd2807SJeff Garzik break; 1588c6fd2807SJeff Garzik 1589c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1590a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 15913eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 15923eabddb8STejun Heo qc->scsicmd->sense_buffer, 15933eabddb8STejun Heo qc->result_tf.feature >> 4); 15943852e373SHannes Reinecke if (!tmp) 1595c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 15963852e373SHannes Reinecke else 1597c6fd2807SJeff Garzik qc->err_mask |= tmp; 1598c6fd2807SJeff Garzik } 1599a569a30dSTejun Heo } 1600c6fd2807SJeff Garzik 16013852e373SHannes Reinecke if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 16023852e373SHannes Reinecke int ret = scsi_check_sense(qc->scsicmd); 16033852e373SHannes Reinecke /* 160479487259SDamien Le Moal * SUCCESS here means that the sense code could be 16053852e373SHannes Reinecke * evaluated and should be passed to the upper layers 16063852e373SHannes Reinecke * for correct evaluation. 160779487259SDamien Le Moal * FAILED means the sense code could not be interpreted 16083852e373SHannes Reinecke * and the device would need to be reset. 16093852e373SHannes Reinecke * NEEDS_RETRY and ADD_TO_MLQUEUE means that the 16103852e373SHannes Reinecke * command would need to be retried. 
16113852e373SHannes Reinecke */ 16123852e373SHannes Reinecke if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) { 16133852e373SHannes Reinecke qc->flags |= ATA_QCFLAG_RETRY; 16143852e373SHannes Reinecke qc->err_mask |= AC_ERR_OTHER; 16153852e373SHannes Reinecke } else if (ret != SUCCESS) { 16163852e373SHannes Reinecke qc->err_mask |= AC_ERR_HSM; 16173852e373SHannes Reinecke } 16183852e373SHannes Reinecke } 1619c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1620cf480626STejun Heo action |= ATA_EH_RESET; 1621c6fd2807SJeff Garzik 1622c6fd2807SJeff Garzik return action; 1623c6fd2807SJeff Garzik } 1624c6fd2807SJeff Garzik 162576326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 162676326ac1STejun Heo int *xfer_ok) 1627c6fd2807SJeff Garzik { 162876326ac1STejun Heo int base = 0; 162976326ac1STejun Heo 163076326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 163176326ac1STejun Heo *xfer_ok = 1; 163276326ac1STejun Heo 163376326ac1STejun Heo if (!*xfer_ok) 163475f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 163576326ac1STejun Heo 16367d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 163776326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1638c6fd2807SJeff Garzik 16397d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 164076326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 16417d47e8d4STejun Heo 16423884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 16437d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 164476326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 16457d47e8d4STejun Heo if ((err_mask & 16467d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 164776326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1648c6fd2807SJeff Garzik } 1649c6fd2807SJeff Garzik 1650c6fd2807SJeff Garzik return 0; 1651c6fd2807SJeff Garzik } 1652c6fd2807SJeff Garzik 16537d47e8d4STejun Heo struct speed_down_verdict_arg { 1654c6fd2807SJeff Garzik u64 since; 165576326ac1STejun Heo int xfer_ok; 16563884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1657c6fd2807SJeff Garzik }; 1658c6fd2807SJeff Garzik 16597d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1660c6fd2807SJeff Garzik { 16617d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 166276326ac1STejun Heo int cat; 1663c6fd2807SJeff Garzik 1664d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1665c6fd2807SJeff Garzik return -1; 1666c6fd2807SJeff Garzik 166776326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 166876326ac1STejun Heo &arg->xfer_ok); 16697d47e8d4STejun Heo arg->nr_errors[cat]++; 167076326ac1STejun Heo 1671c6fd2807SJeff Garzik return 0; 1672c6fd2807SJeff Garzik } 1673c6fd2807SJeff Garzik 1674c6fd2807SJeff Garzik /** 16757d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1676c6fd2807SJeff Garzik * @dev: Device of interest 1677c6fd2807SJeff Garzik * 1678c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 16797d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 16807d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
1681c6fd2807SJeff Garzik * 16823884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1683c6fd2807SJeff Garzik * 16843884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 16853884f7b0STejun Heo * IO commands 16867d47e8d4STejun Heo * 16873884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1688c6fd2807SJeff Garzik * 168976326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 169076326ac1STejun Heo * data transfer hasn't been verified. 169176326ac1STejun Heo * 16923884f7b0STejun Heo * Verdicts are 16937d47e8d4STejun Heo * 16943884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 16957d47e8d4STejun Heo * 16963884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 16973884f7b0STejun Heo * to PIO. 16983884f7b0STejun Heo * 16993884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 17003884f7b0STejun Heo * 17013884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 170276326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 170376326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 170476326ac1STejun Heo * This is to expedite speed down decisions right after device is 170576326ac1STejun Heo * initially configured. 17063884f7b0STejun Heo * 17074091fb95SMasahiro Yamada * The following are speed down rules. #1 and #2 deal with 170876326ac1STejun Heo * DUBIOUS errors. 170976326ac1STejun Heo * 171076326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 171176326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 171276326ac1STejun Heo * 171376326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 171476326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 171576326ac1STejun Heo * 171676326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 171725985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO 17183884f7b0STejun Heo * 171976326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 17203884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 17213884f7b0STejun Heo * 172276326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 17233884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 17247d47e8d4STejun Heo * 1725c6fd2807SJeff Garzik * LOCKING: 1726c6fd2807SJeff Garzik * Inherited from caller. 1727c6fd2807SJeff Garzik * 1728c6fd2807SJeff Garzik * RETURNS: 17297d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 
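 *
 * Worked example (illustrative, hypothetical error history): if the
 * ering holds three TOUT_HSM errors and one UNK_DEV error, all of them
 * between 5 and 10 minutes old, only rule #4 fires and the verdict is
 * ATA_EH_SPDN_NCQ_OFF; the 5-minute DUBIOUS rules see no entries and
 * contribute nothing.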
1730c6fd2807SJeff Garzik */ 17317d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1732c6fd2807SJeff Garzik { 17337d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 17347d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 17357d47e8d4STejun Heo struct speed_down_verdict_arg arg; 17367d47e8d4STejun Heo unsigned int verdict = 0; 1737c6fd2807SJeff Garzik 17383884f7b0STejun Heo /* scan past 5 mins of error history */ 17393884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 17403884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 17413884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 17423884f7b0STejun Heo 174376326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 174476326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 174576326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 174676326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 174776326ac1STejun Heo 174876326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 174976326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 175076326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 175176326ac1STejun Heo 17523884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 17533884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1754663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 17553884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 17563884f7b0STejun Heo 17577d47e8d4STejun Heo /* scan past 10 mins of error history */ 1758c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 17597d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 17607d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1761c6fd2807SJeff Garzik 17623884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 17633884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 17647d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 17653884f7b0STejun Heo 17663884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 17673884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1768663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 17697d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1770c6fd2807SJeff Garzik 17717d47e8d4STejun Heo return verdict; 1772c6fd2807SJeff Garzik } 1773c6fd2807SJeff Garzik 1774c6fd2807SJeff Garzik /** 1775c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1776c6fd2807SJeff Garzik * @dev: Failed device 17773884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1778c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1779c6fd2807SJeff Garzik * 1780c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1781c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1782c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1783c6fd2807SJeff Garzik * necessary. 1784c6fd2807SJeff Garzik * 1785c6fd2807SJeff Garzik * LOCKING: 1786c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1787c6fd2807SJeff Garzik * 1788c6fd2807SJeff Garzik * RETURNS: 17897d47e8d4STejun Heo * Determined recovery action. 
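 *
 * Illustrative call (hypothetical flag and error values; the real
 * caller is the autopsy path later in this file):
 *
 *	unsigned int eflags = ATA_EFLAG_IS_IO | ATA_EFLAG_DUBIOUS_XFER;
 *	unsigned int action = ata_eh_speed_down(dev, eflags, AC_ERR_TIMEOUT);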
1790c6fd2807SJeff Garzik */ 17913884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 17923884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1793c6fd2807SJeff Garzik { 1794b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 179576326ac1STejun Heo int xfer_ok = 0; 17967d47e8d4STejun Heo unsigned int verdict; 17977d47e8d4STejun Heo unsigned int action = 0; 17987d47e8d4STejun Heo 17997d47e8d4STejun Heo /* don't bother if Cat-0 error */ 180076326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1801c6fd2807SJeff Garzik return 0; 1802c6fd2807SJeff Garzik 1803c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 18043884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 18057d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1806c6fd2807SJeff Garzik 18077d47e8d4STejun Heo /* turn off NCQ? */ 18087d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 18097d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 18107d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 18117d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 1812a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 18137d47e8d4STejun Heo goto done; 18147d47e8d4STejun Heo } 1815c6fd2807SJeff Garzik 18167d47e8d4STejun Heo /* speed down? */ 18177d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1818c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1819a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1820cf480626STejun Heo action |= ATA_EH_RESET; 18217d47e8d4STejun Heo goto done; 18227d47e8d4STejun Heo } 1823c6fd2807SJeff Garzik 1824c6fd2807SJeff Garzik /* lower transfer mode */ 18257d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 18267d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 18277d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 18287d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 18297d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 18307d47e8d4STejun Heo int sel; 1831c6fd2807SJeff Garzik 18327d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 18337d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 18347d47e8d4STejun Heo else 18357d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 18367d47e8d4STejun Heo 18377d47e8d4STejun Heo dev->spdn_cnt++; 18387d47e8d4STejun Heo 18397d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1840cf480626STejun Heo action |= ATA_EH_RESET; 18417d47e8d4STejun Heo goto done; 18427d47e8d4STejun Heo } 18437d47e8d4STejun Heo } 18447d47e8d4STejun Heo } 18457d47e8d4STejun Heo 18467d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1847663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
18487d47e8d4STejun Heo */ 18497d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1850663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 18517d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 18527d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 18537d47e8d4STejun Heo dev->spdn_cnt = 0; 1854cf480626STejun Heo action |= ATA_EH_RESET; 18557d47e8d4STejun Heo goto done; 18567d47e8d4STejun Heo } 18577d47e8d4STejun Heo } 18587d47e8d4STejun Heo 1859c6fd2807SJeff Garzik return 0; 18607d47e8d4STejun Heo done: 18617d47e8d4STejun Heo /* device has been slowed down, blow error history */ 186276326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 18637d47e8d4STejun Heo ata_ering_clear(&dev->ering); 18647d47e8d4STejun Heo return action; 1865c6fd2807SJeff Garzik } 1866c6fd2807SJeff Garzik 1867c6fd2807SJeff Garzik /** 18688d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry 18698d899e70SMark Lord * @qc: qc to possibly retry 18708d899e70SMark Lord * 18718d899e70SMark Lord * Look at the cause of the error and decide if a retry 18728d899e70SMark Lord * might be useful or not. We don't want to retry media errors 18738d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds 18748d899e70SMark Lord * doing its own internal retries before reporting the failure. 18758d899e70SMark Lord */ 18768d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 18778d899e70SMark Lord { 18781eaca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA) 18798d899e70SMark Lord return 0; /* don't retry media errors */ 18808d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO) 18818d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */ 18828d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID) 18838d899e70SMark Lord return 0; /* don't retry these */ 18848d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ 18858d899e70SMark Lord } 18868d899e70SMark Lord 18878d899e70SMark Lord /** 18887eb49509SDamien Le Moal * ata_eh_quiet - check if we need to be quiet about a command error 18897eb49509SDamien Le Moal * @qc: qc to check 18907eb49509SDamien Le Moal * 18917eb49509SDamien Le Moal * Look at the qc flags and its scsi command request flags to determine 18927eb49509SDamien Le Moal * if we need to be quiet about the command failure. 18937eb49509SDamien Le Moal */ 18947eb49509SDamien Le Moal static inline bool ata_eh_quiet(struct ata_queued_cmd *qc) 18957eb49509SDamien Le Moal { 18967eb49509SDamien Le Moal if (qc->scsicmd && 18977eb49509SDamien Le Moal qc->scsicmd->request->rq_flags & RQF_QUIET) 18987eb49509SDamien Le Moal qc->flags |= ATA_QCFLAG_QUIET; 18997eb49509SDamien Le Moal return qc->flags & ATA_QCFLAG_QUIET; 19007eb49509SDamien Le Moal } 19017eb49509SDamien Le Moal 19027eb49509SDamien Le Moal /** 19039b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19049b1e2658STejun Heo * @link: host link to perform autopsy on 1905c6fd2807SJeff Garzik * 19060260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19070260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19080260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1909c6fd2807SJeff Garzik * 1910c6fd2807SJeff Garzik * LOCKING: 1911c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1912c6fd2807SJeff Garzik */ 19139b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1914c6fd2807SJeff Garzik { 19150260731fSTejun Heo struct ata_port *ap = link->ap; 1916936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1917258c4e5cSJens Axboe struct ata_queued_cmd *qc; 1918dfcc173dSTejun Heo struct ata_device *dev; 19193884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 19207eb49509SDamien Le Moal int tag, nr_failed = 0, nr_quiet = 0; 1921c6fd2807SJeff Garzik u32 serror; 1922c6fd2807SJeff Garzik int rc; 1923c6fd2807SJeff Garzik 1924c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1925c6fd2807SJeff Garzik 1926c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1927c6fd2807SJeff Garzik return; 1928c6fd2807SJeff Garzik 1929c6fd2807SJeff Garzik /* obtain and analyze SError */ 1930936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1931c6fd2807SJeff Garzik if (rc == 0) { 1932c6fd2807SJeff Garzik ehc->i.serror |= serror; 19330260731fSTejun Heo ata_eh_analyze_serror(link); 19344e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 1935cf480626STejun Heo /* SError read failed, force reset and probing */ 1936b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 1937cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 19384e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 19394e57c517STejun Heo } 1940c6fd2807SJeff Garzik 1941c6fd2807SJeff Garzik /* analyze NCQ failure */ 19420260731fSTejun Heo ata_eh_analyze_ncq_error(link); 1943c6fd2807SJeff Garzik 1944c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1945c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1946c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1947c6fd2807SJeff Garzik 1948c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1949c6fd2807SJeff Garzik 1950258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 1951b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 1952b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 1953c6fd2807SJeff Garzik continue; 1954c6fd2807SJeff Garzik 1955c6fd2807SJeff Garzik /* inherit upper level err_mask */ 1956c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 1957c6fd2807SJeff Garzik 1958c6fd2807SJeff Garzik /* analyze TF */ 1959c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1960c6fd2807SJeff Garzik 1961c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 1962c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 1963c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 1964c6fd2807SJeff Garzik AC_ERR_INVALID); 1965c6fd2807SJeff Garzik 1966c6fd2807SJeff Garzik /* any real error trumps unknown error */ 1967c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 1968c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 1969c6fd2807SJeff Garzik 1970804689adSDamien Le Moal /* 1971804689adSDamien Le Moal * SENSE_VALID trumps dev/unknown error and revalidation. Upper 1972804689adSDamien Le Moal * layers will determine whether the command is worth retrying 1973804689adSDamien Le Moal * based on the sense data and device class/type. Otherwise, 1974804689adSDamien Le Moal * determine directly if the command is worth retrying using its 1975804689adSDamien Le Moal * error mask and flags. 
1976804689adSDamien Le Moal */ 1977f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 1978c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1979804689adSDamien Le Moal else if (ata_eh_worth_retry(qc)) 198003faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 198103faab78STejun Heo 1982c6fd2807SJeff Garzik /* accumulate error info */ 1983c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 1984c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 1985c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 19863884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 1987255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc); 19887eb49509SDamien Le Moal 19897eb49509SDamien Le Moal /* Count quiet errors */ 19907eb49509SDamien Le Moal if (ata_eh_quiet(qc)) 19917eb49509SDamien Le Moal nr_quiet++; 19927eb49509SDamien Le Moal nr_failed++; 1993c6fd2807SJeff Garzik } 1994c6fd2807SJeff Garzik 19957eb49509SDamien Le Moal /* If all failed commands requested silence, then be quiet */ 19967eb49509SDamien Le Moal if (nr_quiet == nr_failed) 19977eb49509SDamien Le Moal ehc->i.flags |= ATA_EHI_QUIET; 19987eb49509SDamien Le Moal 1999c6fd2807SJeff Garzik /* enforce default EH actions */ 2000c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2001c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2002cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20033884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20043884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2005c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2006c6fd2807SJeff Garzik 2007dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2008dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2009dfcc173dSTejun Heo */ 2010c6fd2807SJeff Garzik if (ehc->i.dev) { 2011c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2012c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2013c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2014c6fd2807SJeff Garzik } 2015c6fd2807SJeff Garzik 20162695e366STejun Heo /* propagate timeout to host link */ 20172695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 20182695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 20192695e366STejun Heo 20202695e366STejun Heo /* record error and consider speeding down */ 2021dfcc173dSTejun Heo dev = ehc->i.dev; 20222695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 20232695e366STejun Heo ata_dev_enabled(link->device)))) 2024dfcc173dSTejun Heo dev = link->device; 2025dfcc173dSTejun Heo 202676326ac1STejun Heo if (dev) { 202776326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 202876326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 20293884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2030255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2031f1601113SRameshwar Prasad Sahu } 2032c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 2033c6fd2807SJeff Garzik } 2034c6fd2807SJeff Garzik 2035c6fd2807SJeff Garzik /** 20369b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 20379b1e2658STejun Heo * @ap: host port to perform autopsy on 20389b1e2658STejun Heo * 20399b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 20409b1e2658STejun Heo * which recovery actions are needed. 
20419b1e2658STejun Heo * 20429b1e2658STejun Heo * LOCKING: 20439b1e2658STejun Heo * Kernel thread context (may sleep). 20449b1e2658STejun Heo */ 2045fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 20469b1e2658STejun Heo { 20479b1e2658STejun Heo struct ata_link *link; 20489b1e2658STejun Heo 20491eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 20509b1e2658STejun Heo ata_eh_link_autopsy(link); 20512695e366STejun Heo 2052b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2053b1c72916STejun Heo * but actions and flags are transferred over to the master 2054b1c72916STejun Heo * link and handled from there. 2055b1c72916STejun Heo */ 2056b1c72916STejun Heo if (ap->slave_link) { 2057b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2058b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2059b1c72916STejun Heo 2060848e4c68STejun Heo /* transfer control flags from master to slave */ 2061848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2062848e4c68STejun Heo 2063848e4c68STejun Heo /* perform autopsy on the slave link */ 2064b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2065b1c72916STejun Heo 2066848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2067b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2068b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2069b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2070b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2071b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2072b1c72916STejun Heo } 2073b1c72916STejun Heo 20742695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 20752695e366STejun Heo * Perform host link autopsy last. 20762695e366STejun Heo */ 2077071f44b1STejun Heo if (sata_pmp_attached(ap)) 20782695e366STejun Heo ata_eh_link_autopsy(&ap->link); 20799b1e2658STejun Heo } 20809b1e2658STejun Heo 20819b1e2658STejun Heo /** 20826521148cSRobert Hancock * ata_get_cmd_descript - get description for ATA command 20836521148cSRobert Hancock * @command: ATA command code to get description for 20846521148cSRobert Hancock * 20856521148cSRobert Hancock * Return a textual description of the given command, or NULL if the 20866521148cSRobert Hancock * command is not known. 
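 *
 * Minimal usage sketch (illustrative only); per the table below,
 * ATA_CMD_FLUSH maps to "FLUSH CACHE", and NULL is returned when
 * CONFIG_ATA_VERBOSE_ERROR is not set:
 *
 *	const char *descr = ata_get_cmd_descript(ATA_CMD_FLUSH);
 *
 *	if (descr)
 *		ata_dev_err(dev, "failed command: %s\n", descr);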
20876521148cSRobert Hancock * 20886521148cSRobert Hancock * LOCKING: 20896521148cSRobert Hancock * None 20906521148cSRobert Hancock */ 20916521148cSRobert Hancock const char *ata_get_cmd_descript(u8 command) 20926521148cSRobert Hancock { 20936521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 20946521148cSRobert Hancock static const struct 20956521148cSRobert Hancock { 20966521148cSRobert Hancock u8 command; 20976521148cSRobert Hancock const char *text; 20986521148cSRobert Hancock } cmd_descr[] = { 20996521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 21006521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 21016521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 21026521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 21036521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 21046521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 21053915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 21066521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 21076521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 21086521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 21096521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 21106521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 21116521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 21126521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 21136521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 21146521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 21156521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 21166521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 21176521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 21186521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 21196521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 21206521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 21216521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 21226521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 21236521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 21246521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 21256521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 21263915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, 21273915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, 21286521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 21296521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 21306521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 21316521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 21326521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 21336521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 21346521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 21356521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 21366521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 21376521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 21386521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 21396521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 21406521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 
21416521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 21426521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 21436521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 21446521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 21456521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 21466521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 21476521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 21486521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 21496521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 21506521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 21516521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 21526521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 21536521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 21543915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 21556521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 21566521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 21576521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 21586521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 21596521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 21603915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 21616521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 21623915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, 21636521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 21646521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 21656521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 21666521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 21676521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 21686521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 21696521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 21706521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 21716521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 21726521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 21736521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2174acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 21756521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 21766521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 21776521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 21786521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 21796521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 21806521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 21813915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 21823915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 218328a3fc22SHannes Reinecke { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" }, 218427708a95SHannes Reinecke { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" }, 21856521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 21866521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 21876521148cSRobert 
Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 21886521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 21896521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 21906521148cSRobert Hancock { 0, NULL } /* terminate list */ 21916521148cSRobert Hancock }; 21926521148cSRobert Hancock 21936521148cSRobert Hancock unsigned int i; 21946521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 21956521148cSRobert Hancock if (cmd_descr[i].command == command) 21966521148cSRobert Hancock return cmd_descr[i].text; 21976521148cSRobert Hancock #endif 21986521148cSRobert Hancock 21996521148cSRobert Hancock return NULL; 22006521148cSRobert Hancock } 220136aae28eSAndy Shevchenko EXPORT_SYMBOL_GPL(ata_get_cmd_descript); 22026521148cSRobert Hancock 22036521148cSRobert Hancock /** 22049b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22050260731fSTejun Heo * @link: ATA link EH is going on 2206c6fd2807SJeff Garzik * 2207c6fd2807SJeff Garzik * Report EH to user. 2208c6fd2807SJeff Garzik * 2209c6fd2807SJeff Garzik * LOCKING: 2210c6fd2807SJeff Garzik * None. 2211c6fd2807SJeff Garzik */ 22129b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2213c6fd2807SJeff Garzik { 22140260731fSTejun Heo struct ata_port *ap = link->ap; 22150260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2216258c4e5cSJens Axboe struct ata_queued_cmd *qc; 2217c6fd2807SJeff Garzik const char *frozen, *desc; 2218462098b0SLevente Kurusa char tries_buf[6] = ""; 2219c6fd2807SJeff Garzik int tag, nr_failed = 0; 2220c6fd2807SJeff Garzik 222194ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 222294ff3d54STejun Heo return; 222394ff3d54STejun Heo 2224c6fd2807SJeff Garzik desc = NULL; 2225c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2226c6fd2807SJeff Garzik desc = ehc->i.desc; 2227c6fd2807SJeff Garzik 2228258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 2229b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2230b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2231e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2232e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2233c6fd2807SJeff Garzik continue; 2234c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2235c6fd2807SJeff Garzik continue; 2236c6fd2807SJeff Garzik 2237c6fd2807SJeff Garzik nr_failed++; 2238c6fd2807SJeff Garzik } 2239c6fd2807SJeff Garzik 2240c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2241c6fd2807SJeff Garzik return; 2242c6fd2807SJeff Garzik 2243c6fd2807SJeff Garzik frozen = ""; 2244c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2245c6fd2807SJeff Garzik frozen = " frozen"; 2246c6fd2807SJeff Garzik 2247a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2248462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d", 2249a1e10f7eSTejun Heo ap->eh_tries); 2250a1e10f7eSTejun Heo 2251c6fd2807SJeff Garzik if (ehc->i.dev) { 2252a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2253a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2254a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2255a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2256c6fd2807SJeff Garzik if (desc) 2257a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2258c6fd2807SJeff Garzik } else { 2259a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2260a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2261a1e10f7eSTejun Heo ehc->i.err_mask, 
link->sactive, ehc->i.serror, 2262a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2263c6fd2807SJeff Garzik if (desc) 2264a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2265c6fd2807SJeff Garzik } 2266c6fd2807SJeff Garzik 22676521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 22681333e194SRobert Hancock if (ehc->i.serror) 2269a9a79dfeSJoe Perches ata_link_err(link, 22701333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 22711333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 22721333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 22731333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 22741333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 22751333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 22761333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 22771333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 22781333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 22791333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 22801333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 22811333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 22821333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 22831333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 22841333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 22851333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 22861333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 22871333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? 
"DevExch " : ""); 22886521148cSRobert Hancock #endif 22891333e194SRobert Hancock 2290258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 22918a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2292abb6a889STejun Heo char data_buf[20] = ""; 2293abb6a889STejun Heo char cdb_buf[70] = ""; 2294c6fd2807SJeff Garzik 22950260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2296b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2297c6fd2807SJeff Garzik continue; 2298c6fd2807SJeff Garzik 2299abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2300abb6a889STejun Heo static const char *dma_str[] = { 2301abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2302abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2303abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2304abb6a889STejun Heo }; 2305fb1b8b11SGeert Uytterhoeven const char *prot_str = NULL; 2306abb6a889STejun Heo 2307fb1b8b11SGeert Uytterhoeven switch (qc->tf.protocol) { 2308fb1b8b11SGeert Uytterhoeven case ATA_PROT_UNKNOWN: 2309fb1b8b11SGeert Uytterhoeven prot_str = "unknown"; 2310fb1b8b11SGeert Uytterhoeven break; 2311fb1b8b11SGeert Uytterhoeven case ATA_PROT_NODATA: 2312fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2313fb1b8b11SGeert Uytterhoeven break; 2314fb1b8b11SGeert Uytterhoeven case ATA_PROT_PIO: 2315fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2316fb1b8b11SGeert Uytterhoeven break; 2317fb1b8b11SGeert Uytterhoeven case ATA_PROT_DMA: 2318fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2319fb1b8b11SGeert Uytterhoeven break; 2320fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ: 2321fb1b8b11SGeert Uytterhoeven prot_str = "ncq dma"; 2322fb1b8b11SGeert Uytterhoeven break; 2323fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ_NODATA: 2324fb1b8b11SGeert Uytterhoeven prot_str = "ncq nodata"; 2325fb1b8b11SGeert Uytterhoeven break; 2326fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_NODATA: 2327fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2328fb1b8b11SGeert Uytterhoeven break; 2329fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_PIO: 2330fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2331fb1b8b11SGeert Uytterhoeven break; 2332fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_DMA: 2333fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2334fb1b8b11SGeert Uytterhoeven break; 2335fb1b8b11SGeert Uytterhoeven } 2336abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2337fb1b8b11SGeert Uytterhoeven prot_str, qc->nbytes, dma_str[qc->dma_dir]); 2338abb6a889STejun Heo } 2339abb6a889STejun Heo 23406521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2341a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb; 2342a13b0c9dSHannes Reinecke size_t cdb_len = qc->dev->cdb_len; 2343a13b0c9dSHannes Reinecke 2344cbba5b0eSHannes Reinecke if (qc->scsicmd) { 2345cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd; 2346cbba5b0eSHannes Reinecke cdb_len = qc->scsicmd->cmd_len; 2347cbba5b0eSHannes Reinecke } 2348cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf), 2349cbba5b0eSHannes Reinecke cdb, cdb_len); 23506521148cSRobert Hancock } else { 23516521148cSRobert Hancock const char *descr = ata_get_cmd_descript(cmd->command); 23526521148cSRobert Hancock if (descr) 2353a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2354a9a79dfeSJoe Perches descr); 23556521148cSRobert Hancock } 2356abb6a889STejun Heo 2357a9a79dfeSJoe Perches ata_dev_err(qc->dev, 23588a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2359abb6a889STejun Heo "tag %d%s\n %s" 23608a937581STejun Heo "res 
%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 23615335b729STejun Heo "Emask 0x%x (%s)%s\n", 23628a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 23638a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 23648a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 23658a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2366abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 23678a937581STejun Heo res->command, res->feature, res->nsect, 23688a937581STejun Heo res->lbal, res->lbam, res->lbah, 23698a937581STejun Heo res->hob_feature, res->hob_nsect, 23708a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 23715335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 23725335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 23731333e194SRobert Hancock 23746521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23751333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2376e87fd28cSHannes Reinecke ATA_SENSE | ATA_ERR)) { 23771333e194SRobert Hancock if (res->command & ATA_BUSY) 2378a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 23791333e194SRobert Hancock else 2380e87fd28cSHannes Reinecke ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 23811333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 23821333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 23831333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 2384e87fd28cSHannes Reinecke res->command & ATA_SENSE ? "SENSE " : "", 23851333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 23861333e194SRobert Hancock } 23871333e194SRobert Hancock 23881333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 2389eec7e1c1SAlexey Asemov (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF | 2390eec7e1c1SAlexey Asemov ATA_IDNF | ATA_ABORTED))) 2391eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", 23921333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 23931333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 2394eec7e1c1SAlexey Asemov res->feature & ATA_AMNF ? "AMNF " : "", 23951333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 23961333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 23976521148cSRobert Hancock #endif 2398c6fd2807SJeff Garzik } 2399c6fd2807SJeff Garzik } 2400c6fd2807SJeff Garzik 24019b1e2658STejun Heo /** 24029b1e2658STejun Heo * ata_eh_report - report error handling to user 24039b1e2658STejun Heo * @ap: ATA port to report EH about 24049b1e2658STejun Heo * 24059b1e2658STejun Heo * Report EH to user. 24069b1e2658STejun Heo * 24079b1e2658STejun Heo * LOCKING: 24089b1e2658STejun Heo * None. 
24099b1e2658STejun Heo */ 2410fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24119b1e2658STejun Heo { 24129b1e2658STejun Heo struct ata_link *link; 24139b1e2658STejun Heo 24141eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 24159b1e2658STejun Heo ata_eh_link_report(link); 24169b1e2658STejun Heo } 24179b1e2658STejun Heo 2418cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2419b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2420b1c72916STejun Heo bool clear_classes) 2421c6fd2807SJeff Garzik { 2422f58229f8STejun Heo struct ata_device *dev; 2423c6fd2807SJeff Garzik 2424b1c72916STejun Heo if (clear_classes) 24251eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2426f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2427c6fd2807SJeff Garzik 2428f046519fSTejun Heo return reset(link, classes, deadline); 2429c6fd2807SJeff Garzik } 2430c6fd2807SJeff Garzik 2431e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2432c6fd2807SJeff Garzik { 243345db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2434ae791c05STejun Heo return 0; 24355dbfc9cbSTejun Heo if (rc == -EAGAIN) 2436c6fd2807SJeff Garzik return 1; 2437071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 24383495de73STejun Heo return 1; 2439c6fd2807SJeff Garzik return 0; 2440c6fd2807SJeff Garzik } 2441c6fd2807SJeff Garzik 2442fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2443c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2444c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2445c6fd2807SJeff Garzik { 2446afaa5c37STejun Heo struct ata_port *ap = link->ap; 2447b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2448936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2449705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2450c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2451416dc9edSTejun Heo unsigned int lflags = link->flags; 2452c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2453d8af0eb6STejun Heo int max_tries = 0, try = 0; 2454b1c72916STejun Heo struct ata_link *failed_link; 2455f58229f8STejun Heo struct ata_device *dev; 2456416dc9edSTejun Heo unsigned long deadline, now; 2457c6fd2807SJeff Garzik ata_reset_fn_t reset; 2458afaa5c37STejun Heo unsigned long flags; 2459416dc9edSTejun Heo u32 sstatus; 2460b1c72916STejun Heo int nr_unknown, rc; 2461c6fd2807SJeff Garzik 2462932648b0STejun Heo /* 2463932648b0STejun Heo * Prepare to reset 2464932648b0STejun Heo */ 2465d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2466d8af0eb6STejun Heo max_tries++; 2467ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE) 2468ca6d43b0SDan Williams max_tries = 1; 246905944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 247005944bdfSTejun Heo hardreset = NULL; 247105944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 247205944bdfSTejun Heo softreset = NULL; 2473d8af0eb6STejun Heo 247425985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 247519b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 24760a2c0f56STejun Heo now = jiffies; 247719b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 247819b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 247919b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 24800a2c0f56STejun Heo if (time_before(now, deadline)) 24810a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 248219b72321STejun Heo } 24830a2c0f56STejun Heo 2484afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2485afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2486afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2487afaa5c37STejun Heo 2488cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2489c6fd2807SJeff Garzik 24901eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2491cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2492cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2493cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2494cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2495cdeab114STejun Heo * suitable controller mode we should not touch the 2496cdeab114STejun Heo * bus as we may be talking too fast. 2497cdeab114STejun Heo */ 2498cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 24995416912aSAaron Lu dev->dma_mode = 0xff; 2500cdeab114STejun Heo 2501cdeab114STejun Heo /* If the controller has a pio mode setup function 2502cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2503cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2504cdeab114STejun Heo * configuring devices. 
2505cdeab114STejun Heo */ 2506cdeab114STejun Heo if (ap->ops->set_piomode) 2507cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2508cdeab114STejun Heo } 2509cdeab114STejun Heo 2510cf480626STejun Heo /* prefer hardreset */ 2511932648b0STejun Heo reset = NULL; 2512cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2513cf480626STejun Heo if (hardreset) { 2514cf480626STejun Heo reset = hardreset; 2515a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 25164f7faa3fSTejun Heo } else if (softreset) { 2517cf480626STejun Heo reset = softreset; 2518a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2519cf480626STejun Heo } 2520c6fd2807SJeff Garzik 2521c6fd2807SJeff Garzik if (prereset) { 2522b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2523b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2524b1c72916STejun Heo 2525b1c72916STejun Heo if (slave) { 2526b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2527b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2528b1c72916STejun Heo } 2529b1c72916STejun Heo 2530b1c72916STejun Heo rc = prereset(link, deadline); 2531b1c72916STejun Heo 2532b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2533b1c72916STejun Heo * is skipped iff both master and slave links report 2534b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2535b1c72916STejun Heo */ 2536b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2537b1c72916STejun Heo int tmp; 2538b1c72916STejun Heo 2539b1c72916STejun Heo tmp = prereset(slave, deadline); 2540b1c72916STejun Heo if (tmp != -ENOENT) 2541b1c72916STejun Heo rc = tmp; 2542b1c72916STejun Heo 2543b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2544b1c72916STejun Heo } 2545b1c72916STejun Heo 2546c6fd2807SJeff Garzik if (rc) { 2547c961922bSAlan Cox if (rc == -ENOENT) { 2548a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2549cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 25504aa9ab67STejun Heo 25511eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2552f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 25534aa9ab67STejun Heo 25544aa9ab67STejun Heo rc = 0; 2555c961922bSAlan Cox } else 2556a9a79dfeSJoe Perches ata_link_err(link, 2557a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2558a9a79dfeSJoe Perches rc); 2559fccb6ea5STejun Heo goto out; 2560c6fd2807SJeff Garzik } 2561c6fd2807SJeff Garzik 2562932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2563d6515e6fSTejun Heo * bang classes, thaw and return. 2564932648b0STejun Heo */ 2565932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 25661eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2567f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2568d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2569d6515e6fSTejun Heo ata_is_host_link(link)) 2570d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2571fccb6ea5STejun Heo rc = 0; 2572fccb6ea5STejun Heo goto out; 2573c6fd2807SJeff Garzik } 2574932648b0STejun Heo } 2575c6fd2807SJeff Garzik 2576c6fd2807SJeff Garzik retry: 2577932648b0STejun Heo /* 2578932648b0STejun Heo * Perform reset 2579932648b0STejun Heo */ 2580dc98c32cSTejun Heo if (ata_is_host_link(link)) 2581dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2582dc98c32cSTejun Heo 2583341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 258431daabdaSTejun Heo 2585932648b0STejun Heo if (reset) { 2586c6fd2807SJeff Garzik if (verbose) 2587a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2588c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2589c6fd2807SJeff Garzik 2590c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 259119b72321STejun Heo ehc->last_reset = jiffies; 25920d64a233STejun Heo if (reset == hardreset) 25930d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 25940d64a233STejun Heo else 25950d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2596c6fd2807SJeff Garzik 2597b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2598b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2599b1c72916STejun Heo failed_link = link; 26005dbfc9cbSTejun Heo goto fail; 2601b1c72916STejun Heo } 2602c6fd2807SJeff Garzik 2603b1c72916STejun Heo /* hardreset slave link if existent */ 2604b1c72916STejun Heo if (slave && reset == hardreset) { 2605b1c72916STejun Heo int tmp; 2606b1c72916STejun Heo 2607b1c72916STejun Heo if (verbose) 2608a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2609b1c72916STejun Heo 2610b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2611b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2612b1c72916STejun Heo false); 2613b1c72916STejun Heo switch (tmp) { 2614b1c72916STejun Heo case -EAGAIN: 2615b1c72916STejun Heo rc = -EAGAIN; 2616b1c72916STejun Heo case 0: 2617b1c72916STejun Heo break; 2618b1c72916STejun Heo default: 2619b1c72916STejun Heo failed_link = slave; 2620b1c72916STejun Heo rc = tmp; 2621b1c72916STejun Heo goto fail; 2622b1c72916STejun Heo } 2623b1c72916STejun Heo } 2624b1c72916STejun Heo 2625b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2626c6fd2807SJeff Garzik if (reset == hardreset && 2627e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2628c6fd2807SJeff Garzik reset = softreset; 2629c6fd2807SJeff Garzik 2630c6fd2807SJeff Garzik if (!reset) { 2631a9a79dfeSJoe Perches ata_link_err(link, 2632a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2633b1c72916STejun Heo failed_link = link; 2634fccb6ea5STejun Heo rc = -EINVAL; 263508cf69d0STejun Heo goto fail; 2636c6fd2807SJeff Garzik } 2637c6fd2807SJeff Garzik 2638cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2639b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2640fe2c4d01STejun Heo if (rc) { 2641fe2c4d01STejun Heo failed_link = link; 2642fe2c4d01STejun Heo goto fail; 2643fe2c4d01STejun Heo } 2644c6fd2807SJeff Garzik } 2645932648b0STejun Heo } else { 2646932648b0STejun Heo if (verbose) 2647a9a79dfeSJoe Perches ata_link_info(link, 2648a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2649932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2650932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2651932648b0STejun Heo } 2652008a7896STejun Heo 2653932648b0STejun Heo /* 2654932648b0STejun Heo * Post-reset processing 2655932648b0STejun Heo */ 26561eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2657416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2658416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2659416dc9edSTejun Heo * drives from sleeping mode. 
2660c6fd2807SJeff Garzik */ 2661f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2662054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2663c6fd2807SJeff Garzik 26643b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 26653b761d3dSTejun Heo continue; 26663b761d3dSTejun Heo 26674ccd3329STejun Heo /* apply class override */ 2668416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2669ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2670416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2671816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2672ae791c05STejun Heo } 2673ae791c05STejun Heo 2674008a7896STejun Heo /* record current link speed */ 2675936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2676936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2677b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2678b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2679008a7896STejun Heo 2680dc98c32cSTejun Heo /* thaw the port */ 2681dc98c32cSTejun Heo if (ata_is_host_link(link)) 2682dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2683dc98c32cSTejun Heo 2684f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2685f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2686f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2687f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2688f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2689f046519fSTejun Heo * link onlineness and classification result later. 2690f046519fSTejun Heo */ 2691b1c72916STejun Heo if (postreset) { 2692cc0680a5STejun Heo postreset(link, classes); 2693b1c72916STejun Heo if (slave) 2694b1c72916STejun Heo postreset(slave, classes); 2695b1c72916STejun Heo } 2696c6fd2807SJeff Garzik 26971e641060STejun Heo /* 26988c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 26998c56caccSTejun Heo * error conditions during reset. Clear accumulated error 27008c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 27018c56caccSTejun Heo * final recovery action and we cross check link onlineness against 27028c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 27031e641060STejun Heo */ 2704f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 27051e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2706b1c72916STejun Heo if (slave) 27071e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 27081e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2709f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2710f046519fSTejun Heo 27118c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 27128c56caccSTejun Heo ata_eh_thaw_port(ap); 27138c56caccSTejun Heo 27143b761d3dSTejun Heo /* 27153b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2716f046519fSTejun Heo * Hotplug could have happened during reset and some 2717f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2718f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 27193b761d3dSTejun Heo * link on/offlineness and classification result, those 27203b761d3dSTejun Heo * conditions can be reliably detected and retried. 
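 *
 * In short, the cross check below amounts to (summary only, the code
 * that follows is authoritative):
 *
 *	link state   class from reset   action taken
 *	----------   ----------------   ----------------------------------
 *	online       ATA_DEV_UNKNOWN    treat as NONE, count in nr_unknown
 *	offline      anything           clear class to ATA_DEV_NONE
 *	unknown      ATA_DEV_UNKNOWN    clear class to ATA_DEV_NONE
 *
 * Only the online-but-unknown case feeds nr_unknown and can therefore
 * trigger the "misclassified, retrying" path when classification was
 * requested.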
2721f046519fSTejun Heo */ 2722b1c72916STejun Heo nr_unknown = 0; 27231eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 27243b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2725b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2726a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2727f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2728b1c72916STejun Heo nr_unknown++; 2729b1c72916STejun Heo } 27303b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 27313b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2732a9a79dfeSJoe Perches ata_dev_dbg(dev, 2733a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 27343b761d3dSTejun Heo classes[dev->devno]); 27353b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27363b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2737a9a79dfeSJoe Perches ata_dev_dbg(dev, 2738a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 27393b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27403b761d3dSTejun Heo } 2741f046519fSTejun Heo } 2742f046519fSTejun Heo 2743b1c72916STejun Heo if (classify && nr_unknown) { 2744f046519fSTejun Heo if (try < max_tries) { 2745a9a79dfeSJoe Perches ata_link_warn(link, 2746a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 27473b761d3dSTejun Heo nr_unknown); 2748b1c72916STejun Heo failed_link = link; 2749f046519fSTejun Heo rc = -EAGAIN; 2750f046519fSTejun Heo goto fail; 2751f046519fSTejun Heo } 2752a9a79dfeSJoe Perches ata_link_warn(link, 27533b761d3dSTejun Heo "link online but %d devices misclassified, " 27543b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2755f046519fSTejun Heo } 2756f046519fSTejun Heo 2757c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2758cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2759b1c72916STejun Heo if (slave) 2760b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 276119b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2762c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 27636b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2764416dc9edSTejun Heo 2765416dc9edSTejun Heo rc = 0; 2766fccb6ea5STejun Heo out: 2767fccb6ea5STejun Heo /* clear hotplug flag */ 2768fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2769b1c72916STejun Heo if (slave) 2770b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2771afaa5c37STejun Heo 2772afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2773afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2774afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2775afaa5c37STejun Heo 2776c6fd2807SJeff Garzik return rc; 2777416dc9edSTejun Heo 2778416dc9edSTejun Heo fail: 27795958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 27805958e302STejun Heo if (!ata_is_host_link(link) && 27815958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 27825958e302STejun Heo rc = -ERESTART; 27835958e302STejun Heo 27847a46c078SGwendal Grignou if (try >= max_tries) { 27858ea7645cSTejun Heo /* 27868ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 27878ea7645cSTejun Heo * can be retried on the next phy event. This risks 27888ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 27898ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
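 *
 * When tries do remain, the code further below instead waits out
 * whatever is left of the current ata_eh_reset_timeouts[] entry
 * (releasing EH ownership around the sleep via ata_eh_release() and
 * ata_eh_acquire()) and then jumps back to the "retry" label,
 * preferring hardreset again if one is available.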
27908ea7645cSTejun Heo */ 27918ea7645cSTejun Heo if (ata_is_host_link(link)) 27928ea7645cSTejun Heo ata_eh_thaw_port(ap); 2793416dc9edSTejun Heo goto out; 27948ea7645cSTejun Heo } 2795416dc9edSTejun Heo 2796416dc9edSTejun Heo now = jiffies; 2797416dc9edSTejun Heo if (time_before(now, deadline)) { 2798416dc9edSTejun Heo unsigned long delta = deadline - now; 2799416dc9edSTejun Heo 2800a9a79dfeSJoe Perches ata_link_warn(failed_link, 28010a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 28020a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2803416dc9edSTejun Heo 2804c0c362b6STejun Heo ata_eh_release(ap); 2805416dc9edSTejun Heo while (delta) 2806416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2807c0c362b6STejun Heo ata_eh_acquire(ap); 2808416dc9edSTejun Heo } 2809416dc9edSTejun Heo 28107a46c078SGwendal Grignou /* 28117a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 28127a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 28137a46c078SGwendal Grignou */ 28147a46c078SGwendal Grignou if (rc == -ERESTART) { 28157a46c078SGwendal Grignou if (ata_is_host_link(link)) 28167a46c078SGwendal Grignou ata_eh_thaw_port(ap); 28177a46c078SGwendal Grignou goto out; 28187a46c078SGwendal Grignou } 28197a46c078SGwendal Grignou 2820b1c72916STejun Heo if (try == max_tries - 1) { 2821a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2822b1c72916STejun Heo if (slave) 2823a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2824b1c72916STejun Heo } else if (rc == -EPIPE) 2825a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2826b1c72916STejun Heo 2827416dc9edSTejun Heo if (hardreset) 2828416dc9edSTejun Heo reset = hardreset; 2829416dc9edSTejun Heo goto retry; 2830c6fd2807SJeff Garzik } 2831c6fd2807SJeff Garzik 283245fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 283345fabbb7SElias Oltmanns { 283445fabbb7SElias Oltmanns struct ata_link *link; 283545fabbb7SElias Oltmanns struct ata_device *dev; 283645fabbb7SElias Oltmanns unsigned long flags; 283745fabbb7SElias Oltmanns 283845fabbb7SElias Oltmanns /* 283945fabbb7SElias Oltmanns * This function can be thought of as an extended version of 284045fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 284145fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 284245fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 284345fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 284445fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 284545fabbb7SElias Oltmanns * up park requests to other devices on the same port or 284645fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 284745fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 284845fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 284945fabbb7SElias Oltmanns * 285045fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 285116735d02SWolfram Sang * through reinit_completion() (see below) or complete_all() 285245fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 285345fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 285445fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 285545fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 285645fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 285745fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 285845fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 285945fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 286045fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 286145fabbb7SElias Oltmanns * ata_eh_recover() again. 286245fabbb7SElias Oltmanns */ 286345fabbb7SElias Oltmanns 286445fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 286516735d02SWolfram Sang reinit_completion(&ap->park_req_pending); 28661eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 28671eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 286845fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 286945fabbb7SElias Oltmanns 287045fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 287145fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 287245fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 287345fabbb7SElias Oltmanns } 287445fabbb7SElias Oltmanns } 287545fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 287645fabbb7SElias Oltmanns } 287745fabbb7SElias Oltmanns 287845fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 287945fabbb7SElias Oltmanns { 288045fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 288145fabbb7SElias Oltmanns struct ata_taskfile tf; 288245fabbb7SElias Oltmanns unsigned int err_mask; 288345fabbb7SElias Oltmanns 288445fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 288545fabbb7SElias Oltmanns if (park) { 288645fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 288745fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 288845fabbb7SElias Oltmanns tf.feature = 0x44; 288945fabbb7SElias Oltmanns tf.lbal = 0x4c; 289045fabbb7SElias Oltmanns tf.lbam = 0x4e; 289145fabbb7SElias Oltmanns tf.lbah = 0x55; 289245fabbb7SElias Oltmanns } else { 289345fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 289445fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 289545fabbb7SElias Oltmanns } 289645fabbb7SElias Oltmanns 289745fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 2898bd18bc04SHannes Reinecke tf.protocol = ATA_PROT_NODATA; 289945fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 290045fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 2901a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 290245fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 290345fabbb7SElias Oltmanns } 290445fabbb7SElias Oltmanns } 290545fabbb7SElias Oltmanns 29060260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2907c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2908c6fd2807SJeff Garzik { 29090260731fSTejun Heo struct ata_port *ap = link->ap; 29100260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2911c6fd2807SJeff Garzik struct ata_device *dev; 29128c3c52a8STejun Heo unsigned int new_mask = 0; 2913c6fd2807SJeff Garzik unsigned long flags; 2914f58229f8STejun Heo int rc = 0; 2915c6fd2807SJeff Garzik 2916c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2917c6fd2807SJeff Garzik 29188c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY 
must 29198c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 29208c3c52a8STejun Heo * device before the master device is identified. 29218c3c52a8STejun Heo */ 29221eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2923f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2924f58229f8STejun Heo unsigned int readid_flags = 0; 2925c6fd2807SJeff Garzik 2926bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2927bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2928bff04647STejun Heo 29299666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2930633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2931633273a3STejun Heo 2932b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2933c6fd2807SJeff Garzik rc = -EIO; 29348c3c52a8STejun Heo goto err; 2935c6fd2807SJeff Garzik } 2936c6fd2807SJeff Garzik 29370260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2938422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2939422c9daaSTejun Heo readid_flags); 2940c6fd2807SJeff Garzik if (rc) 29418c3c52a8STejun Heo goto err; 2942c6fd2807SJeff Garzik 29430260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2944c6fd2807SJeff Garzik 2945baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2946baa1e78aSTejun Heo * transfer mode. 2947baa1e78aSTejun Heo */ 2948baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2949baa1e78aSTejun Heo 2950c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2951ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 2952c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2953c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2954c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2955842faa6cSTejun Heo /* Temporarily set dev->class, it will be 2956842faa6cSTejun Heo * permanently set once all configurations are 2957842faa6cSTejun Heo * complete. This is necessary because new 2958842faa6cSTejun Heo * device configuration is done in two 2959842faa6cSTejun Heo * separate loops. 2960842faa6cSTejun Heo */ 2961c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2962c6fd2807SJeff Garzik 2963633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2964633273a3STejun Heo rc = sata_pmp_attach(dev); 2965633273a3STejun Heo else 2966633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2967633273a3STejun Heo readid_flags, dev->id); 2968842faa6cSTejun Heo 2969842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 2970842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 2971842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 2972842faa6cSTejun Heo 29738c3c52a8STejun Heo switch (rc) { 29748c3c52a8STejun Heo case 0: 297599cf610aSTejun Heo /* clear error info accumulated during probe */ 297699cf610aSTejun Heo ata_ering_clear(&dev->ering); 2977f58229f8STejun Heo new_mask |= 1 << dev->devno; 29788c3c52a8STejun Heo break; 29798c3c52a8STejun Heo case -ENOENT: 298055a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 298155a8e2c8STejun Heo * device. No need to reset. Just 2982842faa6cSTejun Heo * thaw and ignore the device. 
298355a8e2c8STejun Heo */ 298455a8e2c8STejun Heo ata_eh_thaw_port(ap); 2985c6fd2807SJeff Garzik break; 29868c3c52a8STejun Heo default: 29878c3c52a8STejun Heo goto err; 29888c3c52a8STejun Heo } 29898c3c52a8STejun Heo } 2990c6fd2807SJeff Garzik } 2991c6fd2807SJeff Garzik 2992c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 299333267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 299433267325STejun Heo if (ap->ops->cable_detect) 2995c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 299633267325STejun Heo ata_force_cbl(ap); 299733267325STejun Heo } 2998c1c4e8d5STejun Heo 29998c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 30008c3c52a8STejun Heo * device detection messages backwards. 30018c3c52a8STejun Heo */ 30021eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 30034f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 30048c3c52a8STejun Heo continue; 30058c3c52a8STejun Heo 3006842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3007842faa6cSTejun Heo 30084f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 30094f7c2874STejun Heo continue; 30104f7c2874STejun Heo 30118c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 30128c3c52a8STejun Heo rc = ata_dev_configure(dev); 30138c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3014842faa6cSTejun Heo if (rc) { 3015842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 30168c3c52a8STejun Heo goto err; 3017842faa6cSTejun Heo } 30188c3c52a8STejun Heo 3019c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3020c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3021c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3022baa1e78aSTejun Heo 302355a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3024baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3025c6fd2807SJeff Garzik } 3026c6fd2807SJeff Garzik 30278c3c52a8STejun Heo return 0; 30288c3c52a8STejun Heo 30298c3c52a8STejun Heo err: 3030c6fd2807SJeff Garzik *r_failed_dev = dev; 30318c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 3032c6fd2807SJeff Garzik return rc; 3033c6fd2807SJeff Garzik } 3034c6fd2807SJeff Garzik 30356f1d1e3aSTejun Heo /** 30366f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 30376f1d1e3aSTejun Heo * @link: link on which timings will be programmed 303898a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 30396f1d1e3aSTejun Heo * 30406f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 30416f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 30426f1d1e3aSTejun Heo * returned in @r_failed_dev. 30436f1d1e3aSTejun Heo * 30446f1d1e3aSTejun Heo * LOCKING: 30456f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
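 *
 * Minimal usage sketch (illustrative only; "handle_fail" is a made-up
 * label standing in for the caller's per-device error handling):
 *
 *	struct ata_device *failed_dev = NULL;
 *	int rc;
 *
 *	if (link->eh_context.i.flags & ATA_EHI_SETMODE) {
 *		rc = ata_set_mode(link, &failed_dev);
 *		if (rc)
 *			goto handle_fail;
 *	}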
30466f1d1e3aSTejun Heo * 30476f1d1e3aSTejun Heo * RETURNS: 30486f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 30496f1d1e3aSTejun Heo */ 30506f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 30516f1d1e3aSTejun Heo { 30526f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 305300115e0fSTejun Heo struct ata_device *dev; 305400115e0fSTejun Heo int rc; 30556f1d1e3aSTejun Heo 305676326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 30571eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 305876326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 305976326ac1STejun Heo struct ata_ering_entry *ent; 306076326ac1STejun Heo 306176326ac1STejun Heo ent = ata_ering_top(&dev->ering); 306276326ac1STejun Heo if (ent) 306376326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 306476326ac1STejun Heo } 306576326ac1STejun Heo } 306676326ac1STejun Heo 30676f1d1e3aSTejun Heo /* has private set_mode? */ 30686f1d1e3aSTejun Heo if (ap->ops->set_mode) 306900115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 307000115e0fSTejun Heo else 307100115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 307200115e0fSTejun Heo 307300115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 30741eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 307500115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 307600115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 307700115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 307800115e0fSTejun Heo 307900115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 308000115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 308100115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 308200115e0fSTejun Heo } 308300115e0fSTejun Heo 308400115e0fSTejun Heo return rc; 30856f1d1e3aSTejun Heo } 30866f1d1e3aSTejun Heo 308711fc33daSTejun Heo /** 308811fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 308911fc33daSTejun Heo * @dev: ATAPI device to clear UA for 309011fc33daSTejun Heo * 309111fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 309211fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 309311fc33daSTejun Heo * function clears UA. 309411fc33daSTejun Heo * 309511fc33daSTejun Heo * LOCKING: 309611fc33daSTejun Heo * EH context (may sleep). 309711fc33daSTejun Heo * 309811fc33daSTejun Heo * RETURNS: 309911fc33daSTejun Heo * 0 on success, -errno on failure. 
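 *
 * Note that a UNIT ATTENTION which is still being reported after all
 * ATA_EH_UA_TRIES probes is only warned about and the function still
 * returns 0; -EIO is returned only when TEST_UNIT_READY fails with
 * something other than AC_ERR_DEV or when the follow-up sense request
 * itself fails.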
310011fc33daSTejun Heo */ 310111fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 310211fc33daSTejun Heo { 310311fc33daSTejun Heo int i; 310411fc33daSTejun Heo 310511fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3106b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 310711fc33daSTejun Heo u8 sense_key = 0; 310811fc33daSTejun Heo unsigned int err_mask; 310911fc33daSTejun Heo 311011fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 311111fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3112a9a79dfeSJoe Perches ata_dev_warn(dev, 3113a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3114a9a79dfeSJoe Perches err_mask); 311511fc33daSTejun Heo return -EIO; 311611fc33daSTejun Heo } 311711fc33daSTejun Heo 311811fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 311911fc33daSTejun Heo return 0; 312011fc33daSTejun Heo 312111fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 312211fc33daSTejun Heo if (err_mask) { 3123a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 312411fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 312511fc33daSTejun Heo return -EIO; 312611fc33daSTejun Heo } 312711fc33daSTejun Heo } 312811fc33daSTejun Heo 3129a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3130a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 313111fc33daSTejun Heo 313211fc33daSTejun Heo return 0; 313311fc33daSTejun Heo } 313411fc33daSTejun Heo 31356013efd8STejun Heo /** 31366013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 31376013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 31386013efd8STejun Heo * 31396013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 31406013efd8STejun Heo * immediately as it means that @dev failed to remap and already 31416013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't make 31426013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 31436013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 31446013efd8STejun Heo * to be retried. 31456013efd8STejun Heo * 31466013efd8STejun Heo * This function determines whether FLUSH failure retry is 31476013efd8STejun Heo * necessary and performs it if so. 31486013efd8STejun Heo * 31496013efd8STejun Heo * RETURNS: 31506013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 31516013efd8STejun Heo */ 31526013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 31536013efd8STejun Heo { 31546013efd8STejun Heo struct ata_link *link = dev->link; 31556013efd8STejun Heo struct ata_port *ap = link->ap; 31566013efd8STejun Heo struct ata_queued_cmd *qc; 31576013efd8STejun Heo struct ata_taskfile tf; 31586013efd8STejun Heo unsigned int err_mask; 31596013efd8STejun Heo int rc = 0; 31606013efd8STejun Heo 31616013efd8STejun Heo /* did flush fail for this device? 
*/ 31626013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 31636013efd8STejun Heo return 0; 31646013efd8STejun Heo 31656013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 31666013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 31676013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 31686013efd8STejun Heo return 0; 31696013efd8STejun Heo 31706013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 31716013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 31726013efd8STejun Heo return 0; 31736013efd8STejun Heo 31746013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 31756013efd8STejun Heo ata_tf_init(dev, &tf); 31766013efd8STejun Heo 31776013efd8STejun Heo tf.command = qc->tf.command; 31786013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 31796013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 31806013efd8STejun Heo 3181a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 31826013efd8STejun Heo tf.command, qc->err_mask); 31836013efd8STejun Heo 31846013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 31856013efd8STejun Heo if (!err_mask) { 31866013efd8STejun Heo /* 31876013efd8STejun Heo * FLUSH is complete but there's no way to 31886013efd8STejun Heo * successfully complete a failed command from EH. 31896013efd8STejun Heo * Making sure retry is allowed at least once and 31906013efd8STejun Heo * retrying it should do the trick - whatever was in 31916013efd8STejun Heo * the cache is already on the platter and this won't 31926013efd8STejun Heo * cause infinite loop. 31936013efd8STejun Heo */ 31946013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 31956013efd8STejun Heo } else { 3196a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 31976013efd8STejun Heo err_mask); 31986013efd8STejun Heo rc = -EIO; 31996013efd8STejun Heo 32006013efd8STejun Heo /* if device failed it, report it to upper layers */ 32016013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 32026013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 32036013efd8STejun Heo qc->result_tf = tf; 32046013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 32056013efd8STejun Heo rc = 0; 32066013efd8STejun Heo } 32076013efd8STejun Heo } 32086013efd8STejun Heo return rc; 32096013efd8STejun Heo } 32106013efd8STejun Heo 32116b7ae954STejun Heo /** 32126b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 32136b7ae954STejun Heo * @link: link to configure power management 32146b7ae954STejun Heo * @policy: the link power management policy 32156b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 32166b7ae954STejun Heo * 32176b7ae954STejun Heo * Enable SATA Interface power management. This will enable 3218f4ac6476SHans de Goede * Device Interface Power Management (DIPM) for min_power and 3219f4ac6476SHans de Goede * medium_power_with_dipm policies, and then call driver specific 3220f4ac6476SHans de Goede * callbacks for enabling Host Initiated Power management. 32216b7ae954STejun Heo * 32226b7ae954STejun Heo * LOCKING: 32236b7ae954STejun Heo * EH context. 32246b7ae954STejun Heo * 32256b7ae954STejun Heo * RETURNS: 32266b7ae954STejun Heo * 0 on success, -errno on failure. 
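 *
 * Rough ordering sketch (illustrative, the code below is
 * authoritative):
 *
 *	1. if policy < ATA_LPM_MED_POWER_WITH_DIPM, disable DIPM on
 *	   every enabled device first;
 *	2. program the link side via ap->ops->set_lpm() for host links,
 *	   or sata_pmp_set_lpm() for links behind a port multiplier;
 *	3. if policy >= ATA_LPM_MED_POWER_WITH_DIPM, enable DIPM on
 *	   capable devices afterwards.
 *
 * This way the host side is always willing to accept DIPM requests by
 * the time a device is allowed to issue them.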
32276b7ae954STejun Heo */ 32286b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 32296b7ae954STejun Heo struct ata_device **r_failed_dev) 32306b7ae954STejun Heo { 32316c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 32326b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 32336b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3234e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 32355f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 32366b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 32376b7ae954STejun Heo unsigned int err_mask; 32386b7ae954STejun Heo int rc; 32396b7ae954STejun Heo 32406b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 32414c9029e7SBartlomiej Zolnierkiewicz if (!IS_ENABLED(CONFIG_SATA_HOST) || 32424c9029e7SBartlomiej Zolnierkiewicz (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 32436b7ae954STejun Heo return 0; 32446b7ae954STejun Heo 32456b7ae954STejun Heo /* 32466b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 32476b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 32486b7ae954STejun Heo * device and link configurations such that the host always 32496b7ae954STejun Heo * allows DIPM requests. 32506b7ae954STejun Heo */ 32516b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 32526b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3253ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 32546b7ae954STejun Heo 32556b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 32566b7ae954STejun Heo if (!link_dev) 32576b7ae954STejun Heo link_dev = dev; 32586b7ae954STejun Heo 32596b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 32606b7ae954STejun Heo lpm_dev = dev; 32616b7ae954STejun Heo 32626b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 32636b7ae954STejun Heo if (!hipm) 32646b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 32656b7ae954STejun Heo 32666b7ae954STejun Heo /* disable DIPM before changing link config */ 3267f4ac6476SHans de Goede if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) { 32686b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 32696b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 32706b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3271a9a79dfeSJoe Perches ata_dev_warn(dev, 32726b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 32736b7ae954STejun Heo err_mask); 32746b7ae954STejun Heo rc = -EIO; 32756b7ae954STejun Heo goto fail; 32766b7ae954STejun Heo } 32776b7ae954STejun Heo } 32786b7ae954STejun Heo } 32796b7ae954STejun Heo 32806c8ea89cSTejun Heo if (ap) { 32816b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 32826b7ae954STejun Heo if (!rc && ap->slave_link) 32836b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 32846c8ea89cSTejun Heo } else 32856c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 32866b7ae954STejun Heo 32876b7ae954STejun Heo /* 32886b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 32896b7ae954STejun Heo * device on the link. 32906b7ae954STejun Heo */ 32916b7ae954STejun Heo if (rc) { 32926b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 32936b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 32946b7ae954STejun Heo return 0; 32956b7ae954STejun Heo } 32966b7ae954STejun Heo dev = lpm_dev ? 
lpm_dev : link_dev; 32976b7ae954STejun Heo goto fail; 32986b7ae954STejun Heo } 32996b7ae954STejun Heo 3300e5005b15STejun Heo /* 3301e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3302e5005b15STejun Heo * with the new policy set. 3303e5005b15STejun Heo */ 3304e5005b15STejun Heo link->lpm_policy = policy; 3305e5005b15STejun Heo if (ap && ap->slave_link) 3306e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3307e5005b15STejun Heo 33086b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 33096b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3310f4ac6476SHans de Goede if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm && 3311ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 33126b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33136b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 33146b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3315a9a79dfeSJoe Perches ata_dev_warn(dev, 33166b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 33176b7ae954STejun Heo err_mask); 33186b7ae954STejun Heo rc = -EIO; 33196b7ae954STejun Heo goto fail; 33206b7ae954STejun Heo } 33216b7ae954STejun Heo } 33226b7ae954STejun Heo } 33236b7ae954STejun Heo 332409c5b480SGabriele Mazzotta link->last_lpm_change = jiffies; 332509c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED; 332609c5b480SGabriele Mazzotta 33276b7ae954STejun Heo return 0; 33286b7ae954STejun Heo 33296b7ae954STejun Heo fail: 3330e5005b15STejun Heo /* restore the old policy */ 3331e5005b15STejun Heo link->lpm_policy = old_policy; 3332e5005b15STejun Heo if (ap && ap->slave_link) 3333e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3334e5005b15STejun Heo 33356b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 33366b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3337a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 33386b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33396b7ae954STejun Heo } 33406b7ae954STejun Heo if (r_failed_dev) 33416b7ae954STejun Heo *r_failed_dev = dev; 33426b7ae954STejun Heo return rc; 33436b7ae954STejun Heo } 33446b7ae954STejun Heo 33458a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3346c6fd2807SJeff Garzik { 3347f58229f8STejun Heo struct ata_device *dev; 3348f58229f8STejun Heo int cnt = 0; 3349c6fd2807SJeff Garzik 33501eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3351c6fd2807SJeff Garzik cnt++; 3352c6fd2807SJeff Garzik return cnt; 3353c6fd2807SJeff Garzik } 3354c6fd2807SJeff Garzik 33550260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3356c6fd2807SJeff Garzik { 3357f58229f8STejun Heo struct ata_device *dev; 3358f58229f8STejun Heo int cnt = 0; 3359c6fd2807SJeff Garzik 33601eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3361f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3362c6fd2807SJeff Garzik cnt++; 3363c6fd2807SJeff Garzik return cnt; 3364c6fd2807SJeff Garzik } 3365c6fd2807SJeff Garzik 33660260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3367c6fd2807SJeff Garzik { 3368672b2d65STejun Heo struct ata_port *ap = link->ap; 33690260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3370f58229f8STejun Heo struct ata_device *dev; 3371c6fd2807SJeff Garzik 3372f9df58cbSTejun Heo /* skip disabled links */ 3373f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3374f9df58cbSTejun Heo return 1; 3375f9df58cbSTejun Heo 3376e2f3d75fSTejun Heo /* 
skip if explicitly requested */ 3377e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3378e2f3d75fSTejun Heo return 1; 3379e2f3d75fSTejun Heo 3380672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3381672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3382672b2d65STejun Heo return 0; 3383672b2d65STejun Heo 3384672b2d65STejun Heo /* reset at least once if reset is requested */ 3385672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3386672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3387c6fd2807SJeff Garzik return 0; 3388c6fd2807SJeff Garzik 3389c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 33901eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3391c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3392c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3393c6fd2807SJeff Garzik return 0; 3394c6fd2807SJeff Garzik } 3395c6fd2807SJeff Garzik 3396c6fd2807SJeff Garzik return 1; 3397c6fd2807SJeff Garzik } 3398c6fd2807SJeff Garzik 3399c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3400c2c7a89cSTejun Heo { 3401c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3402c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3403c2c7a89cSTejun Heo int *trials = void_arg; 3404c2c7a89cSTejun Heo 34056868225eSLin Ming if ((ent->eflags & ATA_EFLAG_OLD_ER) || 34066868225eSLin Ming (ent->timestamp < now - min(now, interval))) 3407c2c7a89cSTejun Heo return -1; 3408c2c7a89cSTejun Heo 3409c2c7a89cSTejun Heo (*trials)++; 3410c2c7a89cSTejun Heo return 0; 3411c2c7a89cSTejun Heo } 3412c2c7a89cSTejun Heo 341302c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 341402c05a27STejun Heo { 341502c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3416c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3417c2c7a89cSTejun Heo int trials = 0; 341802c05a27STejun Heo 341902c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 342002c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 342102c05a27STejun Heo return 0; 342202c05a27STejun Heo 342302c05a27STejun Heo ata_eh_detach_dev(dev); 342402c05a27STejun Heo ata_dev_init(dev); 342502c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3426cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 342700115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 342800115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 342902c05a27STejun Heo 34306b7ae954STejun Heo /* the link maybe in a deep sleep, wake it up */ 34316c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 34326c8ea89cSTejun Heo if (ata_is_host_link(link)) 34336c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 34346c8ea89cSTejun Heo ATA_LPM_EMPTY); 34356c8ea89cSTejun Heo else 34366c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 34376c8ea89cSTejun Heo ATA_LPM_EMPTY); 34386c8ea89cSTejun Heo } 34396b7ae954STejun Heo 3440c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3441c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3442c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3443c2c7a89cSTejun Heo * there are consecutive failed probes. 
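 *
 * In concrete terms, with the defaults defined near the top of this
 * file (two trials per sixty second interval): two failed probes less
 * than a minute apart are tolerated, while a third failed probe
 * inside the same window trips the check below and the link speed is
 * capped via sata_down_spd_limit(link, 1).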
3444c2c7a89cSTejun Heo * 3445c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3446c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3447c2c7a89cSTejun Heo * forced to 1.5Gbps. 3448c2c7a89cSTejun Heo * 3449c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3450c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3451c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3452c2c7a89cSTejun Heo */ 3453c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3454c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3455c2c7a89cSTejun Heo 3456c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3457c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3458c2c7a89cSTejun Heo 345902c05a27STejun Heo return 1; 346002c05a27STejun Heo } 346102c05a27STejun Heo 34629b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3463fee7ca72STejun Heo { 34649af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3465fee7ca72STejun Heo 3466cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3467cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3468cf9a590aSTejun Heo */ 3469cf9a590aSTejun Heo if (err != -EAGAIN) 3470fee7ca72STejun Heo ehc->tries[dev->devno]--; 3471fee7ca72STejun Heo 3472fee7ca72STejun Heo switch (err) { 3473fee7ca72STejun Heo case -ENODEV: 3474fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3475fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3476df561f66SGustavo A. R. Silva fallthrough; 3477fee7ca72STejun Heo case -EINVAL: 3478fee7ca72STejun Heo /* give it just one more chance */ 3479fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3480df561f66SGustavo A. R. Silva fallthrough; 3481fee7ca72STejun Heo case -EIO: 3482d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3483fee7ca72STejun Heo /* This is the last chance, better to slow 3484fee7ca72STejun Heo * down than lose it. 
3485fee7ca72STejun Heo */ 3486a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3487d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3488fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3489fee7ca72STejun Heo } 3490fee7ca72STejun Heo } 3491fee7ca72STejun Heo 3492fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3493fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3494fee7ca72STejun Heo ata_dev_disable(dev); 3495fee7ca72STejun Heo 3496fee7ca72STejun Heo /* detach if offline */ 3497b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3498fee7ca72STejun Heo ata_eh_detach_dev(dev); 3499fee7ca72STejun Heo 350002c05a27STejun Heo /* schedule probe if necessary */ 350187fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3502fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 350387fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 350487fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 350587fbc5a0STejun Heo } 35069b1e2658STejun Heo 35079b1e2658STejun Heo return 1; 3508fee7ca72STejun Heo } else { 3509cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 35109b1e2658STejun Heo return 0; 3511fee7ca72STejun Heo } 3512fee7ca72STejun Heo } 3513fee7ca72STejun Heo 3514c6fd2807SJeff Garzik /** 3515c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3516c6fd2807SJeff Garzik * @ap: host port to recover 3517c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3518c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3519c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3520c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 35219b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3522c6fd2807SJeff Garzik * 3523c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3524c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 35259b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 35269b1e2658STejun Heo * link's eh_context. This function executes all the operations 35279b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3528c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3529c6fd2807SJeff Garzik * 3530c6fd2807SJeff Garzik * LOCKING: 3531c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3532c6fd2807SJeff Garzik * 3533c6fd2807SJeff Garzik * RETURNS: 3534c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3535c6fd2807SJeff Garzik */ 3536fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3537c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 35389b1e2658STejun Heo ata_postreset_fn_t postreset, 35399b1e2658STejun Heo struct ata_link **r_failed_link) 3540c6fd2807SJeff Garzik { 35419b1e2658STejun Heo struct ata_link *link; 3542c6fd2807SJeff Garzik struct ata_device *dev; 35436b7ae954STejun Heo int rc, nr_fails; 354445fabbb7SElias Oltmanns unsigned long flags, deadline; 3545c6fd2807SJeff Garzik 3546c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 3547c6fd2807SJeff Garzik 3548c6fd2807SJeff Garzik /* prep for recovery */ 35491eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35509b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35519b1e2658STejun Heo 3552f9df58cbSTejun Heo /* re-enable link? 
*/ 3553f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3554f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3555f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3556f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3557f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3558f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3559f9df58cbSTejun Heo } 3560f9df58cbSTejun Heo 35611eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3562fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3563fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3564fd995f70STejun Heo else 3565c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3566c6fd2807SJeff Garzik 356779a55b72STejun Heo /* collect port action mask recorded in dev actions */ 35689b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 35699b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3570f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 357179a55b72STejun Heo 3572c6fd2807SJeff Garzik /* process hotplug request */ 3573c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3574c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3575c6fd2807SJeff Garzik 357602c05a27STejun Heo /* schedule probe if necessary */ 357702c05a27STejun Heo if (!ata_dev_enabled(dev)) 357802c05a27STejun Heo ata_eh_schedule_probe(dev); 3579c6fd2807SJeff Garzik } 35809b1e2658STejun Heo } 3581c6fd2807SJeff Garzik 3582c6fd2807SJeff Garzik retry: 3583c6fd2807SJeff Garzik rc = 0; 3584c6fd2807SJeff Garzik 3585c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3586c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3587c6fd2807SJeff Garzik goto out; 3588c6fd2807SJeff Garzik 35899b1e2658STejun Heo /* prep for EH */ 35901eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35919b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35929b1e2658STejun Heo 3593c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 35940260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3595c6fd2807SJeff Garzik ehc->i.action = 0; 3596c6fd2807SJeff Garzik 35971eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3598f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 35999b1e2658STejun Heo } 3600c6fd2807SJeff Garzik 3601c6fd2807SJeff Garzik /* reset */ 36021eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36039b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36049b1e2658STejun Heo 3605cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 36069b1e2658STejun Heo continue; 36079b1e2658STejun Heo 36089b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3609dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3610c6fd2807SJeff Garzik if (rc) { 3611a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3612c6fd2807SJeff Garzik goto out; 3613c6fd2807SJeff Garzik } 36149b1e2658STejun Heo } 3615c6fd2807SJeff Garzik 361645fabbb7SElias Oltmanns do { 361745fabbb7SElias Oltmanns unsigned long now; 361845fabbb7SElias Oltmanns 361945fabbb7SElias Oltmanns /* 362045fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 362145fabbb7SElias Oltmanns * ap->park_req_pending 362245fabbb7SElias Oltmanns */ 362345fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 362445fabbb7SElias Oltmanns 362545fabbb7SElias Oltmanns deadline = jiffies; 36261eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36271eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 362845fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 362945fabbb7SElias Oltmanns unsigned long tmp; 363045fabbb7SElias Oltmanns 36319162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 36329162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 363345fabbb7SElias Oltmanns continue; 363445fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 363545fabbb7SElias Oltmanns ATA_EH_PARK)) 363645fabbb7SElias Oltmanns continue; 363745fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 363845fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 363945fabbb7SElias Oltmanns deadline = tmp; 364045fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 364145fabbb7SElias Oltmanns continue; 364245fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 364345fabbb7SElias Oltmanns continue; 364445fabbb7SElias Oltmanns 364545fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 364645fabbb7SElias Oltmanns } 364745fabbb7SElias Oltmanns } 364845fabbb7SElias Oltmanns 364945fabbb7SElias Oltmanns now = jiffies; 365045fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 365145fabbb7SElias Oltmanns break; 365245fabbb7SElias Oltmanns 3653c0c362b6STejun Heo ata_eh_release(ap); 365445fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 365545fabbb7SElias Oltmanns deadline - now); 3656c0c362b6STejun Heo ata_eh_acquire(ap); 365745fabbb7SElias Oltmanns } while (deadline); 36581eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36591eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 366045fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 366145fabbb7SElias Oltmanns (1 << dev->devno))) 366245fabbb7SElias Oltmanns continue; 366345fabbb7SElias Oltmanns 366445fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 366545fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 366645fabbb7SElias Oltmanns } 366745fabbb7SElias Oltmanns } 366845fabbb7SElias Oltmanns 36699b1e2658STejun Heo /* the rest */ 36706b7ae954STejun Heo nr_fails = 0; 36716b7ae954STejun 
Heo ata_for_each_link(link, ap, PMP_FIRST) { 36729b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36739b1e2658STejun Heo 36746b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 36756b7ae954STejun Heo goto config_lpm; 36766b7ae954STejun Heo 3677c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 36780260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3679c6fd2807SJeff Garzik if (rc) 36806b7ae954STejun Heo goto rest_fail; 3681c6fd2807SJeff Garzik 3682633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3683633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3684633273a3STejun Heo ehc->i.action = 0; 3685633273a3STejun Heo return 0; 3686633273a3STejun Heo } 3687633273a3STejun Heo 3688baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3689baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 36900260731fSTejun Heo rc = ata_set_mode(link, &dev); 36914ae72a1eSTejun Heo if (rc) 36926b7ae954STejun Heo goto rest_fail; 3693baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3694c6fd2807SJeff Garzik } 3695c6fd2807SJeff Garzik 369611fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 369711fc33daSTejun Heo * disrupting the current users of the device. 369811fc33daSTejun Heo */ 369911fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 37001eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 370111fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 370211fc33daSTejun Heo continue; 370311fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 370411fc33daSTejun Heo if (rc) 37056b7ae954STejun Heo goto rest_fail; 370621334205SAaron Lu if (zpodd_dev_enabled(dev)) 370721334205SAaron Lu zpodd_post_poweron(dev); 370811fc33daSTejun Heo } 370911fc33daSTejun Heo } 371011fc33daSTejun Heo 37116013efd8STejun Heo /* retry flush if necessary */ 37126013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 37139162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 37149162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 37156013efd8STejun Heo continue; 37166013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 37176013efd8STejun Heo if (rc) 37186b7ae954STejun Heo goto rest_fail; 37196013efd8STejun Heo } 37206013efd8STejun Heo 37216b7ae954STejun Heo config_lpm: 372211fc33daSTejun Heo /* configure link power saving */ 37236b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 37246b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 37256b7ae954STejun Heo if (rc) 37266b7ae954STejun Heo goto rest_fail; 37276b7ae954STejun Heo } 3728ca77329fSKristen Carlson Accardi 37299b1e2658STejun Heo /* this link is okay now */ 37309b1e2658STejun Heo ehc->i.flags = 0; 37319b1e2658STejun Heo continue; 3732c6fd2807SJeff Garzik 37336b7ae954STejun Heo rest_fail: 37346b7ae954STejun Heo nr_fails++; 37356b7ae954STejun Heo if (dev) 37360a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3737c6fd2807SJeff Garzik 3738b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3739b06ce3e5STejun Heo /* PMP reset requires working host port. 3740b06ce3e5STejun Heo * Can't retry if it's frozen. 
3741b06ce3e5STejun Heo */ 3742071f44b1STejun Heo if (sata_pmp_attached(ap)) 3743b06ce3e5STejun Heo goto out; 37449b1e2658STejun Heo break; 37459b1e2658STejun Heo } 3746b06ce3e5STejun Heo } 37479b1e2658STejun Heo 37486b7ae954STejun Heo if (nr_fails) 3749c6fd2807SJeff Garzik goto retry; 3750c6fd2807SJeff Garzik 3751c6fd2807SJeff Garzik out: 37529b1e2658STejun Heo if (rc && r_failed_link) 37539b1e2658STejun Heo *r_failed_link = link; 3754c6fd2807SJeff Garzik 3755c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 3756c6fd2807SJeff Garzik return rc; 3757c6fd2807SJeff Garzik } 3758c6fd2807SJeff Garzik 3759c6fd2807SJeff Garzik /** 3760c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3761c6fd2807SJeff Garzik * @ap: host port to finish EH for 3762c6fd2807SJeff Garzik * 3763c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3764c6fd2807SJeff Garzik * failed qcs. 3765c6fd2807SJeff Garzik * 3766c6fd2807SJeff Garzik * LOCKING: 3767c6fd2807SJeff Garzik * None. 3768c6fd2807SJeff Garzik */ 3769fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3770c6fd2807SJeff Garzik { 3771258c4e5cSJens Axboe struct ata_queued_cmd *qc; 3772c6fd2807SJeff Garzik int tag; 3773c6fd2807SJeff Garzik 3774c6fd2807SJeff Garzik /* retry or finish qcs */ 3775258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 3776c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3777c6fd2807SJeff Garzik continue; 3778c6fd2807SJeff Garzik 3779c6fd2807SJeff Garzik if (qc->err_mask) { 3780c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3781c6fd2807SJeff Garzik * generate sense data in this function, 3782c6fd2807SJeff Garzik * considering both err_mask and tf. 3783c6fd2807SJeff Garzik */ 378403faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3785c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 378603faab78STejun Heo else 378703faab78STejun Heo ata_eh_qc_complete(qc); 3788c6fd2807SJeff Garzik } else { 3789c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3790c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3791c6fd2807SJeff Garzik } else { 3792c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3793c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3794c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3795c6fd2807SJeff Garzik } 3796c6fd2807SJeff Garzik } 3797c6fd2807SJeff Garzik } 3798da917d69STejun Heo 3799da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3800da917d69STejun Heo WARN_ON(ap->nr_active_links); 3801da917d69STejun Heo ap->nr_active_links = 0; 3802c6fd2807SJeff Garzik } 3803c6fd2807SJeff Garzik 3804c6fd2807SJeff Garzik /** 3805c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3806c6fd2807SJeff Garzik * @ap: host port to handle error for 3807a1efdabaSTejun Heo * 3808c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3809c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3810c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3811c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3812c6fd2807SJeff Garzik * 3813c6fd2807SJeff Garzik * Perform standard error handling sequence. 3814c6fd2807SJeff Garzik * 3815c6fd2807SJeff Garzik * LOCKING: 3816c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
3817c6fd2807SJeff Garzik */ 3818c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3819c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3820c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3821c6fd2807SJeff Garzik { 38229b1e2658STejun Heo struct ata_device *dev; 38239b1e2658STejun Heo int rc; 38249b1e2658STejun Heo 38259b1e2658STejun Heo ata_eh_autopsy(ap); 38269b1e2658STejun Heo ata_eh_report(ap); 38279b1e2658STejun Heo 38289b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 38299b1e2658STejun Heo NULL); 38309b1e2658STejun Heo if (rc) { 38311eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 38329b1e2658STejun Heo ata_dev_disable(dev); 38339b1e2658STejun Heo } 38349b1e2658STejun Heo 3835c6fd2807SJeff Garzik ata_eh_finish(ap); 3836c6fd2807SJeff Garzik } 3837c6fd2807SJeff Garzik 3838a1efdabaSTejun Heo /** 3839a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3840a1efdabaSTejun Heo * @ap: host port to handle error for 3841a1efdabaSTejun Heo * 3842a1efdabaSTejun Heo * Standard error handler 3843a1efdabaSTejun Heo * 3844a1efdabaSTejun Heo * LOCKING: 3845a1efdabaSTejun Heo * Kernel thread context (may sleep). 3846a1efdabaSTejun Heo */ 3847a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3848a1efdabaSTejun Heo { 3849a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3850a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3851a1efdabaSTejun Heo 385257c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 3853fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3854a1efdabaSTejun Heo hardreset = NULL; 3855a1efdabaSTejun Heo 3856a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3857a1efdabaSTejun Heo } 3858a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_std_error_handler); 3859a1efdabaSTejun Heo 38606ffa01d8STejun Heo #ifdef CONFIG_PM 3861c6fd2807SJeff Garzik /** 3862c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 3863c6fd2807SJeff Garzik * @ap: port to suspend 3864c6fd2807SJeff Garzik * 3865c6fd2807SJeff Garzik * Suspend @ap. 3866c6fd2807SJeff Garzik * 3867c6fd2807SJeff Garzik * LOCKING: 3868c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3869c6fd2807SJeff Garzik */ 3870c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 3871c6fd2807SJeff Garzik { 3872c6fd2807SJeff Garzik unsigned long flags; 3873c6fd2807SJeff Garzik int rc = 0; 38743dc67440SAaron Lu struct ata_device *dev; 3875c6fd2807SJeff Garzik 3876c6fd2807SJeff Garzik /* are we suspending? */ 3877c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3878c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3879a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) { 3880c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3881c6fd2807SJeff Garzik return; 3882c6fd2807SJeff Garzik } 3883c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3884c6fd2807SJeff Garzik 3885c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3886c6fd2807SJeff Garzik 38873dc67440SAaron Lu /* 38883dc67440SAaron Lu * If we have a ZPODD attached, check its zero 38893dc67440SAaron Lu * power ready status before the port is frozen. 3890a7ff60dbSAaron Lu * Only needed for runtime suspend. 
38913dc67440SAaron Lu 	 */
3892a7ff60dbSAaron Lu 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
38933dc67440SAaron Lu 		ata_for_each_dev(dev, &ap->link, ENABLED) {
38943dc67440SAaron Lu 			if (zpodd_dev_enabled(dev))
38953dc67440SAaron Lu 				zpodd_on_suspend(dev);
38963dc67440SAaron Lu 		}
3897a7ff60dbSAaron Lu 	}
38983dc67440SAaron Lu 
389964578a3dSTejun Heo 	/* tell ACPI we're suspending */
390064578a3dSTejun Heo 	rc = ata_acpi_on_suspend(ap);
390164578a3dSTejun Heo 	if (rc)
390264578a3dSTejun Heo 		goto out;
390364578a3dSTejun Heo 
3904c6fd2807SJeff Garzik 	/* suspend */
3905c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
3906c6fd2807SJeff Garzik 
3907c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
3908c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3909c6fd2807SJeff Garzik 
3910a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
391164578a3dSTejun Heo  out:
3912bc6e7c4bSDan Williams 	/* update the flags */
3913c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3914c6fd2807SJeff Garzik 
3915c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3916c6fd2807SJeff Garzik 	if (rc == 0)
3917c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
391864578a3dSTejun Heo 	else if (ap->pflags & ATA_PFLAG_FROZEN)
3919c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
3920c6fd2807SJeff Garzik 
3921c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3922c6fd2807SJeff Garzik 
3923c6fd2807SJeff Garzik 	return;
3924c6fd2807SJeff Garzik }
3925c6fd2807SJeff Garzik 
3926c6fd2807SJeff Garzik /**
3927c6fd2807SJeff Garzik  * ata_eh_handle_port_resume - perform port resume operation
3928c6fd2807SJeff Garzik  * @ap: port to resume
3929c6fd2807SJeff Garzik  *
3930c6fd2807SJeff Garzik  * Resume @ap.
3931c6fd2807SJeff Garzik  *
3932c6fd2807SJeff Garzik  * LOCKING:
3933c6fd2807SJeff Garzik  * Kernel thread context (may sleep).
3934c6fd2807SJeff Garzik  */
3935c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
3936c6fd2807SJeff Garzik {
39376f9c1ea2STejun Heo 	struct ata_link *link;
39386f9c1ea2STejun Heo 	struct ata_device *dev;
3939c6fd2807SJeff Garzik 	unsigned long flags;
3940c6fd2807SJeff Garzik 
3941c6fd2807SJeff Garzik 	/* are we resuming? */
3942c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
3943c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3944a7ff60dbSAaron Lu 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
3945c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
3946c6fd2807SJeff Garzik 		return;
3947c6fd2807SJeff Garzik 	}
3948c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
3949c6fd2807SJeff Garzik 
39509666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3951c6fd2807SJeff Garzik 
39526f9c1ea2STejun Heo 	/*
39536f9c1ea2STejun Heo 	 * Error timestamps are in jiffies, which doesn't advance while
39546f9c1ea2STejun Heo 	 * suspended, and PHY events during resume aren't too uncommon.
39556f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
39566f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
39576f9c1ea2STejun Heo 	 * Clear error history.
39586f9c1ea2STejun Heo */ 39596f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 39606f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 39616f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 39626f9c1ea2STejun Heo 3963a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 3964bd3adca5SShaohua Li 3965c6fd2807SJeff Garzik if (ap->ops->port_resume) 3966ae867937SKefeng Wang ap->ops->port_resume(ap); 3967c6fd2807SJeff Garzik 39686746544cSTejun Heo /* tell ACPI that we're resuming */ 39696746544cSTejun Heo ata_acpi_on_resume(ap); 39706746544cSTejun Heo 3971bc6e7c4bSDan Williams /* update the flags */ 3972c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3973c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3974c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3975c6fd2807SJeff Garzik } 39766ffa01d8STejun Heo #endif /* CONFIG_PM */ 3977
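
A minimal sketch, for orientation only, of how a low-level driver reaches the standard EH path above: libata calls ap->ops->error_handler() from its SCSI EH thread, and drivers with no special requirements normally end up in ata_std_error_handler(). Only the libata identifiers below are real; foo_port_ops is hypothetical and this snippet is illustrative, not code from libata-eh.c.

/*
 * Illustrative sketch, not part of libata-eh.c: a SATA low-level driver
 * with no special EH needs.  In current libata, sata_port_ops already
 * inherits ata_std_error_handler() via ata_base_port_ops, so naming it
 * explicitly is redundant and done here only for clarity.  "foo" is a
 * hypothetical driver name.
 */
#include <linux/libata.h>

static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
	.error_handler	= ata_std_error_handler,
};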