// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-eh.c - libata error handling
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>
#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned int ata_eh_identify_timeouts[] = {
	5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	UINT_MAX,
};

static const unsigned int ata_eh_revalidate_timeouts[] = {
	15000,	/* Some drives are slow to read log pages when waking-up */
	15000,	/* combined time till here is enough even for media access */
	UINT_MAX,
};

static const unsigned int ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	UINT_MAX,
};

static const unsigned int ata_eh_other_timeouts[] = {
	5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	UINT_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned int	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
	  .timeouts = ata_eh_revalidate_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

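/*
 * Illustrative sketch (not part of the driver, not compiled): how the
 * escalation described above plays out for the IDENTIFY class.  The first
 * internal ATA_CMD_ID_ATA gets 5000ms; each call to
 * ata_internal_cmd_timed_out() advances ehc->cmd_timeout_idx for that
 * class, so the retries get 10000ms and then 30000ms:
 *
 *	timeout = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);  // 5000
 *	// ... the command times out ...
 *	ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *	timeout = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);  // 10000
 */
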
static void __ata_port_freeze(struct ata_port *ap);
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
				 const char *fmt, va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_port_desc);

#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only the name and the offset address are appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */

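/*
 * Illustrative sketch (not part of the driver, not compiled): a low-level
 * driver typically builds the port description while setting up its
 * ata_host, before host registration prints it.  The BAR index and offset
 * below are hypothetical:
 *
 *	ata_port_desc(ap, "irq %d", pdev->irq);
 *	ata_port_pbar_desc(ap, MY_MMIO_BAR, -1, "mmio");
 *	ata_port_pbar_desc(ap, MY_MMIO_BAR, my_port_offset, "port");
 */
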
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

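/*
 * Illustrative sketch (not part of the driver, not compiled):
 * ata_ering_map() visits entries from the most recent one backwards and
 * stops at the first empty slot or when the callback returns non-zero.
 * A hypothetical callback counting recent I/O errors could look like:
 *
 *	static int my_count_io_errs(struct ata_ering_entry *ent, void *arg)
 *	{
 *		int *nr = arg;
 *
 *		if (!(ent->eflags & ATA_EFLAG_OLD_ER) &&
 *		    (ent->eflags & ATA_EFLAG_IS_IO))
 *			(*nr)++;
 *		return 0;
 *	}
 *
 *	ata_ering_map(&dev->ering, my_count_io_errs, &nr);
 */
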
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/* If we only raced normal completion and there is nothing to
	 * recover (nr_timedout == 0), port recovery below is still run
	 * to finish EH bookkeeping.
	 */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));
}

/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * Process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		/* This must occur under the ap->lock as we don't want
		 * a polled recovery to race the real interrupt handler.
		 *
		 * The lost_interrupt handler checks for any completed but
		 * non-notified command and completes much like an IRQ handler.
		 *
		 * We then fall into the error recovery code which will treat
		 * this as if normal completion won the race.
		 */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
			struct ata_queued_cmd *qc;

			ata_qc_for_each_raw(ap, qc, i) {
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting ATA_QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);

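/*
 * Illustrative sketch (not part of the driver, not compiled): a host that
 * does not go through ata_scsi_error(), e.g. a libsas-style strategy
 * handler, can drive the two halves itself: first sort the failed commands
 * for the port, then run port recovery.  The splice mirrors what
 * ata_scsi_error() does above:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *
 *	ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);
 *	ata_scsi_port_error_handler(shost, ap);
 */
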
/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;

	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* acquire EH ownership */
		ata_eh_acquire(ap);
 repeat:
		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_err(ap,
				     "EH pending after %d tries, giving up\n",
				     ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* end eh (clear host_eh_scheduled) while holding
		 * ap->lock such that if exception occurs after this
		 * point but before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		ap->ops->end_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);
		ata_eh_release(ap);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		 !(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag;
	unsigned int nr = 0;

	/* count only non-internal commands */
	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			nr++;
	}

	return nr;
}

void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
	unsigned long flags;
	unsigned int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		struct ata_queued_cmd *qc;
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		ata_qc_for_each(ap, qc, tag) {
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	unsigned int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

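/*
 * Illustrative sketch (not part of the driver, not compiled): a
 * hypothetical LLD that notices an asynchronous error under ap->lock can
 * record a description and an action and then schedule EH without any
 * failed qc; both scheduling paths below end up in ata_eh_set_pending():
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ata_ehi_push_desc(&ap->link.eh_info, "PHY event");
 *	ap->link.eh_info.action |= ATA_EH_RESET;
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 */
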
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);

/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	struct ata_queued_cmd *qc;
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	/* include internal tag in iteration */
	ata_qc_for_each_with_internal(ap, qc, tag) {
		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
EXPORT_SYMBOL_GPL(ata_link_abort);

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
EXPORT_SYMBOL_GPL(ata_port_abort);

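/*
 * Illustrative sketch (not part of the driver, not compiled): on a fatal
 * error interrupt a hypothetical LLD usually records what happened and
 * then either aborts the outstanding commands or freezes the whole port
 * (which aborts and freezes in one go), broadly similar to what AHCI-like
 * drivers do.  "irq_stat" and "fatal" are hypothetical:
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ehi->err_mask |= AC_ERR_HOST_BUS;
 *	ehi->action |= ATA_EH_RESET;
 *
 *	if (fatal)
 *		ata_port_freeze(ap);
 *	else
 *		ata_port_abort(ap);
 */
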
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  A frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	trace_ata_port_freeze(ap);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);

	return ata_port_abort(ap);
}
EXPORT_SYMBOL_GPL(ata_port_freeze);

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	trace_ata_port_thaw(ap);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

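/*
 * Illustrative sketch (not part of the driver, not compiled): after
 * recovery, EH walks the failed commands and either retries or completes
 * them; roughly (the "recovered" flag is hypothetical):
 *
 *	ata_qc_for_each_raw(ap, qc, tag) {
 *		if (!(qc->flags & ATA_QCFLAG_FAILED))
 *			continue;
 *		if (recovered && !qc->err_mask)
 *			ata_eh_qc_retry(qc);
 *		else
 *			ata_eh_qc_complete(qc);
 *	}
 */
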
1193c6fd2807SJeff Garzik */ 1194c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1195c6fd2807SJeff Garzik { 1196c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1197f13e2201SGwendal Grignou if (!qc->err_mask) 1198f13e2201SGwendal Grignou scmd->allowed++; 1199c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1200c6fd2807SJeff Garzik } 1201c6fd2807SJeff Garzik 1202c6fd2807SJeff Garzik /** 1203678afac6STejun Heo * ata_dev_disable - disable ATA device 1204678afac6STejun Heo * @dev: ATA device to disable 1205678afac6STejun Heo * 1206678afac6STejun Heo * Disable @dev. 1207678afac6STejun Heo * 1208678afac6STejun Heo * Locking: 1209678afac6STejun Heo * EH context. 1210678afac6STejun Heo */ 1211678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1212678afac6STejun Heo { 1213678afac6STejun Heo if (!ata_dev_enabled(dev)) 1214678afac6STejun Heo return; 1215678afac6STejun Heo 12161c95a27cSHannes Reinecke ata_dev_warn(dev, "disable device\n"); 1217678afac6STejun Heo ata_acpi_on_disable(dev); 1218678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1219678afac6STejun Heo dev->class++; 122099cf610aSTejun Heo 122199cf610aSTejun Heo /* From now till the next successful probe, ering is used to 122299cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 122399cf610aSTejun Heo */ 122499cf610aSTejun Heo ata_ering_clear(&dev->ering); 1225678afac6STejun Heo } 1226a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_dev_disable); 1227678afac6STejun Heo 1228678afac6STejun Heo /** 1229c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1230c6fd2807SJeff Garzik * @dev: ATA device to detach 1231c6fd2807SJeff Garzik * 1232c6fd2807SJeff Garzik * Detach @dev. 1233c6fd2807SJeff Garzik * 1234c6fd2807SJeff Garzik * LOCKING: 1235c6fd2807SJeff Garzik * None. 
1236c6fd2807SJeff Garzik */ 1237fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1238c6fd2807SJeff Garzik { 1239f58229f8STejun Heo struct ata_link *link = dev->link; 1240f58229f8STejun Heo struct ata_port *ap = link->ap; 124190484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1242c6fd2807SJeff Garzik unsigned long flags; 1243c6fd2807SJeff Garzik 1244c6fd2807SJeff Garzik ata_dev_disable(dev); 1245c6fd2807SJeff Garzik 1246c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1247c6fd2807SJeff Garzik 1248c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1249c6fd2807SJeff Garzik 1250c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1251c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1252c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1253c6fd2807SJeff Garzik } 1254c6fd2807SJeff Garzik 125590484ebfSTejun Heo /* clear per-dev EH info */ 1256f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1257f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 125890484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 125990484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1260c6fd2807SJeff Garzik 1261c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1262c6fd2807SJeff Garzik } 1263c6fd2807SJeff Garzik 1264c6fd2807SJeff Garzik /** 1265c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1266955e57dfSTejun Heo * @link: target ATA link 1267c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1268c6fd2807SJeff Garzik * @action: action about to be performed 1269c6fd2807SJeff Garzik * 1270c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1271955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1272955e57dfSTejun Heo * repeated. 1273c6fd2807SJeff Garzik * 1274c6fd2807SJeff Garzik * LOCKING: 1275c6fd2807SJeff Garzik * None. 1276c6fd2807SJeff Garzik */ 1277fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1278c6fd2807SJeff Garzik unsigned int action) 1279c6fd2807SJeff Garzik { 1280955e57dfSTejun Heo struct ata_port *ap = link->ap; 1281955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1282955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1283c6fd2807SJeff Garzik unsigned long flags; 1284c6fd2807SJeff Garzik 1285c318458cSHannes Reinecke trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action); 1286c318458cSHannes Reinecke 1287c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1288c6fd2807SJeff Garzik 1289955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1290c6fd2807SJeff Garzik 1291a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1292a568d1d2STejun Heo * slave links as master will do them again. 
1293a568d1d2STejun Heo */ 1294a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1295c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1296c6fd2807SJeff Garzik 1297c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1298c6fd2807SJeff Garzik } 1299c6fd2807SJeff Garzik 1300c6fd2807SJeff Garzik /** 1301c6fd2807SJeff Garzik * ata_eh_done - EH action complete 13022f60e1abSJonathan Corbet * @link: ATA link for which EH actions are complete 1303c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1304c6fd2807SJeff Garzik * @action: action just completed 1305c6fd2807SJeff Garzik * 1306c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1307955e57dfSTejun Heo * in @link->eh_context. 1308c6fd2807SJeff Garzik * 1309c6fd2807SJeff Garzik * LOCKING: 1310c6fd2807SJeff Garzik * None. 1311c6fd2807SJeff Garzik */ 1312fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1313c6fd2807SJeff Garzik unsigned int action) 1314c6fd2807SJeff Garzik { 1315955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13169af5c9c9STejun Heo 1317c318458cSHannes Reinecke trace_ata_eh_done(link, dev ? dev->devno : 0, action); 1318c318458cSHannes Reinecke 1319955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1320c6fd2807SJeff Garzik } 1321c6fd2807SJeff Garzik 1322c6fd2807SJeff Garzik /** 1323c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1324c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1325c6fd2807SJeff Garzik * 1326c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1327c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1328c6fd2807SJeff Garzik * error is reported. 1329c6fd2807SJeff Garzik * 1330c6fd2807SJeff Garzik * LOCKING: 1331c6fd2807SJeff Garzik * None. 
1332c6fd2807SJeff Garzik * 1333c6fd2807SJeff Garzik * RETURNS: 1334c6fd2807SJeff Garzik * Descriptive string for @err_mask 1335c6fd2807SJeff Garzik */ 1336c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1337c6fd2807SJeff Garzik { 1338c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1339c6fd2807SJeff Garzik return "host bus error"; 1340c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1341c6fd2807SJeff Garzik return "ATA bus error"; 1342c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1343c6fd2807SJeff Garzik return "timeout"; 1344c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1345c6fd2807SJeff Garzik return "HSM violation"; 1346c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1347c6fd2807SJeff Garzik return "internal error"; 1348c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1349c6fd2807SJeff Garzik return "media error"; 1350c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1351c6fd2807SJeff Garzik return "invalid argument"; 1352c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1353c6fd2807SJeff Garzik return "device error"; 135454fb131bSDamien Le Moal if (err_mask & AC_ERR_NCQ) 135554fb131bSDamien Le Moal return "NCQ error"; 135654fb131bSDamien Le Moal if (err_mask & AC_ERR_NODEV_HINT) 135754fb131bSDamien Le Moal return "Polling detection error"; 1358c6fd2807SJeff Garzik return "unknown error"; 1359c6fd2807SJeff Garzik } 1360c6fd2807SJeff Garzik 1361c6fd2807SJeff Garzik /** 136211fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 136311fc33daSTejun Heo * @dev: target ATAPI device 136411fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 136511fc33daSTejun Heo * 136611fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 136711fc33daSTejun Heo * 136811fc33daSTejun Heo * LOCKING: 136911fc33daSTejun Heo * EH context (may sleep). 137011fc33daSTejun Heo * 137111fc33daSTejun Heo * RETURNS: 137211fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 137311fc33daSTejun Heo */ 13743dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 137511fc33daSTejun Heo { 137611fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 137711fc33daSTejun Heo struct ata_taskfile tf; 137811fc33daSTejun Heo unsigned int err_mask; 137911fc33daSTejun Heo 138011fc33daSTejun Heo ata_tf_init(dev, &tf); 138111fc33daSTejun Heo 138211fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 138311fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 138411fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 138511fc33daSTejun Heo 138611fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 138711fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 1388efcef265SSergey Shtylyov *r_sense_key = tf.error >> 4; 138911fc33daSTejun Heo return err_mask; 139011fc33daSTejun Heo } 139111fc33daSTejun Heo 139211fc33daSTejun Heo /** 1393e87fd28cSHannes Reinecke * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT 13942f60e1abSJonathan Corbet * @qc: qc to perform REQUEST_SENSE_DATA_EXT to 1395e87fd28cSHannes Reinecke * 1396e87fd28cSHannes Reinecke * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK 1397e87fd28cSHannes Reinecke * SENSE. This function is an EH helper. 1398e87fd28cSHannes Reinecke * 1399e87fd28cSHannes Reinecke * LOCKING: 1400e87fd28cSHannes Reinecke * Kernel thread context (may sleep).
1401e87fd28cSHannes Reinecke */ 1402b46c760eSNiklas Cassel static void ata_eh_request_sense(struct ata_queued_cmd *qc) 1403e87fd28cSHannes Reinecke { 1404b46c760eSNiklas Cassel struct scsi_cmnd *cmd = qc->scsicmd; 1405e87fd28cSHannes Reinecke struct ata_device *dev = qc->dev; 1406e87fd28cSHannes Reinecke struct ata_taskfile tf; 1407e87fd28cSHannes Reinecke unsigned int err_mask; 1408e87fd28cSHannes Reinecke 1409e87fd28cSHannes Reinecke if (qc->ap->pflags & ATA_PFLAG_FROZEN) { 1410e87fd28cSHannes Reinecke ata_dev_warn(dev, "sense data available but port frozen\n"); 1411e87fd28cSHannes Reinecke return; 1412e87fd28cSHannes Reinecke } 1413e87fd28cSHannes Reinecke 1414d238ffd5SHannes Reinecke if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID) 1415e87fd28cSHannes Reinecke return; 1416e87fd28cSHannes Reinecke 1417e87fd28cSHannes Reinecke if (!ata_id_sense_reporting_enabled(dev->id)) { 1418e87fd28cSHannes Reinecke ata_dev_warn(qc->dev, "sense data reporting disabled\n"); 1419e87fd28cSHannes Reinecke return; 1420e87fd28cSHannes Reinecke } 1421e87fd28cSHannes Reinecke 1422e87fd28cSHannes Reinecke ata_tf_init(dev, &tf); 1423e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1424e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1425e87fd28cSHannes Reinecke tf.command = ATA_CMD_REQ_SENSE_DATA; 1426e87fd28cSHannes Reinecke tf.protocol = ATA_PROT_NODATA; 1427e87fd28cSHannes Reinecke 1428e87fd28cSHannes Reinecke err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1429e87fd28cSHannes Reinecke /* Ignore err_mask; ATA_ERR might be set */ 1430efcef265SSergey Shtylyov if (tf.status & ATA_SENSE) { 143106dbde5fSHannes Reinecke ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal); 1432e87fd28cSHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 1433e87fd28cSHannes Reinecke } else { 1434e87fd28cSHannes Reinecke ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", 1435efcef265SSergey Shtylyov tf.status, err_mask); 1436e87fd28cSHannes Reinecke } 1437e87fd28cSHannes Reinecke } 1438e87fd28cSHannes Reinecke 1439e87fd28cSHannes Reinecke /** 1440c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1441c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1442c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14433eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1444c6fd2807SJeff Garzik * 1445c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1446c6fd2807SJeff Garzik * SENSE. This function is an EH helper. 1447c6fd2807SJeff Garzik * 1448c6fd2807SJeff Garzik * LOCKING: 1449c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1450c6fd2807SJeff Garzik * 1451c6fd2807SJeff Garzik * RETURNS: 1452c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1453c6fd2807SJeff Garzik */ 14543dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev, 14553eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1456c6fd2807SJeff Garzik { 14573eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 14583eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 14599af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1460c6fd2807SJeff Garzik struct ata_taskfile tf; 1461c6fd2807SJeff Garzik 1462c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1463c6fd2807SJeff Garzik 146456287768SAlbert Lee /* initialize sense_buf with the error register, 146556287768SAlbert Lee * for the case where they are -not- overwritten 146656287768SAlbert Lee */ 1467c6fd2807SJeff Garzik sense_buf[0] = 0x70; 14683eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 146956287768SAlbert Lee 147056287768SAlbert Lee /* some devices time out if garbage left in tf */ 147156287768SAlbert Lee ata_tf_init(dev, &tf); 1472c6fd2807SJeff Garzik 1473c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1474c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1475c6fd2807SJeff Garzik 1476c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1477c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 14780dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1479c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1480c6fd2807SJeff Garzik } else { 14810dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1482f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1483f2dfc1a1STejun Heo tf.lbah = 0; 1484c6fd2807SJeff Garzik } 1485c6fd2807SJeff Garzik 1486c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 14872b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1488c6fd2807SJeff Garzik } 1489c6fd2807SJeff Garzik 1490c6fd2807SJeff Garzik /** 1491c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 14920260731fSTejun Heo * @link: ATA link to analyze SError for 1493c6fd2807SJeff Garzik * 1494c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1495c6fd2807SJeff Garzik * failure. 1496c6fd2807SJeff Garzik * 1497c6fd2807SJeff Garzik * LOCKING: 1498c6fd2807SJeff Garzik * None. 1499c6fd2807SJeff Garzik */ 15000260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1501c6fd2807SJeff Garzik { 15020260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1503c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1504c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1505f9df58cbSTejun Heo u32 hotplug_mask; 1506c6fd2807SJeff Garzik 1507e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1508c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1509cf480626STejun Heo action |= ATA_EH_RESET; 1510c6fd2807SJeff Garzik } 1511c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1512c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1513cf480626STejun Heo action |= ATA_EH_RESET; 1514c6fd2807SJeff Garzik } 1515c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1516c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1517cf480626STejun Heo action |= ATA_EH_RESET; 1518c6fd2807SJeff Garzik } 1519f9df58cbSTejun Heo 1520f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1521f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1522f9df58cbSTejun Heo * host links. 
For disabled PMP links, only N bit is 1523f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1524f9df58cbSTejun Heo */ 1525eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 15266b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 15276b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1528f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1529f9df58cbSTejun Heo else 1530f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1531f9df58cbSTejun Heo 1532f9df58cbSTejun Heo if (serror & hotplug_mask) 1533c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1534c6fd2807SJeff Garzik 1535c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1536c6fd2807SJeff Garzik ehc->i.action |= action; 1537c6fd2807SJeff Garzik } 1538c6fd2807SJeff Garzik 1539c6fd2807SJeff Garzik /** 1540c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1541c6fd2807SJeff Garzik * @qc: qc to analyze 1542c6fd2807SJeff Garzik * 1543c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1544c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 154525985edcSLucas De Marchi * available. 1546c6fd2807SJeff Garzik * 1547c6fd2807SJeff Garzik * LOCKING: 1548c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1549c6fd2807SJeff Garzik * 1550c6fd2807SJeff Garzik * RETURNS: 1551c6fd2807SJeff Garzik * Determined recovery action 1552c6fd2807SJeff Garzik */ 1553e3b1fff6SNiklas Cassel static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc) 1554c6fd2807SJeff Garzik { 1555e3b1fff6SNiklas Cassel const struct ata_taskfile *tf = &qc->result_tf; 1556c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1557efcef265SSergey Shtylyov u8 stat = tf->status, err = tf->error; 1558c6fd2807SJeff Garzik 1559c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1560c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1561cf480626STejun Heo return ATA_EH_RESET; 1562c6fd2807SJeff Garzik } 1563c6fd2807SJeff Garzik 1564e87fd28cSHannes Reinecke if (stat & (ATA_ERR | ATA_DF)) { 1565a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1566e87fd28cSHannes Reinecke /* 1567e87fd28cSHannes Reinecke * Sense data reporting does not work if the 1568e87fd28cSHannes Reinecke * device fault bit is set. 1569e87fd28cSHannes Reinecke */ 1570e87fd28cSHannes Reinecke if (stat & ATA_DF) 1571e87fd28cSHannes Reinecke stat &= ~ATA_SENSE; 1572e87fd28cSHannes Reinecke } else { 1573c6fd2807SJeff Garzik return 0; 1574e87fd28cSHannes Reinecke } 1575c6fd2807SJeff Garzik 1576c6fd2807SJeff Garzik switch (qc->dev->class) { 15779162c657SHannes Reinecke case ATA_DEV_ZAC: 1578*461ec040SNiklas Cassel /* 1579*461ec040SNiklas Cassel * Fetch the sense data explicitly if: 1580*461ec040SNiklas Cassel * -It was a non-NCQ command that failed, or 1581*461ec040SNiklas Cassel * -It was a NCQ command that failed, but the sense data 1582*461ec040SNiklas Cassel * was not included in the NCQ command error log 1583*461ec040SNiklas Cassel * (i.e. NCQ autosense is not supported by the device). 1584*461ec040SNiklas Cassel */ 1585*461ec040SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && (stat & ATA_SENSE)) 1586b46c760eSNiklas Cassel ata_eh_request_sense(qc); 1587df561f66SGustavo A. R. 
Silva fallthrough; 1588ca156e00STejun Heo case ATA_DEV_ATA: 1589c6fd2807SJeff Garzik if (err & ATA_ICRC) 1590c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1591eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF)) 1592c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1593c6fd2807SJeff Garzik if (err & ATA_IDNF) 1594c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1595c6fd2807SJeff Garzik break; 1596c6fd2807SJeff Garzik 1597c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1598a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 15993eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 16003eabddb8STejun Heo qc->scsicmd->sense_buffer, 1601efcef265SSergey Shtylyov qc->result_tf.error >> 4); 16023852e373SHannes Reinecke if (!tmp) 1603c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 16043852e373SHannes Reinecke else 1605c6fd2807SJeff Garzik qc->err_mask |= tmp; 1606c6fd2807SJeff Garzik } 1607a569a30dSTejun Heo } 1608c6fd2807SJeff Garzik 16093852e373SHannes Reinecke if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1610b8e162f9SBart Van Assche enum scsi_disposition ret = scsi_check_sense(qc->scsicmd); 16113852e373SHannes Reinecke /* 161279487259SDamien Le Moal * SUCCESS here means that the sense code could be 16133852e373SHannes Reinecke * evaluated and should be passed to the upper layers 16143852e373SHannes Reinecke * for correct evaluation. 161579487259SDamien Le Moal * FAILED means the sense code could not be interpreted 16163852e373SHannes Reinecke * and the device would need to be reset. 16173852e373SHannes Reinecke * NEEDS_RETRY and ADD_TO_MLQUEUE means that the 16183852e373SHannes Reinecke * command would need to be retried. 16193852e373SHannes Reinecke */ 16203852e373SHannes Reinecke if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) { 16213852e373SHannes Reinecke qc->flags |= ATA_QCFLAG_RETRY; 16223852e373SHannes Reinecke qc->err_mask |= AC_ERR_OTHER; 16233852e373SHannes Reinecke } else if (ret != SUCCESS) { 16243852e373SHannes Reinecke qc->err_mask |= AC_ERR_HSM; 16253852e373SHannes Reinecke } 16263852e373SHannes Reinecke } 1627c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1628cf480626STejun Heo action |= ATA_EH_RESET; 1629c6fd2807SJeff Garzik 1630c6fd2807SJeff Garzik return action; 1631c6fd2807SJeff Garzik } 1632c6fd2807SJeff Garzik 163376326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 163476326ac1STejun Heo int *xfer_ok) 1635c6fd2807SJeff Garzik { 163676326ac1STejun Heo int base = 0; 163776326ac1STejun Heo 163876326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 163976326ac1STejun Heo *xfer_ok = 1; 164076326ac1STejun Heo 164176326ac1STejun Heo if (!*xfer_ok) 164275f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 164376326ac1STejun Heo 16447d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 164576326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1646c6fd2807SJeff Garzik 16477d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 164876326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 16497d47e8d4STejun Heo 16503884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 16517d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 165276326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 16537d47e8d4STejun Heo if ((err_mask & 16547d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 165576326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1656c6fd2807SJeff Garzik } 1657c6fd2807SJeff Garzik 1658c6fd2807SJeff Garzik return 0; 1659c6fd2807SJeff Garzik } 1660c6fd2807SJeff Garzik 
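/*
 * Editorial sketch, not part of the driver: how ata_eh_categorize_error()
 * above is meant to be consumed.  The helper below is hypothetical, added
 * purely for illustration and never called.  It shows that the same
 * AC_ERR_TIMEOUT error on an IO command is counted as ATA_ECAT_TOUT_HSM
 * once a data transfer has been verified, but as ATA_ECAT_DUBIOUS_TOUT_HSM
 * (the ATA_ECAT_DUBIOUS_NONE base offset) while it has not.
 * speed_down_verdict_cb() below tallies one such category per error-ring
 * entry, and ata_eh_speed_down_verdict() converts the tallies into
 * ATA_EH_SPDN_* verdict flags.
 */
static inline int ata_eh_example_categorize_timeout(bool xfer_verified)
{
	unsigned int eflags = ATA_EFLAG_IS_IO;
	int xfer_ok = xfer_verified ? 1 : 0;

	/* no verified transfer yet: categorize as a DUBIOUS_* variant */
	if (!xfer_verified)
		eflags |= ATA_EFLAG_DUBIOUS_XFER;

	/* ATA_ECAT_TOUT_HSM if verified, ATA_ECAT_DUBIOUS_TOUT_HSM if not */
	return ata_eh_categorize_error(eflags, AC_ERR_TIMEOUT, &xfer_ok);
}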
16617d47e8d4STejun Heo struct speed_down_verdict_arg { 1662c6fd2807SJeff Garzik u64 since; 166376326ac1STejun Heo int xfer_ok; 16643884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1665c6fd2807SJeff Garzik }; 1666c6fd2807SJeff Garzik 16677d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1668c6fd2807SJeff Garzik { 16697d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 167076326ac1STejun Heo int cat; 1671c6fd2807SJeff Garzik 1672d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1673c6fd2807SJeff Garzik return -1; 1674c6fd2807SJeff Garzik 167576326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 167676326ac1STejun Heo &arg->xfer_ok); 16777d47e8d4STejun Heo arg->nr_errors[cat]++; 167876326ac1STejun Heo 1679c6fd2807SJeff Garzik return 0; 1680c6fd2807SJeff Garzik } 1681c6fd2807SJeff Garzik 1682c6fd2807SJeff Garzik /** 16837d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1684c6fd2807SJeff Garzik * @dev: Device of interest 1685c6fd2807SJeff Garzik * 1686c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 16877d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 16887d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 1689c6fd2807SJeff Garzik * 16903884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1691c6fd2807SJeff Garzik * 16923884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 16933884f7b0STejun Heo * IO commands 16947d47e8d4STejun Heo * 16953884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1696c6fd2807SJeff Garzik * 169776326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 169876326ac1STejun Heo * data transfer hasn't been verified. 169976326ac1STejun Heo * 17003884f7b0STejun Heo * Verdicts are 17017d47e8d4STejun Heo * 17023884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 17037d47e8d4STejun Heo * 17043884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 17053884f7b0STejun Heo * to PIO. 17063884f7b0STejun Heo * 17073884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 17083884f7b0STejun Heo * 17093884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 171076326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 171176326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 171276326ac1STejun Heo * This is to expedite speed down decisions right after device is 171376326ac1STejun Heo * initially configured. 17143884f7b0STejun Heo * 17154091fb95SMasahiro Yamada * The following are speed down rules. #1 and #2 deal with 171676326ac1STejun Heo * DUBIOUS errors. 171776326ac1STejun Heo * 171876326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 171976326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 172076326ac1STejun Heo * 172176326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 172276326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 172376326ac1STejun Heo * 172476326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 172525985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO 17263884f7b0STejun Heo * 172776326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 17283884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 
17293884f7b0STejun Heo * 173076326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 17313884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 17327d47e8d4STejun Heo * 1733c6fd2807SJeff Garzik * LOCKING: 1734c6fd2807SJeff Garzik * Inherited from caller. 1735c6fd2807SJeff Garzik * 1736c6fd2807SJeff Garzik * RETURNS: 17377d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 1738c6fd2807SJeff Garzik */ 17397d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1740c6fd2807SJeff Garzik { 17417d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 17427d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 17437d47e8d4STejun Heo struct speed_down_verdict_arg arg; 17447d47e8d4STejun Heo unsigned int verdict = 0; 1745c6fd2807SJeff Garzik 17463884f7b0STejun Heo /* scan past 5 mins of error history */ 17473884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 17483884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 17493884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 17503884f7b0STejun Heo 175176326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 175276326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 175376326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 175476326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 175576326ac1STejun Heo 175676326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 175776326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 175876326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 175976326ac1STejun Heo 17603884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 17613884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1762663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 17633884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 17643884f7b0STejun Heo 17657d47e8d4STejun Heo /* scan past 10 mins of error history */ 1766c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 17677d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 17687d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1769c6fd2807SJeff Garzik 17703884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 17713884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 17727d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 17733884f7b0STejun Heo 17743884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 17753884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1776663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 17777d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1778c6fd2807SJeff Garzik 17797d47e8d4STejun Heo return verdict; 1780c6fd2807SJeff Garzik } 1781c6fd2807SJeff Garzik 1782c6fd2807SJeff Garzik /** 1783c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1784c6fd2807SJeff Garzik * @dev: Failed device 17853884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1786c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1787c6fd2807SJeff Garzik * 1788c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1789c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1790c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1791c6fd2807SJeff Garzik * necessary. 1792c6fd2807SJeff Garzik * 1793c6fd2807SJeff Garzik * LOCKING: 1794c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1795c6fd2807SJeff Garzik * 1796c6fd2807SJeff Garzik * RETURNS: 17977d47e8d4STejun Heo * Determined recovery action. 1798c6fd2807SJeff Garzik */ 17993884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 18003884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1801c6fd2807SJeff Garzik { 1802b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 180376326ac1STejun Heo int xfer_ok = 0; 18047d47e8d4STejun Heo unsigned int verdict; 18057d47e8d4STejun Heo unsigned int action = 0; 18067d47e8d4STejun Heo 18077d47e8d4STejun Heo /* don't bother if Cat-0 error */ 180876326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1809c6fd2807SJeff Garzik return 0; 1810c6fd2807SJeff Garzik 1811c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 18123884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 18137d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1814c6fd2807SJeff Garzik 18157d47e8d4STejun Heo /* turn off NCQ? */ 18167d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 18177d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 18187d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 18197d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 1820a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 18217d47e8d4STejun Heo goto done; 18227d47e8d4STejun Heo } 1823c6fd2807SJeff Garzik 18247d47e8d4STejun Heo /* speed down? */ 18257d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1826c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1827a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1828cf480626STejun Heo action |= ATA_EH_RESET; 18297d47e8d4STejun Heo goto done; 18307d47e8d4STejun Heo } 1831c6fd2807SJeff Garzik 1832c6fd2807SJeff Garzik /* lower transfer mode */ 18337d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 18347d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 18357d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 18367d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 18377d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 18387d47e8d4STejun Heo int sel; 1839c6fd2807SJeff Garzik 18407d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 18417d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 18427d47e8d4STejun Heo else 18437d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 18447d47e8d4STejun Heo 18457d47e8d4STejun Heo dev->spdn_cnt++; 18467d47e8d4STejun Heo 18477d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1848cf480626STejun Heo action |= ATA_EH_RESET; 18497d47e8d4STejun Heo goto done; 18507d47e8d4STejun Heo } 18517d47e8d4STejun Heo } 18527d47e8d4STejun Heo } 18537d47e8d4STejun Heo 18547d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1855663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
18567d47e8d4STejun Heo */ 18577d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1858663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 18597d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 18607d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 18617d47e8d4STejun Heo dev->spdn_cnt = 0; 1862cf480626STejun Heo action |= ATA_EH_RESET; 18637d47e8d4STejun Heo goto done; 18647d47e8d4STejun Heo } 18657d47e8d4STejun Heo } 18667d47e8d4STejun Heo 1867c6fd2807SJeff Garzik return 0; 18687d47e8d4STejun Heo done: 18697d47e8d4STejun Heo /* device has been slowed down, blow error history */ 187076326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 18717d47e8d4STejun Heo ata_ering_clear(&dev->ering); 18727d47e8d4STejun Heo return action; 1873c6fd2807SJeff Garzik } 1874c6fd2807SJeff Garzik 1875c6fd2807SJeff Garzik /** 18768d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry 18778d899e70SMark Lord * @qc: qc to possibly retry 18788d899e70SMark Lord * 18798d899e70SMark Lord * Look at the cause of the error and decide if a retry 18808d899e70SMark Lord * might be useful or not. We don't want to retry media errors 18818d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds 18828d899e70SMark Lord * doing its own internal retries before reporting the failure. 18838d899e70SMark Lord */ 18848d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 18858d899e70SMark Lord { 18861eca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA) 18878d899e70SMark Lord return 0; /* don't retry media errors */ 18888d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO) 18898d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */ 18908d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID) 18918d899e70SMark Lord return 0; /* don't retry these */ 18928d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ 18938d899e70SMark Lord } 18948d899e70SMark Lord 18958d899e70SMark Lord /** 18967eb49509SDamien Le Moal * ata_eh_quiet - check if we need to be quiet about a command error 18977eb49509SDamien Le Moal * @qc: qc to check 18987eb49509SDamien Le Moal * 18997eb49509SDamien Le Moal * Look at the qc flags and its scsi command request flags to determine 19007eb49509SDamien Le Moal * if we need to be quiet about the command failure. 19017eb49509SDamien Le Moal */ 19027eb49509SDamien Le Moal static inline bool ata_eh_quiet(struct ata_queued_cmd *qc) 19037eb49509SDamien Le Moal { 1904c8329cd5SBart Van Assche if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET) 19057eb49509SDamien Le Moal qc->flags |= ATA_QCFLAG_QUIET; 19067eb49509SDamien Le Moal return qc->flags & ATA_QCFLAG_QUIET; 19077eb49509SDamien Le Moal } 19087eb49509SDamien Le Moal 19097eb49509SDamien Le Moal /** 19109b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19119b1e2658STejun Heo * @link: host link to perform autopsy on 1912c6fd2807SJeff Garzik * 19130260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19140260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19150260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1916c6fd2807SJeff Garzik * 1917c6fd2807SJeff Garzik * LOCKING: 1918c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1919c6fd2807SJeff Garzik */ 19209b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1921c6fd2807SJeff Garzik { 19220260731fSTejun Heo struct ata_port *ap = link->ap; 1923936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1924258c4e5cSJens Axboe struct ata_queued_cmd *qc; 1925dfcc173dSTejun Heo struct ata_device *dev; 19263884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 19277eb49509SDamien Le Moal int tag, nr_failed = 0, nr_quiet = 0; 1928c6fd2807SJeff Garzik u32 serror; 1929c6fd2807SJeff Garzik int rc; 1930c6fd2807SJeff Garzik 1931c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1932c6fd2807SJeff Garzik return; 1933c6fd2807SJeff Garzik 1934c6fd2807SJeff Garzik /* obtain and analyze SError */ 1935936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1936c6fd2807SJeff Garzik if (rc == 0) { 1937c6fd2807SJeff Garzik ehc->i.serror |= serror; 19380260731fSTejun Heo ata_eh_analyze_serror(link); 19394e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 1940cf480626STejun Heo /* SError read failed, force reset and probing */ 1941b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 1942cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 19434e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 19444e57c517STejun Heo } 1945c6fd2807SJeff Garzik 1946c6fd2807SJeff Garzik /* analyze NCQ failure */ 19470260731fSTejun Heo ata_eh_analyze_ncq_error(link); 1948c6fd2807SJeff Garzik 1949c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1950c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1951c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1952c6fd2807SJeff Garzik 1953c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1954c6fd2807SJeff Garzik 1955258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 1956b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 1957b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 1958c6fd2807SJeff Garzik continue; 1959c6fd2807SJeff Garzik 1960c6fd2807SJeff Garzik /* inherit upper level err_mask */ 1961c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 1962c6fd2807SJeff Garzik 1963c6fd2807SJeff Garzik /* analyze TF */ 1964e3b1fff6SNiklas Cassel ehc->i.action |= ata_eh_analyze_tf(qc); 1965c6fd2807SJeff Garzik 1966c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 1967c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 1968c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 1969c6fd2807SJeff Garzik AC_ERR_INVALID); 1970c6fd2807SJeff Garzik 1971c6fd2807SJeff Garzik /* any real error trumps unknown error */ 1972c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 1973c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 1974c6fd2807SJeff Garzik 1975804689adSDamien Le Moal /* 1976804689adSDamien Le Moal * SENSE_VALID trumps dev/unknown error and revalidation. Upper 1977804689adSDamien Le Moal * layers will determine whether the command is worth retrying 1978804689adSDamien Le Moal * based on the sense data and device class/type. Otherwise, 1979804689adSDamien Le Moal * determine directly if the command is worth retrying using its 1980804689adSDamien Le Moal * error mask and flags. 
1981804689adSDamien Le Moal */ 1982f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 1983c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1984804689adSDamien Le Moal else if (ata_eh_worth_retry(qc)) 198503faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 198603faab78STejun Heo 1987c6fd2807SJeff Garzik /* accumulate error info */ 1988c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 1989c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 1990c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 19913884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 1992255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc); 19937eb49509SDamien Le Moal 19947eb49509SDamien Le Moal /* Count quiet errors */ 19957eb49509SDamien Le Moal if (ata_eh_quiet(qc)) 19967eb49509SDamien Le Moal nr_quiet++; 19977eb49509SDamien Le Moal nr_failed++; 1998c6fd2807SJeff Garzik } 1999c6fd2807SJeff Garzik 20007eb49509SDamien Le Moal /* If all failed commands requested silence, then be quiet */ 20017eb49509SDamien Le Moal if (nr_quiet == nr_failed) 20027eb49509SDamien Le Moal ehc->i.flags |= ATA_EHI_QUIET; 20037eb49509SDamien Le Moal 2004c6fd2807SJeff Garzik /* enforce default EH actions */ 2005c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 2006c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2007cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20083884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20093884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2010c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2011c6fd2807SJeff Garzik 2012dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2013dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 2014dfcc173dSTejun Heo */ 2015c6fd2807SJeff Garzik if (ehc->i.dev) { 2016c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2017c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2018c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2019c6fd2807SJeff Garzik } 2020c6fd2807SJeff Garzik 20212695e366STejun Heo /* propagate timeout to host link */ 20222695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 20232695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 20242695e366STejun Heo 20252695e366STejun Heo /* record error and consider speeding down */ 2026dfcc173dSTejun Heo dev = ehc->i.dev; 20272695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 20282695e366STejun Heo ata_dev_enabled(link->device)))) 2029dfcc173dSTejun Heo dev = link->device; 2030dfcc173dSTejun Heo 203176326ac1STejun Heo if (dev) { 203276326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 203376326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 20343884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2035255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2036f1601113SRameshwar Prasad Sahu } 2037c6fd2807SJeff Garzik } 2038c6fd2807SJeff Garzik 2039c6fd2807SJeff Garzik /** 20409b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 20419b1e2658STejun Heo * @ap: host port to perform autopsy on 20429b1e2658STejun Heo * 20439b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 20449b1e2658STejun Heo * which recovery actions are needed. 20459b1e2658STejun Heo * 20469b1e2658STejun Heo * LOCKING: 20479b1e2658STejun Heo * Kernel thread context (may sleep). 
20489b1e2658STejun Heo */ 2049fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 20509b1e2658STejun Heo { 20519b1e2658STejun Heo struct ata_link *link; 20529b1e2658STejun Heo 20531eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 20549b1e2658STejun Heo ata_eh_link_autopsy(link); 20552695e366STejun Heo 2056b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2057b1c72916STejun Heo * but actions and flags are transferred over to the master 2058b1c72916STejun Heo * link and handled from there. 2059b1c72916STejun Heo */ 2060b1c72916STejun Heo if (ap->slave_link) { 2061b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2062b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2063b1c72916STejun Heo 2064848e4c68STejun Heo /* transfer control flags from master to slave */ 2065848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2066848e4c68STejun Heo 2067848e4c68STejun Heo /* perform autopsy on the slave link */ 2068b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2069b1c72916STejun Heo 2070848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2071b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2072b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2073b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2074b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2075b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2076b1c72916STejun Heo } 2077b1c72916STejun Heo 20782695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 20792695e366STejun Heo * Perform host link autopsy last. 20802695e366STejun Heo */ 2081071f44b1STejun Heo if (sata_pmp_attached(ap)) 20822695e366STejun Heo ata_eh_link_autopsy(&ap->link); 20839b1e2658STejun Heo } 20849b1e2658STejun Heo 20859b1e2658STejun Heo /** 2086d4520903SHannes Reinecke * ata_get_cmd_name - get name for ATA command 2087d4520903SHannes Reinecke * @command: ATA command code to get name for 20886521148cSRobert Hancock * 2089d4520903SHannes Reinecke * Return a textual name of the given command or "unknown" 20906521148cSRobert Hancock * 20916521148cSRobert Hancock * LOCKING: 20926521148cSRobert Hancock * None 20936521148cSRobert Hancock */ 2094d4520903SHannes Reinecke const char *ata_get_cmd_name(u8 command) 20956521148cSRobert Hancock { 20966521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 20976521148cSRobert Hancock static const struct 20986521148cSRobert Hancock { 20996521148cSRobert Hancock u8 command; 21006521148cSRobert Hancock const char *text; 21016521148cSRobert Hancock } cmd_descr[] = { 21026521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 21036521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 21046521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 21056521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 21066521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 21076521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 21083915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 21096521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 21106521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 21116521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 21126521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 21136521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 21146521148cSRobert Hancock { 
ATA_CMD_SERVICE, "SERVICE" }, 21156521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 21166521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 21176521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 21186521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 21196521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 21206521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 21216521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 21226521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 21236521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 21246521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 21256521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 21266521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 21276521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 21286521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 2129d3122bf9SDamien Le Moal { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, 21303915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, 21313915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, 21326521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 21336521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 21346521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 21356521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 21366521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 21376521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 21386521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 21396521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 21406521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 21416521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 21426521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 21436521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 21446521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 21456521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 21466521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 21476521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 21486521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 21496521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 21506521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 21516521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 21526521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 21536521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 21546521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 21556521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 21566521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 21576521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 21583915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 21596521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 21606521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 21616521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 
21626521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 21636521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 21643915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 21656521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 21663915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, 21676521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 21686521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 21696521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 21706521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 21716521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 21726521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 21736521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 21746521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 21756521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 21766521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 21776521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2178acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 21796521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 21806521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 21816521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 21826521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 21836521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 21846521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 21853915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 21863915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 218728a3fc22SHannes Reinecke { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" }, 218827708a95SHannes Reinecke { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" }, 21896521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 21906521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 21916521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 21926521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 21936521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 21946521148cSRobert Hancock { 0, NULL } /* terminate list */ 21956521148cSRobert Hancock }; 21966521148cSRobert Hancock 21976521148cSRobert Hancock unsigned int i; 21986521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 21996521148cSRobert Hancock if (cmd_descr[i].command == command) 22006521148cSRobert Hancock return cmd_descr[i].text; 22016521148cSRobert Hancock #endif 22026521148cSRobert Hancock 2203d4520903SHannes Reinecke return "unknown"; 22046521148cSRobert Hancock } 2205d4520903SHannes Reinecke EXPORT_SYMBOL_GPL(ata_get_cmd_name); 22066521148cSRobert Hancock 22076521148cSRobert Hancock /** 22089b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22090260731fSTejun Heo * @link: ATA link EH is going on 2210c6fd2807SJeff Garzik * 2211c6fd2807SJeff Garzik * Report EH to user. 2212c6fd2807SJeff Garzik * 2213c6fd2807SJeff Garzik * LOCKING: 2214c6fd2807SJeff Garzik * None. 
2215c6fd2807SJeff Garzik */ 22169b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2217c6fd2807SJeff Garzik { 22180260731fSTejun Heo struct ata_port *ap = link->ap; 22190260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2220258c4e5cSJens Axboe struct ata_queued_cmd *qc; 2221c6fd2807SJeff Garzik const char *frozen, *desc; 2222462098b0SLevente Kurusa char tries_buf[6] = ""; 2223c6fd2807SJeff Garzik int tag, nr_failed = 0; 2224c6fd2807SJeff Garzik 222594ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 222694ff3d54STejun Heo return; 222794ff3d54STejun Heo 2228c6fd2807SJeff Garzik desc = NULL; 2229c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2230c6fd2807SJeff Garzik desc = ehc->i.desc; 2231c6fd2807SJeff Garzik 2232258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 2233b1c72916STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2234b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2235e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2236e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2237c6fd2807SJeff Garzik continue; 2238c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2239c6fd2807SJeff Garzik continue; 2240c6fd2807SJeff Garzik 2241c6fd2807SJeff Garzik nr_failed++; 2242c6fd2807SJeff Garzik } 2243c6fd2807SJeff Garzik 2244c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2245c6fd2807SJeff Garzik return; 2246c6fd2807SJeff Garzik 2247c6fd2807SJeff Garzik frozen = ""; 2248c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 2249c6fd2807SJeff Garzik frozen = " frozen"; 2250c6fd2807SJeff Garzik 2251a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2252462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d", 2253a1e10f7eSTejun Heo ap->eh_tries); 2254a1e10f7eSTejun Heo 2255c6fd2807SJeff Garzik if (ehc->i.dev) { 2256a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "exception Emask 0x%x " 2257a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2258a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2259a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2260c6fd2807SJeff Garzik if (desc) 2261a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2262c6fd2807SJeff Garzik } else { 2263a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2264a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2265a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2266a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2267c6fd2807SJeff Garzik if (desc) 2268a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2269c6fd2807SJeff Garzik } 2270c6fd2807SJeff Garzik 22716521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 22721333e194SRobert Hancock if (ehc->i.serror) 2273a9a79dfeSJoe Perches ata_link_err(link, 22741333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 22751333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 22761333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 22771333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 22781333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 22791333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 22801333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 22811333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 22821333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? 
"PHYInt " : "", 22831333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 22841333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 22851333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 22861333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 22871333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 22881333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 22891333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 22901333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 22911333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 22926521148cSRobert Hancock #endif 22931333e194SRobert Hancock 2294258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 22958a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2296abb6a889STejun Heo char data_buf[20] = ""; 2297abb6a889STejun Heo char cdb_buf[70] = ""; 2298c6fd2807SJeff Garzik 22990260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 2300b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2301c6fd2807SJeff Garzik continue; 2302c6fd2807SJeff Garzik 2303abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2304abb6a889STejun Heo static const char *dma_str[] = { 2305abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2306abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2307abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2308abb6a889STejun Heo }; 2309fb1b8b11SGeert Uytterhoeven const char *prot_str = NULL; 2310abb6a889STejun Heo 2311fb1b8b11SGeert Uytterhoeven switch (qc->tf.protocol) { 2312fb1b8b11SGeert Uytterhoeven case ATA_PROT_UNKNOWN: 2313fb1b8b11SGeert Uytterhoeven prot_str = "unknown"; 2314fb1b8b11SGeert Uytterhoeven break; 2315fb1b8b11SGeert Uytterhoeven case ATA_PROT_NODATA: 2316fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2317fb1b8b11SGeert Uytterhoeven break; 2318fb1b8b11SGeert Uytterhoeven case ATA_PROT_PIO: 2319fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2320fb1b8b11SGeert Uytterhoeven break; 2321fb1b8b11SGeert Uytterhoeven case ATA_PROT_DMA: 2322fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2323fb1b8b11SGeert Uytterhoeven break; 2324fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ: 2325fb1b8b11SGeert Uytterhoeven prot_str = "ncq dma"; 2326fb1b8b11SGeert Uytterhoeven break; 2327fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ_NODATA: 2328fb1b8b11SGeert Uytterhoeven prot_str = "ncq nodata"; 2329fb1b8b11SGeert Uytterhoeven break; 2330fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_NODATA: 2331fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2332fb1b8b11SGeert Uytterhoeven break; 2333fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_PIO: 2334fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2335fb1b8b11SGeert Uytterhoeven break; 2336fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_DMA: 2337fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2338fb1b8b11SGeert Uytterhoeven break; 2339fb1b8b11SGeert Uytterhoeven } 2340abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2341fb1b8b11SGeert Uytterhoeven prot_str, qc->nbytes, dma_str[qc->dma_dir]); 2342abb6a889STejun Heo } 2343abb6a889STejun Heo 23446521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2345a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb; 2346a13b0c9dSHannes Reinecke size_t cdb_len = qc->dev->cdb_len; 2347a13b0c9dSHannes Reinecke 2348cbba5b0eSHannes Reinecke if (qc->scsicmd) { 2349cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd; 2350cbba5b0eSHannes Reinecke 
cdb_len = qc->scsicmd->cmd_len; 2351cbba5b0eSHannes Reinecke } 2352cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf), 2353cbba5b0eSHannes Reinecke cdb, cdb_len); 2354d4520903SHannes Reinecke } else 2355a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2356d4520903SHannes Reinecke ata_get_cmd_name(cmd->command)); 2357abb6a889STejun Heo 2358a9a79dfeSJoe Perches ata_dev_err(qc->dev, 23598a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2360abb6a889STejun Heo "tag %d%s\n %s" 23618a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 23625335b729STejun Heo "Emask 0x%x (%s)%s\n", 23638a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 23648a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 23658a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 23668a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2367abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 2368efcef265SSergey Shtylyov res->status, res->error, res->nsect, 23698a937581STejun Heo res->lbal, res->lbam, res->lbah, 23708a937581STejun Heo res->hob_feature, res->hob_nsect, 23718a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 23725335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 23735335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 23741333e194SRobert Hancock 23756521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 2376efcef265SSergey Shtylyov if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2377e87fd28cSHannes Reinecke ATA_SENSE | ATA_ERR)) { 2378efcef265SSergey Shtylyov if (res->status & ATA_BUSY) 2379a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 23801333e194SRobert Hancock else 2381e87fd28cSHannes Reinecke ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 2382efcef265SSergey Shtylyov res->status & ATA_DRDY ? "DRDY " : "", 2383efcef265SSergey Shtylyov res->status & ATA_DF ? "DF " : "", 2384efcef265SSergey Shtylyov res->status & ATA_DRQ ? "DRQ " : "", 2385efcef265SSergey Shtylyov res->status & ATA_SENSE ? "SENSE " : "", 2386efcef265SSergey Shtylyov res->status & ATA_ERR ? "ERR " : ""); 23871333e194SRobert Hancock } 23881333e194SRobert Hancock 23891333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 2390efcef265SSergey Shtylyov (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | 2391efcef265SSergey Shtylyov ATA_ABORTED))) 2392eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", 2393efcef265SSergey Shtylyov res->error & ATA_ICRC ? "ICRC " : "", 2394efcef265SSergey Shtylyov res->error & ATA_UNC ? "UNC " : "", 2395efcef265SSergey Shtylyov res->error & ATA_AMNF ? "AMNF " : "", 2396efcef265SSergey Shtylyov res->error & ATA_IDNF ? "IDNF " : "", 2397efcef265SSergey Shtylyov res->error & ATA_ABORTED ? "ABRT " : ""); 23986521148cSRobert Hancock #endif 2399c6fd2807SJeff Garzik } 2400c6fd2807SJeff Garzik } 2401c6fd2807SJeff Garzik 24029b1e2658STejun Heo /** 24039b1e2658STejun Heo * ata_eh_report - report error handling to user 24049b1e2658STejun Heo * @ap: ATA port to report EH about 24059b1e2658STejun Heo * 24069b1e2658STejun Heo * Report EH to user. 24079b1e2658STejun Heo * 24089b1e2658STejun Heo * LOCKING: 24099b1e2658STejun Heo * None. 
24109b1e2658STejun Heo */ 2411fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24129b1e2658STejun Heo { 24139b1e2658STejun Heo struct ata_link *link; 24149b1e2658STejun Heo 24151eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 24169b1e2658STejun Heo ata_eh_link_report(link); 24179b1e2658STejun Heo } 24189b1e2658STejun Heo 2419cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2420b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2421b1c72916STejun Heo bool clear_classes) 2422c6fd2807SJeff Garzik { 2423f58229f8STejun Heo struct ata_device *dev; 2424c6fd2807SJeff Garzik 2425b1c72916STejun Heo if (clear_classes) 24261eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2427f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2428c6fd2807SJeff Garzik 2429f046519fSTejun Heo return reset(link, classes, deadline); 2430c6fd2807SJeff Garzik } 2431c6fd2807SJeff Garzik 2432e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2433c6fd2807SJeff Garzik { 243445db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2435ae791c05STejun Heo return 0; 24365dbfc9cbSTejun Heo if (rc == -EAGAIN) 2437c6fd2807SJeff Garzik return 1; 2438071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 24393495de73STejun Heo return 1; 2440c6fd2807SJeff Garzik return 0; 2441c6fd2807SJeff Garzik } 2442c6fd2807SJeff Garzik 2443fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2444c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2445c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2446c6fd2807SJeff Garzik { 2447afaa5c37STejun Heo struct ata_port *ap = link->ap; 2448b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2449936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2450705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2451c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2452416dc9edSTejun Heo unsigned int lflags = link->flags; 2453c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2454d8af0eb6STejun Heo int max_tries = 0, try = 0; 2455b1c72916STejun Heo struct ata_link *failed_link; 2456f58229f8STejun Heo struct ata_device *dev; 2457416dc9edSTejun Heo unsigned long deadline, now; 2458c6fd2807SJeff Garzik ata_reset_fn_t reset; 2459afaa5c37STejun Heo unsigned long flags; 2460416dc9edSTejun Heo u32 sstatus; 2461b1c72916STejun Heo int nr_unknown, rc; 2462c6fd2807SJeff Garzik 2463932648b0STejun Heo /* 2464932648b0STejun Heo * Prepare to reset 2465932648b0STejun Heo */ 2466d8af0eb6STejun Heo while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2467d8af0eb6STejun Heo max_tries++; 2468ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE) 2469ca6d43b0SDan Williams max_tries = 1; 247005944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 247105944bdfSTejun Heo hardreset = NULL; 247205944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 247305944bdfSTejun Heo softreset = NULL; 2474d8af0eb6STejun Heo 247525985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 247619b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 24770a2c0f56STejun Heo now = jiffies; 247819b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 247919b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 248019b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 24810a2c0f56STejun Heo if (time_before(now, deadline)) 24820a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 248319b72321STejun Heo } 24840a2c0f56STejun Heo 2485afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2486afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2487afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2488afaa5c37STejun Heo 2489cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2490c6fd2807SJeff Garzik 24911eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2492cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2493cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2494cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2495cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2496cdeab114STejun Heo * suitable controller mode we should not touch the 2497cdeab114STejun Heo * bus as we may be talking too fast. 2498cdeab114STejun Heo */ 2499cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 25005416912aSAaron Lu dev->dma_mode = 0xff; 2501cdeab114STejun Heo 2502cdeab114STejun Heo /* If the controller has a pio mode setup function 2503cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2504cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2505cdeab114STejun Heo * configuring devices. 
2506cdeab114STejun Heo */ 2507cdeab114STejun Heo if (ap->ops->set_piomode) 2508cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2509cdeab114STejun Heo } 2510cdeab114STejun Heo 2511cf480626STejun Heo /* prefer hardreset */ 2512932648b0STejun Heo reset = NULL; 2513cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2514cf480626STejun Heo if (hardreset) { 2515cf480626STejun Heo reset = hardreset; 2516a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 25174f7faa3fSTejun Heo } else if (softreset) { 2518cf480626STejun Heo reset = softreset; 2519a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2520cf480626STejun Heo } 2521c6fd2807SJeff Garzik 2522c6fd2807SJeff Garzik if (prereset) { 2523b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2524b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2525b1c72916STejun Heo 2526b1c72916STejun Heo if (slave) { 2527b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2528b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2529b1c72916STejun Heo } 2530b1c72916STejun Heo 2531b1c72916STejun Heo rc = prereset(link, deadline); 2532b1c72916STejun Heo 2533b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2534b1c72916STejun Heo * is skipped iff both master and slave links report 2535b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2536b1c72916STejun Heo */ 2537b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2538b1c72916STejun Heo int tmp; 2539b1c72916STejun Heo 2540b1c72916STejun Heo tmp = prereset(slave, deadline); 2541b1c72916STejun Heo if (tmp != -ENOENT) 2542b1c72916STejun Heo rc = tmp; 2543b1c72916STejun Heo 2544b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2545b1c72916STejun Heo } 2546b1c72916STejun Heo 2547c6fd2807SJeff Garzik if (rc) { 2548c961922bSAlan Cox if (rc == -ENOENT) { 2549a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2550cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 25514aa9ab67STejun Heo 25521eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2553f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 25544aa9ab67STejun Heo 25554aa9ab67STejun Heo rc = 0; 2556c961922bSAlan Cox } else 2557a9a79dfeSJoe Perches ata_link_err(link, 2558a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2559a9a79dfeSJoe Perches rc); 2560fccb6ea5STejun Heo goto out; 2561c6fd2807SJeff Garzik } 2562c6fd2807SJeff Garzik 2563932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2564d6515e6fSTejun Heo * bang classes, thaw and return. 2565932648b0STejun Heo */ 2566932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 25671eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2568f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2569d6515e6fSTejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) && 2570d6515e6fSTejun Heo ata_is_host_link(link)) 2571d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2572fccb6ea5STejun Heo rc = 0; 2573fccb6ea5STejun Heo goto out; 2574c6fd2807SJeff Garzik } 2575932648b0STejun Heo } 2576c6fd2807SJeff Garzik 2577c6fd2807SJeff Garzik retry: 2578932648b0STejun Heo /* 2579932648b0STejun Heo * Perform reset 2580932648b0STejun Heo */ 2581dc98c32cSTejun Heo if (ata_is_host_link(link)) 2582dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2583dc98c32cSTejun Heo 2584341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 258531daabdaSTejun Heo 2586932648b0STejun Heo if (reset) { 2587c6fd2807SJeff Garzik if (verbose) 2588a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2589c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2590c6fd2807SJeff Garzik 2591c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 259219b72321STejun Heo ehc->last_reset = jiffies; 2593f8ec26d0SHannes Reinecke if (reset == hardreset) { 25940d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 2595f8ec26d0SHannes Reinecke trace_ata_link_hardreset_begin(link, classes, deadline); 2596f8ec26d0SHannes Reinecke } else { 25970d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2598f8ec26d0SHannes Reinecke trace_ata_link_softreset_begin(link, classes, deadline); 2599f8ec26d0SHannes Reinecke } 2600c6fd2807SJeff Garzik 2601b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2602f8ec26d0SHannes Reinecke if (reset == hardreset) 2603f8ec26d0SHannes Reinecke trace_ata_link_hardreset_end(link, classes, rc); 2604f8ec26d0SHannes Reinecke else 2605f8ec26d0SHannes Reinecke trace_ata_link_softreset_end(link, classes, rc); 2606b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2607b1c72916STejun Heo failed_link = link; 26085dbfc9cbSTejun Heo goto fail; 2609b1c72916STejun Heo } 2610c6fd2807SJeff Garzik 2611b1c72916STejun Heo /* hardreset slave link if existent */ 2612b1c72916STejun Heo if (slave && reset == hardreset) { 2613b1c72916STejun Heo int tmp; 2614b1c72916STejun Heo 2615b1c72916STejun Heo if (verbose) 2616a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2617b1c72916STejun Heo 2618b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2619f8ec26d0SHannes Reinecke trace_ata_slave_hardreset_begin(slave, classes, 2620f8ec26d0SHannes Reinecke deadline); 2621b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2622b1c72916STejun Heo false); 2623f8ec26d0SHannes Reinecke trace_ata_slave_hardreset_end(slave, classes, tmp); 2624b1c72916STejun Heo switch (tmp) { 2625b1c72916STejun Heo case -EAGAIN: 2626b1c72916STejun Heo rc = -EAGAIN; 2627e06abcc6SGustavo A. R. 
Silva break; 2628b1c72916STejun Heo case 0: 2629b1c72916STejun Heo break; 2630b1c72916STejun Heo default: 2631b1c72916STejun Heo failed_link = slave; 2632b1c72916STejun Heo rc = tmp; 2633b1c72916STejun Heo goto fail; 2634b1c72916STejun Heo } 2635b1c72916STejun Heo } 2636b1c72916STejun Heo 2637b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2638c6fd2807SJeff Garzik if (reset == hardreset && 2639e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2640c6fd2807SJeff Garzik reset = softreset; 2641c6fd2807SJeff Garzik 2642c6fd2807SJeff Garzik if (!reset) { 2643a9a79dfeSJoe Perches ata_link_err(link, 2644a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2645b1c72916STejun Heo failed_link = link; 2646fccb6ea5STejun Heo rc = -EINVAL; 264708cf69d0STejun Heo goto fail; 2648c6fd2807SJeff Garzik } 2649c6fd2807SJeff Garzik 2650cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2651f8ec26d0SHannes Reinecke trace_ata_link_softreset_begin(link, classes, deadline); 2652b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2653f8ec26d0SHannes Reinecke trace_ata_link_softreset_end(link, classes, rc); 2654fe2c4d01STejun Heo if (rc) { 2655fe2c4d01STejun Heo failed_link = link; 2656fe2c4d01STejun Heo goto fail; 2657fe2c4d01STejun Heo } 2658c6fd2807SJeff Garzik } 2659932648b0STejun Heo } else { 2660932648b0STejun Heo if (verbose) 2661a9a79dfeSJoe Perches ata_link_info(link, 2662a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2663932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2664932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2665932648b0STejun Heo } 2666008a7896STejun Heo 2667932648b0STejun Heo /* 2668932648b0STejun Heo * Post-reset processing 2669932648b0STejun Heo */ 26701eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2671416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2672416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2673416dc9edSTejun Heo * drives from sleeping mode. 2674c6fd2807SJeff Garzik */ 2675f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2676054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2677c6fd2807SJeff Garzik 26783b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 26793b761d3dSTejun Heo continue; 26803b761d3dSTejun Heo 26814ccd3329STejun Heo /* apply class override */ 2682416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2683ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2684416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2685816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2686ae791c05STejun Heo } 2687ae791c05STejun Heo 2688008a7896STejun Heo /* record current link speed */ 2689936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2690936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2691b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2692b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2693008a7896STejun Heo 2694dc98c32cSTejun Heo /* thaw the port */ 2695dc98c32cSTejun Heo if (ata_is_host_link(link)) 2696dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2697dc98c32cSTejun Heo 2698f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2699f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2700f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 
2701f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2702f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2703f046519fSTejun Heo * link onlineness and classification result later. 2704f046519fSTejun Heo */ 2705b1c72916STejun Heo if (postreset) { 2706cc0680a5STejun Heo postreset(link, classes); 2707f8ec26d0SHannes Reinecke trace_ata_link_postreset(link, classes, rc); 2708f8ec26d0SHannes Reinecke if (slave) { 2709b1c72916STejun Heo postreset(slave, classes); 2710f8ec26d0SHannes Reinecke trace_ata_slave_postreset(slave, classes, rc); 2711f8ec26d0SHannes Reinecke } 2712b1c72916STejun Heo } 2713c6fd2807SJeff Garzik 27141e641060STejun Heo /* 27158c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 27168c56caccSTejun Heo * error conditions during reset. Clear accumulated error 27178c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 27188c56caccSTejun Heo * final recovery action and we cross check link onlineness against 27198c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 27201e641060STejun Heo */ 2721f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 27221e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2723b1c72916STejun Heo if (slave) 27241e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 27251e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2726f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2727f046519fSTejun Heo 27288c56caccSTejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) 27298c56caccSTejun Heo ata_eh_thaw_port(ap); 27308c56caccSTejun Heo 27313b761d3dSTejun Heo /* 27323b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2733f046519fSTejun Heo * Hotplug could have happened during reset and some 2734f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2735f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 27363b761d3dSTejun Heo * link on/offlineness and classification result, those 27373b761d3dSTejun Heo * conditions can be reliably detected and retried. 
2738f046519fSTejun Heo */ 2739b1c72916STejun Heo nr_unknown = 0; 27401eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 27413b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2742b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2743a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2744f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2745b1c72916STejun Heo nr_unknown++; 2746b1c72916STejun Heo } 27473b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 27483b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2749a9a79dfeSJoe Perches ata_dev_dbg(dev, 2750a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 27513b761d3dSTejun Heo classes[dev->devno]); 27523b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27533b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2754a9a79dfeSJoe Perches ata_dev_dbg(dev, 2755a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 27563b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 27573b761d3dSTejun Heo } 2758f046519fSTejun Heo } 2759f046519fSTejun Heo 2760b1c72916STejun Heo if (classify && nr_unknown) { 2761f046519fSTejun Heo if (try < max_tries) { 2762a9a79dfeSJoe Perches ata_link_warn(link, 2763a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 27643b761d3dSTejun Heo nr_unknown); 2765b1c72916STejun Heo failed_link = link; 2766f046519fSTejun Heo rc = -EAGAIN; 2767f046519fSTejun Heo goto fail; 2768f046519fSTejun Heo } 2769a9a79dfeSJoe Perches ata_link_warn(link, 27703b761d3dSTejun Heo "link online but %d devices misclassified, " 27713b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2772f046519fSTejun Heo } 2773f046519fSTejun Heo 2774c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2775cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2776b1c72916STejun Heo if (slave) 2777b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 277819b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2779c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 27806b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2781416dc9edSTejun Heo 2782416dc9edSTejun Heo rc = 0; 2783fccb6ea5STejun Heo out: 2784fccb6ea5STejun Heo /* clear hotplug flag */ 2785fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2786b1c72916STejun Heo if (slave) 2787b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2788afaa5c37STejun Heo 2789afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2790afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2791afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2792afaa5c37STejun Heo 2793c6fd2807SJeff Garzik return rc; 2794416dc9edSTejun Heo 2795416dc9edSTejun Heo fail: 27965958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 27975958e302STejun Heo if (!ata_is_host_link(link) && 27985958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 27995958e302STejun Heo rc = -ERESTART; 28005958e302STejun Heo 28017a46c078SGwendal Grignou if (try >= max_tries) { 28028ea7645cSTejun Heo /* 28038ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 28048ea7645cSTejun Heo * can be retried on the next phy event. This risks 28058ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 28068ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
28078ea7645cSTejun Heo */ 28088ea7645cSTejun Heo if (ata_is_host_link(link)) 28098ea7645cSTejun Heo ata_eh_thaw_port(ap); 2810416dc9edSTejun Heo goto out; 28118ea7645cSTejun Heo } 2812416dc9edSTejun Heo 2813416dc9edSTejun Heo now = jiffies; 2814416dc9edSTejun Heo if (time_before(now, deadline)) { 2815416dc9edSTejun Heo unsigned long delta = deadline - now; 2816416dc9edSTejun Heo 2817a9a79dfeSJoe Perches ata_link_warn(failed_link, 28180a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 28190a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2820416dc9edSTejun Heo 2821c0c362b6STejun Heo ata_eh_release(ap); 2822416dc9edSTejun Heo while (delta) 2823416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2824c0c362b6STejun Heo ata_eh_acquire(ap); 2825416dc9edSTejun Heo } 2826416dc9edSTejun Heo 28277a46c078SGwendal Grignou /* 28287a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 28297a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 28307a46c078SGwendal Grignou */ 28317a46c078SGwendal Grignou if (rc == -ERESTART) { 28327a46c078SGwendal Grignou if (ata_is_host_link(link)) 28337a46c078SGwendal Grignou ata_eh_thaw_port(ap); 28347a46c078SGwendal Grignou goto out; 28357a46c078SGwendal Grignou } 28367a46c078SGwendal Grignou 2837b1c72916STejun Heo if (try == max_tries - 1) { 2838a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2839b1c72916STejun Heo if (slave) 2840a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2841b1c72916STejun Heo } else if (rc == -EPIPE) 2842a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2843b1c72916STejun Heo 2844416dc9edSTejun Heo if (hardreset) 2845416dc9edSTejun Heo reset = hardreset; 2846416dc9edSTejun Heo goto retry; 2847c6fd2807SJeff Garzik } 2848c6fd2807SJeff Garzik 284945fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 285045fabbb7SElias Oltmanns { 285145fabbb7SElias Oltmanns struct ata_link *link; 285245fabbb7SElias Oltmanns struct ata_device *dev; 285345fabbb7SElias Oltmanns unsigned long flags; 285445fabbb7SElias Oltmanns 285545fabbb7SElias Oltmanns /* 285645fabbb7SElias Oltmanns * This function can be thought of as an extended version of 285745fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 285845fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 285945fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 286045fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 286145fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 286245fabbb7SElias Oltmanns * up park requests to other devices on the same port or 286345fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 286445fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 286545fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 286645fabbb7SElias Oltmanns * 286745fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 286816735d02SWolfram Sang * through reinit_completion() (see below) or complete_all() 286945fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 287045fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 287145fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 287245fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 287345fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 287445fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 287545fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 287645fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 287745fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 287845fabbb7SElias Oltmanns * ata_eh_recover() again. 287945fabbb7SElias Oltmanns */ 288045fabbb7SElias Oltmanns 288145fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 288216735d02SWolfram Sang reinit_completion(&ap->park_req_pending); 28831eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 28841eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 288545fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 288645fabbb7SElias Oltmanns 288745fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 288845fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 288945fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 289045fabbb7SElias Oltmanns } 289145fabbb7SElias Oltmanns } 289245fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 289345fabbb7SElias Oltmanns } 289445fabbb7SElias Oltmanns 289545fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 289645fabbb7SElias Oltmanns { 289745fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 289845fabbb7SElias Oltmanns struct ata_taskfile tf; 289945fabbb7SElias Oltmanns unsigned int err_mask; 290045fabbb7SElias Oltmanns 290145fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 290245fabbb7SElias Oltmanns if (park) { 290345fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 290445fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 290545fabbb7SElias Oltmanns tf.feature = 0x44; 290645fabbb7SElias Oltmanns tf.lbal = 0x4c; 290745fabbb7SElias Oltmanns tf.lbam = 0x4e; 290845fabbb7SElias Oltmanns tf.lbah = 0x55; 290945fabbb7SElias Oltmanns } else { 291045fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 291145fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 291245fabbb7SElias Oltmanns } 291345fabbb7SElias Oltmanns 291445fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 2915bd18bc04SHannes Reinecke tf.protocol = ATA_PROT_NODATA; 291645fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 291745fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 2918a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 291945fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 292045fabbb7SElias Oltmanns } 292145fabbb7SElias Oltmanns } 292245fabbb7SElias Oltmanns 29230260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2924c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2925c6fd2807SJeff Garzik { 29260260731fSTejun Heo struct ata_port *ap = link->ap; 29270260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2928c6fd2807SJeff Garzik struct ata_device *dev; 29298c3c52a8STejun Heo unsigned int new_mask = 0; 2930c6fd2807SJeff Garzik unsigned long flags; 2931f58229f8STejun Heo int rc = 0; 2932c6fd2807SJeff Garzik 29338c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 29348c3c52a8STejun Heo * be done backwards such that PDIAG- is 
released by the slave 29358c3c52a8STejun Heo * device before the master device is identified. 29368c3c52a8STejun Heo */ 29371eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 2938f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2939f58229f8STejun Heo unsigned int readid_flags = 0; 2940c6fd2807SJeff Garzik 2941bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2942bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2943bff04647STejun Heo 29449666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2945633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2946633273a3STejun Heo 294771d7b6e5SNiklas Cassel /* 294871d7b6e5SNiklas Cassel * The link may be in a deep sleep, wake it up. 294971d7b6e5SNiklas Cassel * 295071d7b6e5SNiklas Cassel * If the link is in deep sleep, ata_phys_link_offline() 295171d7b6e5SNiklas Cassel * will return true, causing the revalidation to fail, 295271d7b6e5SNiklas Cassel * which leads to a (potentially) needless hard reset. 295371d7b6e5SNiklas Cassel * 295471d7b6e5SNiklas Cassel * ata_eh_recover() will later restore the link policy 295571d7b6e5SNiklas Cassel * to ap->target_lpm_policy after revalidation is done. 295671d7b6e5SNiklas Cassel */ 295771d7b6e5SNiklas Cassel if (link->lpm_policy > ATA_LPM_MAX_POWER) { 295871d7b6e5SNiklas Cassel rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER, 295971d7b6e5SNiklas Cassel r_failed_dev); 296071d7b6e5SNiklas Cassel if (rc) 296171d7b6e5SNiklas Cassel goto err; 296271d7b6e5SNiklas Cassel } 296371d7b6e5SNiklas Cassel 2964b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2965c6fd2807SJeff Garzik rc = -EIO; 29668c3c52a8STejun Heo goto err; 2967c6fd2807SJeff Garzik } 2968c6fd2807SJeff Garzik 29690260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2970422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2971422c9daaSTejun Heo readid_flags); 2972c6fd2807SJeff Garzik if (rc) 29738c3c52a8STejun Heo goto err; 2974c6fd2807SJeff Garzik 29750260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2976c6fd2807SJeff Garzik 2977baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2978baa1e78aSTejun Heo * transfer mode. 2979baa1e78aSTejun Heo */ 2980baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2981baa1e78aSTejun Heo 2982c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2983ad72cf98STejun Heo schedule_work(&(ap->scsi_rescan_task)); 2984c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2985c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2986c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2987842faa6cSTejun Heo /* Temporarily set dev->class, it will be 2988842faa6cSTejun Heo * permanently set once all configurations are 2989842faa6cSTejun Heo * complete. This is necessary because new 2990842faa6cSTejun Heo * device configuration is done in two 2991842faa6cSTejun Heo * separate loops. 
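 * (i.e. the IDENTIFY/attach pass in this loop and the
 * ata_dev_configure() pass further below.)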
2992842faa6cSTejun Heo */ 2993c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2994c6fd2807SJeff Garzik 2995633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2996633273a3STejun Heo rc = sata_pmp_attach(dev); 2997633273a3STejun Heo else 2998633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2999633273a3STejun Heo readid_flags, dev->id); 3000842faa6cSTejun Heo 3001842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3002842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3003842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3004842faa6cSTejun Heo 30058c3c52a8STejun Heo switch (rc) { 30068c3c52a8STejun Heo case 0: 300799cf610aSTejun Heo /* clear error info accumulated during probe */ 300899cf610aSTejun Heo ata_ering_clear(&dev->ering); 3009f58229f8STejun Heo new_mask |= 1 << dev->devno; 30108c3c52a8STejun Heo break; 30118c3c52a8STejun Heo case -ENOENT: 301255a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 301355a8e2c8STejun Heo * device. No need to reset. Just 3014842faa6cSTejun Heo * thaw and ignore the device. 301555a8e2c8STejun Heo */ 301655a8e2c8STejun Heo ata_eh_thaw_port(ap); 3017c6fd2807SJeff Garzik break; 30188c3c52a8STejun Heo default: 30198c3c52a8STejun Heo goto err; 30208c3c52a8STejun Heo } 30218c3c52a8STejun Heo } 3022c6fd2807SJeff Garzik } 3023c6fd2807SJeff Garzik 3024c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 302533267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 302633267325STejun Heo if (ap->ops->cable_detect) 3027c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 302833267325STejun Heo ata_force_cbl(ap); 302933267325STejun Heo } 3030c1c4e8d5STejun Heo 30318c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 30328c3c52a8STejun Heo * device detection messages backwards. 
30338c3c52a8STejun Heo */ 30341eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 30354f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 30368c3c52a8STejun Heo continue; 30378c3c52a8STejun Heo 3038842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3039842faa6cSTejun Heo 30404f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 30414f7c2874STejun Heo continue; 30424f7c2874STejun Heo 30438c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 30448c3c52a8STejun Heo rc = ata_dev_configure(dev); 30458c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3046842faa6cSTejun Heo if (rc) { 3047842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 30488c3c52a8STejun Heo goto err; 3049842faa6cSTejun Heo } 30508c3c52a8STejun Heo 3051c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3052c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3053c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3054baa1e78aSTejun Heo 305555a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3056baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3057c6fd2807SJeff Garzik } 3058c6fd2807SJeff Garzik 30598c3c52a8STejun Heo return 0; 30608c3c52a8STejun Heo 30618c3c52a8STejun Heo err: 3062c6fd2807SJeff Garzik *r_failed_dev = dev; 3063c6fd2807SJeff Garzik return rc; 3064c6fd2807SJeff Garzik } 3065c6fd2807SJeff Garzik 30666f1d1e3aSTejun Heo /** 30676f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 30686f1d1e3aSTejun Heo * @link: link on which timings will be programmed 306998a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 30706f1d1e3aSTejun Heo * 30716f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 30726f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 30736f1d1e3aSTejun Heo * returned in @r_failed_dev. 30746f1d1e3aSTejun Heo * 30756f1d1e3aSTejun Heo * LOCKING: 30766f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 30776f1d1e3aSTejun Heo * 30786f1d1e3aSTejun Heo * RETURNS: 30796f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 30806f1d1e3aSTejun Heo */ 30816f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 30826f1d1e3aSTejun Heo { 30836f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 308400115e0fSTejun Heo struct ata_device *dev; 308500115e0fSTejun Heo int rc; 30866f1d1e3aSTejun Heo 308776326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 30881eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 308976326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 309076326ac1STejun Heo struct ata_ering_entry *ent; 309176326ac1STejun Heo 309276326ac1STejun Heo ent = ata_ering_top(&dev->ering); 309376326ac1STejun Heo if (ent) 309476326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 309576326ac1STejun Heo } 309676326ac1STejun Heo } 309776326ac1STejun Heo 30986f1d1e3aSTejun Heo /* has private set_mode? 
*/ 30996f1d1e3aSTejun Heo if (ap->ops->set_mode) 310000115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 310100115e0fSTejun Heo else 310200115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 310300115e0fSTejun Heo 310400115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 31051eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 310600115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 310700115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 310800115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 310900115e0fSTejun Heo 311000115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 311100115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 311200115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 311300115e0fSTejun Heo } 311400115e0fSTejun Heo 311500115e0fSTejun Heo return rc; 31166f1d1e3aSTejun Heo } 31176f1d1e3aSTejun Heo 311811fc33daSTejun Heo /** 311911fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 312011fc33daSTejun Heo * @dev: ATAPI device to clear UA for 312111fc33daSTejun Heo * 312211fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 312311fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 312411fc33daSTejun Heo * function clears UA. 312511fc33daSTejun Heo * 312611fc33daSTejun Heo * LOCKING: 312711fc33daSTejun Heo * EH context (may sleep). 312811fc33daSTejun Heo * 312911fc33daSTejun Heo * RETURNS: 313011fc33daSTejun Heo * 0 on success, -errno on failure. 313111fc33daSTejun Heo */ 313211fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 313311fc33daSTejun Heo { 313411fc33daSTejun Heo int i; 313511fc33daSTejun Heo 313611fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3137b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 313811fc33daSTejun Heo u8 sense_key = 0; 313911fc33daSTejun Heo unsigned int err_mask; 314011fc33daSTejun Heo 314111fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 314211fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3143a9a79dfeSJoe Perches ata_dev_warn(dev, 3144a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3145a9a79dfeSJoe Perches err_mask); 314611fc33daSTejun Heo return -EIO; 314711fc33daSTejun Heo } 314811fc33daSTejun Heo 314911fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 315011fc33daSTejun Heo return 0; 315111fc33daSTejun Heo 315211fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 315311fc33daSTejun Heo if (err_mask) { 3154a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 315511fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 315611fc33daSTejun Heo return -EIO; 315711fc33daSTejun Heo } 315811fc33daSTejun Heo } 315911fc33daSTejun Heo 3160a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3161a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 316211fc33daSTejun Heo 316311fc33daSTejun Heo return 0; 316411fc33daSTejun Heo } 316511fc33daSTejun Heo 31666013efd8STejun Heo /** 31676013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 31686013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 31696013efd8STejun Heo * 31706013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 31716013efd8STejun Heo * immediately as it means that @dev failed to remap and already 31726013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't 
make 31736013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 31746013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 31756013efd8STejun Heo * to be retried. 31766013efd8STejun Heo * 31776013efd8STejun Heo * This function determines whether FLUSH failure retry is 31786013efd8STejun Heo * necessary and performs it if so. 31796013efd8STejun Heo * 31806013efd8STejun Heo * RETURNS: 31816013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 31826013efd8STejun Heo */ 31836013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 31846013efd8STejun Heo { 31856013efd8STejun Heo struct ata_link *link = dev->link; 31866013efd8STejun Heo struct ata_port *ap = link->ap; 31876013efd8STejun Heo struct ata_queued_cmd *qc; 31886013efd8STejun Heo struct ata_taskfile tf; 31896013efd8STejun Heo unsigned int err_mask; 31906013efd8STejun Heo int rc = 0; 31916013efd8STejun Heo 31926013efd8STejun Heo /* did flush fail for this device? */ 31936013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 31946013efd8STejun Heo return 0; 31956013efd8STejun Heo 31966013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 31976013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 31986013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 31996013efd8STejun Heo return 0; 32006013efd8STejun Heo 32016013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 32026013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 32036013efd8STejun Heo return 0; 32046013efd8STejun Heo 32056013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 32066013efd8STejun Heo ata_tf_init(dev, &tf); 32076013efd8STejun Heo 32086013efd8STejun Heo tf.command = qc->tf.command; 32096013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 32106013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 32116013efd8STejun Heo 3212a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 32136013efd8STejun Heo tf.command, qc->err_mask); 32146013efd8STejun Heo 32156013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 32166013efd8STejun Heo if (!err_mask) { 32176013efd8STejun Heo /* 32186013efd8STejun Heo * FLUSH is complete but there's no way to 32196013efd8STejun Heo * successfully complete a failed command from EH. 32206013efd8STejun Heo * Making sure retry is allowed at least once and 32216013efd8STejun Heo * retrying it should do the trick - whatever was in 32226013efd8STejun Heo * the cache is already on the platter and this won't 32236013efd8STejun Heo * cause infinite loop. 
32246013efd8STejun Heo */ 32256013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 32266013efd8STejun Heo } else { 3227a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 32286013efd8STejun Heo err_mask); 32296013efd8STejun Heo rc = -EIO; 32306013efd8STejun Heo 32316013efd8STejun Heo /* if device failed it, report it to upper layers */ 32326013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 32336013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 32346013efd8STejun Heo qc->result_tf = tf; 32356013efd8STejun Heo if (!(ap->pflags & ATA_PFLAG_FROZEN)) 32366013efd8STejun Heo rc = 0; 32376013efd8STejun Heo } 32386013efd8STejun Heo } 32396013efd8STejun Heo return rc; 32406013efd8STejun Heo } 32416013efd8STejun Heo 32426b7ae954STejun Heo /** 32436b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 32446b7ae954STejun Heo * @link: link to configure power management 32456b7ae954STejun Heo * @policy: the link power management policy 32466b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 32476b7ae954STejun Heo * 32486b7ae954STejun Heo * Enable SATA Interface power management. This will enable 3249f4ac6476SHans de Goede * Device Interface Power Management (DIPM) for min_power and 3250f4ac6476SHans de Goede * medium_power_with_dipm policies, and then call driver specific 3251f4ac6476SHans de Goede * callbacks for enabling Host Initiated Power management. 32526b7ae954STejun Heo * 32536b7ae954STejun Heo * LOCKING: 32546b7ae954STejun Heo * EH context. 32556b7ae954STejun Heo * 32566b7ae954STejun Heo * RETURNS: 32576b7ae954STejun Heo * 0 on success, -errno on failure. 32586b7ae954STejun Heo */ 32596b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 32606b7ae954STejun Heo struct ata_device **r_failed_dev) 32616b7ae954STejun Heo { 32626c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 32636b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 32646b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3265e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 32665f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 32676b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 32686b7ae954STejun Heo unsigned int err_mask; 32696b7ae954STejun Heo int rc; 32706b7ae954STejun Heo 32716b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 32724c9029e7SBartlomiej Zolnierkiewicz if (!IS_ENABLED(CONFIG_SATA_HOST) || 32734c9029e7SBartlomiej Zolnierkiewicz (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 32746b7ae954STejun Heo return 0; 32756b7ae954STejun Heo 32766b7ae954STejun Heo /* 32776b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 32786b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 32796b7ae954STejun Heo * device and link configurations such that the host always 32806b7ae954STejun Heo * allows DIPM requests. 
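 * (Concretely: DIPM is disabled first where needed, the link policy is
 * then changed via the host or PMP callback, and DIPM is re-enabled
 * further below only for policies that permit it.)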
32816b7ae954STejun Heo */ 32826b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 32836b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3284ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 32856b7ae954STejun Heo 32866b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 32876b7ae954STejun Heo if (!link_dev) 32886b7ae954STejun Heo link_dev = dev; 32896b7ae954STejun Heo 32906b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 32916b7ae954STejun Heo lpm_dev = dev; 32926b7ae954STejun Heo 32936b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 32946b7ae954STejun Heo if (!hipm) 32956b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 32966b7ae954STejun Heo 32976b7ae954STejun Heo /* disable DIPM before changing link config */ 3298f4ac6476SHans de Goede if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) { 32996b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33006b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 33016b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3302a9a79dfeSJoe Perches ata_dev_warn(dev, 33036b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 33046b7ae954STejun Heo err_mask); 33056b7ae954STejun Heo rc = -EIO; 33066b7ae954STejun Heo goto fail; 33076b7ae954STejun Heo } 33086b7ae954STejun Heo } 33096b7ae954STejun Heo } 33106b7ae954STejun Heo 33116c8ea89cSTejun Heo if (ap) { 33126b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 33136b7ae954STejun Heo if (!rc && ap->slave_link) 33146b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 33156c8ea89cSTejun Heo } else 33166c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 33176b7ae954STejun Heo 33186b7ae954STejun Heo /* 33196b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 33206b7ae954STejun Heo * device on the link. 33216b7ae954STejun Heo */ 33226b7ae954STejun Heo if (rc) { 33236b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 33246b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33256b7ae954STejun Heo return 0; 33266b7ae954STejun Heo } 33276b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 33286b7ae954STejun Heo goto fail; 33296b7ae954STejun Heo } 33306b7ae954STejun Heo 3331e5005b15STejun Heo /* 3332e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3333e5005b15STejun Heo * with the new policy set. 
3334e5005b15STejun Heo */ 3335e5005b15STejun Heo link->lpm_policy = policy; 3336e5005b15STejun Heo if (ap && ap->slave_link) 3337e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3338e5005b15STejun Heo 33396b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 33406b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3341f4ac6476SHans de Goede if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm && 3342ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 33436b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33446b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 33456b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3346a9a79dfeSJoe Perches ata_dev_warn(dev, 33476b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 33486b7ae954STejun Heo err_mask); 33496b7ae954STejun Heo rc = -EIO; 33506b7ae954STejun Heo goto fail; 33516b7ae954STejun Heo } 33526b7ae954STejun Heo } 33536b7ae954STejun Heo } 33546b7ae954STejun Heo 335509c5b480SGabriele Mazzotta link->last_lpm_change = jiffies; 335609c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED; 335709c5b480SGabriele Mazzotta 33586b7ae954STejun Heo return 0; 33596b7ae954STejun Heo 33606b7ae954STejun Heo fail: 3361e5005b15STejun Heo /* restore the old policy */ 3362e5005b15STejun Heo link->lpm_policy = old_policy; 3363e5005b15STejun Heo if (ap && ap->slave_link) 3364e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3365e5005b15STejun Heo 33666b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 33676b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3368a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 33696b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 33706b7ae954STejun Heo } 33716b7ae954STejun Heo if (r_failed_dev) 33726b7ae954STejun Heo *r_failed_dev = dev; 33736b7ae954STejun Heo return rc; 33746b7ae954STejun Heo } 33756b7ae954STejun Heo 33768a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3377c6fd2807SJeff Garzik { 3378f58229f8STejun Heo struct ata_device *dev; 3379f58229f8STejun Heo int cnt = 0; 3380c6fd2807SJeff Garzik 33811eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3382c6fd2807SJeff Garzik cnt++; 3383c6fd2807SJeff Garzik return cnt; 3384c6fd2807SJeff Garzik } 3385c6fd2807SJeff Garzik 33860260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3387c6fd2807SJeff Garzik { 3388f58229f8STejun Heo struct ata_device *dev; 3389f58229f8STejun Heo int cnt = 0; 3390c6fd2807SJeff Garzik 33911eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3392f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3393c6fd2807SJeff Garzik cnt++; 3394c6fd2807SJeff Garzik return cnt; 3395c6fd2807SJeff Garzik } 3396c6fd2807SJeff Garzik 33970260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3398c6fd2807SJeff Garzik { 3399672b2d65STejun Heo struct ata_port *ap = link->ap; 34000260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3401f58229f8STejun Heo struct ata_device *dev; 3402c6fd2807SJeff Garzik 3403f9df58cbSTejun Heo /* skip disabled links */ 3404f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3405f9df58cbSTejun Heo return 1; 3406f9df58cbSTejun Heo 3407e2f3d75fSTejun Heo /* skip if explicitly requested */ 3408e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3409e2f3d75fSTejun Heo return 1; 3410e2f3d75fSTejun Heo 3411672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 3412672b2d65STejun Heo if ((ap->pflags 
& ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3413672b2d65STejun Heo return 0; 3414672b2d65STejun Heo 3415672b2d65STejun Heo /* reset at least once if reset is requested */ 3416672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 3417672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 3418c6fd2807SJeff Garzik return 0; 3419c6fd2807SJeff Garzik 3420c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 34211eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3422c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 3423c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 3424c6fd2807SJeff Garzik return 0; 3425c6fd2807SJeff Garzik } 3426c6fd2807SJeff Garzik 3427c6fd2807SJeff Garzik return 1; 3428c6fd2807SJeff Garzik } 3429c6fd2807SJeff Garzik 3430c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3431c2c7a89cSTejun Heo { 3432c2c7a89cSTejun Heo u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3433c2c7a89cSTejun Heo u64 now = get_jiffies_64(); 3434c2c7a89cSTejun Heo int *trials = void_arg; 3435c2c7a89cSTejun Heo 34366868225eSLin Ming if ((ent->eflags & ATA_EFLAG_OLD_ER) || 34376868225eSLin Ming (ent->timestamp < now - min(now, interval))) 3438c2c7a89cSTejun Heo return -1; 3439c2c7a89cSTejun Heo 3440c2c7a89cSTejun Heo (*trials)++; 3441c2c7a89cSTejun Heo return 0; 3442c2c7a89cSTejun Heo } 3443c2c7a89cSTejun Heo 344402c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 344502c05a27STejun Heo { 344602c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3447c2c7a89cSTejun Heo struct ata_link *link = ata_dev_phys_link(dev); 3448c2c7a89cSTejun Heo int trials = 0; 344902c05a27STejun Heo 345002c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 345102c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 345202c05a27STejun Heo return 0; 345302c05a27STejun Heo 345402c05a27STejun Heo ata_eh_detach_dev(dev); 345502c05a27STejun Heo ata_dev_init(dev); 345602c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 3457cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 345800115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 345900115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 346002c05a27STejun Heo 34616b7ae954STejun Heo /* the link maybe in a deep sleep, wake it up */ 34626c8ea89cSTejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) { 34636c8ea89cSTejun Heo if (ata_is_host_link(link)) 34646c8ea89cSTejun Heo link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 34656c8ea89cSTejun Heo ATA_LPM_EMPTY); 34666c8ea89cSTejun Heo else 34676c8ea89cSTejun Heo sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 34686c8ea89cSTejun Heo ATA_LPM_EMPTY); 34696c8ea89cSTejun Heo } 34706b7ae954STejun Heo 3471c2c7a89cSTejun Heo /* Record and count probe trials on the ering. The specific 3472c2c7a89cSTejun Heo * error mask used is irrelevant. Because a successful device 3473c2c7a89cSTejun Heo * detection clears the ering, this count accumulates only if 3474c2c7a89cSTejun Heo * there are consecutive failed probes. 3475c2c7a89cSTejun Heo * 3476c2c7a89cSTejun Heo * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3477c2c7a89cSTejun Heo * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3478c2c7a89cSTejun Heo * forced to 1.5Gbps. 
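 * (Note: the comparison below is strict, so the speed limit is applied
 * only once more than ATA_EH_PROBE_TRIALS trials have been counted
 * within that interval.)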
3479c2c7a89cSTejun Heo * 3480c2c7a89cSTejun Heo * This is to work around cases where failed link speed 3481c2c7a89cSTejun Heo * negotiation results in device misdetection leading to 3482c2c7a89cSTejun Heo * infinite DEVXCHG or PHRDY CHG events. 3483c2c7a89cSTejun Heo */ 3484c2c7a89cSTejun Heo ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3485c2c7a89cSTejun Heo ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3486c2c7a89cSTejun Heo 3487c2c7a89cSTejun Heo if (trials > ATA_EH_PROBE_TRIALS) 3488c2c7a89cSTejun Heo sata_down_spd_limit(link, 1); 3489c2c7a89cSTejun Heo 349002c05a27STejun Heo return 1; 349102c05a27STejun Heo } 349202c05a27STejun Heo 34939b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3494fee7ca72STejun Heo { 34959af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 3496fee7ca72STejun Heo 3497cf9a590aSTejun Heo /* -EAGAIN from EH routine indicates retry without prejudice. 3498cf9a590aSTejun Heo * The requester is responsible for ensuring forward progress. 3499cf9a590aSTejun Heo */ 3500cf9a590aSTejun Heo if (err != -EAGAIN) 3501fee7ca72STejun Heo ehc->tries[dev->devno]--; 3502fee7ca72STejun Heo 3503fee7ca72STejun Heo switch (err) { 3504fee7ca72STejun Heo case -ENODEV: 3505fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 3506fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 3507df561f66SGustavo A. R. Silva fallthrough; 3508fee7ca72STejun Heo case -EINVAL: 3509fee7ca72STejun Heo /* give it just one more chance */ 3510fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3511df561f66SGustavo A. R. Silva fallthrough; 3512fee7ca72STejun Heo case -EIO: 3513d89293abSTejun Heo if (ehc->tries[dev->devno] == 1) { 3514fee7ca72STejun Heo /* This is the last chance, better to slow 3515fee7ca72STejun Heo * down than lose it. 
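			 * (Concretely, in the code just below: lower the SATA
			 * link speed limit and, if the device is still above
			 * PIO0, also clamp its transfer mode down to PIO.)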
3516fee7ca72STejun Heo */ 3517a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3518d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3519fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3520fee7ca72STejun Heo } 3521fee7ca72STejun Heo } 3522fee7ca72STejun Heo 3523fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3524fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3525fee7ca72STejun Heo ata_dev_disable(dev); 3526fee7ca72STejun Heo 3527fee7ca72STejun Heo /* detach if offline */ 3528b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3529fee7ca72STejun Heo ata_eh_detach_dev(dev); 3530fee7ca72STejun Heo 353102c05a27STejun Heo /* schedule probe if necessary */ 353287fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3533fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 353487fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 353587fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 353687fbc5a0STejun Heo } 35379b1e2658STejun Heo 35389b1e2658STejun Heo return 1; 3539fee7ca72STejun Heo } else { 3540cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 35419b1e2658STejun Heo return 0; 3542fee7ca72STejun Heo } 3543fee7ca72STejun Heo } 3544fee7ca72STejun Heo 3545c6fd2807SJeff Garzik /** 3546c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3547c6fd2807SJeff Garzik * @ap: host port to recover 3548c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3549c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3550c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3551c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 35529b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3553c6fd2807SJeff Garzik * 3554c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3555c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 35569b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 35579b1e2658STejun Heo * link's eh_context. This function executes all the operations 35589b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3559c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3560c6fd2807SJeff Garzik * 3561c6fd2807SJeff Garzik * LOCKING: 3562c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3563c6fd2807SJeff Garzik * 3564c6fd2807SJeff Garzik * RETURNS: 3565c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3566c6fd2807SJeff Garzik */ 3567fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3568c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 35699b1e2658STejun Heo ata_postreset_fn_t postreset, 35709b1e2658STejun Heo struct ata_link **r_failed_link) 3571c6fd2807SJeff Garzik { 35729b1e2658STejun Heo struct ata_link *link; 3573c6fd2807SJeff Garzik struct ata_device *dev; 35746b7ae954STejun Heo int rc, nr_fails; 357545fabbb7SElias Oltmanns unsigned long flags, deadline; 3576c6fd2807SJeff Garzik 3577c6fd2807SJeff Garzik /* prep for recovery */ 35781eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 35799b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 35809b1e2658STejun Heo 3581f9df58cbSTejun Heo /* re-enable link? 
*/ 3582f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3583f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3584f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3585f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3586f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3587f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3588f9df58cbSTejun Heo } 3589f9df58cbSTejun Heo 35901eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3591fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3592fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3593fd995f70STejun Heo else 3594c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3595c6fd2807SJeff Garzik 359679a55b72STejun Heo /* collect port action mask recorded in dev actions */ 35979b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 35989b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3599f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 360079a55b72STejun Heo 3601c6fd2807SJeff Garzik /* process hotplug request */ 3602c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3603c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3604c6fd2807SJeff Garzik 360502c05a27STejun Heo /* schedule probe if necessary */ 360602c05a27STejun Heo if (!ata_dev_enabled(dev)) 360702c05a27STejun Heo ata_eh_schedule_probe(dev); 3608c6fd2807SJeff Garzik } 36099b1e2658STejun Heo } 3610c6fd2807SJeff Garzik 3611c6fd2807SJeff Garzik retry: 3612c6fd2807SJeff Garzik rc = 0; 3613c6fd2807SJeff Garzik 3614c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3615c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3616c6fd2807SJeff Garzik goto out; 3617c6fd2807SJeff Garzik 36189b1e2658STejun Heo /* prep for EH */ 36191eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36209b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36219b1e2658STejun Heo 3622c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 36230260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3624c6fd2807SJeff Garzik ehc->i.action = 0; 3625c6fd2807SJeff Garzik 36261eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3627f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 36289b1e2658STejun Heo } 3629c6fd2807SJeff Garzik 3630c6fd2807SJeff Garzik /* reset */ 36311eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36329b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36339b1e2658STejun Heo 3634cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 36359b1e2658STejun Heo continue; 36369b1e2658STejun Heo 36379b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3638dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3639c6fd2807SJeff Garzik if (rc) { 3640a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3641c6fd2807SJeff Garzik goto out; 3642c6fd2807SJeff Garzik } 36439b1e2658STejun Heo } 3644c6fd2807SJeff Garzik 364545fabbb7SElias Oltmanns do { 364645fabbb7SElias Oltmanns unsigned long now; 364745fabbb7SElias Oltmanns 364845fabbb7SElias Oltmanns /* 364945fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 365045fabbb7SElias Oltmanns * ap->park_req_pending 365145fabbb7SElias Oltmanns */ 365245fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 365345fabbb7SElias Oltmanns 365445fabbb7SElias Oltmanns deadline = jiffies; 36551eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36561eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 365745fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 365845fabbb7SElias Oltmanns unsigned long tmp; 365945fabbb7SElias Oltmanns 36609162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 36619162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 366245fabbb7SElias Oltmanns continue; 366345fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 366445fabbb7SElias Oltmanns ATA_EH_PARK)) 366545fabbb7SElias Oltmanns continue; 366645fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 366745fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 366845fabbb7SElias Oltmanns deadline = tmp; 366945fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 367045fabbb7SElias Oltmanns continue; 367145fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 367245fabbb7SElias Oltmanns continue; 367345fabbb7SElias Oltmanns 367445fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 367545fabbb7SElias Oltmanns } 367645fabbb7SElias Oltmanns } 367745fabbb7SElias Oltmanns 367845fabbb7SElias Oltmanns now = jiffies; 367945fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 368045fabbb7SElias Oltmanns break; 368145fabbb7SElias Oltmanns 3682c0c362b6STejun Heo ata_eh_release(ap); 368345fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 368445fabbb7SElias Oltmanns deadline - now); 3685c0c362b6STejun Heo ata_eh_acquire(ap); 368645fabbb7SElias Oltmanns } while (deadline); 36871eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36881eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 368945fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 369045fabbb7SElias Oltmanns (1 << dev->devno))) 369145fabbb7SElias Oltmanns continue; 369245fabbb7SElias Oltmanns 369345fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 369445fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 369545fabbb7SElias Oltmanns } 369645fabbb7SElias Oltmanns } 369745fabbb7SElias Oltmanns 36989b1e2658STejun Heo /* the rest */ 36996b7ae954STejun Heo nr_fails = 0; 37006b7ae954STejun 
Heo ata_for_each_link(link, ap, PMP_FIRST) { 37019b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37029b1e2658STejun Heo 37036b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 37046b7ae954STejun Heo goto config_lpm; 37056b7ae954STejun Heo 3706c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 37070260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3708c6fd2807SJeff Garzik if (rc) 37096b7ae954STejun Heo goto rest_fail; 3710c6fd2807SJeff Garzik 3711633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3712633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3713633273a3STejun Heo ehc->i.action = 0; 3714633273a3STejun Heo return 0; 3715633273a3STejun Heo } 3716633273a3STejun Heo 3717baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3718baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 37190260731fSTejun Heo rc = ata_set_mode(link, &dev); 37204ae72a1eSTejun Heo if (rc) 37216b7ae954STejun Heo goto rest_fail; 3722baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3723c6fd2807SJeff Garzik } 3724c6fd2807SJeff Garzik 372511fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 372611fc33daSTejun Heo * disrupting the current users of the device. 372711fc33daSTejun Heo */ 372811fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 37291eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 373011fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 373111fc33daSTejun Heo continue; 373211fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 373311fc33daSTejun Heo if (rc) 37346b7ae954STejun Heo goto rest_fail; 373521334205SAaron Lu if (zpodd_dev_enabled(dev)) 373621334205SAaron Lu zpodd_post_poweron(dev); 373711fc33daSTejun Heo } 373811fc33daSTejun Heo } 373911fc33daSTejun Heo 37406013efd8STejun Heo /* retry flush if necessary */ 37416013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 37429162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 37439162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 37446013efd8STejun Heo continue; 37456013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 37466013efd8STejun Heo if (rc) 37476b7ae954STejun Heo goto rest_fail; 37486013efd8STejun Heo } 37496013efd8STejun Heo 37506b7ae954STejun Heo config_lpm: 375111fc33daSTejun Heo /* configure link power saving */ 37526b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 37536b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 37546b7ae954STejun Heo if (rc) 37556b7ae954STejun Heo goto rest_fail; 37566b7ae954STejun Heo } 3757ca77329fSKristen Carlson Accardi 37589b1e2658STejun Heo /* this link is okay now */ 37599b1e2658STejun Heo ehc->i.flags = 0; 37609b1e2658STejun Heo continue; 3761c6fd2807SJeff Garzik 37626b7ae954STejun Heo rest_fail: 37636b7ae954STejun Heo nr_fails++; 37646b7ae954STejun Heo if (dev) 37650a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3766c6fd2807SJeff Garzik 3767b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 3768b06ce3e5STejun Heo /* PMP reset requires working host port. 3769b06ce3e5STejun Heo * Can't retry if it's frozen. 
3770b06ce3e5STejun Heo */ 3771071f44b1STejun Heo if (sata_pmp_attached(ap)) 3772b06ce3e5STejun Heo goto out; 37739b1e2658STejun Heo break; 37749b1e2658STejun Heo } 3775b06ce3e5STejun Heo } 37769b1e2658STejun Heo 37776b7ae954STejun Heo if (nr_fails) 3778c6fd2807SJeff Garzik goto retry; 3779c6fd2807SJeff Garzik 3780c6fd2807SJeff Garzik out: 37819b1e2658STejun Heo if (rc && r_failed_link) 37829b1e2658STejun Heo *r_failed_link = link; 3783c6fd2807SJeff Garzik 3784c6fd2807SJeff Garzik return rc; 3785c6fd2807SJeff Garzik } 3786c6fd2807SJeff Garzik 3787c6fd2807SJeff Garzik /** 3788c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3789c6fd2807SJeff Garzik * @ap: host port to finish EH for 3790c6fd2807SJeff Garzik * 3791c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3792c6fd2807SJeff Garzik * failed qcs. 3793c6fd2807SJeff Garzik * 3794c6fd2807SJeff Garzik * LOCKING: 3795c6fd2807SJeff Garzik * None. 3796c6fd2807SJeff Garzik */ 3797fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3798c6fd2807SJeff Garzik { 3799258c4e5cSJens Axboe struct ata_queued_cmd *qc; 3800c6fd2807SJeff Garzik int tag; 3801c6fd2807SJeff Garzik 3802c6fd2807SJeff Garzik /* retry or finish qcs */ 3803258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 3804c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 3805c6fd2807SJeff Garzik continue; 3806c6fd2807SJeff Garzik 3807c6fd2807SJeff Garzik if (qc->err_mask) { 3808c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3809c6fd2807SJeff Garzik * generate sense data in this function, 3810c6fd2807SJeff Garzik * considering both err_mask and tf. 3811c6fd2807SJeff Garzik */ 381203faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 3813c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 381403faab78STejun Heo else 381503faab78STejun Heo ata_eh_qc_complete(qc); 3816c6fd2807SJeff Garzik } else { 3817c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3818c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3819c6fd2807SJeff Garzik } else { 3820c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3821c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3822c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3823c6fd2807SJeff Garzik } 3824c6fd2807SJeff Garzik } 3825c6fd2807SJeff Garzik } 3826da917d69STejun Heo 3827da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3828da917d69STejun Heo WARN_ON(ap->nr_active_links); 3829da917d69STejun Heo ap->nr_active_links = 0; 3830c6fd2807SJeff Garzik } 3831c6fd2807SJeff Garzik 3832c6fd2807SJeff Garzik /** 3833c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3834c6fd2807SJeff Garzik * @ap: host port to handle error for 3835a1efdabaSTejun Heo * 3836c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3837c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3838c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3839c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3840c6fd2807SJeff Garzik * 3841c6fd2807SJeff Garzik * Perform standard error handling sequence. 3842c6fd2807SJeff Garzik * 3843c6fd2807SJeff Garzik * LOCKING: 3844c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
3845c6fd2807SJeff Garzik */ 3846c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3847c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3848c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3849c6fd2807SJeff Garzik { 38509b1e2658STejun Heo struct ata_device *dev; 38519b1e2658STejun Heo int rc; 38529b1e2658STejun Heo 38539b1e2658STejun Heo ata_eh_autopsy(ap); 38549b1e2658STejun Heo ata_eh_report(ap); 38559b1e2658STejun Heo 38569b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 38579b1e2658STejun Heo NULL); 38589b1e2658STejun Heo if (rc) { 38591eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 38609b1e2658STejun Heo ata_dev_disable(dev); 38619b1e2658STejun Heo } 38629b1e2658STejun Heo 3863c6fd2807SJeff Garzik ata_eh_finish(ap); 3864c6fd2807SJeff Garzik } 3865c6fd2807SJeff Garzik 3866a1efdabaSTejun Heo /** 3867a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3868a1efdabaSTejun Heo * @ap: host port to handle error for 3869a1efdabaSTejun Heo * 3870a1efdabaSTejun Heo * Standard error handler 3871a1efdabaSTejun Heo * 3872a1efdabaSTejun Heo * LOCKING: 3873a1efdabaSTejun Heo * Kernel thread context (may sleep). 3874a1efdabaSTejun Heo */ 3875a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3876a1efdabaSTejun Heo { 3877a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3878a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3879a1efdabaSTejun Heo 388057c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 3881fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3882a1efdabaSTejun Heo hardreset = NULL; 3883a1efdabaSTejun Heo 3884a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3885a1efdabaSTejun Heo } 3886a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_std_error_handler); 3887a1efdabaSTejun Heo 38886ffa01d8STejun Heo #ifdef CONFIG_PM 3889c6fd2807SJeff Garzik /** 3890c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 3891c6fd2807SJeff Garzik * @ap: port to suspend 3892c6fd2807SJeff Garzik * 3893c6fd2807SJeff Garzik * Suspend @ap. 3894c6fd2807SJeff Garzik * 3895c6fd2807SJeff Garzik * LOCKING: 3896c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3897c6fd2807SJeff Garzik */ 3898c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 3899c6fd2807SJeff Garzik { 3900c6fd2807SJeff Garzik unsigned long flags; 3901c6fd2807SJeff Garzik int rc = 0; 39023dc67440SAaron Lu struct ata_device *dev; 3903c6fd2807SJeff Garzik 3904c6fd2807SJeff Garzik /* are we suspending? */ 3905c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3906c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3907a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) { 3908c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3909c6fd2807SJeff Garzik return; 3910c6fd2807SJeff Garzik } 3911c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3912c6fd2807SJeff Garzik 3913c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3914c6fd2807SJeff Garzik 39153dc67440SAaron Lu /* 39163dc67440SAaron Lu * If we have a ZPODD attached, check its zero 39173dc67440SAaron Lu * power ready status before the port is frozen. 3918a7ff60dbSAaron Lu * Only needed for runtime suspend. 
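	 * (PMSG_IS_AUTO() below is true only for runtime PM messages,
	 * so a system-wide suspend skips this check.)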
39193dc67440SAaron Lu */ 3920a7ff60dbSAaron Lu if (PMSG_IS_AUTO(ap->pm_mesg)) { 39213dc67440SAaron Lu ata_for_each_dev(dev, &ap->link, ENABLED) { 39223dc67440SAaron Lu if (zpodd_dev_enabled(dev)) 39233dc67440SAaron Lu zpodd_on_suspend(dev); 39243dc67440SAaron Lu } 3925a7ff60dbSAaron Lu } 39263dc67440SAaron Lu 3927c6fd2807SJeff Garzik /* suspend */ 3928c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 3929c6fd2807SJeff Garzik 3930c6fd2807SJeff Garzik if (ap->ops->port_suspend) 3931c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 3932c6fd2807SJeff Garzik 3933a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 39342a7b02eaSSergey Shtylyov 3935bc6e7c4bSDan Williams /* update the flags */ 3936c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3937c6fd2807SJeff Garzik 3938c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 3939c6fd2807SJeff Garzik if (rc == 0) 3940c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 394164578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 3942c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 3943c6fd2807SJeff Garzik 3944c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3945c6fd2807SJeff Garzik 3946c6fd2807SJeff Garzik return; 3947c6fd2807SJeff Garzik } 3948c6fd2807SJeff Garzik 3949c6fd2807SJeff Garzik /** 3950c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 3951c6fd2807SJeff Garzik * @ap: port to resume 3952c6fd2807SJeff Garzik * 3953c6fd2807SJeff Garzik * Resume @ap. 3954c6fd2807SJeff Garzik * 3955c6fd2807SJeff Garzik * LOCKING: 3956c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3957c6fd2807SJeff Garzik */ 3958c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 3959c6fd2807SJeff Garzik { 39606f9c1ea2STejun Heo struct ata_link *link; 39616f9c1ea2STejun Heo struct ata_device *dev; 3962c6fd2807SJeff Garzik unsigned long flags; 3963c6fd2807SJeff Garzik 3964c6fd2807SJeff Garzik /* are we resuming? */ 3965c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3966c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3967a7ff60dbSAaron Lu !(ap->pm_mesg.event & PM_EVENT_RESUME)) { 3968c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3969c6fd2807SJeff Garzik return; 3970c6fd2807SJeff Garzik } 3971c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3972c6fd2807SJeff Garzik 39739666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 3974c6fd2807SJeff Garzik 39756f9c1ea2STejun Heo /* 39766f9c1ea2STejun Heo * Error timestamps are in jiffies which doesn't run while 39776f9c1ea2STejun Heo * suspended and PHY events during resume isn't too uncommon. 39786f9c1ea2STejun Heo * When the two are combined, it can lead to unnecessary speed 39796f9c1ea2STejun Heo * downs if the machine is suspended and resumed repeatedly. 39806f9c1ea2STejun Heo * Clear error history. 
39816f9c1ea2STejun Heo */ 39826f9c1ea2STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 39836f9c1ea2STejun Heo ata_for_each_dev(dev, link, ALL) 39846f9c1ea2STejun Heo ata_ering_clear(&dev->ering); 39856f9c1ea2STejun Heo 3986a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 3987bd3adca5SShaohua Li 3988c6fd2807SJeff Garzik if (ap->ops->port_resume) 3989ae867937SKefeng Wang ap->ops->port_resume(ap); 3990c6fd2807SJeff Garzik 39916746544cSTejun Heo /* tell ACPI that we're resuming */ 39926746544cSTejun Heo ata_acpi_on_resume(ap); 39936746544cSTejun Heo 3994bc6e7c4bSDan Williams /* update the flags */ 3995c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3996c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3997c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3998c6fd2807SJeff Garzik } 39996ffa01d8STejun Heo #endif /* CONFIG_PM */ 4000
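/*
 * Illustrative sketch only, not part of libata-eh.c: how a low-level
 * driver typically hooks into the EH entry points defined above.  The
 * "foo_" names and the controller-specific quirk are hypothetical; the
 * example assumes only the real libata interfaces ata_port_operations,
 * sata_port_ops, ata_std_error_handler(), ata_do_eh() and
 * sata_std_hardreset().
 */
#if 0	/* example only, never compiled */
#include <linux/libata.h>

/*
 * Most SATA drivers simply inherit the standard ops; the inherited
 * ->error_handler (ata_std_error_handler() above) then runs
 * ata_eh_recover() via ata_do_eh() using the inherited prereset,
 * softreset, hardreset and postreset methods.
 */
static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
};

/*
 * A driver that needs extra work around reset can supply its own
 * ->error_handler and call ata_do_eh() directly with whichever reset
 * methods make sense for its hardware.
 */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	/* apply controller-specific quirks here, then use the library reset */
	return sata_std_hardreset(link, class, deadline);
}

static void foo_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ap->ops->prereset, NULL, foo_hardreset,
		  ap->ops->postreset);
}

static struct ata_port_operations foo_custom_port_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= foo_hardreset,
	.error_handler	= foo_error_handler,
};
#endif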