1c82ee6d3SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2c6fd2807SJeff Garzik /* 3c6fd2807SJeff Garzik * libata-eh.c - libata error handling 4c6fd2807SJeff Garzik * 5c6fd2807SJeff Garzik * Copyright 2006 Tejun Heo <htejun@gmail.com> 6c6fd2807SJeff Garzik * 7c6fd2807SJeff Garzik * libata documentation is available via 'make {ps|pdf}docs', 89bb9a39cSMauro Carvalho Chehab * as Documentation/driver-api/libata.rst 9c6fd2807SJeff Garzik * 10c6fd2807SJeff Garzik * Hardware documentation available from http://www.t13.org/ and 11c6fd2807SJeff Garzik * http://www.sata-io.org/ 12c6fd2807SJeff Garzik */ 13c6fd2807SJeff Garzik 14c6fd2807SJeff Garzik #include <linux/kernel.h> 15242f9dcbSJens Axboe #include <linux/blkdev.h> 1638789fdaSPaul Gortmaker #include <linux/export.h> 172855568bSJeff Garzik #include <linux/pci.h> 18c6fd2807SJeff Garzik #include <scsi/scsi.h> 19c6fd2807SJeff Garzik #include <scsi/scsi_host.h> 20c6fd2807SJeff Garzik #include <scsi/scsi_eh.h> 21c6fd2807SJeff Garzik #include <scsi/scsi_device.h> 22c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h> 236521148cSRobert Hancock #include <scsi/scsi_dbg.h> 24c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h" 25c6fd2807SJeff Garzik 26c6fd2807SJeff Garzik #include <linux/libata.h> 27c6fd2807SJeff Garzik 28255c03d1SHannes Reinecke #include <trace/events/libata.h> 29c6fd2807SJeff Garzik #include "libata.h" 30c6fd2807SJeff Garzik 317d47e8d4STejun Heo enum { 323884f7b0STejun Heo /* speed down verdicts */ 337d47e8d4STejun Heo ATA_EH_SPDN_NCQ_OFF = (1 << 0), 347d47e8d4STejun Heo ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 357d47e8d4STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 3676326ac1STejun Heo ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 373884f7b0STejun Heo 383884f7b0STejun Heo /* error flags */ 393884f7b0STejun Heo ATA_EFLAG_IS_IO = (1 << 0), 4076326ac1STejun Heo ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 41d9027470SGwendal Grignou ATA_EFLAG_OLD_ER = (1 << 31), 423884f7b0STejun Heo 433884f7b0STejun Heo /* error categories */ 443884f7b0STejun Heo ATA_ECAT_NONE = 0, 453884f7b0STejun Heo ATA_ECAT_ATA_BUS = 1, 463884f7b0STejun Heo ATA_ECAT_TOUT_HSM = 2, 473884f7b0STejun Heo ATA_ECAT_UNK_DEV = 3, 4875f9cafcSTejun Heo ATA_ECAT_DUBIOUS_NONE = 4, 4975f9cafcSTejun Heo ATA_ECAT_DUBIOUS_ATA_BUS = 5, 5075f9cafcSTejun Heo ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 5175f9cafcSTejun Heo ATA_ECAT_DUBIOUS_UNK_DEV = 7, 5275f9cafcSTejun Heo ATA_ECAT_NR = 8, 537d47e8d4STejun Heo 5487fbc5a0STejun Heo ATA_EH_CMD_DFL_TIMEOUT = 5000, 5587fbc5a0STejun Heo 560a2c0f56STejun Heo /* always put at least this amount of time between resets */ 570a2c0f56STejun Heo ATA_EH_RESET_COOL_DOWN = 5000, 580a2c0f56STejun Heo 59341c2c95STejun Heo /* Waiting in ->prereset can never be reliable. It's 60341c2c95STejun Heo * sometimes nice to wait there but it can't be depended upon; 61341c2c95STejun Heo * otherwise, we wouldn't be resetting. Just give it enough 62341c2c95STejun Heo * time for most drives to spin up. 6331daabdaSTejun Heo */ 64341c2c95STejun Heo ATA_EH_PRERESET_TIMEOUT = 10000, 65341c2c95STejun Heo ATA_EH_FASTDRAIN_INTERVAL = 3000, 6611fc33daSTejun Heo 6711fc33daSTejun Heo ATA_EH_UA_TRIES = 5, 68c2c7a89cSTejun Heo 69c2c7a89cSTejun Heo /* probe speed down parameters, see ata_eh_schedule_probe() */ 70c2c7a89cSTejun Heo ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 71c2c7a89cSTejun Heo ATA_EH_PROBE_TRIALS = 2, 7231daabdaSTejun Heo }; 7331daabdaSTejun Heo 7431daabdaSTejun Heo /* The following table determines how we sequence resets. 
Each entry 7531daabdaSTejun Heo * represents timeout for that try. The first try can be soft or 7631daabdaSTejun Heo * hardreset. All others are hardreset if available. In most cases 7731daabdaSTejun Heo * the first reset w/ 10sec timeout should succeed. Following entries 7835bf8821SDan Williams * are mostly for error handling, hotplug and those outlier devices that 7935bf8821SDan Williams * take an exceptionally long time to recover from reset. 8031daabdaSTejun Heo */ 81ca02f225SSergey Shtylyov static const unsigned int ata_eh_reset_timeouts[] = { 82341c2c95STejun Heo 10000, /* most drives spin up by 10sec */ 83341c2c95STejun Heo 10000, /* > 99% working drives spin up before 20sec */ 8435bf8821SDan Williams 35000, /* give > 30 secs of idleness for outlier devices */ 85341c2c95STejun Heo 5000, /* and sweet one last chance */ 86ca02f225SSergey Shtylyov UINT_MAX, /* > 1 min has elapsed, give up */ 8731daabdaSTejun Heo }; 8831daabdaSTejun Heo 89e06233f9SSergey Shtylyov static const unsigned int ata_eh_identify_timeouts[] = { 9087fbc5a0STejun Heo 5000, /* covers > 99% of successes and not too boring on failures */ 9187fbc5a0STejun Heo 10000, /* combined time till here is enough even for media access */ 9287fbc5a0STejun Heo 30000, /* for true idiots */ 93e06233f9SSergey Shtylyov UINT_MAX, 9487fbc5a0STejun Heo }; 9587fbc5a0STejun Heo 96e06233f9SSergey Shtylyov static const unsigned int ata_eh_revalidate_timeouts[] = { 9768dbbe7dSDamien Le Moal 15000, /* Some drives are slow to read log pages when waking-up */ 9868dbbe7dSDamien Le Moal 15000, /* combined time till here is enough even for media access */ 99e06233f9SSergey Shtylyov UINT_MAX, 10068dbbe7dSDamien Le Moal }; 10168dbbe7dSDamien Le Moal 102e06233f9SSergey Shtylyov static const unsigned int ata_eh_flush_timeouts[] = { 1036013efd8STejun Heo 15000, /* be generous with flush */ 1046013efd8STejun Heo 15000, /* ditto */ 1056013efd8STejun Heo 30000, /* and even more generous */ 106e06233f9SSergey Shtylyov UINT_MAX, 1076013efd8STejun Heo }; 1086013efd8STejun Heo 109e06233f9SSergey Shtylyov static const unsigned int ata_eh_other_timeouts[] = { 11087fbc5a0STejun Heo 5000, /* same rationale as identify timeout */ 11187fbc5a0STejun Heo 10000, /* ditto */ 11287fbc5a0STejun Heo /* but no merciful 30sec for other commands, it just isn't worth it */ 113e06233f9SSergey Shtylyov UINT_MAX, 11487fbc5a0STejun Heo }; 11587fbc5a0STejun Heo 11687fbc5a0STejun Heo struct ata_eh_cmd_timeout_ent { 11787fbc5a0STejun Heo const u8 *commands; 118e06233f9SSergey Shtylyov const unsigned int *timeouts; 11987fbc5a0STejun Heo }; 12087fbc5a0STejun Heo 12187fbc5a0STejun Heo /* The following table determines timeouts to use for EH internal 12287fbc5a0STejun Heo * commands. Each table entry is a command class and matches the 12387fbc5a0STejun Heo * commands the entry applies to and the timeout table to use. 12487fbc5a0STejun Heo * 12587fbc5a0STejun Heo * On the retry after a command timed out, the next timeout value from 12687fbc5a0STejun Heo * the table is used. If the table doesn't contain further entries, 12787fbc5a0STejun Heo * the last value is used. 12887fbc5a0STejun Heo * 12987fbc5a0STejun Heo * ehc->cmd_timeout_idx keeps track of which timeout to use per 13087fbc5a0STejun Heo * command class, so if SET_FEATURES times out on the first try, the 13187fbc5a0STejun Heo * next try will use the second timeout value only for that class. 13287fbc5a0STejun Heo */ 13387fbc5a0STejun Heo #define CMDS(cmds...) 
(const u8 []){ cmds, 0 } 13487fbc5a0STejun Heo static const struct ata_eh_cmd_timeout_ent 13587fbc5a0STejun Heo ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 13687fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 13787fbc5a0STejun Heo .timeouts = ata_eh_identify_timeouts, }, 13868dbbe7dSDamien Le Moal { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT), 13968dbbe7dSDamien Le Moal .timeouts = ata_eh_revalidate_timeouts, }, 14087fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 14187fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 14287fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 14387fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 14487fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_SET_FEATURES), 14587fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 14687fbc5a0STejun Heo { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 14787fbc5a0STejun Heo .timeouts = ata_eh_other_timeouts, }, 1486013efd8STejun Heo { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 1496013efd8STejun Heo .timeouts = ata_eh_flush_timeouts }, 15087fbc5a0STejun Heo }; 15187fbc5a0STejun Heo #undef CMDS 15287fbc5a0STejun Heo 153c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap); 15471d7b6e5SNiklas Cassel static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 15571d7b6e5SNiklas Cassel struct ata_device **r_failed_dev); 1566ffa01d8STejun Heo #ifdef CONFIG_PM 157c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap); 158c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap); 1596ffa01d8STejun Heo #else /* CONFIG_PM */ 1606ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap) 1616ffa01d8STejun Heo { } 1626ffa01d8STejun Heo 1636ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap) 1646ffa01d8STejun Heo { } 1656ffa01d8STejun Heo #endif /* CONFIG_PM */ 166c6fd2807SJeff Garzik 1670d74d872SMathieu Malaterre static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, 1680d74d872SMathieu Malaterre const char *fmt, va_list args) 169b64bbc39STejun Heo { 170b64bbc39STejun Heo ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 171b64bbc39STejun Heo ATA_EH_DESC_LEN - ehi->desc_len, 172b64bbc39STejun Heo fmt, args); 173b64bbc39STejun Heo } 174b64bbc39STejun Heo 175b64bbc39STejun Heo /** 176b64bbc39STejun Heo * __ata_ehi_push_desc - push error description without adding separator 177b64bbc39STejun Heo * @ehi: target EHI 178b64bbc39STejun Heo * @fmt: printf format string 179b64bbc39STejun Heo * 180b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 181b64bbc39STejun Heo * 182b64bbc39STejun Heo * LOCKING: 183b64bbc39STejun Heo * spin_lock_irqsave(host lock) 184b64bbc39STejun Heo */ 185b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 
186b64bbc39STejun Heo { 187b64bbc39STejun Heo va_list args; 188b64bbc39STejun Heo 189b64bbc39STejun Heo va_start(args, fmt); 190b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 191b64bbc39STejun Heo va_end(args); 192b64bbc39STejun Heo } 193a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 194b64bbc39STejun Heo 195b64bbc39STejun Heo /** 196b64bbc39STejun Heo * ata_ehi_push_desc - push error description with separator 197b64bbc39STejun Heo * @ehi: target EHI 198b64bbc39STejun Heo * @fmt: printf format string 199b64bbc39STejun Heo * 200b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 201b64bbc39STejun Heo * If @ehi->desc is not empty, ", " is added in-between. 202b64bbc39STejun Heo * 203b64bbc39STejun Heo * LOCKING: 204b64bbc39STejun Heo * spin_lock_irqsave(host lock) 205b64bbc39STejun Heo */ 206b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 207b64bbc39STejun Heo { 208b64bbc39STejun Heo va_list args; 209b64bbc39STejun Heo 210b64bbc39STejun Heo if (ehi->desc_len) 211b64bbc39STejun Heo __ata_ehi_push_desc(ehi, ", "); 212b64bbc39STejun Heo 213b64bbc39STejun Heo va_start(args, fmt); 214b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 215b64bbc39STejun Heo va_end(args); 216b64bbc39STejun Heo } 217a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 218b64bbc39STejun Heo 219b64bbc39STejun Heo /** 220b64bbc39STejun Heo * ata_ehi_clear_desc - clean error description 221b64bbc39STejun Heo * @ehi: target EHI 222b64bbc39STejun Heo * 223b64bbc39STejun Heo * Clear @ehi->desc. 224b64bbc39STejun Heo * 225b64bbc39STejun Heo * LOCKING: 226b64bbc39STejun Heo * spin_lock_irqsave(host lock) 227b64bbc39STejun Heo */ 228b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi) 229b64bbc39STejun Heo { 230b64bbc39STejun Heo ehi->desc[0] = '\0'; 231b64bbc39STejun Heo ehi->desc_len = 0; 232b64bbc39STejun Heo } 233a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 234b64bbc39STejun Heo 235cbcdd875STejun Heo /** 236cbcdd875STejun Heo * ata_port_desc - append port description 237cbcdd875STejun Heo * @ap: target ATA port 238cbcdd875STejun Heo * @fmt: printf format string 239cbcdd875STejun Heo * 240cbcdd875STejun Heo * Format string according to @fmt and append it to port 241cbcdd875STejun Heo * description. If port description is not empty, " " is added 242cbcdd875STejun Heo * in-between. This function is to be used while initializing 243cbcdd875STejun Heo * ata_host. The description is printed on host registration. 244cbcdd875STejun Heo * 245cbcdd875STejun Heo * LOCKING: 246cbcdd875STejun Heo * None. 247cbcdd875STejun Heo */ 248cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 
249cbcdd875STejun Heo { 250cbcdd875STejun Heo va_list args; 251cbcdd875STejun Heo 252cbcdd875STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 253cbcdd875STejun Heo 254cbcdd875STejun Heo if (ap->link.eh_info.desc_len) 255cbcdd875STejun Heo __ata_ehi_push_desc(&ap->link.eh_info, " "); 256cbcdd875STejun Heo 257cbcdd875STejun Heo va_start(args, fmt); 258cbcdd875STejun Heo __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 259cbcdd875STejun Heo va_end(args); 260cbcdd875STejun Heo } 261a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_desc); 262cbcdd875STejun Heo 263cbcdd875STejun Heo #ifdef CONFIG_PCI 264cbcdd875STejun Heo /** 265cbcdd875STejun Heo * ata_port_pbar_desc - append PCI BAR description 266cbcdd875STejun Heo * @ap: target ATA port 267cbcdd875STejun Heo * @bar: target PCI BAR 268cbcdd875STejun Heo * @offset: offset into PCI BAR 269cbcdd875STejun Heo * @name: name of the area 270cbcdd875STejun Heo * 271cbcdd875STejun Heo * If @offset is negative, this function formats a string which 272cbcdd875STejun Heo * contains the name, address, size and type of the BAR and 273cbcdd875STejun Heo * appends it to the port description. If @offset is zero or 274cbcdd875STejun Heo * positive, only name and offsetted address is appended. 275cbcdd875STejun Heo * 276cbcdd875STejun Heo * LOCKING: 277cbcdd875STejun Heo * None. 278cbcdd875STejun Heo */ 279cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 280cbcdd875STejun Heo const char *name) 281cbcdd875STejun Heo { 282cbcdd875STejun Heo struct pci_dev *pdev = to_pci_dev(ap->host->dev); 283cbcdd875STejun Heo char *type = ""; 284cbcdd875STejun Heo unsigned long long start, len; 285cbcdd875STejun Heo 286cbcdd875STejun Heo if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 287cbcdd875STejun Heo type = "m"; 288cbcdd875STejun Heo else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 289cbcdd875STejun Heo type = "i"; 290cbcdd875STejun Heo 291cbcdd875STejun Heo start = (unsigned long long)pci_resource_start(pdev, bar); 292cbcdd875STejun Heo len = (unsigned long long)pci_resource_len(pdev, bar); 293cbcdd875STejun Heo 294cbcdd875STejun Heo if (offset < 0) 295cbcdd875STejun Heo ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 296cbcdd875STejun Heo else 297e6a73ab1SAndrew Morton ata_port_desc(ap, "%s 0x%llx", name, 298e6a73ab1SAndrew Morton start + (unsigned long long)offset); 299cbcdd875STejun Heo } 300a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 301cbcdd875STejun Heo #endif /* CONFIG_PCI */ 302cbcdd875STejun Heo 30387fbc5a0STejun Heo static int ata_lookup_timeout_table(u8 cmd) 30487fbc5a0STejun Heo { 30587fbc5a0STejun Heo int i; 30687fbc5a0STejun Heo 30787fbc5a0STejun Heo for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 30887fbc5a0STejun Heo const u8 *cur; 30987fbc5a0STejun Heo 31087fbc5a0STejun Heo for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 31187fbc5a0STejun Heo if (*cur == cmd) 31287fbc5a0STejun Heo return i; 31387fbc5a0STejun Heo } 31487fbc5a0STejun Heo 31587fbc5a0STejun Heo return -1; 31687fbc5a0STejun Heo } 31787fbc5a0STejun Heo 31887fbc5a0STejun Heo /** 31987fbc5a0STejun Heo * ata_internal_cmd_timeout - determine timeout for an internal command 32087fbc5a0STejun Heo * @dev: target device 32187fbc5a0STejun Heo * @cmd: internal command to be issued 32287fbc5a0STejun Heo * 32387fbc5a0STejun Heo * Determine timeout for internal command @cmd for @dev. 
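 *
 * Commands not listed in ata_eh_cmd_timeout_table get
 * ATA_EH_CMD_DFL_TIMEOUT; listed commands get the entry of their
 * class' timeout list selected by ehc->cmd_timeout_idx, which
 * ata_internal_cmd_timed_out() below advances after each timeout.
 * Illustrative sketch (not part of the driver; values are from
 * ata_eh_identify_timeouts above):
 *
 *	timeout = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA); // 5000 ms
 *	// ... IDENTIFY times out ...
 *	ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 *	timeout = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA); // now 10000 ms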
32487fbc5a0STejun Heo * 32587fbc5a0STejun Heo * LOCKING: 32687fbc5a0STejun Heo * EH context. 32787fbc5a0STejun Heo * 32887fbc5a0STejun Heo * RETURNS: 32987fbc5a0STejun Heo * Determined timeout. 33087fbc5a0STejun Heo */ 331e06233f9SSergey Shtylyov unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 33287fbc5a0STejun Heo { 33387fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 33487fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 33587fbc5a0STejun Heo int idx; 33687fbc5a0STejun Heo 33787fbc5a0STejun Heo if (ent < 0) 33887fbc5a0STejun Heo return ATA_EH_CMD_DFL_TIMEOUT; 33987fbc5a0STejun Heo 34087fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 34187fbc5a0STejun Heo return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 34287fbc5a0STejun Heo } 34387fbc5a0STejun Heo 34487fbc5a0STejun Heo /** 34587fbc5a0STejun Heo * ata_internal_cmd_timed_out - notification for internal command timeout 34687fbc5a0STejun Heo * @dev: target device 34787fbc5a0STejun Heo * @cmd: internal command which timed out 34887fbc5a0STejun Heo * 34987fbc5a0STejun Heo * Notify EH that internal command @cmd for @dev timed out. This 35087fbc5a0STejun Heo * function should be called only for commands whose timeouts are 35187fbc5a0STejun Heo * determined using ata_internal_cmd_timeout(). 35287fbc5a0STejun Heo * 35387fbc5a0STejun Heo * LOCKING: 35487fbc5a0STejun Heo * EH context. 35587fbc5a0STejun Heo */ 35687fbc5a0STejun Heo void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 35787fbc5a0STejun Heo { 35887fbc5a0STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 35987fbc5a0STejun Heo int ent = ata_lookup_timeout_table(cmd); 36087fbc5a0STejun Heo int idx; 36187fbc5a0STejun Heo 36287fbc5a0STejun Heo if (ent < 0) 36387fbc5a0STejun Heo return; 36487fbc5a0STejun Heo 36587fbc5a0STejun Heo idx = ehc->cmd_timeout_idx[dev->devno][ent]; 366e06233f9SSergey Shtylyov if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX) 36787fbc5a0STejun Heo ehc->cmd_timeout_idx[dev->devno][ent]++; 36887fbc5a0STejun Heo } 36987fbc5a0STejun Heo 3703884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 371c6fd2807SJeff Garzik unsigned int err_mask) 372c6fd2807SJeff Garzik { 373c6fd2807SJeff Garzik struct ata_ering_entry *ent; 374c6fd2807SJeff Garzik 375c6fd2807SJeff Garzik WARN_ON(!err_mask); 376c6fd2807SJeff Garzik 377c6fd2807SJeff Garzik ering->cursor++; 378c6fd2807SJeff Garzik ering->cursor %= ATA_ERING_SIZE; 379c6fd2807SJeff Garzik 380c6fd2807SJeff Garzik ent = &ering->ring[ering->cursor]; 3813884f7b0STejun Heo ent->eflags = eflags; 382c6fd2807SJeff Garzik ent->err_mask = err_mask; 383c6fd2807SJeff Garzik ent->timestamp = get_jiffies_64(); 384c6fd2807SJeff Garzik } 385c6fd2807SJeff Garzik 38676326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 38776326ac1STejun Heo { 38876326ac1STejun Heo struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 38976326ac1STejun Heo 39076326ac1STejun Heo if (ent->err_mask) 39176326ac1STejun Heo return ent; 39276326ac1STejun Heo return NULL; 39376326ac1STejun Heo } 39476326ac1STejun Heo 395d9027470SGwendal Grignou int ata_ering_map(struct ata_ering *ering, 396c6fd2807SJeff Garzik int (*map_fn)(struct ata_ering_entry *, void *), 397c6fd2807SJeff Garzik void *arg) 398c6fd2807SJeff Garzik { 399c6fd2807SJeff Garzik int idx, rc = 0; 400c6fd2807SJeff Garzik struct ata_ering_entry *ent; 401c6fd2807SJeff Garzik 402c6fd2807SJeff Garzik idx = ering->cursor; 
403c6fd2807SJeff Garzik do { 404c6fd2807SJeff Garzik ent = &ering->ring[idx]; 405c6fd2807SJeff Garzik if (!ent->err_mask) 406c6fd2807SJeff Garzik break; 407c6fd2807SJeff Garzik rc = map_fn(ent, arg); 408c6fd2807SJeff Garzik if (rc) 409c6fd2807SJeff Garzik break; 410c6fd2807SJeff Garzik idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 411c6fd2807SJeff Garzik } while (idx != ering->cursor); 412c6fd2807SJeff Garzik 413c6fd2807SJeff Garzik return rc; 414c6fd2807SJeff Garzik } 415c6fd2807SJeff Garzik 41660428407SH Hartley Sweeten static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) 417d9027470SGwendal Grignou { 418d9027470SGwendal Grignou ent->eflags |= ATA_EFLAG_OLD_ER; 419d9027470SGwendal Grignou return 0; 420d9027470SGwendal Grignou } 421d9027470SGwendal Grignou 422d9027470SGwendal Grignou static void ata_ering_clear(struct ata_ering *ering) 423d9027470SGwendal Grignou { 424d9027470SGwendal Grignou ata_ering_map(ering, ata_ering_clear_cb, NULL); 425d9027470SGwendal Grignou } 426d9027470SGwendal Grignou 427c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev) 428c6fd2807SJeff Garzik { 4299af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 430c6fd2807SJeff Garzik 431c6fd2807SJeff Garzik return ehc->i.action | ehc->i.dev_action[dev->devno]; 432c6fd2807SJeff Garzik } 433c6fd2807SJeff Garzik 434f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 435c6fd2807SJeff Garzik struct ata_eh_info *ehi, unsigned int action) 436c6fd2807SJeff Garzik { 437f58229f8STejun Heo struct ata_device *tdev; 438c6fd2807SJeff Garzik 439c6fd2807SJeff Garzik if (!dev) { 440c6fd2807SJeff Garzik ehi->action &= ~action; 4411eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 442f58229f8STejun Heo ehi->dev_action[tdev->devno] &= ~action; 443c6fd2807SJeff Garzik } else { 444c6fd2807SJeff Garzik /* doesn't make sense for port-wide EH actions */ 445c6fd2807SJeff Garzik WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 446c6fd2807SJeff Garzik 447c6fd2807SJeff Garzik /* break ehi->action into ehi->dev_action */ 448c6fd2807SJeff Garzik if (ehi->action & action) { 4491eca4365STejun Heo ata_for_each_dev(tdev, link, ALL) 450f58229f8STejun Heo ehi->dev_action[tdev->devno] |= 451f58229f8STejun Heo ehi->action & action; 452c6fd2807SJeff Garzik ehi->action &= ~action; 453c6fd2807SJeff Garzik } 454c6fd2807SJeff Garzik 455c6fd2807SJeff Garzik /* turn off the specified per-dev action */ 456c6fd2807SJeff Garzik ehi->dev_action[dev->devno] &= ~action; 457c6fd2807SJeff Garzik } 458c6fd2807SJeff Garzik } 459c6fd2807SJeff Garzik 460c6fd2807SJeff Garzik /** 461c0c362b6STejun Heo * ata_eh_acquire - acquire EH ownership 462c0c362b6STejun Heo * @ap: ATA port to acquire EH ownership for 463c0c362b6STejun Heo * 464c0c362b6STejun Heo * Acquire EH ownership for @ap. This is the basic exclusion 465c0c362b6STejun Heo * mechanism for ports sharing a host. Only one port hanging off 466c0c362b6STejun Heo * the same host can claim the ownership of EH. 467c0c362b6STejun Heo * 468c0c362b6STejun Heo * LOCKING: 469c0c362b6STejun Heo * EH context. 
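 *
 * Minimal usage sketch (illustrative only; the real caller is
 * ata_scsi_port_error_handler() below):
 *
 *	ata_eh_acquire(ap);
 *	// ... run EH for this port ...
 *	ata_eh_release(ap);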
470c0c362b6STejun Heo */ 471c0c362b6STejun Heo void ata_eh_acquire(struct ata_port *ap) 472c0c362b6STejun Heo { 473c0c362b6STejun Heo mutex_lock(&ap->host->eh_mutex); 474c0c362b6STejun Heo WARN_ON_ONCE(ap->host->eh_owner); 475c0c362b6STejun Heo ap->host->eh_owner = current; 476c0c362b6STejun Heo } 477c0c362b6STejun Heo 478c0c362b6STejun Heo /** 479c0c362b6STejun Heo * ata_eh_release - release EH ownership 480c0c362b6STejun Heo * @ap: ATA port to release EH ownership for 481c0c362b6STejun Heo * 482c0c362b6STejun Heo * Release EH ownership for @ap if the caller. The caller must 483c0c362b6STejun Heo * have acquired EH ownership using ata_eh_acquire() previously. 484c0c362b6STejun Heo * 485c0c362b6STejun Heo * LOCKING: 486c0c362b6STejun Heo * EH context. 487c0c362b6STejun Heo */ 488c0c362b6STejun Heo void ata_eh_release(struct ata_port *ap) 489c0c362b6STejun Heo { 490c0c362b6STejun Heo WARN_ON_ONCE(ap->host->eh_owner != current); 491c0c362b6STejun Heo ap->host->eh_owner = NULL; 492c0c362b6STejun Heo mutex_unlock(&ap->host->eh_mutex); 493c0c362b6STejun Heo } 494c0c362b6STejun Heo 495ece180d1STejun Heo static void ata_eh_unload(struct ata_port *ap) 496ece180d1STejun Heo { 497ece180d1STejun Heo struct ata_link *link; 498ece180d1STejun Heo struct ata_device *dev; 499ece180d1STejun Heo unsigned long flags; 500ece180d1STejun Heo 501ece180d1STejun Heo /* Restore SControl IPM and SPD for the next driver and 502ece180d1STejun Heo * disable attached devices. 503ece180d1STejun Heo */ 504ece180d1STejun Heo ata_for_each_link(link, ap, PMP_FIRST) { 505ece180d1STejun Heo sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); 506ece180d1STejun Heo ata_for_each_dev(dev, link, ALL) 507ece180d1STejun Heo ata_dev_disable(dev); 508ece180d1STejun Heo } 509ece180d1STejun Heo 510ece180d1STejun Heo /* freeze and set UNLOADED */ 511ece180d1STejun Heo spin_lock_irqsave(ap->lock, flags); 512ece180d1STejun Heo 513ece180d1STejun Heo ata_port_freeze(ap); /* won't be thawed */ 514ece180d1STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 515ece180d1STejun Heo ap->pflags |= ATA_PFLAG_UNLOADED; 516ece180d1STejun Heo 517ece180d1STejun Heo spin_unlock_irqrestore(ap->lock, flags); 518ece180d1STejun Heo } 519ece180d1STejun Heo 520c6fd2807SJeff Garzik /** 521c6fd2807SJeff Garzik * ata_scsi_error - SCSI layer error handler callback 522c6fd2807SJeff Garzik * @host: SCSI host on which error occurred 523c6fd2807SJeff Garzik * 524c6fd2807SJeff Garzik * Handles SCSI-layer-thrown error events. 525c6fd2807SJeff Garzik * 526c6fd2807SJeff Garzik * LOCKING: 527c6fd2807SJeff Garzik * Inherited from SCSI layer (none, can sleep) 528c6fd2807SJeff Garzik * 529c6fd2807SJeff Garzik * RETURNS: 530c6fd2807SJeff Garzik * Zero. 
531c6fd2807SJeff Garzik */
532c6fd2807SJeff Garzik void ata_scsi_error(struct Scsi_Host *host)
533c6fd2807SJeff Garzik {
534c6fd2807SJeff Garzik struct ata_port *ap = ata_shost_to_port(host);
535c6fd2807SJeff Garzik unsigned long flags;
536c34aeebcSJames Bottomley LIST_HEAD(eh_work_q);
537c6fd2807SJeff Garzik 
538c34aeebcSJames Bottomley spin_lock_irqsave(host->host_lock, flags);
539c34aeebcSJames Bottomley list_splice_init(&host->eh_cmd_q, &eh_work_q);
540c34aeebcSJames Bottomley spin_unlock_irqrestore(host->host_lock, flags);
541c34aeebcSJames Bottomley 
5420e0b494cSJames Bottomley ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
5430e0b494cSJames Bottomley 
5440e0b494cSJames Bottomley /* If we timed out racing normal completion and there is nothing to
5450e0b494cSJames Bottomley recover (nr_timedout == 0), why exactly are we doing error recovery? */
5460e0b494cSJames Bottomley ata_scsi_port_error_handler(host, ap);
5470e0b494cSJames Bottomley 
5480e0b494cSJames Bottomley /* finish or retry handled scmd's and clean up */
54972d8c36eSWei Fang WARN_ON(!list_empty(&eh_work_q));
5500e0b494cSJames Bottomley 
5510e0b494cSJames Bottomley }
5520e0b494cSJames Bottomley 
5530e0b494cSJames Bottomley /**
5540e0b494cSJames Bottomley * ata_scsi_cmd_error_handler - error callback for a list of commands
5550e0b494cSJames Bottomley * @host: scsi host containing the port
5560e0b494cSJames Bottomley * @ap: ATA port within the host
5570e0b494cSJames Bottomley * @eh_work_q: list of commands to process
5580e0b494cSJames Bottomley *
5590e0b494cSJames Bottomley * Process the given list of commands and return those finished to the
5600e0b494cSJames Bottomley * ap->eh_done_q. This function is the first part of the libata error
5610e0b494cSJames Bottomley * handler which processes a given list of failed commands.
5620e0b494cSJames Bottomley */
5630e0b494cSJames Bottomley void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
5640e0b494cSJames Bottomley struct list_head *eh_work_q)
5650e0b494cSJames Bottomley {
5660e0b494cSJames Bottomley int i;
5670e0b494cSJames Bottomley unsigned long flags;
568b83ad9eeSWenchao Hao struct scsi_cmnd *scmd, *tmp;
569b83ad9eeSWenchao Hao int nr_timedout = 0;
5700e0b494cSJames Bottomley 
571c429137aSTejun Heo /* make sure sff pio task is not running */
572c429137aSTejun Heo ata_sff_flush_pio_task(ap);
573c6fd2807SJeff Garzik 
574cca3974eSJeff Garzik /* synchronize with host lock and sort out timeouts */
575c6fd2807SJeff Garzik 
576b83ad9eeSWenchao Hao /*
577*ff8072d5SHannes Reinecke * For EH, all qcs are finished in one of three ways -
578c6fd2807SJeff Garzik * normal completion, error completion, and SCSI timeout.
579c96f1732SAlan Cox * Both completions can race against SCSI timeout. When normal
580c6fd2807SJeff Garzik * completion wins, the qc never reaches EH. When error
58187629312SNiklas Cassel * completion wins, the qc has ATA_QCFLAG_EH set.
582c6fd2807SJeff Garzik *
583c6fd2807SJeff Garzik * When SCSI timeout wins, things are a bit more complex.
584c6fd2807SJeff Garzik * Normal or error completion can occur after the timeout but
585c6fd2807SJeff Garzik * before this point. In such cases, both types of
586c6fd2807SJeff Garzik * completions are honored. A scmd is determined to have
587c6fd2807SJeff Garzik * timed out iff its associated qc is active and not failed.
588c6fd2807SJeff Garzik */
589a4f08141SPaul E.
McKenney spin_lock_irqsave(ap->lock, flags); 590c6fd2807SJeff Garzik 591b83ad9eeSWenchao Hao /* 592b83ad9eeSWenchao Hao * This must occur under the ap->lock as we don't want 593b83ad9eeSWenchao Hao * a polled recovery to race the real interrupt handler 594b83ad9eeSWenchao Hao * 595b83ad9eeSWenchao Hao * The lost_interrupt handler checks for any completed but 596b83ad9eeSWenchao Hao * non-notified command and completes much like an IRQ handler. 597b83ad9eeSWenchao Hao * 598b83ad9eeSWenchao Hao * We then fall into the error recovery code which will treat 599b83ad9eeSWenchao Hao * this as if normal completion won the race 600b83ad9eeSWenchao Hao */ 601c96f1732SAlan Cox if (ap->ops->lost_interrupt) 602c96f1732SAlan Cox ap->ops->lost_interrupt(ap); 603c96f1732SAlan Cox 6040e0b494cSJames Bottomley list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { 605c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 606c6fd2807SJeff Garzik 607258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, i) { 608c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_ACTIVE && 609c6fd2807SJeff Garzik qc->scsicmd == scmd) 610c6fd2807SJeff Garzik break; 611c6fd2807SJeff Garzik } 612c6fd2807SJeff Garzik 613c6fd2807SJeff Garzik if (i < ATA_MAX_QUEUE) { 614c6fd2807SJeff Garzik /* the scmd has an associated qc */ 61587629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH)) { 616c6fd2807SJeff Garzik /* which hasn't failed yet, timeout */ 617c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_TIMEOUT; 61887629312SNiklas Cassel qc->flags |= ATA_QCFLAG_EH; 619c6fd2807SJeff Garzik nr_timedout++; 620c6fd2807SJeff Garzik } 621c6fd2807SJeff Garzik } else { 622c6fd2807SJeff Garzik /* Normal completion occurred after 623c6fd2807SJeff Garzik * SCSI timeout but before this point. 624c6fd2807SJeff Garzik * Successfully complete it. 625c6fd2807SJeff Garzik */ 626c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 627c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 628c6fd2807SJeff Garzik } 629c6fd2807SJeff Garzik } 630c6fd2807SJeff Garzik 631b83ad9eeSWenchao Hao /* 632b83ad9eeSWenchao Hao * If we have timed out qcs. They belong to EH from 633c6fd2807SJeff Garzik * this point but the state of the controller is 634c6fd2807SJeff Garzik * unknown. Freeze the port to make sure the IRQ 635c6fd2807SJeff Garzik * handler doesn't diddle with those qcs. This must 63687629312SNiklas Cassel * be done atomically w.r.t. setting ATA_QCFLAG_EH. 637c6fd2807SJeff Garzik */ 638c6fd2807SJeff Garzik if (nr_timedout) 639c6fd2807SJeff Garzik __ata_port_freeze(ap); 640c6fd2807SJeff Garzik 641a1e10f7eSTejun Heo /* initialize eh_tries */ 642a1e10f7eSTejun Heo ap->eh_tries = ATA_EH_MAX_TRIES; 643c6fd2807SJeff Garzik 644b83ad9eeSWenchao Hao spin_unlock_irqrestore(ap->lock, flags); 6450e0b494cSJames Bottomley } 6460e0b494cSJames Bottomley EXPORT_SYMBOL(ata_scsi_cmd_error_handler); 6470e0b494cSJames Bottomley 6480e0b494cSJames Bottomley /** 6490e0b494cSJames Bottomley * ata_scsi_port_error_handler - recover the port after the commands 6500e0b494cSJames Bottomley * @host: SCSI host containing the port 6510e0b494cSJames Bottomley * @ap: the ATA port 6520e0b494cSJames Bottomley * 6530e0b494cSJames Bottomley * Handle the recovery of the port @ap after all the commands 6540e0b494cSJames Bottomley * have been recovered. 
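 *
 * Together with ata_scsi_cmd_error_handler() above, this forms the body
 * of ata_scsi_error(). EH is repeated while ATA_PFLAG_EH_PENDING is
 * still set, giving up after ATA_EH_MAX_TRIES attempts.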
6550e0b494cSJames Bottomley */ 6560e0b494cSJames Bottomley void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) 6570e0b494cSJames Bottomley { 6580e0b494cSJames Bottomley unsigned long flags; 659cf1b86c8STejun Heo struct ata_link *link; 660cf1b86c8STejun Heo 661c0c362b6STejun Heo /* acquire EH ownership */ 662c0c362b6STejun Heo ata_eh_acquire(ap); 663c0c362b6STejun Heo repeat: 6645ddf24c5STejun Heo /* kill fast drain timer */ 6655ddf24c5STejun Heo del_timer_sync(&ap->fastdrain_timer); 6665ddf24c5STejun Heo 667c6fd2807SJeff Garzik /* process port resume request */ 668c6fd2807SJeff Garzik ata_eh_handle_port_resume(ap); 669c6fd2807SJeff Garzik 670c6fd2807SJeff Garzik /* fetch & clear EH info */ 671c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 672c6fd2807SJeff Garzik 6731eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) { 67400115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 67500115e0fSTejun Heo struct ata_device *dev; 67600115e0fSTejun Heo 677cf1b86c8STejun Heo memset(&link->eh_context, 0, sizeof(link->eh_context)); 678cf1b86c8STejun Heo link->eh_context.i = link->eh_info; 679cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 68000115e0fSTejun Heo 6811eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 68200115e0fSTejun Heo int devno = dev->devno; 68300115e0fSTejun Heo 68400115e0fSTejun Heo ehc->saved_xfer_mode[devno] = dev->xfer_mode; 68500115e0fSTejun Heo if (ata_ncq_enabled(dev)) 68600115e0fSTejun Heo ehc->saved_ncq_enabled |= 1 << devno; 68700115e0fSTejun Heo } 688cf1b86c8STejun Heo } 689c6fd2807SJeff Garzik 690c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 691c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_PENDING; 692da917d69STejun Heo ap->excl_link = NULL; /* don't maintain exclusion over EH */ 693c6fd2807SJeff Garzik 694c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 695c6fd2807SJeff Garzik 696c6fd2807SJeff Garzik /* invoke EH, skip if unloading or suspended */ 697c6fd2807SJeff Garzik if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 698c6fd2807SJeff Garzik ap->ops->error_handler(ap); 699ece180d1STejun Heo else { 700ece180d1STejun Heo /* if unloading, commence suicide */ 701ece180d1STejun Heo if ((ap->pflags & ATA_PFLAG_UNLOADING) && 702ece180d1STejun Heo !(ap->pflags & ATA_PFLAG_UNLOADED)) 703ece180d1STejun Heo ata_eh_unload(ap); 704c6fd2807SJeff Garzik ata_eh_finish(ap); 705ece180d1STejun Heo } 706c6fd2807SJeff Garzik 707c6fd2807SJeff Garzik /* process port suspend request */ 708c6fd2807SJeff Garzik ata_eh_handle_port_suspend(ap); 709c6fd2807SJeff Garzik 710*ff8072d5SHannes Reinecke /* 711*ff8072d5SHannes Reinecke * Exception might have happened after ->error_handler recovered the 712*ff8072d5SHannes Reinecke * port but before this point. Repeat EH in such case. 
713c6fd2807SJeff Garzik */ 714c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 715c6fd2807SJeff Garzik 716c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_EH_PENDING) { 717a1e10f7eSTejun Heo if (--ap->eh_tries) { 718c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 719c6fd2807SJeff Garzik goto repeat; 720c6fd2807SJeff Garzik } 721a9a79dfeSJoe Perches ata_port_err(ap, 722a9a79dfeSJoe Perches "EH pending after %d tries, giving up\n", 723a9a79dfeSJoe Perches ATA_EH_MAX_TRIES); 724914616a3STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 725c6fd2807SJeff Garzik } 726c6fd2807SJeff Garzik 727c6fd2807SJeff Garzik /* this run is complete, make sure EH info is clear */ 7281eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 729cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 730c6fd2807SJeff Garzik 731*ff8072d5SHannes Reinecke /* 732*ff8072d5SHannes Reinecke * end eh (clear host_eh_scheduled) while holding ap->lock such that if 733*ff8072d5SHannes Reinecke * exception occurs after this point but before EH completion, SCSI 734*ff8072d5SHannes Reinecke * midlayer will re-initiate EH. 735c6fd2807SJeff Garzik */ 736e4a9c373SDan Williams ap->ops->end_eh(ap); 737c6fd2807SJeff Garzik 738c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 739c0c362b6STejun Heo ata_eh_release(ap); 740c6fd2807SJeff Garzik 741c6fd2807SJeff Garzik scsi_eh_flush_done_q(&ap->eh_done_q); 742c6fd2807SJeff Garzik 743c6fd2807SJeff Garzik /* clean up */ 744c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 745c6fd2807SJeff Garzik 746c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_LOADING) 747c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_LOADING; 7486f54120eSJason Yan else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && 7496f54120eSJason Yan !(ap->flags & ATA_FLAG_SAS_HOST)) 750ad72cf98STejun Heo schedule_delayed_work(&ap->hotplug_task, 0); 751c6fd2807SJeff Garzik 752c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_RECOVERED) 753a9a79dfeSJoe Perches ata_port_info(ap, "EH complete\n"); 754c6fd2807SJeff Garzik 755c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 756c6fd2807SJeff Garzik 757c6fd2807SJeff Garzik /* tell wait_eh that we're done */ 758c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 759c6fd2807SJeff Garzik wake_up_all(&ap->eh_wait_q); 760c6fd2807SJeff Garzik 761c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 762c6fd2807SJeff Garzik } 7630e0b494cSJames Bottomley EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler); 764c6fd2807SJeff Garzik 765c6fd2807SJeff Garzik /** 766c6fd2807SJeff Garzik * ata_port_wait_eh - Wait for the currently pending EH to complete 767c6fd2807SJeff Garzik * @ap: Port to wait EH for 768c6fd2807SJeff Garzik * 769c6fd2807SJeff Garzik * Wait until the currently pending EH is complete. 770c6fd2807SJeff Garzik * 771c6fd2807SJeff Garzik * LOCKING: 772c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
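 *
 * Usage sketch (illustrative only):
 *
 *	// after EH has been scheduled, e.g. by ata_port_schedule_eh()
 *	// called under ap->lock
 *	ata_port_wait_eh(ap);	// returns once EH and SCSI EH are done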
773c6fd2807SJeff Garzik */ 774c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap) 775c6fd2807SJeff Garzik { 776c6fd2807SJeff Garzik unsigned long flags; 777c6fd2807SJeff Garzik DEFINE_WAIT(wait); 778c6fd2807SJeff Garzik 779c6fd2807SJeff Garzik retry: 780c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 781c6fd2807SJeff Garzik 782c6fd2807SJeff Garzik while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 783c6fd2807SJeff Garzik prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 784c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 785c6fd2807SJeff Garzik schedule(); 786c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 787c6fd2807SJeff Garzik } 788c6fd2807SJeff Garzik finish_wait(&ap->eh_wait_q, &wait); 789c6fd2807SJeff Garzik 790c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 791c6fd2807SJeff Garzik 792c6fd2807SJeff Garzik /* make sure SCSI EH is complete */ 793cca3974eSJeff Garzik if (scsi_host_in_recovery(ap->scsi_host)) { 79497750cebSTejun Heo ata_msleep(ap, 10); 795c6fd2807SJeff Garzik goto retry; 796c6fd2807SJeff Garzik } 797c6fd2807SJeff Garzik } 79881c757bcSDan Williams EXPORT_SYMBOL_GPL(ata_port_wait_eh); 799c6fd2807SJeff Garzik 800afae461aSSergey Shtylyov static unsigned int ata_eh_nr_in_flight(struct ata_port *ap) 8015ddf24c5STejun Heo { 802258c4e5cSJens Axboe struct ata_queued_cmd *qc; 8035ddf24c5STejun Heo unsigned int tag; 804afae461aSSergey Shtylyov unsigned int nr = 0; 8055ddf24c5STejun Heo 8065ddf24c5STejun Heo /* count only non-internal commands */ 807258c4e5cSJens Axboe ata_qc_for_each(ap, qc, tag) { 808258c4e5cSJens Axboe if (qc) 8095ddf24c5STejun Heo nr++; 8109d207accSJens Axboe } 8115ddf24c5STejun Heo 8125ddf24c5STejun Heo return nr; 8135ddf24c5STejun Heo } 8145ddf24c5STejun Heo 815b93ab338SKees Cook void ata_eh_fastdrain_timerfn(struct timer_list *t) 8165ddf24c5STejun Heo { 817b93ab338SKees Cook struct ata_port *ap = from_timer(ap, t, fastdrain_timer); 8185ddf24c5STejun Heo unsigned long flags; 819afae461aSSergey Shtylyov unsigned int cnt; 8205ddf24c5STejun Heo 8215ddf24c5STejun Heo spin_lock_irqsave(ap->lock, flags); 8225ddf24c5STejun Heo 8235ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8245ddf24c5STejun Heo 8255ddf24c5STejun Heo /* are we done? */ 8265ddf24c5STejun Heo if (!cnt) 8275ddf24c5STejun Heo goto out_unlock; 8285ddf24c5STejun Heo 8295ddf24c5STejun Heo if (cnt == ap->fastdrain_cnt) { 830258c4e5cSJens Axboe struct ata_queued_cmd *qc; 8315ddf24c5STejun Heo unsigned int tag; 8325ddf24c5STejun Heo 8335ddf24c5STejun Heo /* No progress during the last interval, tag all 8345ddf24c5STejun Heo * in-flight qcs as timed out and freeze the port. 
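 * (The interval is ATA_EH_FASTDRAIN_INTERVAL, i.e. 3 seconds.)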
8355ddf24c5STejun Heo */ 836258c4e5cSJens Axboe ata_qc_for_each(ap, qc, tag) { 8375ddf24c5STejun Heo if (qc) 8385ddf24c5STejun Heo qc->err_mask |= AC_ERR_TIMEOUT; 8395ddf24c5STejun Heo } 8405ddf24c5STejun Heo 8415ddf24c5STejun Heo ata_port_freeze(ap); 8425ddf24c5STejun Heo } else { 8435ddf24c5STejun Heo /* some qcs have finished, give it another chance */ 8445ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 8455ddf24c5STejun Heo ap->fastdrain_timer.expires = 846341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8475ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8485ddf24c5STejun Heo } 8495ddf24c5STejun Heo 8505ddf24c5STejun Heo out_unlock: 8515ddf24c5STejun Heo spin_unlock_irqrestore(ap->lock, flags); 8525ddf24c5STejun Heo } 8535ddf24c5STejun Heo 8545ddf24c5STejun Heo /** 8555ddf24c5STejun Heo * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 8565ddf24c5STejun Heo * @ap: target ATA port 8575ddf24c5STejun Heo * @fastdrain: activate fast drain 8585ddf24c5STejun Heo * 8595ddf24c5STejun Heo * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 8605ddf24c5STejun Heo * is non-zero and EH wasn't pending before. Fast drain ensures 8615ddf24c5STejun Heo * that EH kicks in in timely manner. 8625ddf24c5STejun Heo * 8635ddf24c5STejun Heo * LOCKING: 8645ddf24c5STejun Heo * spin_lock_irqsave(host lock) 8655ddf24c5STejun Heo */ 8665ddf24c5STejun Heo static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 8675ddf24c5STejun Heo { 868afae461aSSergey Shtylyov unsigned int cnt; 8695ddf24c5STejun Heo 8705ddf24c5STejun Heo /* already scheduled? */ 8715ddf24c5STejun Heo if (ap->pflags & ATA_PFLAG_EH_PENDING) 8725ddf24c5STejun Heo return; 8735ddf24c5STejun Heo 8745ddf24c5STejun Heo ap->pflags |= ATA_PFLAG_EH_PENDING; 8755ddf24c5STejun Heo 8765ddf24c5STejun Heo if (!fastdrain) 8775ddf24c5STejun Heo return; 8785ddf24c5STejun Heo 8795ddf24c5STejun Heo /* do we have in-flight qcs? */ 8805ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 8815ddf24c5STejun Heo if (!cnt) 8825ddf24c5STejun Heo return; 8835ddf24c5STejun Heo 8845ddf24c5STejun Heo /* activate fast drain */ 8855ddf24c5STejun Heo ap->fastdrain_cnt = cnt; 886341c2c95STejun Heo ap->fastdrain_timer.expires = 887341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 8885ddf24c5STejun Heo add_timer(&ap->fastdrain_timer); 8895ddf24c5STejun Heo } 8905ddf24c5STejun Heo 891c6fd2807SJeff Garzik /** 892c6fd2807SJeff Garzik * ata_qc_schedule_eh - schedule qc for error handling 893c6fd2807SJeff Garzik * @qc: command to schedule error handling for 894c6fd2807SJeff Garzik * 895c6fd2807SJeff Garzik * Schedule error handling for @qc. EH will kick in as soon as 896c6fd2807SJeff Garzik * other commands are drained. 897c6fd2807SJeff Garzik * 898c6fd2807SJeff Garzik * LOCKING: 899cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 900c6fd2807SJeff Garzik */ 901c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 902c6fd2807SJeff Garzik { 903c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 904c6fd2807SJeff Garzik 90587629312SNiklas Cassel qc->flags |= ATA_QCFLAG_EH; 9065ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 907c6fd2807SJeff Garzik 908c6fd2807SJeff Garzik /* The following will fail if timeout has already expired. 909c6fd2807SJeff Garzik * ata_scsi_error() takes care of such scmds on EH entry. 91087629312SNiklas Cassel * Note that ATA_QCFLAG_EH is unconditionally set after 911c6fd2807SJeff Garzik * this function completes. 
912c6fd2807SJeff Garzik */ 913c8329cd5SBart Van Assche blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); 914c6fd2807SJeff Garzik } 915c6fd2807SJeff Garzik 916c6fd2807SJeff Garzik /** 917e4a9c373SDan Williams * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine 918e4a9c373SDan Williams * @ap: ATA port to schedule EH for 919e4a9c373SDan Williams * 920e4a9c373SDan Williams * LOCKING: inherited from ata_port_schedule_eh 921e4a9c373SDan Williams * spin_lock_irqsave(host lock) 922e4a9c373SDan Williams */ 923e4a9c373SDan Williams void ata_std_sched_eh(struct ata_port *ap) 924e4a9c373SDan Williams { 925e4a9c373SDan Williams if (ap->pflags & ATA_PFLAG_INITIALIZING) 926e4a9c373SDan Williams return; 927e4a9c373SDan Williams 928e4a9c373SDan Williams ata_eh_set_pending(ap, 1); 929e4a9c373SDan Williams scsi_schedule_eh(ap->scsi_host); 930e4a9c373SDan Williams 931c318458cSHannes Reinecke trace_ata_std_sched_eh(ap); 932e4a9c373SDan Williams } 933e4a9c373SDan Williams EXPORT_SYMBOL_GPL(ata_std_sched_eh); 934e4a9c373SDan Williams 935e4a9c373SDan Williams /** 936e4a9c373SDan Williams * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine 937e4a9c373SDan Williams * @ap: ATA port to end EH for 938e4a9c373SDan Williams * 939e4a9c373SDan Williams * In the libata object model there is a 1:1 mapping of ata_port to 940e4a9c373SDan Williams * shost, so host fields can be directly manipulated under ap->lock, in 941e4a9c373SDan Williams * the libsas case we need to hold a lock at the ha->level to coordinate 942e4a9c373SDan Williams * these events. 943e4a9c373SDan Williams * 944e4a9c373SDan Williams * LOCKING: 945e4a9c373SDan Williams * spin_lock_irqsave(host lock) 946e4a9c373SDan Williams */ 947e4a9c373SDan Williams void ata_std_end_eh(struct ata_port *ap) 948e4a9c373SDan Williams { 949e4a9c373SDan Williams struct Scsi_Host *host = ap->scsi_host; 950e4a9c373SDan Williams 951e4a9c373SDan Williams host->host_eh_scheduled = 0; 952e4a9c373SDan Williams } 953e4a9c373SDan Williams EXPORT_SYMBOL(ata_std_end_eh); 954e4a9c373SDan Williams 955e4a9c373SDan Williams 956e4a9c373SDan Williams /** 957c6fd2807SJeff Garzik * ata_port_schedule_eh - schedule error handling without a qc 958c6fd2807SJeff Garzik * @ap: ATA port to schedule EH for 959c6fd2807SJeff Garzik * 960c6fd2807SJeff Garzik * Schedule error handling for @ap. EH will kick in as soon as 961c6fd2807SJeff Garzik * all commands are drained. 
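 * This simply invokes ap->ops->sched_eh(), which for non-libsas ports
 * is ata_std_sched_eh() above.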
962c6fd2807SJeff Garzik * 963c6fd2807SJeff Garzik * LOCKING: 964cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 965c6fd2807SJeff Garzik */ 966c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap) 967c6fd2807SJeff Garzik { 968e4a9c373SDan Williams /* see: ata_std_sched_eh, unless you know better */ 969e4a9c373SDan Williams ap->ops->sched_eh(ap); 970c6fd2807SJeff Garzik } 971a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 972c6fd2807SJeff Garzik 973dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 974c6fd2807SJeff Garzik { 975258c4e5cSJens Axboe struct ata_queued_cmd *qc; 976c6fd2807SJeff Garzik int tag, nr_aborted = 0; 977c6fd2807SJeff Garzik 9785ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */ 9795ddf24c5STejun Heo ata_eh_set_pending(ap, 0); 9805ddf24c5STejun Heo 98128361c40SJens Axboe /* include internal tag in iteration */ 982258c4e5cSJens Axboe ata_qc_for_each_with_internal(ap, qc, tag) { 983dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) { 98487629312SNiklas Cassel qc->flags |= ATA_QCFLAG_EH; 985c6fd2807SJeff Garzik ata_qc_complete(qc); 986c6fd2807SJeff Garzik nr_aborted++; 987c6fd2807SJeff Garzik } 988c6fd2807SJeff Garzik } 989c6fd2807SJeff Garzik 990c6fd2807SJeff Garzik if (!nr_aborted) 991c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 992c6fd2807SJeff Garzik 993c6fd2807SJeff Garzik return nr_aborted; 994c6fd2807SJeff Garzik } 995c6fd2807SJeff Garzik 996c6fd2807SJeff Garzik /** 997dbd82616STejun Heo * ata_link_abort - abort all qc's on the link 998dbd82616STejun Heo * @link: ATA link to abort qc's for 999dbd82616STejun Heo * 1000dbd82616STejun Heo * Abort all active qc's active on @link and schedule EH. 1001dbd82616STejun Heo * 1002dbd82616STejun Heo * LOCKING: 1003dbd82616STejun Heo * spin_lock_irqsave(host lock) 1004dbd82616STejun Heo * 1005dbd82616STejun Heo * RETURNS: 1006dbd82616STejun Heo * Number of aborted qc's. 1007dbd82616STejun Heo */ 1008dbd82616STejun Heo int ata_link_abort(struct ata_link *link) 1009dbd82616STejun Heo { 1010dbd82616STejun Heo return ata_do_link_abort(link->ap, link); 1011dbd82616STejun Heo } 1012a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_link_abort); 1013dbd82616STejun Heo 1014dbd82616STejun Heo /** 1015dbd82616STejun Heo * ata_port_abort - abort all qc's on the port 1016dbd82616STejun Heo * @ap: ATA port to abort qc's for 1017dbd82616STejun Heo * 1018dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH. 1019dbd82616STejun Heo * 1020dbd82616STejun Heo * LOCKING: 1021dbd82616STejun Heo * spin_lock_irqsave(host_set lock) 1022dbd82616STejun Heo * 1023dbd82616STejun Heo * RETURNS: 1024dbd82616STejun Heo * Number of aborted qc's. 1025dbd82616STejun Heo */ 1026dbd82616STejun Heo int ata_port_abort(struct ata_port *ap) 1027dbd82616STejun Heo { 1028dbd82616STejun Heo return ata_do_link_abort(ap, NULL); 1029dbd82616STejun Heo } 1030a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_abort); 1031dbd82616STejun Heo 1032dbd82616STejun Heo /** 1033c6fd2807SJeff Garzik * __ata_port_freeze - freeze port 1034c6fd2807SJeff Garzik * @ap: ATA port to freeze 1035c6fd2807SJeff Garzik * 1036c6fd2807SJeff Garzik * This function is called when HSM violation or some other 1037c6fd2807SJeff Garzik * condition disrupts normal operation of the port. 
Frozen port 1038c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is 1039c6fd2807SJeff Garzik * thawed, which usually follows a successful reset. 1040c6fd2807SJeff Garzik * 1041c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port 1042c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 1043c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler 1044c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port 1045c6fd2807SJeff Garzik * is frozen. 1046c6fd2807SJeff Garzik * 1047c6fd2807SJeff Garzik * LOCKING: 1048cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1049c6fd2807SJeff Garzik */ 1050c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap) 1051c6fd2807SJeff Garzik { 1052c6fd2807SJeff Garzik if (ap->ops->freeze) 1053c6fd2807SJeff Garzik ap->ops->freeze(ap); 1054c6fd2807SJeff Garzik 1055c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN; 1056c6fd2807SJeff Garzik 1057c318458cSHannes Reinecke trace_ata_port_freeze(ap); 1058c6fd2807SJeff Garzik } 1059c6fd2807SJeff Garzik 1060c6fd2807SJeff Garzik /** 1061c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port 1062c6fd2807SJeff Garzik * @ap: ATA port to freeze 1063c6fd2807SJeff Garzik * 106454c38444SJeff Garzik * Abort and freeze @ap. The freeze operation must be called 106554c38444SJeff Garzik * first, because some hardware requires special operations 106654c38444SJeff Garzik * before the taskfile registers are accessible. 1067c6fd2807SJeff Garzik * 1068c6fd2807SJeff Garzik * LOCKING: 1069cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 1070c6fd2807SJeff Garzik * 1071c6fd2807SJeff Garzik * RETURNS: 1072c6fd2807SJeff Garzik * Number of aborted commands. 1073c6fd2807SJeff Garzik */ 1074c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap) 1075c6fd2807SJeff Garzik { 1076c6fd2807SJeff Garzik __ata_port_freeze(ap); 1077c6fd2807SJeff Garzik 1078cb6e73aaSye xingchen return ata_port_abort(ap); 1079c6fd2807SJeff Garzik } 1080a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_port_freeze); 1081c6fd2807SJeff Garzik 1082c6fd2807SJeff Garzik /** 1083c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port 1084c6fd2807SJeff Garzik * @ap: ATA port to freeze 1085c6fd2807SJeff Garzik * 1086c6fd2807SJeff Garzik * Freeze @ap. 1087c6fd2807SJeff Garzik * 1088c6fd2807SJeff Garzik * LOCKING: 1089c6fd2807SJeff Garzik * None. 1090c6fd2807SJeff Garzik */ 1091c6fd2807SJeff Garzik void ata_eh_freeze_port(struct ata_port *ap) 1092c6fd2807SJeff Garzik { 1093c6fd2807SJeff Garzik unsigned long flags; 1094c6fd2807SJeff Garzik 1095c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1096c6fd2807SJeff Garzik __ata_port_freeze(ap); 1097c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1098c6fd2807SJeff Garzik } 1099a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 1100c6fd2807SJeff Garzik 1101c6fd2807SJeff Garzik /** 110294bd5719SMauro Carvalho Chehab * ata_eh_thaw_port - EH helper to thaw port 1103c6fd2807SJeff Garzik * @ap: ATA port to thaw 1104c6fd2807SJeff Garzik * 1105c6fd2807SJeff Garzik * Thaw frozen port @ap. 1106c6fd2807SJeff Garzik * 1107c6fd2807SJeff Garzik * LOCKING: 1108c6fd2807SJeff Garzik * None. 
1109c6fd2807SJeff Garzik */ 1110c6fd2807SJeff Garzik void ata_eh_thaw_port(struct ata_port *ap) 1111c6fd2807SJeff Garzik { 1112c6fd2807SJeff Garzik unsigned long flags; 1113c6fd2807SJeff Garzik 1114c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1115c6fd2807SJeff Garzik 1116c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_FROZEN; 1117c6fd2807SJeff Garzik 1118c6fd2807SJeff Garzik if (ap->ops->thaw) 1119c6fd2807SJeff Garzik ap->ops->thaw(ap); 1120c6fd2807SJeff Garzik 1121c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1122c6fd2807SJeff Garzik 1123c318458cSHannes Reinecke trace_ata_port_thaw(ap); 1124c6fd2807SJeff Garzik } 1125c6fd2807SJeff Garzik 1126c6fd2807SJeff Garzik static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1127c6fd2807SJeff Garzik { 1128c6fd2807SJeff Garzik /* nada */ 1129c6fd2807SJeff Garzik } 1130c6fd2807SJeff Garzik 1131c6fd2807SJeff Garzik static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1132c6fd2807SJeff Garzik { 1133c6fd2807SJeff Garzik struct ata_port *ap = qc->ap; 1134c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1135c6fd2807SJeff Garzik unsigned long flags; 1136c6fd2807SJeff Garzik 1137c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1138c6fd2807SJeff Garzik qc->scsidone = ata_eh_scsidone; 1139c6fd2807SJeff Garzik __ata_qc_complete(qc); 1140c6fd2807SJeff Garzik WARN_ON(ata_tag_valid(qc->tag)); 1141c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1142c6fd2807SJeff Garzik 1143c6fd2807SJeff Garzik scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1144c6fd2807SJeff Garzik } 1145c6fd2807SJeff Garzik 1146c6fd2807SJeff Garzik /** 1147c6fd2807SJeff Garzik * ata_eh_qc_complete - Complete an active ATA command from EH 1148c6fd2807SJeff Garzik * @qc: Command to complete 1149c6fd2807SJeff Garzik * 1150c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command has 1151c6fd2807SJeff Garzik * completed. To be used from EH. 1152c6fd2807SJeff Garzik */ 1153c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1154c6fd2807SJeff Garzik { 1155c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1156c6fd2807SJeff Garzik scmd->retries = scmd->allowed; 1157c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1158c6fd2807SJeff Garzik } 1159c6fd2807SJeff Garzik 1160c6fd2807SJeff Garzik /** 1161c6fd2807SJeff Garzik * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1162c6fd2807SJeff Garzik * @qc: Command to retry 1163c6fd2807SJeff Garzik * 1164c6fd2807SJeff Garzik * Indicate to the mid and upper layers that an ATA command 1165c6fd2807SJeff Garzik * should be retried. To be used from EH. 1166c6fd2807SJeff Garzik * 1167c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1168f13e2201SGwendal Grignou * scmd->allowed is incremented for commands which get retried 1169c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1170c6fd2807SJeff Garzik */ 1171c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1172c6fd2807SJeff Garzik { 1173c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1174f13e2201SGwendal Grignou if (!qc->err_mask) 1175f13e2201SGwendal Grignou scmd->allowed++; 1176c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1177c6fd2807SJeff Garzik } 1178c6fd2807SJeff Garzik 1179c6fd2807SJeff Garzik /** 1180678afac6STejun Heo * ata_dev_disable - disable ATA device 1181678afac6STejun Heo * @dev: ATA device to disable 1182678afac6STejun Heo * 1183678afac6STejun Heo * Disable @dev. 
1184678afac6STejun Heo * 1185678afac6STejun Heo * Locking: 1186678afac6STejun Heo * EH context. 1187678afac6STejun Heo */ 1188678afac6STejun Heo void ata_dev_disable(struct ata_device *dev) 1189678afac6STejun Heo { 1190678afac6STejun Heo if (!ata_dev_enabled(dev)) 1191678afac6STejun Heo return; 1192678afac6STejun Heo 11931c95a27cSHannes Reinecke ata_dev_warn(dev, "disable device\n"); 1194678afac6STejun Heo ata_acpi_on_disable(dev); 1195678afac6STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1196678afac6STejun Heo dev->class++; 119799cf610aSTejun Heo 119899cf610aSTejun Heo /* From now till the next successful probe, ering is used to 119999cf610aSTejun Heo * track probe failures. Clear accumulated device error info. 120099cf610aSTejun Heo */ 120199cf610aSTejun Heo ata_ering_clear(&dev->ering); 1202678afac6STejun Heo } 1203a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_dev_disable); 1204678afac6STejun Heo 1205678afac6STejun Heo /** 1206c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1207c6fd2807SJeff Garzik * @dev: ATA device to detach 1208c6fd2807SJeff Garzik * 1209c6fd2807SJeff Garzik * Detach @dev. 1210c6fd2807SJeff Garzik * 1211c6fd2807SJeff Garzik * LOCKING: 1212c6fd2807SJeff Garzik * None. 1213c6fd2807SJeff Garzik */ 1214fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1215c6fd2807SJeff Garzik { 1216f58229f8STejun Heo struct ata_link *link = dev->link; 1217f58229f8STejun Heo struct ata_port *ap = link->ap; 121890484ebfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1219c6fd2807SJeff Garzik unsigned long flags; 1220c6fd2807SJeff Garzik 1221c6fd2807SJeff Garzik ata_dev_disable(dev); 1222c6fd2807SJeff Garzik 1223c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1224c6fd2807SJeff Garzik 1225c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1226c6fd2807SJeff Garzik 1227c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1228c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1229c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1230c6fd2807SJeff Garzik } 1231c6fd2807SJeff Garzik 123290484ebfSTejun Heo /* clear per-dev EH info */ 1233f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1234f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 123590484ebfSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 123690484ebfSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1237c6fd2807SJeff Garzik 1238c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1239c6fd2807SJeff Garzik } 1240c6fd2807SJeff Garzik 1241c6fd2807SJeff Garzik /** 1242c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1243955e57dfSTejun Heo * @link: target ATA link 1244c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1245c6fd2807SJeff Garzik * @action: action about to be performed 1246c6fd2807SJeff Garzik * 1247c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1248955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1249955e57dfSTejun Heo * repeated. 1250c6fd2807SJeff Garzik * 1251c6fd2807SJeff Garzik * LOCKING: 1252c6fd2807SJeff Garzik * None. 
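 *
 * Typical usage is as a bracket around the action itself (a simplified
 * sketch; do_action() is a hypothetical stand-in for the real work):
 *
 *	ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
 *	rc = do_action(dev);
 *	if (rc == 0)
 *		ata_eh_done(link, dev, ATA_EH_REVALIDATE);
 *
 * An error reported while the action is in flight sets the bit in
 * eh_info again so the action is retried on a later pass, while
 * ata_eh_done() clears it from eh_context once the action succeeded.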
1253c6fd2807SJeff Garzik */ 1254fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1255c6fd2807SJeff Garzik unsigned int action) 1256c6fd2807SJeff Garzik { 1257955e57dfSTejun Heo struct ata_port *ap = link->ap; 1258955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1259955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1260c6fd2807SJeff Garzik unsigned long flags; 1261c6fd2807SJeff Garzik 1262c318458cSHannes Reinecke trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action); 1263c318458cSHannes Reinecke 1264c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1265c6fd2807SJeff Garzik 1266955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1267c6fd2807SJeff Garzik 1268a568d1d2STejun Heo /* About to take EH action, set RECOVERED. Ignore actions on 1269a568d1d2STejun Heo * slave links as master will do them again. 1270a568d1d2STejun Heo */ 1271a568d1d2STejun Heo if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1272c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1273c6fd2807SJeff Garzik 1274c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1275c6fd2807SJeff Garzik } 1276c6fd2807SJeff Garzik 1277c6fd2807SJeff Garzik /** 1278c6fd2807SJeff Garzik * ata_eh_done - EH action complete 12792f60e1abSJonathan Corbet * @link: ATA link for which EH actions are complete 1280c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1281c6fd2807SJeff Garzik * @action: action just completed 1282c6fd2807SJeff Garzik * 1283c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1284955e57dfSTejun Heo * in @link->eh_context. 1285c6fd2807SJeff Garzik * 1286c6fd2807SJeff Garzik * LOCKING: 1287c6fd2807SJeff Garzik * None. 1288c6fd2807SJeff Garzik */ 1289fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1290c6fd2807SJeff Garzik unsigned int action) 1291c6fd2807SJeff Garzik { 1292955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 12939af5c9c9STejun Heo 1294c318458cSHannes Reinecke trace_ata_eh_done(link, dev ? dev->devno : 0, action); 1295c318458cSHannes Reinecke 1296955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1297c6fd2807SJeff Garzik } 1298c6fd2807SJeff Garzik 1299c6fd2807SJeff Garzik /** 1300c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1301c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1302c6fd2807SJeff Garzik * 1303c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1304c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1305c6fd2807SJeff Garzik * error is reported. 1306c6fd2807SJeff Garzik * 1307c6fd2807SJeff Garzik * LOCKING: 1308c6fd2807SJeff Garzik * None. 
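 *
 * For example (an illustrative call, relying only on the ordering of
 * the checks below), ata_err_string(AC_ERR_TIMEOUT | AC_ERR_DEV)
 * returns "timeout", because the more severe timeout is tested before
 * the plain device error.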
1309c6fd2807SJeff Garzik * 1310c6fd2807SJeff Garzik * RETURNS: 1311c6fd2807SJeff Garzik * Descriptive string for @err_mask 1312c6fd2807SJeff Garzik */ 1313c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1314c6fd2807SJeff Garzik { 1315c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1316c6fd2807SJeff Garzik return "host bus error"; 1317c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1318c6fd2807SJeff Garzik return "ATA bus error"; 1319c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1320c6fd2807SJeff Garzik return "timeout"; 1321c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1322c6fd2807SJeff Garzik return "HSM violation"; 1323c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1324c6fd2807SJeff Garzik return "internal error"; 1325c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1326c6fd2807SJeff Garzik return "media error"; 1327c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1328c6fd2807SJeff Garzik return "invalid argument"; 1329c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1330c6fd2807SJeff Garzik return "device error"; 133154fb131bSDamien Le Moal if (err_mask & AC_ERR_NCQ) 133254fb131bSDamien Le Moal return "NCQ error"; 133354fb131bSDamien Le Moal if (err_mask & AC_ERR_NODEV_HINT) 133454fb131bSDamien Le Moal return "Polling detection error"; 1335c6fd2807SJeff Garzik return "unknown error"; 1336c6fd2807SJeff Garzik } 1337c6fd2807SJeff Garzik 1338c6fd2807SJeff Garzik /** 133911fc33daSTejun Heo * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 134011fc33daSTejun Heo * @dev: target ATAPI device 134111fc33daSTejun Heo * @r_sense_key: out parameter for sense_key 134211fc33daSTejun Heo * 134311fc33daSTejun Heo * Perform ATAPI TEST_UNIT_READY. 134411fc33daSTejun Heo * 134511fc33daSTejun Heo * LOCKING: 134611fc33daSTejun Heo * EH context (may sleep). 134711fc33daSTejun Heo * 134811fc33daSTejun Heo * RETURNS: 134911fc33daSTejun Heo * 0 on success, AC_ERR_* mask on failure. 135011fc33daSTejun Heo */ 13513dc67440SAaron Lu unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 135211fc33daSTejun Heo { 135311fc33daSTejun Heo u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 135411fc33daSTejun Heo struct ata_taskfile tf; 135511fc33daSTejun Heo unsigned int err_mask; 135611fc33daSTejun Heo 135711fc33daSTejun Heo ata_tf_init(dev, &tf); 135811fc33daSTejun Heo 135911fc33daSTejun Heo tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 136011fc33daSTejun Heo tf.command = ATA_CMD_PACKET; 136111fc33daSTejun Heo tf.protocol = ATAPI_PROT_NODATA; 136211fc33daSTejun Heo 136311fc33daSTejun Heo err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 136411fc33daSTejun Heo if (err_mask == AC_ERR_DEV) 1365efcef265SSergey Shtylyov *r_sense_key = tf.error >> 4; 136611fc33daSTejun Heo return err_mask; 136711fc33daSTejun Heo } 136811fc33daSTejun Heo 136911fc33daSTejun Heo /** 1370e87fd28cSHannes Reinecke * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT 13712f60e1abSJonathan Corbet * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to 1372e87fd28cSHannes Reinecke * 1373e87fd28cSHannes Reinecke * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK 1374e87fd28cSHannes Reinecke * SENSE. This function is an EH helper. 1375e87fd28cSHannes Reinecke * 1376e87fd28cSHannes Reinecke * LOCKING: 1377e87fd28cSHannes Reinecke * Kernel thread context (may sleep). 137824aeebbfSNiklas Cassel * 137924aeebbfSNiklas Cassel * RETURNS: 138024aeebbfSNiklas Cassel * true if sense data could be fetched, false otherwise. 
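 *
 * On success the sense data has been written into
 * qc->scsicmd->sense_buffer and ATA_QCFLAG_SENSE_VALID is set on @qc,
 * so a caller normally only has to map the result to a CHECK CONDITION
 * status, as the ATA/ZAC branch of ata_eh_analyze_tf() below does.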
1381e87fd28cSHannes Reinecke */ 138224aeebbfSNiklas Cassel static bool ata_eh_request_sense(struct ata_queued_cmd *qc) 1383e87fd28cSHannes Reinecke { 1384b46c760eSNiklas Cassel struct scsi_cmnd *cmd = qc->scsicmd; 1385e87fd28cSHannes Reinecke struct ata_device *dev = qc->dev; 1386e87fd28cSHannes Reinecke struct ata_taskfile tf; 1387e87fd28cSHannes Reinecke unsigned int err_mask; 1388e87fd28cSHannes Reinecke 13894cb7c6f1SNiklas Cassel if (ata_port_is_frozen(qc->ap)) { 1390e87fd28cSHannes Reinecke ata_dev_warn(dev, "sense data available but port frozen\n"); 139124aeebbfSNiklas Cassel return false; 1392e87fd28cSHannes Reinecke } 1393e87fd28cSHannes Reinecke 1394e87fd28cSHannes Reinecke if (!ata_id_sense_reporting_enabled(dev->id)) { 1395e87fd28cSHannes Reinecke ata_dev_warn(qc->dev, "sense data reporting disabled\n"); 139624aeebbfSNiklas Cassel return false; 1397e87fd28cSHannes Reinecke } 1398e87fd28cSHannes Reinecke 1399e87fd28cSHannes Reinecke ata_tf_init(dev, &tf); 1400e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1401e87fd28cSHannes Reinecke tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1402e87fd28cSHannes Reinecke tf.command = ATA_CMD_REQ_SENSE_DATA; 1403e87fd28cSHannes Reinecke tf.protocol = ATA_PROT_NODATA; 1404e87fd28cSHannes Reinecke 1405e87fd28cSHannes Reinecke err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1406e87fd28cSHannes Reinecke /* Ignore err_mask; ATA_ERR might be set */ 1407efcef265SSergey Shtylyov if (tf.status & ATA_SENSE) { 14084b89ad8eSNiklas Cassel if (ata_scsi_sense_is_valid(tf.lbah, tf.lbam, tf.lbal)) { 140924aeebbfSNiklas Cassel /* Set sense without also setting scsicmd->result */ 141024aeebbfSNiklas Cassel scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, 141124aeebbfSNiklas Cassel cmd->sense_buffer, tf.lbah, 141224aeebbfSNiklas Cassel tf.lbam, tf.lbal); 1413e87fd28cSHannes Reinecke qc->flags |= ATA_QCFLAG_SENSE_VALID; 141424aeebbfSNiklas Cassel return true; 14154b89ad8eSNiklas Cassel } 1416e87fd28cSHannes Reinecke } else { 1417e87fd28cSHannes Reinecke ata_dev_warn(dev, "request sense failed stat %02x emask %x\n", 1418efcef265SSergey Shtylyov tf.status, err_mask); 1419e87fd28cSHannes Reinecke } 142024aeebbfSNiklas Cassel 142124aeebbfSNiklas Cassel return false; 1422e87fd28cSHannes Reinecke } 1423e87fd28cSHannes Reinecke 1424e87fd28cSHannes Reinecke /** 1425c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1426c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1427c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 14283eabddb8STejun Heo * @dfl_sense_key: default sense key to use 1429c6fd2807SJeff Garzik * 1430c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1431c6fd2807SJeff Garzik * SENSE. This function is an EH helper. 1432c6fd2807SJeff Garzik * 1433c6fd2807SJeff Garzik * LOCKING: 1434c6fd2807SJeff Garzik * Kernel thread context (may sleep).
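 *
 * Caller sketch (mirroring the ATAPI branch of ata_eh_analyze_tf()
 * further down in this file):
 *
 *	tmp = atapi_eh_request_sense(qc->dev, qc->scsicmd->sense_buffer,
 *				     qc->result_tf.error >> 4);
 *	if (!tmp)
 *		qc->flags |= ATA_QCFLAG_SENSE_VALID;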
1435c6fd2807SJeff Garzik * 1436c6fd2807SJeff Garzik * RETURNS: 1437c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1438c6fd2807SJeff Garzik */ 14393dc67440SAaron Lu unsigned int atapi_eh_request_sense(struct ata_device *dev, 14403eabddb8STejun Heo u8 *sense_buf, u8 dfl_sense_key) 1441c6fd2807SJeff Garzik { 14423eabddb8STejun Heo u8 cdb[ATAPI_CDB_LEN] = 14433eabddb8STejun Heo { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 14449af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1445c6fd2807SJeff Garzik struct ata_taskfile tf; 1446c6fd2807SJeff Garzik 1447c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1448c6fd2807SJeff Garzik 144956287768SAlbert Lee /* initialize sense_buf with the error register, 145056287768SAlbert Lee * for the case where they are -not- overwritten 145156287768SAlbert Lee */ 1452c6fd2807SJeff Garzik sense_buf[0] = 0x70; 14533eabddb8STejun Heo sense_buf[2] = dfl_sense_key; 145456287768SAlbert Lee 145556287768SAlbert Lee /* some devices time out if garbage left in tf */ 145656287768SAlbert Lee ata_tf_init(dev, &tf); 1457c6fd2807SJeff Garzik 1458c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1459c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1460c6fd2807SJeff Garzik 1461c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1462c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 14630dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1464c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1465c6fd2807SJeff Garzik } else { 14660dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1467f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1468f2dfc1a1STejun Heo tf.lbah = 0; 1469c6fd2807SJeff Garzik } 1470c6fd2807SJeff Garzik 1471c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 14722b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1473c6fd2807SJeff Garzik } 1474c6fd2807SJeff Garzik 1475c6fd2807SJeff Garzik /** 1476c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 14770260731fSTejun Heo * @link: ATA link to analyze SError for 1478c6fd2807SJeff Garzik * 1479c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1480c6fd2807SJeff Garzik * failure. 1481c6fd2807SJeff Garzik * 1482c6fd2807SJeff Garzik * LOCKING: 1483c6fd2807SJeff Garzik * None. 1484c6fd2807SJeff Garzik */ 14850260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1486c6fd2807SJeff Garzik { 14870260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1488c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1489c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1490f9df58cbSTejun Heo u32 hotplug_mask; 1491c6fd2807SJeff Garzik 1492e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1493c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1494cf480626STejun Heo action |= ATA_EH_RESET; 1495c6fd2807SJeff Garzik } 1496c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1497c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1498cf480626STejun Heo action |= ATA_EH_RESET; 1499c6fd2807SJeff Garzik } 1500c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1501c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1502cf480626STejun Heo action |= ATA_EH_RESET; 1503c6fd2807SJeff Garzik } 1504f9df58cbSTejun Heo 1505f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1506f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1507f9df58cbSTejun Heo * host links. 
For disabled PMP links, only N bit is 1508f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 1509f9df58cbSTejun Heo */ 1510eb0e85e3STejun Heo if (link->lpm_policy > ATA_LPM_MAX_POWER) 15116b7ae954STejun Heo hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 15126b7ae954STejun Heo else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1513f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1514f9df58cbSTejun Heo else 1515f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1516f9df58cbSTejun Heo 1517f9df58cbSTejun Heo if (serror & hotplug_mask) 1518c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1519c6fd2807SJeff Garzik 1520c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1521c6fd2807SJeff Garzik ehc->i.action |= action; 1522c6fd2807SJeff Garzik } 1523c6fd2807SJeff Garzik 1524c6fd2807SJeff Garzik /** 1525c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1526c6fd2807SJeff Garzik * @qc: qc to analyze 1527c6fd2807SJeff Garzik * 1528c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1529c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 153025985edcSLucas De Marchi * available. 1531c6fd2807SJeff Garzik * 1532c6fd2807SJeff Garzik * LOCKING: 1533c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1534c6fd2807SJeff Garzik * 1535c6fd2807SJeff Garzik * RETURNS: 1536c6fd2807SJeff Garzik * Determined recovery action 1537c6fd2807SJeff Garzik */ 1538e3b1fff6SNiklas Cassel static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc) 1539c6fd2807SJeff Garzik { 1540e3b1fff6SNiklas Cassel const struct ata_taskfile *tf = &qc->result_tf; 1541c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1542efcef265SSergey Shtylyov u8 stat = tf->status, err = tf->error; 1543c6fd2807SJeff Garzik 1544c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1545c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1546cf480626STejun Heo return ATA_EH_RESET; 1547c6fd2807SJeff Garzik } 1548c6fd2807SJeff Garzik 1549e87fd28cSHannes Reinecke if (stat & (ATA_ERR | ATA_DF)) { 1550a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1551e87fd28cSHannes Reinecke /* 1552e87fd28cSHannes Reinecke * Sense data reporting does not work if the 1553e87fd28cSHannes Reinecke * device fault bit is set. 1554e87fd28cSHannes Reinecke */ 1555e87fd28cSHannes Reinecke if (stat & ATA_DF) 1556e87fd28cSHannes Reinecke stat &= ~ATA_SENSE; 1557e87fd28cSHannes Reinecke } else { 1558c6fd2807SJeff Garzik return 0; 1559e87fd28cSHannes Reinecke } 1560c6fd2807SJeff Garzik 1561c6fd2807SJeff Garzik switch (qc->dev->class) { 1562013115d9SNiklas Cassel case ATA_DEV_ATA: 15639162c657SHannes Reinecke case ATA_DEV_ZAC: 1564461ec040SNiklas Cassel /* 1565461ec040SNiklas Cassel * Fetch the sense data explicitly if: 1566461ec040SNiklas Cassel * -It was a non-NCQ command that failed, or 1567461ec040SNiklas Cassel * -It was a NCQ command that failed, but the sense data 1568461ec040SNiklas Cassel * was not included in the NCQ command error log 1569461ec040SNiklas Cassel * (i.e. NCQ autosense is not supported by the device). 
1570461ec040SNiklas Cassel */ 157124aeebbfSNiklas Cassel if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && 157224aeebbfSNiklas Cassel (stat & ATA_SENSE) && ata_eh_request_sense(qc)) 157324aeebbfSNiklas Cassel set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); 1574c6fd2807SJeff Garzik if (err & ATA_ICRC) 1575c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1576eec7e1c1SAlexey Asemov if (err & (ATA_UNC | ATA_AMNF)) 1577c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1578c6fd2807SJeff Garzik if (err & ATA_IDNF) 1579c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1580c6fd2807SJeff Garzik break; 1581c6fd2807SJeff Garzik 1582c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 15834cb7c6f1SNiklas Cassel if (!ata_port_is_frozen(qc->ap)) { 15843eabddb8STejun Heo tmp = atapi_eh_request_sense(qc->dev, 15853eabddb8STejun Heo qc->scsicmd->sense_buffer, 1586efcef265SSergey Shtylyov qc->result_tf.error >> 4); 15873852e373SHannes Reinecke if (!tmp) 1588c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 15893852e373SHannes Reinecke else 1590c6fd2807SJeff Garzik qc->err_mask |= tmp; 1591c6fd2807SJeff Garzik } 1592a569a30dSTejun Heo } 1593c6fd2807SJeff Garzik 15943852e373SHannes Reinecke if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1595b8e162f9SBart Van Assche enum scsi_disposition ret = scsi_check_sense(qc->scsicmd); 15963852e373SHannes Reinecke /* 159779487259SDamien Le Moal * SUCCESS here means that the sense code could be 15983852e373SHannes Reinecke * evaluated and should be passed to the upper layers 15993852e373SHannes Reinecke * for correct evaluation. 160079487259SDamien Le Moal * FAILED means the sense code could not be interpreted 16013852e373SHannes Reinecke * and the device would need to be reset. 16023852e373SHannes Reinecke * NEEDS_RETRY and ADD_TO_MLQUEUE means that the 16033852e373SHannes Reinecke * command would need to be retried. 
16043852e373SHannes Reinecke */ 16053852e373SHannes Reinecke if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) { 16063852e373SHannes Reinecke qc->flags |= ATA_QCFLAG_RETRY; 16073852e373SHannes Reinecke qc->err_mask |= AC_ERR_OTHER; 16083852e373SHannes Reinecke } else if (ret != SUCCESS) { 16093852e373SHannes Reinecke qc->err_mask |= AC_ERR_HSM; 16103852e373SHannes Reinecke } 16113852e373SHannes Reinecke } 1612c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1613cf480626STejun Heo action |= ATA_EH_RESET; 1614c6fd2807SJeff Garzik 1615c6fd2807SJeff Garzik return action; 1616c6fd2807SJeff Garzik } 1617c6fd2807SJeff Garzik 161876326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 161976326ac1STejun Heo int *xfer_ok) 1620c6fd2807SJeff Garzik { 162176326ac1STejun Heo int base = 0; 162276326ac1STejun Heo 162376326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 162476326ac1STejun Heo *xfer_ok = 1; 162576326ac1STejun Heo 162676326ac1STejun Heo if (!*xfer_ok) 162775f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 162876326ac1STejun Heo 16297d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 163076326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1631c6fd2807SJeff Garzik 16327d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 163376326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 16347d47e8d4STejun Heo 16353884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 16367d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 163776326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 16387d47e8d4STejun Heo if ((err_mask & 16397d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 164076326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1641c6fd2807SJeff Garzik } 1642c6fd2807SJeff Garzik 1643c6fd2807SJeff Garzik return 0; 1644c6fd2807SJeff Garzik } 1645c6fd2807SJeff Garzik 16467d47e8d4STejun Heo struct speed_down_verdict_arg { 1647c6fd2807SJeff Garzik u64 since; 164876326ac1STejun Heo int xfer_ok; 16493884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1650c6fd2807SJeff Garzik }; 1651c6fd2807SJeff Garzik 16527d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1653c6fd2807SJeff Garzik { 16547d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 165576326ac1STejun Heo int cat; 1656c6fd2807SJeff Garzik 1657d9027470SGwendal Grignou if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1658c6fd2807SJeff Garzik return -1; 1659c6fd2807SJeff Garzik 166076326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 166176326ac1STejun Heo &arg->xfer_ok); 16627d47e8d4STejun Heo arg->nr_errors[cat]++; 166376326ac1STejun Heo 1664c6fd2807SJeff Garzik return 0; 1665c6fd2807SJeff Garzik } 1666c6fd2807SJeff Garzik 1667c6fd2807SJeff Garzik /** 16687d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1669c6fd2807SJeff Garzik * @dev: Device of interest 1670c6fd2807SJeff Garzik * 1671c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 16727d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 16737d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 
1674c6fd2807SJeff Garzik * 16753884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1676c6fd2807SJeff Garzik * 16773884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 16783884f7b0STejun Heo * IO commands 16797d47e8d4STejun Heo * 16803884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1681c6fd2807SJeff Garzik * 168276326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 168376326ac1STejun Heo * data transfer hasn't been verified. 168476326ac1STejun Heo * 16853884f7b0STejun Heo * Verdicts are 16867d47e8d4STejun Heo * 16873884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 16887d47e8d4STejun Heo * 16893884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 16903884f7b0STejun Heo * to PIO. 16913884f7b0STejun Heo * 16923884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 16933884f7b0STejun Heo * 16943884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 169576326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 169676326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 169776326ac1STejun Heo * This is to expedite speed down decisions right after device is 169876326ac1STejun Heo * initially configured. 16993884f7b0STejun Heo * 17004091fb95SMasahiro Yamada * The following are speed down rules. #1 and #2 deal with 170176326ac1STejun Heo * DUBIOUS errors. 170276326ac1STejun Heo * 170376326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 170476326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 170576326ac1STejun Heo * 170676326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 170776326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 170876326ac1STejun Heo * 170976326ac1STejun Heo * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors 171025985edcSLucas De Marchi * occurred during last 5 mins, FALLBACK_TO_PIO. 17113884f7b0STejun Heo * 171276326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 17133884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 17143884f7b0STejun Heo * 171576326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 17163884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 17177d47e8d4STejun Heo * 1718c6fd2807SJeff Garzik * LOCKING: 1719c6fd2807SJeff Garzik * Inherited from caller. 1720c6fd2807SJeff Garzik * 1721c6fd2807SJeff Garzik * RETURNS: 17227d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
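 *
 * Worked example (made-up counts): five UNK_DEV errors by themselves
 * within the last 10 mins trip rule #4 (more than 3, so NCQ_OFF) but
 * not rule #5, which needs more than 6 UNK_DEV errors; four ATA_BUS
 * errors by themselves trip rule #5 and yield SPEED_DOWN.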
1723c6fd2807SJeff Garzik */ 17247d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1725c6fd2807SJeff Garzik { 17267d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 17277d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 17287d47e8d4STejun Heo struct speed_down_verdict_arg arg; 17297d47e8d4STejun Heo unsigned int verdict = 0; 1730c6fd2807SJeff Garzik 17313884f7b0STejun Heo /* scan past 5 mins of error history */ 17323884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 17333884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 17343884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 17353884f7b0STejun Heo 173676326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 173776326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 173876326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 173976326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 174076326ac1STejun Heo 174176326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 174276326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 174376326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 174476326ac1STejun Heo 17453884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 17463884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1747663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 17483884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 17493884f7b0STejun Heo 17507d47e8d4STejun Heo /* scan past 10 mins of error history */ 1751c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 17527d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 17537d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1754c6fd2807SJeff Garzik 17553884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 17563884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 17577d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 17583884f7b0STejun Heo 17593884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 17603884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1761663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 17627d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1763c6fd2807SJeff Garzik 17647d47e8d4STejun Heo return verdict; 1765c6fd2807SJeff Garzik } 1766c6fd2807SJeff Garzik 1767c6fd2807SJeff Garzik /** 1768c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1769c6fd2807SJeff Garzik * @dev: Failed device 17703884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1771c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1772c6fd2807SJeff Garzik * 1773c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1774c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1775c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1776c6fd2807SJeff Garzik * necessary. 1777c6fd2807SJeff Garzik * 1778c6fd2807SJeff Garzik * LOCKING: 1779c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1780c6fd2807SJeff Garzik * 1781c6fd2807SJeff Garzik * RETURNS: 17827d47e8d4STejun Heo * Determined recovery action. 
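 *
 * A condensed view of how the link autopsy later in this file feeds
 * this function (sketch, per failed device):
 *
 *	if (qc->flags & ATA_QCFLAG_IO)
 *		eflags |= ATA_EFLAG_IS_IO;
 *	ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);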
1783c6fd2807SJeff Garzik */ 17843884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 17853884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1786c6fd2807SJeff Garzik { 1787b1c72916STejun Heo struct ata_link *link = ata_dev_phys_link(dev); 178876326ac1STejun Heo int xfer_ok = 0; 17897d47e8d4STejun Heo unsigned int verdict; 17907d47e8d4STejun Heo unsigned int action = 0; 17917d47e8d4STejun Heo 17927d47e8d4STejun Heo /* don't bother if Cat-0 error */ 179376326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1794c6fd2807SJeff Garzik return 0; 1795c6fd2807SJeff Garzik 1796c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 17973884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 17987d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1799c6fd2807SJeff Garzik 18007d47e8d4STejun Heo /* turn off NCQ? */ 180112980c1fSDamien Le Moal if ((verdict & ATA_EH_SPDN_NCQ_OFF) && ata_ncq_enabled(dev)) { 18027d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 1803a9a79dfeSJoe Perches ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); 18047d47e8d4STejun Heo goto done; 18057d47e8d4STejun Heo } 1806c6fd2807SJeff Garzik 18077d47e8d4STejun Heo /* speed down? */ 18087d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1809c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1810a07d499bSTejun Heo if (sata_down_spd_limit(link, 0) == 0) { 1811cf480626STejun Heo action |= ATA_EH_RESET; 18127d47e8d4STejun Heo goto done; 18137d47e8d4STejun Heo } 1814c6fd2807SJeff Garzik 1815c6fd2807SJeff Garzik /* lower transfer mode */ 18167d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 18177d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 18187d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 18197d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 18207d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 18217d47e8d4STejun Heo int sel; 1822c6fd2807SJeff Garzik 18237d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 18247d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 18257d47e8d4STejun Heo else 18267d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 18277d47e8d4STejun Heo 18287d47e8d4STejun Heo dev->spdn_cnt++; 18297d47e8d4STejun Heo 18307d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1831cf480626STejun Heo action |= ATA_EH_RESET; 18327d47e8d4STejun Heo goto done; 18337d47e8d4STejun Heo } 18347d47e8d4STejun Heo } 18357d47e8d4STejun Heo } 18367d47e8d4STejun Heo 18377d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1838663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
18397d47e8d4STejun Heo */ 18407d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1841663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 18427d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 18437d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 18447d47e8d4STejun Heo dev->spdn_cnt = 0; 1845cf480626STejun Heo action |= ATA_EH_RESET; 18467d47e8d4STejun Heo goto done; 18477d47e8d4STejun Heo } 18487d47e8d4STejun Heo } 18497d47e8d4STejun Heo 1850c6fd2807SJeff Garzik return 0; 18517d47e8d4STejun Heo done: 18527d47e8d4STejun Heo /* device has been slowed down, blow error history */ 185376326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 18547d47e8d4STejun Heo ata_ering_clear(&dev->ering); 18557d47e8d4STejun Heo return action; 1856c6fd2807SJeff Garzik } 1857c6fd2807SJeff Garzik 1858c6fd2807SJeff Garzik /** 18598d899e70SMark Lord * ata_eh_worth_retry - analyze error and decide whether to retry 18608d899e70SMark Lord * @qc: qc to possibly retry 18618d899e70SMark Lord * 18628d899e70SMark Lord * Look at the cause of the error and decide if a retry 18638d899e70SMark Lord * might be useful or not. We don't want to retry media errors 18648d899e70SMark Lord * because the drive itself has probably already taken 10-30 seconds 18658d899e70SMark Lord * doing its own internal retries before reporting the failure. 18668d899e70SMark Lord */ 18678d899e70SMark Lord static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) 18688d899e70SMark Lord { 18691eaca39aSBian Yu if (qc->err_mask & AC_ERR_MEDIA) 18708d899e70SMark Lord return 0; /* don't retry media errors */ 18718d899e70SMark Lord if (qc->flags & ATA_QCFLAG_IO) 18728d899e70SMark Lord return 1; /* otherwise retry anything from fs stack */ 18738d899e70SMark Lord if (qc->err_mask & AC_ERR_INVALID) 18748d899e70SMark Lord return 0; /* don't retry these */ 18758d899e70SMark Lord return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ 18768d899e70SMark Lord } 18778d899e70SMark Lord 18788d899e70SMark Lord /** 18797eb49509SDamien Le Moal * ata_eh_quiet - check if we need to be quiet about a command error 18807eb49509SDamien Le Moal * @qc: qc to check 18817eb49509SDamien Le Moal * 18827eb49509SDamien Le Moal * Look at the qc flags and its scsi command request flags to determine 18837eb49509SDamien Le Moal * if we need to be quiet about the command failure.
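 *
 * In other words (an informal summary of the check below): a qc whose
 * SCSI command was issued with RQF_QUIET, i.e. the submitter asked for
 * failures not to be logged, gets tagged ATA_QCFLAG_QUIET, and the
 * autopsy suppresses the EH report only when every failed command on
 * the link asked for such silence.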
18847eb49509SDamien Le Moal */ 18857eb49509SDamien Le Moal static inline bool ata_eh_quiet(struct ata_queued_cmd *qc) 18867eb49509SDamien Le Moal { 1887c8329cd5SBart Van Assche if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET) 18887eb49509SDamien Le Moal qc->flags |= ATA_QCFLAG_QUIET; 18897eb49509SDamien Le Moal return qc->flags & ATA_QCFLAG_QUIET; 18907eb49509SDamien Le Moal } 18917eb49509SDamien Le Moal 189218bd7718SNiklas Cassel static int ata_eh_read_sense_success_non_ncq(struct ata_link *link) 189318bd7718SNiklas Cassel { 189418bd7718SNiklas Cassel struct ata_port *ap = link->ap; 189518bd7718SNiklas Cassel struct ata_queued_cmd *qc; 189618bd7718SNiklas Cassel 189718bd7718SNiklas Cassel qc = __ata_qc_from_tag(ap, link->active_tag); 189818bd7718SNiklas Cassel if (!qc) 189918bd7718SNiklas Cassel return -EIO; 190018bd7718SNiklas Cassel 190118bd7718SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) || 190218bd7718SNiklas Cassel !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || 190318bd7718SNiklas Cassel qc->err_mask) 190418bd7718SNiklas Cassel return -EIO; 190518bd7718SNiklas Cassel 190618bd7718SNiklas Cassel if (!ata_eh_request_sense(qc)) 190718bd7718SNiklas Cassel return -EIO; 190818bd7718SNiklas Cassel 190918bd7718SNiklas Cassel /* 191018bd7718SNiklas Cassel * If we have sense data, call scsi_check_sense() in order to set the 191118bd7718SNiklas Cassel * correct SCSI ML byte (if any). No point in checking the return value, 191218bd7718SNiklas Cassel * since the command has already completed successfully. 191318bd7718SNiklas Cassel */ 191418bd7718SNiklas Cassel scsi_check_sense(qc->scsicmd); 191518bd7718SNiklas Cassel 191618bd7718SNiklas Cassel return 0; 191718bd7718SNiklas Cassel } 191818bd7718SNiklas Cassel 191918bd7718SNiklas Cassel static void ata_eh_get_success_sense(struct ata_link *link) 192018bd7718SNiklas Cassel { 192118bd7718SNiklas Cassel struct ata_eh_context *ehc = &link->eh_context; 192218bd7718SNiklas Cassel struct ata_device *dev = link->device; 192318bd7718SNiklas Cassel struct ata_port *ap = link->ap; 192418bd7718SNiklas Cassel struct ata_queued_cmd *qc; 192518bd7718SNiklas Cassel int tag, ret = 0; 192618bd7718SNiklas Cassel 192718bd7718SNiklas Cassel if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE)) 192818bd7718SNiklas Cassel return; 192918bd7718SNiklas Cassel 193018bd7718SNiklas Cassel /* if frozen, we can't do much */ 193118bd7718SNiklas Cassel if (ata_port_is_frozen(ap)) { 193218bd7718SNiklas Cassel ata_dev_warn(dev, 193318bd7718SNiklas Cassel "successful sense data available but port frozen\n"); 193418bd7718SNiklas Cassel goto out; 193518bd7718SNiklas Cassel } 193618bd7718SNiklas Cassel 193718bd7718SNiklas Cassel /* 193818bd7718SNiklas Cassel * If the link has sactive set, then we have outstanding NCQ commands 193918bd7718SNiklas Cassel * and have to read the Successful NCQ Commands log to get the sense 194018bd7718SNiklas Cassel * data. Otherwise, we are dealing with a non-NCQ command and use 194118bd7718SNiklas Cassel * request sense ext command to retrieve the sense data. 
194218bd7718SNiklas Cassel */ 194318bd7718SNiklas Cassel if (link->sactive) 194418bd7718SNiklas Cassel ret = ata_eh_read_sense_success_ncq_log(link); 194518bd7718SNiklas Cassel else 194618bd7718SNiklas Cassel ret = ata_eh_read_sense_success_non_ncq(link); 194718bd7718SNiklas Cassel if (ret) 194818bd7718SNiklas Cassel goto out; 194918bd7718SNiklas Cassel 195018bd7718SNiklas Cassel ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE); 195118bd7718SNiklas Cassel return; 195218bd7718SNiklas Cassel 195318bd7718SNiklas Cassel out: 195418bd7718SNiklas Cassel /* 195518bd7718SNiklas Cassel * If we failed to get sense data for a successful command that ought to 195618bd7718SNiklas Cassel * have sense data, we cannot simply return BLK_STS_OK to user space. 195718bd7718SNiklas Cassel * This is because we can't know if the sense data that we couldn't get 195818bd7718SNiklas Cassel * was actually "DATA CURRENTLY UNAVAILABLE". Reporting such a command 195918bd7718SNiklas Cassel * as success to user space would result in a silent data corruption. 196018bd7718SNiklas Cassel * Thus, add a bogus ABORTED_COMMAND sense data to such commands, such 196118bd7718SNiklas Cassel * that SCSI will report these commands as BLK_STS_IOERR to user space. 196218bd7718SNiklas Cassel */ 196318bd7718SNiklas Cassel ata_qc_for_each_raw(ap, qc, tag) { 196418bd7718SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) || 196518bd7718SNiklas Cassel !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || 196618bd7718SNiklas Cassel qc->err_mask || 196718bd7718SNiklas Cassel ata_dev_phys_link(qc->dev) != link) 196818bd7718SNiklas Cassel continue; 196918bd7718SNiklas Cassel 197018bd7718SNiklas Cassel /* We managed to get sense for this success command, skip. */ 197118bd7718SNiklas Cassel if (qc->flags & ATA_QCFLAG_SENSE_VALID) 197218bd7718SNiklas Cassel continue; 197318bd7718SNiklas Cassel 197418bd7718SNiklas Cassel /* This success command did not have any sense data, skip. */ 197518bd7718SNiklas Cassel if (!(qc->result_tf.status & ATA_SENSE)) 197618bd7718SNiklas Cassel continue; 197718bd7718SNiklas Cassel 197818bd7718SNiklas Cassel /* This success command had sense data, but we failed to get. */ 197918bd7718SNiklas Cassel ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0); 198018bd7718SNiklas Cassel qc->flags |= ATA_QCFLAG_SENSE_VALID; 198118bd7718SNiklas Cassel } 198218bd7718SNiklas Cassel ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE); 198318bd7718SNiklas Cassel } 198418bd7718SNiklas Cassel 19857eb49509SDamien Le Moal /** 19869b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 19879b1e2658STejun Heo * @link: host link to perform autopsy on 1988c6fd2807SJeff Garzik * 19890260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 19900260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 19910260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1992c6fd2807SJeff Garzik * 1993c6fd2807SJeff Garzik * LOCKING: 1994c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1995c6fd2807SJeff Garzik */ 19969b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1997c6fd2807SJeff Garzik { 19980260731fSTejun Heo struct ata_port *ap = link->ap; 1999936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2000258c4e5cSJens Axboe struct ata_queued_cmd *qc; 2001dfcc173dSTejun Heo struct ata_device *dev; 20023884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 20037eb49509SDamien Le Moal int tag, nr_failed = 0, nr_quiet = 0; 2004c6fd2807SJeff Garzik u32 serror; 2005c6fd2807SJeff Garzik int rc; 2006c6fd2807SJeff Garzik 2007c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2008c6fd2807SJeff Garzik return; 2009c6fd2807SJeff Garzik 2010c6fd2807SJeff Garzik /* obtain and analyze SError */ 2011936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 2012c6fd2807SJeff Garzik if (rc == 0) { 2013c6fd2807SJeff Garzik ehc->i.serror |= serror; 20140260731fSTejun Heo ata_eh_analyze_serror(link); 20154e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 2016cf480626STejun Heo /* SError read failed, force reset and probing */ 2017b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 2018cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20194e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 20204e57c517STejun Heo } 2021c6fd2807SJeff Garzik 2022c6fd2807SJeff Garzik /* analyze NCQ failure */ 20230260731fSTejun Heo ata_eh_analyze_ncq_error(link); 2024c6fd2807SJeff Garzik 202518bd7718SNiklas Cassel /* 202618bd7718SNiklas Cassel * Check if this was a successful command that simply needs sense data. 202718bd7718SNiklas Cassel * Since the sense data is not part of the completion, we need to fetch 202818bd7718SNiklas Cassel * it using an additional command. Since this can't be done from irq 202918bd7718SNiklas Cassel * context, the sense data for successful commands are fetched by EH. 203018bd7718SNiklas Cassel */ 203118bd7718SNiklas Cassel ata_eh_get_success_sense(link); 203218bd7718SNiklas Cassel 2033c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 2034c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 2035c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 2036c6fd2807SJeff Garzik 2037c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 2038c6fd2807SJeff Garzik 2039258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 204087629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) || 20413d8a3ae3SNiklas Cassel qc->flags & ATA_QCFLAG_RETRY || 204218bd7718SNiklas Cassel qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD || 2043b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link) 2044c6fd2807SJeff Garzik continue; 2045c6fd2807SJeff Garzik 2046c6fd2807SJeff Garzik /* inherit upper level err_mask */ 2047c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 2048c6fd2807SJeff Garzik 2049c6fd2807SJeff Garzik /* analyze TF */ 2050e3b1fff6SNiklas Cassel ehc->i.action |= ata_eh_analyze_tf(qc); 2051c6fd2807SJeff Garzik 2052c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 2053c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 2054c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2055c6fd2807SJeff Garzik AC_ERR_INVALID); 2056c6fd2807SJeff Garzik 2057c6fd2807SJeff Garzik /* any real error trumps unknown error */ 2058c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 2059c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 2060c6fd2807SJeff Garzik 2061804689adSDamien Le Moal /* 2062804689adSDamien Le Moal * SENSE_VALID trumps dev/unknown error and revalidation. 
Upper 2063804689adSDamien Le Moal * layers will determine whether the command is worth retrying 2064804689adSDamien Le Moal * based on the sense data and device class/type. Otherwise, 2065804689adSDamien Le Moal * determine directly if the command is worth retrying using its 2066804689adSDamien Le Moal * error mask and flags. 2067804689adSDamien Le Moal */ 2068f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2069c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2070804689adSDamien Le Moal else if (ata_eh_worth_retry(qc)) 207103faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 207203faab78STejun Heo 2073c6fd2807SJeff Garzik /* accumulate error info */ 2074c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 2075c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 2076c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 20773884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 2078255c03d1SHannes Reinecke trace_ata_eh_link_autopsy_qc(qc); 20797eb49509SDamien Le Moal 20807eb49509SDamien Le Moal /* Count quiet errors */ 20817eb49509SDamien Le Moal if (ata_eh_quiet(qc)) 20827eb49509SDamien Le Moal nr_quiet++; 20837eb49509SDamien Le Moal nr_failed++; 2084c6fd2807SJeff Garzik } 2085c6fd2807SJeff Garzik 20867eb49509SDamien Le Moal /* If all failed commands requested silence, then be quiet */ 20877eb49509SDamien Le Moal if (nr_quiet == nr_failed) 20887eb49509SDamien Le Moal ehc->i.flags |= ATA_EHI_QUIET; 20897eb49509SDamien Le Moal 2090c6fd2807SJeff Garzik /* enforce default EH actions */ 20914cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap) || 2092c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2093cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 20943884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 20953884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2096c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2097c6fd2807SJeff Garzik 2098dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 2099dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
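	 * For instance (illustrative): when the failing command carried a
	 * media error, the ATA_EH_REVALIDATE bit set above is moved from
	 * the link-wide action mask into dev_action[] of the offending
	 * device only, leaving any sibling device on the link untouched.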
2100dfcc173dSTejun Heo */ 2101c6fd2807SJeff Garzik if (ehc->i.dev) { 2102c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 2103c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 2104c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2105c6fd2807SJeff Garzik } 2106c6fd2807SJeff Garzik 21072695e366STejun Heo /* propagate timeout to host link */ 21082695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 21092695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 21102695e366STejun Heo 21112695e366STejun Heo /* record error and consider speeding down */ 2112dfcc173dSTejun Heo dev = ehc->i.dev; 21132695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 21142695e366STejun Heo ata_dev_enabled(link->device)))) 2115dfcc173dSTejun Heo dev = link->device; 2116dfcc173dSTejun Heo 211776326ac1STejun Heo if (dev) { 211876326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 211976326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 21203884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2121255c03d1SHannes Reinecke trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); 2122f1601113SRameshwar Prasad Sahu } 2123c6fd2807SJeff Garzik } 2124c6fd2807SJeff Garzik 2125c6fd2807SJeff Garzik /** 21269b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 21279b1e2658STejun Heo * @ap: host port to perform autopsy on 21289b1e2658STejun Heo * 21299b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 21309b1e2658STejun Heo * which recovery actions are needed. 21319b1e2658STejun Heo * 21329b1e2658STejun Heo * LOCKING: 21339b1e2658STejun Heo * Kernel thread context (may sleep). 21349b1e2658STejun Heo */ 2135fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 21369b1e2658STejun Heo { 21379b1e2658STejun Heo struct ata_link *link; 21389b1e2658STejun Heo 21391eca4365STejun Heo ata_for_each_link(link, ap, EDGE) 21409b1e2658STejun Heo ata_eh_link_autopsy(link); 21412695e366STejun Heo 2142b1c72916STejun Heo /* Handle the frigging slave link. Autopsy is done similarly 2143b1c72916STejun Heo * but actions and flags are transferred over to the master 2144b1c72916STejun Heo * link and handled from there. 2145b1c72916STejun Heo */ 2146b1c72916STejun Heo if (ap->slave_link) { 2147b1c72916STejun Heo struct ata_eh_context *mehc = &ap->link.eh_context; 2148b1c72916STejun Heo struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2149b1c72916STejun Heo 2150848e4c68STejun Heo /* transfer control flags from master to slave */ 2151848e4c68STejun Heo sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2152848e4c68STejun Heo 2153848e4c68STejun Heo /* perform autopsy on the slave link */ 2154b1c72916STejun Heo ata_eh_link_autopsy(ap->slave_link); 2155b1c72916STejun Heo 2156848e4c68STejun Heo /* transfer actions from slave to master and clear slave */ 2157b1c72916STejun Heo ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2158b1c72916STejun Heo mehc->i.action |= sehc->i.action; 2159b1c72916STejun Heo mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2160b1c72916STejun Heo mehc->i.flags |= sehc->i.flags; 2161b1c72916STejun Heo ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2162b1c72916STejun Heo } 2163b1c72916STejun Heo 21642695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 21652695e366STejun Heo * Perform host link autopsy last. 
21662695e366STejun Heo */ 2167071f44b1STejun Heo if (sata_pmp_attached(ap)) 21682695e366STejun Heo ata_eh_link_autopsy(&ap->link); 21699b1e2658STejun Heo } 21709b1e2658STejun Heo 21719b1e2658STejun Heo /** 2172d4520903SHannes Reinecke * ata_get_cmd_name - get name for ATA command 2173d4520903SHannes Reinecke * @command: ATA command code to get name for 21746521148cSRobert Hancock * 2175d4520903SHannes Reinecke * Return a textual name of the given command or "unknown" 21766521148cSRobert Hancock * 21776521148cSRobert Hancock * LOCKING: 21786521148cSRobert Hancock * None 21796521148cSRobert Hancock */ 2180d4520903SHannes Reinecke const char *ata_get_cmd_name(u8 command) 21816521148cSRobert Hancock { 21826521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 21836521148cSRobert Hancock static const struct 21846521148cSRobert Hancock { 21856521148cSRobert Hancock u8 command; 21866521148cSRobert Hancock const char *text; 21876521148cSRobert Hancock } cmd_descr[] = { 21886521148cSRobert Hancock { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 21896521148cSRobert Hancock { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 21906521148cSRobert Hancock { ATA_CMD_STANDBY, "STANDBY" }, 21916521148cSRobert Hancock { ATA_CMD_IDLE, "IDLE" }, 21926521148cSRobert Hancock { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 21936521148cSRobert Hancock { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 21943915c3b5SRobert Hancock { ATA_CMD_DOWNLOAD_MICRO_DMA, "DOWNLOAD MICROCODE DMA" }, 21956521148cSRobert Hancock { ATA_CMD_NOP, "NOP" }, 21966521148cSRobert Hancock { ATA_CMD_FLUSH, "FLUSH CACHE" }, 21976521148cSRobert Hancock { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 21986521148cSRobert Hancock { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 21996521148cSRobert Hancock { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 22006521148cSRobert Hancock { ATA_CMD_SERVICE, "SERVICE" }, 22016521148cSRobert Hancock { ATA_CMD_READ, "READ DMA" }, 22026521148cSRobert Hancock { ATA_CMD_READ_EXT, "READ DMA EXT" }, 22036521148cSRobert Hancock { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 22046521148cSRobert Hancock { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 22056521148cSRobert Hancock { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 22066521148cSRobert Hancock { ATA_CMD_WRITE, "WRITE DMA" }, 22076521148cSRobert Hancock { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 22086521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 22096521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 22106521148cSRobert Hancock { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 22116521148cSRobert Hancock { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 22126521148cSRobert Hancock { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 22136521148cSRobert Hancock { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 22146521148cSRobert Hancock { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 2215d3122bf9SDamien Le Moal { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, 22163915c3b5SRobert Hancock { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" }, 22173915c3b5SRobert Hancock { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" }, 22186521148cSRobert Hancock { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 22196521148cSRobert Hancock { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 22206521148cSRobert Hancock { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 22216521148cSRobert Hancock { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 22226521148cSRobert Hancock { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 22236521148cSRobert Hancock { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 
22246521148cSRobert Hancock { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 22256521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 22266521148cSRobert Hancock { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 22276521148cSRobert Hancock { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 22286521148cSRobert Hancock { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 22296521148cSRobert Hancock { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 22306521148cSRobert Hancock { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 22316521148cSRobert Hancock { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 22326521148cSRobert Hancock { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 22336521148cSRobert Hancock { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 22346521148cSRobert Hancock { ATA_CMD_SLEEP, "SLEEP" }, 22356521148cSRobert Hancock { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 22366521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 22376521148cSRobert Hancock { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 22386521148cSRobert Hancock { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 22396521148cSRobert Hancock { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 22406521148cSRobert Hancock { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 22416521148cSRobert Hancock { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 22426521148cSRobert Hancock { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 22436521148cSRobert Hancock { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 22443915c3b5SRobert Hancock { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, 22456521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 22466521148cSRobert Hancock { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 22476521148cSRobert Hancock { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 22486521148cSRobert Hancock { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 22496521148cSRobert Hancock { ATA_CMD_PMP_READ, "READ BUFFER" }, 22503915c3b5SRobert Hancock { ATA_CMD_PMP_READ_DMA, "READ BUFFER DMA" }, 22516521148cSRobert Hancock { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 22523915c3b5SRobert Hancock { ATA_CMD_PMP_WRITE_DMA, "WRITE BUFFER DMA" }, 22536521148cSRobert Hancock { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 22546521148cSRobert Hancock { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 22556521148cSRobert Hancock { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 22566521148cSRobert Hancock { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 22576521148cSRobert Hancock { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 22586521148cSRobert Hancock { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 22596521148cSRobert Hancock { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 22606521148cSRobert Hancock { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 22616521148cSRobert Hancock { ATA_CMD_SMART, "SMART" }, 22626521148cSRobert Hancock { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 22636521148cSRobert Hancock { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2264acad7627SFUJITA Tomonori { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 22656521148cSRobert Hancock { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 22666521148cSRobert Hancock { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 22676521148cSRobert Hancock { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 22686521148cSRobert Hancock { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 22696521148cSRobert Hancock { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 22706521148cSRobert Hancock { ATA_CMD_CFA_WRITE_MULT_NE, 
"CFA WRITE MULTIPLE WITHOUT ERASE" }, 22713915c3b5SRobert Hancock { ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" }, 22723915c3b5SRobert Hancock { ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" }, 227328a3fc22SHannes Reinecke { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" }, 227427708a95SHannes Reinecke { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" }, 22756521148cSRobert Hancock { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 22766521148cSRobert Hancock { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 22776521148cSRobert Hancock { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 22786521148cSRobert Hancock { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 22796521148cSRobert Hancock { ATA_CMD_RESTORE, "RECALIBRATE" }, 22806521148cSRobert Hancock { 0, NULL } /* terminate list */ 22816521148cSRobert Hancock }; 22826521148cSRobert Hancock 22836521148cSRobert Hancock unsigned int i; 22846521148cSRobert Hancock for (i = 0; cmd_descr[i].text; i++) 22856521148cSRobert Hancock if (cmd_descr[i].command == command) 22866521148cSRobert Hancock return cmd_descr[i].text; 22876521148cSRobert Hancock #endif 22886521148cSRobert Hancock 2289d4520903SHannes Reinecke return "unknown"; 22906521148cSRobert Hancock } 2291d4520903SHannes Reinecke EXPORT_SYMBOL_GPL(ata_get_cmd_name); 22926521148cSRobert Hancock 22936521148cSRobert Hancock /** 22949b1e2658STejun Heo * ata_eh_link_report - report error handling to user 22950260731fSTejun Heo * @link: ATA link EH is going on 2296c6fd2807SJeff Garzik * 2297c6fd2807SJeff Garzik * Report EH to user. 2298c6fd2807SJeff Garzik * 2299c6fd2807SJeff Garzik * LOCKING: 2300c6fd2807SJeff Garzik * None. 2301c6fd2807SJeff Garzik */ 23029b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 2303c6fd2807SJeff Garzik { 23040260731fSTejun Heo struct ata_port *ap = link->ap; 23050260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2306258c4e5cSJens Axboe struct ata_queued_cmd *qc; 2307c6fd2807SJeff Garzik const char *frozen, *desc; 2308462098b0SLevente Kurusa char tries_buf[6] = ""; 2309c6fd2807SJeff Garzik int tag, nr_failed = 0; 2310c6fd2807SJeff Garzik 231194ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 231294ff3d54STejun Heo return; 231394ff3d54STejun Heo 2314c6fd2807SJeff Garzik desc = NULL; 2315c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 2316c6fd2807SJeff Garzik desc = ehc->i.desc; 2317c6fd2807SJeff Garzik 2318258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 231987629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) || 2320b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || 2321e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 2322e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 2323c6fd2807SJeff Garzik continue; 2324c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2325c6fd2807SJeff Garzik continue; 2326c6fd2807SJeff Garzik 2327c6fd2807SJeff Garzik nr_failed++; 2328c6fd2807SJeff Garzik } 2329c6fd2807SJeff Garzik 2330c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 2331c6fd2807SJeff Garzik return; 2332c6fd2807SJeff Garzik 2333c6fd2807SJeff Garzik frozen = ""; 23344cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap)) 2335c6fd2807SJeff Garzik frozen = " frozen"; 2336c6fd2807SJeff Garzik 2337a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 2338462098b0SLevente Kurusa snprintf(tries_buf, sizeof(tries_buf), " t%d", 2339a1e10f7eSTejun Heo ap->eh_tries); 2340a1e10f7eSTejun Heo 2341c6fd2807SJeff Garzik if (ehc->i.dev) { 2342a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, 
"exception Emask 0x%x " 2343a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2344a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2345a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2346c6fd2807SJeff Garzik if (desc) 2347a9a79dfeSJoe Perches ata_dev_err(ehc->i.dev, "%s\n", desc); 2348c6fd2807SJeff Garzik } else { 2349a9a79dfeSJoe Perches ata_link_err(link, "exception Emask 0x%x " 2350a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2351a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 2352a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 2353c6fd2807SJeff Garzik if (desc) 2354a9a79dfeSJoe Perches ata_link_err(link, "%s\n", desc); 2355c6fd2807SJeff Garzik } 2356c6fd2807SJeff Garzik 23576521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 23581333e194SRobert Hancock if (ehc->i.serror) 2359a9a79dfeSJoe Perches ata_link_err(link, 23601333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 23611333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 23621333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 23631333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 23641333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 23651333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 23661333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 23671333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 23681333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 23691333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 23701333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 23711333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 23721333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 23731333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 23741333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 23751333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 23761333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 23771333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? 
"DevExch " : ""); 23786521148cSRobert Hancock #endif 23791333e194SRobert Hancock 2380258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 23818a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2382abb6a889STejun Heo char data_buf[20] = ""; 2383abb6a889STejun Heo char cdb_buf[70] = ""; 2384c6fd2807SJeff Garzik 238587629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH) || 2386b1c72916STejun Heo ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2387c6fd2807SJeff Garzik continue; 2388c6fd2807SJeff Garzik 2389abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 2390abb6a889STejun Heo static const char *dma_str[] = { 2391abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 2392abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 2393abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 2394abb6a889STejun Heo }; 2395fb1b8b11SGeert Uytterhoeven const char *prot_str = NULL; 2396abb6a889STejun Heo 2397fb1b8b11SGeert Uytterhoeven switch (qc->tf.protocol) { 2398fb1b8b11SGeert Uytterhoeven case ATA_PROT_UNKNOWN: 2399fb1b8b11SGeert Uytterhoeven prot_str = "unknown"; 2400fb1b8b11SGeert Uytterhoeven break; 2401fb1b8b11SGeert Uytterhoeven case ATA_PROT_NODATA: 2402fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2403fb1b8b11SGeert Uytterhoeven break; 2404fb1b8b11SGeert Uytterhoeven case ATA_PROT_PIO: 2405fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2406fb1b8b11SGeert Uytterhoeven break; 2407fb1b8b11SGeert Uytterhoeven case ATA_PROT_DMA: 2408fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2409fb1b8b11SGeert Uytterhoeven break; 2410fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ: 2411fb1b8b11SGeert Uytterhoeven prot_str = "ncq dma"; 2412fb1b8b11SGeert Uytterhoeven break; 2413fb1b8b11SGeert Uytterhoeven case ATA_PROT_NCQ_NODATA: 2414fb1b8b11SGeert Uytterhoeven prot_str = "ncq nodata"; 2415fb1b8b11SGeert Uytterhoeven break; 2416fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_NODATA: 2417fb1b8b11SGeert Uytterhoeven prot_str = "nodata"; 2418fb1b8b11SGeert Uytterhoeven break; 2419fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_PIO: 2420fb1b8b11SGeert Uytterhoeven prot_str = "pio"; 2421fb1b8b11SGeert Uytterhoeven break; 2422fb1b8b11SGeert Uytterhoeven case ATAPI_PROT_DMA: 2423fb1b8b11SGeert Uytterhoeven prot_str = "dma"; 2424fb1b8b11SGeert Uytterhoeven break; 2425fb1b8b11SGeert Uytterhoeven } 2426abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2427fb1b8b11SGeert Uytterhoeven prot_str, qc->nbytes, dma_str[qc->dma_dir]); 2428abb6a889STejun Heo } 2429abb6a889STejun Heo 24306521148cSRobert Hancock if (ata_is_atapi(qc->tf.protocol)) { 2431a13b0c9dSHannes Reinecke const u8 *cdb = qc->cdb; 2432a13b0c9dSHannes Reinecke size_t cdb_len = qc->dev->cdb_len; 2433a13b0c9dSHannes Reinecke 2434cbba5b0eSHannes Reinecke if (qc->scsicmd) { 2435cbba5b0eSHannes Reinecke cdb = qc->scsicmd->cmnd; 2436cbba5b0eSHannes Reinecke cdb_len = qc->scsicmd->cmd_len; 2437cbba5b0eSHannes Reinecke } 2438cbba5b0eSHannes Reinecke __scsi_format_command(cdb_buf, sizeof(cdb_buf), 2439cbba5b0eSHannes Reinecke cdb, cdb_len); 2440d4520903SHannes Reinecke } else 2441a9a79dfeSJoe Perches ata_dev_err(qc->dev, "failed command: %s\n", 2442d4520903SHannes Reinecke ata_get_cmd_name(cmd->command)); 2443abb6a889STejun Heo 2444a9a79dfeSJoe Perches ata_dev_err(qc->dev, 24458a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2446abb6a889STejun Heo "tag %d%s\n %s" 24478a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 24485335b729STejun Heo "Emask 0x%x (%s)%s\n", 24498a937581STejun 
Heo cmd->command, cmd->feature, cmd->nsect, 24508a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 24518a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 24528a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2453abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 2454efcef265SSergey Shtylyov res->status, res->error, res->nsect, 24558a937581STejun Heo res->lbal, res->lbam, res->lbah, 24568a937581STejun Heo res->hob_feature, res->hob_nsect, 24578a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 24585335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 24595335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 24601333e194SRobert Hancock 24616521148cSRobert Hancock #ifdef CONFIG_ATA_VERBOSE_ERROR 2462efcef265SSergey Shtylyov if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2463e87fd28cSHannes Reinecke ATA_SENSE | ATA_ERR)) { 2464efcef265SSergey Shtylyov if (res->status & ATA_BUSY) 2465a9a79dfeSJoe Perches ata_dev_err(qc->dev, "status: { Busy }\n"); 24661333e194SRobert Hancock else 2467e87fd28cSHannes Reinecke ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 2468efcef265SSergey Shtylyov res->status & ATA_DRDY ? "DRDY " : "", 2469efcef265SSergey Shtylyov res->status & ATA_DF ? "DF " : "", 2470efcef265SSergey Shtylyov res->status & ATA_DRQ ? "DRQ " : "", 2471efcef265SSergey Shtylyov res->status & ATA_SENSE ? "SENSE " : "", 2472efcef265SSergey Shtylyov res->status & ATA_ERR ? "ERR " : ""); 24731333e194SRobert Hancock } 24741333e194SRobert Hancock 24751333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 2476efcef265SSergey Shtylyov (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | 2477efcef265SSergey Shtylyov ATA_ABORTED))) 2478eec7e1c1SAlexey Asemov ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", 2479efcef265SSergey Shtylyov res->error & ATA_ICRC ? "ICRC " : "", 2480efcef265SSergey Shtylyov res->error & ATA_UNC ? "UNC " : "", 2481efcef265SSergey Shtylyov res->error & ATA_AMNF ? "AMNF " : "", 2482efcef265SSergey Shtylyov res->error & ATA_IDNF ? "IDNF " : "", 2483efcef265SSergey Shtylyov res->error & ATA_ABORTED ? "ABRT " : ""); 24846521148cSRobert Hancock #endif 2485c6fd2807SJeff Garzik } 2486c6fd2807SJeff Garzik } 2487c6fd2807SJeff Garzik 24889b1e2658STejun Heo /** 24899b1e2658STejun Heo * ata_eh_report - report error handling to user 24909b1e2658STejun Heo * @ap: ATA port to report EH about 24919b1e2658STejun Heo * 24929b1e2658STejun Heo * Report EH to user. 24939b1e2658STejun Heo * 24949b1e2658STejun Heo * LOCKING: 24959b1e2658STejun Heo * None. 
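 *
 * Reporting can also be suppressed per link: ata_eh_link_report() above
 * returns early when ATA_EHI_QUIET is set in the link's EH context.  A
 * minimal sketch of arranging that (illustration only, not a
 * recommendation of where to set the flag):
 *
 *	link->eh_context.i.flags |= ATA_EHI_QUIET;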
24969b1e2658STejun Heo */ 2497fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 24989b1e2658STejun Heo { 24999b1e2658STejun Heo struct ata_link *link; 25009b1e2658STejun Heo 25011eca4365STejun Heo ata_for_each_link(link, ap, HOST_FIRST) 25029b1e2658STejun Heo ata_eh_link_report(link); 25039b1e2658STejun Heo } 25049b1e2658STejun Heo 2505cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2506b1c72916STejun Heo unsigned int *classes, unsigned long deadline, 2507b1c72916STejun Heo bool clear_classes) 2508c6fd2807SJeff Garzik { 2509f58229f8STejun Heo struct ata_device *dev; 2510c6fd2807SJeff Garzik 2511b1c72916STejun Heo if (clear_classes) 25121eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2513f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2514c6fd2807SJeff Garzik 2515f046519fSTejun Heo return reset(link, classes, deadline); 2516c6fd2807SJeff Garzik } 2517c6fd2807SJeff Garzik 2518e8411fbaSSergei Shtylyov static int ata_eh_followup_srst_needed(struct ata_link *link, int rc) 2519c6fd2807SJeff Garzik { 252045db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2521ae791c05STejun Heo return 0; 25225dbfc9cbSTejun Heo if (rc == -EAGAIN) 2523c6fd2807SJeff Garzik return 1; 2524071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 25253495de73STejun Heo return 1; 2526c6fd2807SJeff Garzik return 0; 2527c6fd2807SJeff Garzik } 2528c6fd2807SJeff Garzik 2529fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2530c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2531c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2532c6fd2807SJeff Garzik { 2533afaa5c37STejun Heo struct ata_port *ap = link->ap; 2534b1c72916STejun Heo struct ata_link *slave = ap->slave_link; 2535936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2536705d2014SBartlomiej Zolnierkiewicz struct ata_eh_context *sehc = slave ? 
&slave->eh_context : NULL; 2537c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2538416dc9edSTejun Heo unsigned int lflags = link->flags; 2539c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2540d8af0eb6STejun Heo int max_tries = 0, try = 0; 2541b1c72916STejun Heo struct ata_link *failed_link; 2542f58229f8STejun Heo struct ata_device *dev; 2543416dc9edSTejun Heo unsigned long deadline, now; 2544c6fd2807SJeff Garzik ata_reset_fn_t reset; 2545afaa5c37STejun Heo unsigned long flags; 2546416dc9edSTejun Heo u32 sstatus; 2547b1c72916STejun Heo int nr_unknown, rc; 2548c6fd2807SJeff Garzik 2549932648b0STejun Heo /* 2550932648b0STejun Heo * Prepare to reset 2551932648b0STejun Heo */ 2552ca02f225SSergey Shtylyov while (ata_eh_reset_timeouts[max_tries] != UINT_MAX) 2553d8af0eb6STejun Heo max_tries++; 2554ca6d43b0SDan Williams if (link->flags & ATA_LFLAG_RST_ONCE) 2555ca6d43b0SDan Williams max_tries = 1; 255605944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_HRST) 255705944bdfSTejun Heo hardreset = NULL; 255805944bdfSTejun Heo if (link->flags & ATA_LFLAG_NO_SRST) 255905944bdfSTejun Heo softreset = NULL; 2560d8af0eb6STejun Heo 256125985edcSLucas De Marchi /* make sure each reset attempt is at least COOL_DOWN apart */ 256219b72321STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 25630a2c0f56STejun Heo now = jiffies; 256419b72321STejun Heo WARN_ON(time_after(ehc->last_reset, now)); 256519b72321STejun Heo deadline = ata_deadline(ehc->last_reset, 256619b72321STejun Heo ATA_EH_RESET_COOL_DOWN); 25670a2c0f56STejun Heo if (time_before(now, deadline)) 25680a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 256919b72321STejun Heo } 25700a2c0f56STejun Heo 2571afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2572afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2573afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2574afaa5c37STejun Heo 2575cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2576c6fd2807SJeff Garzik 25771eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2578cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2579cdeab114STejun Heo * may change configuration and be in PIO0 timing. If 2580cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2581cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2582cdeab114STejun Heo * suitable controller mode we should not touch the 2583cdeab114STejun Heo * bus as we may be talking too fast. 2584cdeab114STejun Heo */ 2585cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 25865416912aSAaron Lu dev->dma_mode = 0xff; 2587cdeab114STejun Heo 2588cdeab114STejun Heo /* If the controller has a pio mode setup function 2589cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2590cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2591cdeab114STejun Heo * configuring devices. 
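 *
 * For illustration, a driver-side sketch of such a hook; the function
 * name and body below are made up, only the ata_port_operations
 * ->set_piomode() signature used by the call that follows is real:
 *
 *	static void foo_set_piomode(struct ata_port *ap,
 *				    struct ata_device *adev)
 *	{
 *		// reprogram the controller's timing registers for
 *		// adev->pio_mode, which is XFER_PIO_0 right after reset
 *	}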
2592cdeab114STejun Heo */ 2593cdeab114STejun Heo if (ap->ops->set_piomode) 2594cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2595cdeab114STejun Heo } 2596cdeab114STejun Heo 2597cf480626STejun Heo /* prefer hardreset */ 2598932648b0STejun Heo reset = NULL; 2599cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2600cf480626STejun Heo if (hardreset) { 2601cf480626STejun Heo reset = hardreset; 2602a674050eSTejun Heo ehc->i.action |= ATA_EH_HARDRESET; 26034f7faa3fSTejun Heo } else if (softreset) { 2604cf480626STejun Heo reset = softreset; 2605a674050eSTejun Heo ehc->i.action |= ATA_EH_SOFTRESET; 2606cf480626STejun Heo } 2607c6fd2807SJeff Garzik 2608c6fd2807SJeff Garzik if (prereset) { 2609b1c72916STejun Heo unsigned long deadline = ata_deadline(jiffies, 2610b1c72916STejun Heo ATA_EH_PRERESET_TIMEOUT); 2611b1c72916STejun Heo 2612b1c72916STejun Heo if (slave) { 2613b1c72916STejun Heo sehc->i.action &= ~ATA_EH_RESET; 2614b1c72916STejun Heo sehc->i.action |= ehc->i.action; 2615b1c72916STejun Heo } 2616b1c72916STejun Heo 2617b1c72916STejun Heo rc = prereset(link, deadline); 2618b1c72916STejun Heo 2619b1c72916STejun Heo /* If present, do prereset on slave link too. Reset 2620b1c72916STejun Heo * is skipped iff both master and slave links report 2621b1c72916STejun Heo * -ENOENT or clear ATA_EH_RESET. 2622b1c72916STejun Heo */ 2623b1c72916STejun Heo if (slave && (rc == 0 || rc == -ENOENT)) { 2624b1c72916STejun Heo int tmp; 2625b1c72916STejun Heo 2626b1c72916STejun Heo tmp = prereset(slave, deadline); 2627b1c72916STejun Heo if (tmp != -ENOENT) 2628b1c72916STejun Heo rc = tmp; 2629b1c72916STejun Heo 2630b1c72916STejun Heo ehc->i.action |= sehc->i.action; 2631b1c72916STejun Heo } 2632b1c72916STejun Heo 2633c6fd2807SJeff Garzik if (rc) { 2634c961922bSAlan Cox if (rc == -ENOENT) { 2635a9a79dfeSJoe Perches ata_link_dbg(link, "port disabled--ignoring\n"); 2636cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 26374aa9ab67STejun Heo 26381eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2639f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 26404aa9ab67STejun Heo 26414aa9ab67STejun Heo rc = 0; 2642c961922bSAlan Cox } else 2643a9a79dfeSJoe Perches ata_link_err(link, 2644a9a79dfeSJoe Perches "prereset failed (errno=%d)\n", 2645a9a79dfeSJoe Perches rc); 2646fccb6ea5STejun Heo goto out; 2647c6fd2807SJeff Garzik } 2648c6fd2807SJeff Garzik 2649932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2650d6515e6fSTejun Heo * bang classes, thaw and return. 2651932648b0STejun Heo */ 2652932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 26531eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 2654f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 26554cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap) && ata_is_host_link(link)) 2656d6515e6fSTejun Heo ata_eh_thaw_port(ap); 2657fccb6ea5STejun Heo rc = 0; 2658fccb6ea5STejun Heo goto out; 2659c6fd2807SJeff Garzik } 2660932648b0STejun Heo } 2661c6fd2807SJeff Garzik 2662c6fd2807SJeff Garzik retry: 2663932648b0STejun Heo /* 2664932648b0STejun Heo * Perform reset 2665932648b0STejun Heo */ 2666dc98c32cSTejun Heo if (ata_is_host_link(link)) 2667dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2668dc98c32cSTejun Heo 2669341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 267031daabdaSTejun Heo 2671932648b0STejun Heo if (reset) { 2672c6fd2807SJeff Garzik if (verbose) 2673a9a79dfeSJoe Perches ata_link_info(link, "%s resetting link\n", 2674c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2675c6fd2807SJeff Garzik 2676c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 267719b72321STejun Heo ehc->last_reset = jiffies; 2678f8ec26d0SHannes Reinecke if (reset == hardreset) { 26790d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 2680f8ec26d0SHannes Reinecke trace_ata_link_hardreset_begin(link, classes, deadline); 2681f8ec26d0SHannes Reinecke } else { 26820d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2683f8ec26d0SHannes Reinecke trace_ata_link_softreset_begin(link, classes, deadline); 2684f8ec26d0SHannes Reinecke } 2685c6fd2807SJeff Garzik 2686b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2687f8ec26d0SHannes Reinecke if (reset == hardreset) 2688f8ec26d0SHannes Reinecke trace_ata_link_hardreset_end(link, classes, rc); 2689f8ec26d0SHannes Reinecke else 2690f8ec26d0SHannes Reinecke trace_ata_link_softreset_end(link, classes, rc); 2691b1c72916STejun Heo if (rc && rc != -EAGAIN) { 2692b1c72916STejun Heo failed_link = link; 26935dbfc9cbSTejun Heo goto fail; 2694b1c72916STejun Heo } 2695c6fd2807SJeff Garzik 2696b1c72916STejun Heo /* hardreset slave link if existent */ 2697b1c72916STejun Heo if (slave && reset == hardreset) { 2698b1c72916STejun Heo int tmp; 2699b1c72916STejun Heo 2700b1c72916STejun Heo if (verbose) 2701a9a79dfeSJoe Perches ata_link_info(slave, "hard resetting link\n"); 2702b1c72916STejun Heo 2703b1c72916STejun Heo ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2704f8ec26d0SHannes Reinecke trace_ata_slave_hardreset_begin(slave, classes, 2705f8ec26d0SHannes Reinecke deadline); 2706b1c72916STejun Heo tmp = ata_do_reset(slave, reset, classes, deadline, 2707b1c72916STejun Heo false); 2708f8ec26d0SHannes Reinecke trace_ata_slave_hardreset_end(slave, classes, tmp); 2709b1c72916STejun Heo switch (tmp) { 2710b1c72916STejun Heo case -EAGAIN: 2711b1c72916STejun Heo rc = -EAGAIN; 2712e06abcc6SGustavo A. R. 
Silva break; 2713b1c72916STejun Heo case 0: 2714b1c72916STejun Heo break; 2715b1c72916STejun Heo default: 2716b1c72916STejun Heo failed_link = slave; 2717b1c72916STejun Heo rc = tmp; 2718b1c72916STejun Heo goto fail; 2719b1c72916STejun Heo } 2720b1c72916STejun Heo } 2721b1c72916STejun Heo 2722b1c72916STejun Heo /* perform follow-up SRST if necessary */ 2723c6fd2807SJeff Garzik if (reset == hardreset && 2724e8411fbaSSergei Shtylyov ata_eh_followup_srst_needed(link, rc)) { 2725c6fd2807SJeff Garzik reset = softreset; 2726c6fd2807SJeff Garzik 2727c6fd2807SJeff Garzik if (!reset) { 2728a9a79dfeSJoe Perches ata_link_err(link, 2729a9a79dfeSJoe Perches "follow-up softreset required but no softreset available\n"); 2730b1c72916STejun Heo failed_link = link; 2731fccb6ea5STejun Heo rc = -EINVAL; 273208cf69d0STejun Heo goto fail; 2733c6fd2807SJeff Garzik } 2734c6fd2807SJeff Garzik 2735cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2736f8ec26d0SHannes Reinecke trace_ata_link_softreset_begin(link, classes, deadline); 2737b1c72916STejun Heo rc = ata_do_reset(link, reset, classes, deadline, true); 2738f8ec26d0SHannes Reinecke trace_ata_link_softreset_end(link, classes, rc); 2739fe2c4d01STejun Heo if (rc) { 2740fe2c4d01STejun Heo failed_link = link; 2741fe2c4d01STejun Heo goto fail; 2742fe2c4d01STejun Heo } 2743c6fd2807SJeff Garzik } 2744932648b0STejun Heo } else { 2745932648b0STejun Heo if (verbose) 2746a9a79dfeSJoe Perches ata_link_info(link, 2747a9a79dfeSJoe Perches "no reset method available, skipping reset\n"); 2748932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2749932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2750932648b0STejun Heo } 2751008a7896STejun Heo 2752932648b0STejun Heo /* 2753932648b0STejun Heo * Post-reset processing 2754932648b0STejun Heo */ 27551eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 2756416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2757416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2758416dc9edSTejun Heo * drives from sleeping mode. 2759c6fd2807SJeff Garzik */ 2760f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2761054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2762c6fd2807SJeff Garzik 27633b761d3dSTejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 27643b761d3dSTejun Heo continue; 27653b761d3dSTejun Heo 27664ccd3329STejun Heo /* apply class override */ 2767416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2768ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2769416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2770816ab897STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2771ae791c05STejun Heo } 2772ae791c05STejun Heo 2773008a7896STejun Heo /* record current link speed */ 2774936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2775936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2776b1c72916STejun Heo if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2777b1c72916STejun Heo slave->sata_spd = (sstatus >> 4) & 0xf; 2778008a7896STejun Heo 2779dc98c32cSTejun Heo /* thaw the port */ 2780dc98c32cSTejun Heo if (ata_is_host_link(link)) 2781dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2782dc98c32cSTejun Heo 2783f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2784f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2785f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 
2786f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2787f046519fSTejun Heo * reset and here. This race is mediated by cross checking 2788f046519fSTejun Heo * link onlineness and classification result later. 2789f046519fSTejun Heo */ 2790b1c72916STejun Heo if (postreset) { 2791cc0680a5STejun Heo postreset(link, classes); 2792f8ec26d0SHannes Reinecke trace_ata_link_postreset(link, classes, rc); 2793f8ec26d0SHannes Reinecke if (slave) { 2794b1c72916STejun Heo postreset(slave, classes); 2795f8ec26d0SHannes Reinecke trace_ata_slave_postreset(slave, classes, rc); 2796f8ec26d0SHannes Reinecke } 2797b1c72916STejun Heo } 2798c6fd2807SJeff Garzik 27991e641060STejun Heo /* 28008c56caccSTejun Heo * Some controllers can't be frozen very well and may set spurious 28018c56caccSTejun Heo * error conditions during reset. Clear accumulated error 28028c56caccSTejun Heo * information and re-thaw the port if frozen. As reset is the 28038c56caccSTejun Heo * final recovery action and we cross check link onlineness against 28048c56caccSTejun Heo * device classification later, no hotplug event is lost by this. 28051e641060STejun Heo */ 2806f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 28071e641060STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 2808b1c72916STejun Heo if (slave) 28091e641060STejun Heo memset(&slave->eh_info, 0, sizeof(link->eh_info)); 28101e641060STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2811f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2812f046519fSTejun Heo 28134cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap)) 28148c56caccSTejun Heo ata_eh_thaw_port(ap); 28158c56caccSTejun Heo 28163b761d3dSTejun Heo /* 28173b761d3dSTejun Heo * Make sure onlineness and classification result correspond. 2818f046519fSTejun Heo * Hotplug could have happened during reset and some 2819f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2820f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 28213b761d3dSTejun Heo * link on/offlineness and classification result, those 28223b761d3dSTejun Heo * conditions can be reliably detected and retried. 
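 *
 * The cross check below boils down to:
 *
 *	link online  + class UNKNOWN  -> force class to NONE and count it;
 *					 enough of these yields -EAGAIN and
 *					 another reset attempt
 *	link offline + class enabled  -> clear class to NONE
 *	link unknown + class UNKNOWN  -> clear class to NONE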
2823f046519fSTejun Heo */ 2824b1c72916STejun Heo nr_unknown = 0; 28251eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 28263b761d3dSTejun Heo if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2827b1c72916STejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2828a9a79dfeSJoe Perches ata_dev_dbg(dev, "link online but device misclassified\n"); 2829f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2830b1c72916STejun Heo nr_unknown++; 2831b1c72916STejun Heo } 28323b761d3dSTejun Heo } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 28333b761d3dSTejun Heo if (ata_class_enabled(classes[dev->devno])) 2834a9a79dfeSJoe Perches ata_dev_dbg(dev, 2835a9a79dfeSJoe Perches "link offline, clearing class %d to NONE\n", 28363b761d3dSTejun Heo classes[dev->devno]); 28373b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28383b761d3dSTejun Heo } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2839a9a79dfeSJoe Perches ata_dev_dbg(dev, 2840a9a79dfeSJoe Perches "link status unknown, clearing UNKNOWN to NONE\n"); 28413b761d3dSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 28423b761d3dSTejun Heo } 2843f046519fSTejun Heo } 2844f046519fSTejun Heo 2845b1c72916STejun Heo if (classify && nr_unknown) { 2846f046519fSTejun Heo if (try < max_tries) { 2847a9a79dfeSJoe Perches ata_link_warn(link, 2848a9a79dfeSJoe Perches "link online but %d devices misclassified, retrying\n", 28493b761d3dSTejun Heo nr_unknown); 2850b1c72916STejun Heo failed_link = link; 2851f046519fSTejun Heo rc = -EAGAIN; 2852f046519fSTejun Heo goto fail; 2853f046519fSTejun Heo } 2854a9a79dfeSJoe Perches ata_link_warn(link, 28553b761d3dSTejun Heo "link online but %d devices misclassified, " 28563b761d3dSTejun Heo "device detection might fail\n", nr_unknown); 2857f046519fSTejun Heo } 2858f046519fSTejun Heo 2859c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2860cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2861b1c72916STejun Heo if (slave) 2862b1c72916STejun Heo ata_eh_done(slave, NULL, ATA_EH_RESET); 286319b72321STejun Heo ehc->last_reset = jiffies; /* update to completion time */ 2864c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 28656b7ae954STejun Heo link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2866416dc9edSTejun Heo 2867416dc9edSTejun Heo rc = 0; 2868fccb6ea5STejun Heo out: 2869fccb6ea5STejun Heo /* clear hotplug flag */ 2870fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2871b1c72916STejun Heo if (slave) 2872b1c72916STejun Heo sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2873afaa5c37STejun Heo 2874afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2875afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2876afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2877afaa5c37STejun Heo 2878c6fd2807SJeff Garzik return rc; 2879416dc9edSTejun Heo 2880416dc9edSTejun Heo fail: 28815958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 28825958e302STejun Heo if (!ata_is_host_link(link) && 28835958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 28845958e302STejun Heo rc = -ERESTART; 28855958e302STejun Heo 28867a46c078SGwendal Grignou if (try >= max_tries) { 28878ea7645cSTejun Heo /* 28888ea7645cSTejun Heo * Thaw host port even if reset failed, so that the port 28898ea7645cSTejun Heo * can be retried on the next phy event. This risks 28908ea7645cSTejun Heo * repeated EH runs but seems to be a better tradeoff than 28918ea7645cSTejun Heo * shutting down a port after a botched hotplug attempt. 
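 *
 * (Freezing a port typically leaves its interrupts masked, so staying
 * frozen here would also keep later phy events from ever reaching EH;
 * thawing trades a possibly futile extra EH pass for that.)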
28928ea7645cSTejun Heo */ 28938ea7645cSTejun Heo if (ata_is_host_link(link)) 28948ea7645cSTejun Heo ata_eh_thaw_port(ap); 2895416dc9edSTejun Heo goto out; 28968ea7645cSTejun Heo } 2897416dc9edSTejun Heo 2898416dc9edSTejun Heo now = jiffies; 2899416dc9edSTejun Heo if (time_before(now, deadline)) { 2900416dc9edSTejun Heo unsigned long delta = deadline - now; 2901416dc9edSTejun Heo 2902a9a79dfeSJoe Perches ata_link_warn(failed_link, 29030a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 29040a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2905416dc9edSTejun Heo 2906c0c362b6STejun Heo ata_eh_release(ap); 2907416dc9edSTejun Heo while (delta) 2908416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2909c0c362b6STejun Heo ata_eh_acquire(ap); 2910416dc9edSTejun Heo } 2911416dc9edSTejun Heo 29127a46c078SGwendal Grignou /* 29137a46c078SGwendal Grignou * While disks spinup behind PMP, some controllers fail sending SRST. 29147a46c078SGwendal Grignou * They need to be reset - as well as the PMP - before retrying. 29157a46c078SGwendal Grignou */ 29167a46c078SGwendal Grignou if (rc == -ERESTART) { 29177a46c078SGwendal Grignou if (ata_is_host_link(link)) 29187a46c078SGwendal Grignou ata_eh_thaw_port(ap); 29197a46c078SGwendal Grignou goto out; 29207a46c078SGwendal Grignou } 29217a46c078SGwendal Grignou 2922b1c72916STejun Heo if (try == max_tries - 1) { 2923a07d499bSTejun Heo sata_down_spd_limit(link, 0); 2924b1c72916STejun Heo if (slave) 2925a07d499bSTejun Heo sata_down_spd_limit(slave, 0); 2926b1c72916STejun Heo } else if (rc == -EPIPE) 2927a07d499bSTejun Heo sata_down_spd_limit(failed_link, 0); 2928b1c72916STejun Heo 2929416dc9edSTejun Heo if (hardreset) 2930416dc9edSTejun Heo reset = hardreset; 2931416dc9edSTejun Heo goto retry; 2932c6fd2807SJeff Garzik } 2933c6fd2807SJeff Garzik 293445fabbb7SElias Oltmanns static inline void ata_eh_pull_park_action(struct ata_port *ap) 293545fabbb7SElias Oltmanns { 293645fabbb7SElias Oltmanns struct ata_link *link; 293745fabbb7SElias Oltmanns struct ata_device *dev; 293845fabbb7SElias Oltmanns unsigned long flags; 293945fabbb7SElias Oltmanns 294045fabbb7SElias Oltmanns /* 294145fabbb7SElias Oltmanns * This function can be thought of as an extended version of 294245fabbb7SElias Oltmanns * ata_eh_about_to_do() specially crafted to accommodate the 294345fabbb7SElias Oltmanns * requirements of ATA_EH_PARK handling. Since the EH thread 294445fabbb7SElias Oltmanns * does not leave the do {} while () loop in ata_eh_recover as 294545fabbb7SElias Oltmanns * long as the timeout for a park request to *one* device on 294645fabbb7SElias Oltmanns * the port has not expired, and since we still want to pick 294745fabbb7SElias Oltmanns * up park requests to other devices on the same port or 294845fabbb7SElias Oltmanns * timeout updates for the same device, we have to pull 294945fabbb7SElias Oltmanns * ATA_EH_PARK actions from eh_info into eh_context.i 295045fabbb7SElias Oltmanns * ourselves at the beginning of each pass over the loop. 295145fabbb7SElias Oltmanns * 295245fabbb7SElias Oltmanns * Additionally, all write accesses to &ap->park_req_pending 295316735d02SWolfram Sang * through reinit_completion() (see below) or complete_all() 295445fabbb7SElias Oltmanns * (see ata_scsi_park_store()) are protected by the host lock. 295545fabbb7SElias Oltmanns * As a result we have that park_req_pending.done is zero on 295645fabbb7SElias Oltmanns * exit from this function, i.e. 
when ATA_EH_PARK actions for 295745fabbb7SElias Oltmanns * *all* devices on port ap have been pulled into the 295845fabbb7SElias Oltmanns * respective eh_context structs. If, and only if, 295945fabbb7SElias Oltmanns * park_req_pending.done is non-zero by the time we reach 296045fabbb7SElias Oltmanns * wait_for_completion_timeout(), another ATA_EH_PARK action 296145fabbb7SElias Oltmanns * has been scheduled for at least one of the devices on port 296245fabbb7SElias Oltmanns * ap and we have to cycle over the do {} while () loop in 296345fabbb7SElias Oltmanns * ata_eh_recover() again. 296445fabbb7SElias Oltmanns */ 296545fabbb7SElias Oltmanns 296645fabbb7SElias Oltmanns spin_lock_irqsave(ap->lock, flags); 296716735d02SWolfram Sang reinit_completion(&ap->park_req_pending); 29681eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 29691eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 297045fabbb7SElias Oltmanns struct ata_eh_info *ehi = &link->eh_info; 297145fabbb7SElias Oltmanns 297245fabbb7SElias Oltmanns link->eh_context.i.dev_action[dev->devno] |= 297345fabbb7SElias Oltmanns ehi->dev_action[dev->devno] & ATA_EH_PARK; 297445fabbb7SElias Oltmanns ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 297545fabbb7SElias Oltmanns } 297645fabbb7SElias Oltmanns } 297745fabbb7SElias Oltmanns spin_unlock_irqrestore(ap->lock, flags); 297845fabbb7SElias Oltmanns } 297945fabbb7SElias Oltmanns 298045fabbb7SElias Oltmanns static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 298145fabbb7SElias Oltmanns { 298245fabbb7SElias Oltmanns struct ata_eh_context *ehc = &dev->link->eh_context; 298345fabbb7SElias Oltmanns struct ata_taskfile tf; 298445fabbb7SElias Oltmanns unsigned int err_mask; 298545fabbb7SElias Oltmanns 298645fabbb7SElias Oltmanns ata_tf_init(dev, &tf); 298745fabbb7SElias Oltmanns if (park) { 298845fabbb7SElias Oltmanns ehc->unloaded_mask |= 1 << dev->devno; 298945fabbb7SElias Oltmanns tf.command = ATA_CMD_IDLEIMMEDIATE; 299045fabbb7SElias Oltmanns tf.feature = 0x44; 299145fabbb7SElias Oltmanns tf.lbal = 0x4c; 299245fabbb7SElias Oltmanns tf.lbam = 0x4e; 299345fabbb7SElias Oltmanns tf.lbah = 0x55; 299445fabbb7SElias Oltmanns } else { 299545fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 299645fabbb7SElias Oltmanns tf.command = ATA_CMD_CHK_POWER; 299745fabbb7SElias Oltmanns } 299845fabbb7SElias Oltmanns 299945fabbb7SElias Oltmanns tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 3000bd18bc04SHannes Reinecke tf.protocol = ATA_PROT_NODATA; 300145fabbb7SElias Oltmanns err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 300245fabbb7SElias Oltmanns if (park && (err_mask || tf.lbal != 0xc4)) { 3003a9a79dfeSJoe Perches ata_dev_err(dev, "head unload failed!\n"); 300445fabbb7SElias Oltmanns ehc->unloaded_mask &= ~(1 << dev->devno); 300545fabbb7SElias Oltmanns } 300645fabbb7SElias Oltmanns } 300745fabbb7SElias Oltmanns 30080260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 3009c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 3010c6fd2807SJeff Garzik { 30110260731fSTejun Heo struct ata_port *ap = link->ap; 30120260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3013c6fd2807SJeff Garzik struct ata_device *dev; 30148c3c52a8STejun Heo unsigned int new_mask = 0; 3015c6fd2807SJeff Garzik unsigned long flags; 3016f58229f8STejun Heo int rc = 0; 3017c6fd2807SJeff Garzik 30188c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 30198c3c52a8STejun Heo * be done backwards such that PDIAG- is 
released by the slave 30208c3c52a8STejun Heo * device before the master device is identified. 30218c3c52a8STejun Heo */ 30221eca4365STejun Heo ata_for_each_dev(dev, link, ALL_REVERSE) { 3023f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 3024f58229f8STejun Heo unsigned int readid_flags = 0; 3025c6fd2807SJeff Garzik 3026bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 3027bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 3028bff04647STejun Heo 30299666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 3030633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 3031633273a3STejun Heo 303271d7b6e5SNiklas Cassel /* 303371d7b6e5SNiklas Cassel * The link may be in a deep sleep, wake it up. 303471d7b6e5SNiklas Cassel * 303571d7b6e5SNiklas Cassel * If the link is in deep sleep, ata_phys_link_offline() 303671d7b6e5SNiklas Cassel * will return true, causing the revalidation to fail, 303771d7b6e5SNiklas Cassel * which leads to a (potentially) needless hard reset. 303871d7b6e5SNiklas Cassel * 303971d7b6e5SNiklas Cassel * ata_eh_recover() will later restore the link policy 304071d7b6e5SNiklas Cassel * to ap->target_lpm_policy after revalidation is done. 304171d7b6e5SNiklas Cassel */ 304271d7b6e5SNiklas Cassel if (link->lpm_policy > ATA_LPM_MAX_POWER) { 304371d7b6e5SNiklas Cassel rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER, 304471d7b6e5SNiklas Cassel r_failed_dev); 304571d7b6e5SNiklas Cassel if (rc) 304671d7b6e5SNiklas Cassel goto err; 304771d7b6e5SNiklas Cassel } 304871d7b6e5SNiklas Cassel 3049b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 3050c6fd2807SJeff Garzik rc = -EIO; 30518c3c52a8STejun Heo goto err; 3052c6fd2807SJeff Garzik } 3053c6fd2807SJeff Garzik 30540260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 3055422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 3056422c9daaSTejun Heo readid_flags); 3057c6fd2807SJeff Garzik if (rc) 30588c3c52a8STejun Heo goto err; 3059c6fd2807SJeff Garzik 30600260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 3061c6fd2807SJeff Garzik 3062baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 3063baa1e78aSTejun Heo * transfer mode. 3064baa1e78aSTejun Heo */ 3065baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3066baa1e78aSTejun Heo 3067c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 30686aa0365aSDamien Le Moal schedule_delayed_work(&ap->scsi_rescan_task, 0); 3069c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 3070c6fd2807SJeff Garzik ehc->tries[dev->devno] && 3071c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 3072842faa6cSTejun Heo /* Temporarily set dev->class, it will be 3073842faa6cSTejun Heo * permanently set once all configurations are 3074842faa6cSTejun Heo * complete. This is necessary because new 3075842faa6cSTejun Heo * device configuration is done in two 3076842faa6cSTejun Heo * separate loops. 
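 *
 * In other words: this first loop (walking devices in reverse) only
 * reads IDENTIFY data and records successes in new_mask; the second
 * loop further down walks forward, runs ata_dev_configure() on the
 * devices in new_mask and only then commits dev->class for good.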
3077842faa6cSTejun Heo */ 3078c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 3079c6fd2807SJeff Garzik 3080633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 3081633273a3STejun Heo rc = sata_pmp_attach(dev); 3082633273a3STejun Heo else 3083633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 3084633273a3STejun Heo readid_flags, dev->id); 3085842faa6cSTejun Heo 3086842faa6cSTejun Heo /* read_id might have changed class, store and reset */ 3087842faa6cSTejun Heo ehc->classes[dev->devno] = dev->class; 3088842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 3089842faa6cSTejun Heo 30908c3c52a8STejun Heo switch (rc) { 30918c3c52a8STejun Heo case 0: 309299cf610aSTejun Heo /* clear error info accumulated during probe */ 309399cf610aSTejun Heo ata_ering_clear(&dev->ering); 3094f58229f8STejun Heo new_mask |= 1 << dev->devno; 30958c3c52a8STejun Heo break; 30968c3c52a8STejun Heo case -ENOENT: 309755a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 309855a8e2c8STejun Heo * device. No need to reset. Just 3099842faa6cSTejun Heo * thaw and ignore the device. 310055a8e2c8STejun Heo */ 310155a8e2c8STejun Heo ata_eh_thaw_port(ap); 3102c6fd2807SJeff Garzik break; 31038c3c52a8STejun Heo default: 31048c3c52a8STejun Heo goto err; 31058c3c52a8STejun Heo } 31068c3c52a8STejun Heo } 3107c6fd2807SJeff Garzik } 3108c6fd2807SJeff Garzik 3109c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 311033267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 311133267325STejun Heo if (ap->ops->cable_detect) 3112c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 311333267325STejun Heo ata_force_cbl(ap); 311433267325STejun Heo } 3115c1c4e8d5STejun Heo 31168c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 31178c3c52a8STejun Heo * device detection messages backwards. 
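 *
 * Devices revalidated above or newly configured in this loop also raise
 * ATA_EHI_SETMODE in ehc->i.flags; the recovery loop responds to that
 * flag by calling ata_set_mode() (defined below), which reprograms
 * transfer modes and marks dubious ones via ATA_DFLAG_DUBIOUS_XFER.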
31188c3c52a8STejun Heo */ 31191eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 31204f7c2874STejun Heo if (!(new_mask & (1 << dev->devno))) 31218c3c52a8STejun Heo continue; 31228c3c52a8STejun Heo 3123842faa6cSTejun Heo dev->class = ehc->classes[dev->devno]; 3124842faa6cSTejun Heo 31254f7c2874STejun Heo if (dev->class == ATA_DEV_PMP) 31264f7c2874STejun Heo continue; 31274f7c2874STejun Heo 31288c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 31298c3c52a8STejun Heo rc = ata_dev_configure(dev); 31308c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3131842faa6cSTejun Heo if (rc) { 3132842faa6cSTejun Heo dev->class = ATA_DEV_UNKNOWN; 31338c3c52a8STejun Heo goto err; 3134842faa6cSTejun Heo } 31358c3c52a8STejun Heo 3136c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 3137c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3138c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 3139baa1e78aSTejun Heo 314055a8e2c8STejun Heo /* new device discovered, configure xfermode */ 3141baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 3142c6fd2807SJeff Garzik } 3143c6fd2807SJeff Garzik 31448c3c52a8STejun Heo return 0; 31458c3c52a8STejun Heo 31468c3c52a8STejun Heo err: 3147c6fd2807SJeff Garzik *r_failed_dev = dev; 3148c6fd2807SJeff Garzik return rc; 3149c6fd2807SJeff Garzik } 3150c6fd2807SJeff Garzik 31516f1d1e3aSTejun Heo /** 31526f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 31536f1d1e3aSTejun Heo * @link: link on which timings will be programmed 315498a1708dSMartin Olsson * @r_failed_dev: out parameter for failed device 31556f1d1e3aSTejun Heo * 31566f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 31576f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 31586f1d1e3aSTejun Heo * returned in @r_failed_dev. 31596f1d1e3aSTejun Heo * 31606f1d1e3aSTejun Heo * LOCKING: 31616f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 31626f1d1e3aSTejun Heo * 31636f1d1e3aSTejun Heo * RETURNS: 31646f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 31656f1d1e3aSTejun Heo */ 31666f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 31676f1d1e3aSTejun Heo { 31686f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 316900115e0fSTejun Heo struct ata_device *dev; 317000115e0fSTejun Heo int rc; 31716f1d1e3aSTejun Heo 317276326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 31731eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 317476326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 317576326ac1STejun Heo struct ata_ering_entry *ent; 317676326ac1STejun Heo 317776326ac1STejun Heo ent = ata_ering_top(&dev->ering); 317876326ac1STejun Heo if (ent) 317976326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 318076326ac1STejun Heo } 318176326ac1STejun Heo } 318276326ac1STejun Heo 31836f1d1e3aSTejun Heo /* has private set_mode? 
*/ 31846f1d1e3aSTejun Heo if (ap->ops->set_mode) 318500115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 318600115e0fSTejun Heo else 318700115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 318800115e0fSTejun Heo 318900115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 31901eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) { 319100115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 319200115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 319300115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 319400115e0fSTejun Heo 319500115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 319600115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 319700115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 319800115e0fSTejun Heo } 319900115e0fSTejun Heo 320000115e0fSTejun Heo return rc; 32016f1d1e3aSTejun Heo } 32026f1d1e3aSTejun Heo 320311fc33daSTejun Heo /** 320411fc33daSTejun Heo * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 320511fc33daSTejun Heo * @dev: ATAPI device to clear UA for 320611fc33daSTejun Heo * 320711fc33daSTejun Heo * Resets and other operations can make an ATAPI device raise 320811fc33daSTejun Heo * UNIT ATTENTION which causes the next operation to fail. This 320911fc33daSTejun Heo * function clears UA. 321011fc33daSTejun Heo * 321111fc33daSTejun Heo * LOCKING: 321211fc33daSTejun Heo * EH context (may sleep). 321311fc33daSTejun Heo * 321411fc33daSTejun Heo * RETURNS: 321511fc33daSTejun Heo * 0 on success, -errno on failure. 321611fc33daSTejun Heo */ 321711fc33daSTejun Heo static int atapi_eh_clear_ua(struct ata_device *dev) 321811fc33daSTejun Heo { 321911fc33daSTejun Heo int i; 322011fc33daSTejun Heo 322111fc33daSTejun Heo for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3222b5357081STejun Heo u8 *sense_buffer = dev->link->ap->sector_buf; 322311fc33daSTejun Heo u8 sense_key = 0; 322411fc33daSTejun Heo unsigned int err_mask; 322511fc33daSTejun Heo 322611fc33daSTejun Heo err_mask = atapi_eh_tur(dev, &sense_key); 322711fc33daSTejun Heo if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3228a9a79dfeSJoe Perches ata_dev_warn(dev, 3229a9a79dfeSJoe Perches "TEST_UNIT_READY failed (err_mask=0x%x)\n", 3230a9a79dfeSJoe Perches err_mask); 323111fc33daSTejun Heo return -EIO; 323211fc33daSTejun Heo } 323311fc33daSTejun Heo 323411fc33daSTejun Heo if (!err_mask || sense_key != UNIT_ATTENTION) 323511fc33daSTejun Heo return 0; 323611fc33daSTejun Heo 323711fc33daSTejun Heo err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 323811fc33daSTejun Heo if (err_mask) { 3239a9a79dfeSJoe Perches ata_dev_warn(dev, "failed to clear " 324011fc33daSTejun Heo "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 324111fc33daSTejun Heo return -EIO; 324211fc33daSTejun Heo } 324311fc33daSTejun Heo } 324411fc33daSTejun Heo 3245a9a79dfeSJoe Perches ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n", 3246a9a79dfeSJoe Perches ATA_EH_UA_TRIES); 324711fc33daSTejun Heo 324811fc33daSTejun Heo return 0; 324911fc33daSTejun Heo } 325011fc33daSTejun Heo 32516013efd8STejun Heo /** 32526013efd8STejun Heo * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 32536013efd8STejun Heo * @dev: ATA device which may need FLUSH retry 32546013efd8STejun Heo * 32556013efd8STejun Heo * If @dev failed FLUSH, it needs to be reported upper layer 32566013efd8STejun Heo * immediately as it means that @dev failed to remap and already 32576013efd8STejun Heo * lost at least a sector and further FLUSH retrials won't 
make 32586013efd8STejun Heo * any difference to the lost sector. However, if FLUSH failed 32596013efd8STejun Heo * for other reasons, for example transmission error, FLUSH needs 32606013efd8STejun Heo * to be retried. 32616013efd8STejun Heo * 32626013efd8STejun Heo * This function determines whether FLUSH failure retry is 32636013efd8STejun Heo * necessary and performs it if so. 32646013efd8STejun Heo * 32656013efd8STejun Heo * RETURNS: 32666013efd8STejun Heo * 0 if EH can continue, -errno if EH needs to be repeated. 32676013efd8STejun Heo */ 32686013efd8STejun Heo static int ata_eh_maybe_retry_flush(struct ata_device *dev) 32696013efd8STejun Heo { 32706013efd8STejun Heo struct ata_link *link = dev->link; 32716013efd8STejun Heo struct ata_port *ap = link->ap; 32726013efd8STejun Heo struct ata_queued_cmd *qc; 32736013efd8STejun Heo struct ata_taskfile tf; 32746013efd8STejun Heo unsigned int err_mask; 32756013efd8STejun Heo int rc = 0; 32766013efd8STejun Heo 32776013efd8STejun Heo /* did flush fail for this device? */ 32786013efd8STejun Heo if (!ata_tag_valid(link->active_tag)) 32796013efd8STejun Heo return 0; 32806013efd8STejun Heo 32816013efd8STejun Heo qc = __ata_qc_from_tag(ap, link->active_tag); 32826013efd8STejun Heo if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 32836013efd8STejun Heo qc->tf.command != ATA_CMD_FLUSH)) 32846013efd8STejun Heo return 0; 32856013efd8STejun Heo 32866013efd8STejun Heo /* if the device failed it, it should be reported to upper layers */ 32876013efd8STejun Heo if (qc->err_mask & AC_ERR_DEV) 32886013efd8STejun Heo return 0; 32896013efd8STejun Heo 32906013efd8STejun Heo /* flush failed for some other reason, give it another shot */ 32916013efd8STejun Heo ata_tf_init(dev, &tf); 32926013efd8STejun Heo 32936013efd8STejun Heo tf.command = qc->tf.command; 32946013efd8STejun Heo tf.flags |= ATA_TFLAG_DEVICE; 32956013efd8STejun Heo tf.protocol = ATA_PROT_NODATA; 32966013efd8STejun Heo 3297a9a79dfeSJoe Perches ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n", 32986013efd8STejun Heo tf.command, qc->err_mask); 32996013efd8STejun Heo 33006013efd8STejun Heo err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 33016013efd8STejun Heo if (!err_mask) { 33026013efd8STejun Heo /* 33036013efd8STejun Heo * FLUSH is complete but there's no way to 33046013efd8STejun Heo * successfully complete a failed command from EH. 33056013efd8STejun Heo * Making sure retry is allowed at least once and 33066013efd8STejun Heo * retrying it should do the trick - whatever was in 33076013efd8STejun Heo * the cache is already on the platter and this won't 33086013efd8STejun Heo * cause infinite loop. 
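 *
 * (scsicmd->allowed is the SCSI midlayer's retry budget for the
 * command; guaranteeing at least one remaining retry lets the now
 * flushed-out command be reissued instead of being failed upward.)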
33096013efd8STejun Heo */ 33106013efd8STejun Heo qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 33116013efd8STejun Heo } else { 3312a9a79dfeSJoe Perches ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n", 33136013efd8STejun Heo err_mask); 33146013efd8STejun Heo rc = -EIO; 33156013efd8STejun Heo 33166013efd8STejun Heo /* if device failed it, report it to upper layers */ 33176013efd8STejun Heo if (err_mask & AC_ERR_DEV) { 33186013efd8STejun Heo qc->err_mask |= AC_ERR_DEV; 33196013efd8STejun Heo qc->result_tf = tf; 33204cb7c6f1SNiklas Cassel if (!ata_port_is_frozen(ap)) 33216013efd8STejun Heo rc = 0; 33226013efd8STejun Heo } 33236013efd8STejun Heo } 33246013efd8STejun Heo return rc; 33256013efd8STejun Heo } 33266013efd8STejun Heo 33276b7ae954STejun Heo /** 33286b7ae954STejun Heo * ata_eh_set_lpm - configure SATA interface power management 33296b7ae954STejun Heo * @link: link to configure power management 33306b7ae954STejun Heo * @policy: the link power management policy 33316b7ae954STejun Heo * @r_failed_dev: out parameter for failed device 33326b7ae954STejun Heo * 33336b7ae954STejun Heo * Enable SATA Interface power management. This will enable 3334f4ac6476SHans de Goede * Device Interface Power Management (DIPM) for min_power and 3335f4ac6476SHans de Goede * medium_power_with_dipm policies, and then call driver specific 3336f4ac6476SHans de Goede * callbacks for enabling Host Initiated Power management. 33376b7ae954STejun Heo * 33386b7ae954STejun Heo * LOCKING: 33396b7ae954STejun Heo * EH context. 33406b7ae954STejun Heo * 33416b7ae954STejun Heo * RETURNS: 33426b7ae954STejun Heo * 0 on success, -errno on failure. 33436b7ae954STejun Heo */ 33446b7ae954STejun Heo static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 33456b7ae954STejun Heo struct ata_device **r_failed_dev) 33466b7ae954STejun Heo { 33476c8ea89cSTejun Heo struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 33486b7ae954STejun Heo struct ata_eh_context *ehc = &link->eh_context; 33496b7ae954STejun Heo struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3350e5005b15STejun Heo enum ata_lpm_policy old_policy = link->lpm_policy; 33515f6f12ccSTejun Heo bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; 33526b7ae954STejun Heo unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 33536b7ae954STejun Heo unsigned int err_mask; 33546b7ae954STejun Heo int rc; 33556b7ae954STejun Heo 33566b7ae954STejun Heo /* if the link or host doesn't do LPM, noop */ 33574c9029e7SBartlomiej Zolnierkiewicz if (!IS_ENABLED(CONFIG_SATA_HOST) || 33584c9029e7SBartlomiej Zolnierkiewicz (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 33596b7ae954STejun Heo return 0; 33606b7ae954STejun Heo 33616b7ae954STejun Heo /* 33626b7ae954STejun Heo * DIPM is enabled only for MIN_POWER as some devices 33636b7ae954STejun Heo * misbehave when the host NACKs transition to SLUMBER. Order 33646b7ae954STejun Heo * device and link configurations such that the host always 33656b7ae954STejun Heo * allows DIPM requests. 
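 *
 * Concretely: when moving to a policy below ATA_LPM_MED_POWER_WITH_DIPM
 * this loop disables DIPM on the devices *before* ->set_lpm() touches
 * the host side; when moving to ATA_LPM_MED_POWER_WITH_DIPM or
 * MIN_POWER, DIPM is only enabled further down, *after* the host side
 * has accepted the new policy.  Either way the host is never left
 * rejecting a transition the device is still allowed to request.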
33666b7ae954STejun Heo */ 33676b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 33686b7ae954STejun Heo bool hipm = ata_id_has_hipm(dev->id); 3369ae01b249STejun Heo bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; 33706b7ae954STejun Heo 33716b7ae954STejun Heo /* find the first enabled and LPM enabled devices */ 33726b7ae954STejun Heo if (!link_dev) 33736b7ae954STejun Heo link_dev = dev; 33746b7ae954STejun Heo 33756b7ae954STejun Heo if (!lpm_dev && (hipm || dipm)) 33766b7ae954STejun Heo lpm_dev = dev; 33776b7ae954STejun Heo 33786b7ae954STejun Heo hints &= ~ATA_LPM_EMPTY; 33796b7ae954STejun Heo if (!hipm) 33806b7ae954STejun Heo hints &= ~ATA_LPM_HIPM; 33816b7ae954STejun Heo 33826b7ae954STejun Heo /* disable DIPM before changing link config */ 3383f4ac6476SHans de Goede if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) { 33846b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 33856b7ae954STejun Heo SETFEATURES_SATA_DISABLE, SATA_DIPM); 33866b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3387a9a79dfeSJoe Perches ata_dev_warn(dev, 33886b7ae954STejun Heo "failed to disable DIPM, Emask 0x%x\n", 33896b7ae954STejun Heo err_mask); 33906b7ae954STejun Heo rc = -EIO; 33916b7ae954STejun Heo goto fail; 33926b7ae954STejun Heo } 33936b7ae954STejun Heo } 33946b7ae954STejun Heo } 33956b7ae954STejun Heo 33966c8ea89cSTejun Heo if (ap) { 33976b7ae954STejun Heo rc = ap->ops->set_lpm(link, policy, hints); 33986b7ae954STejun Heo if (!rc && ap->slave_link) 33996b7ae954STejun Heo rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 34006c8ea89cSTejun Heo } else 34016c8ea89cSTejun Heo rc = sata_pmp_set_lpm(link, policy, hints); 34026b7ae954STejun Heo 34036b7ae954STejun Heo /* 34046b7ae954STejun Heo * Attribute link config failure to the first (LPM) enabled 34056b7ae954STejun Heo * device on the link. 34066b7ae954STejun Heo */ 34076b7ae954STejun Heo if (rc) { 34086b7ae954STejun Heo if (rc == -EOPNOTSUPP) { 34096b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 34106b7ae954STejun Heo return 0; 34116b7ae954STejun Heo } 34126b7ae954STejun Heo dev = lpm_dev ? lpm_dev : link_dev; 34136b7ae954STejun Heo goto fail; 34146b7ae954STejun Heo } 34156b7ae954STejun Heo 3416e5005b15STejun Heo /* 3417e5005b15STejun Heo * Low level driver acked the transition. Issue DIPM command 3418e5005b15STejun Heo * with the new policy set. 
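 *
 * link->lpm_policy is committed before the SET FEATURES commands below
 * so that the link already reflects the new policy while they run; the
 * fail: path further down rolls this back to old_policy if enabling
 * DIPM on any device fails.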
3419e5005b15STejun Heo */ 3420e5005b15STejun Heo link->lpm_policy = policy; 3421e5005b15STejun Heo if (ap && ap->slave_link) 3422e5005b15STejun Heo ap->slave_link->lpm_policy = policy; 3423e5005b15STejun Heo 34246b7ae954STejun Heo /* host config updated, enable DIPM if transitioning to MIN_POWER */ 34256b7ae954STejun Heo ata_for_each_dev(dev, link, ENABLED) { 3426f4ac6476SHans de Goede if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm && 3427ae01b249STejun Heo ata_id_has_dipm(dev->id)) { 34286b7ae954STejun Heo err_mask = ata_dev_set_feature(dev, 34296b7ae954STejun Heo SETFEATURES_SATA_ENABLE, SATA_DIPM); 34306b7ae954STejun Heo if (err_mask && err_mask != AC_ERR_DEV) { 3431a9a79dfeSJoe Perches ata_dev_warn(dev, 34326b7ae954STejun Heo "failed to enable DIPM, Emask 0x%x\n", 34336b7ae954STejun Heo err_mask); 34346b7ae954STejun Heo rc = -EIO; 34356b7ae954STejun Heo goto fail; 34366b7ae954STejun Heo } 34376b7ae954STejun Heo } 34386b7ae954STejun Heo } 34396b7ae954STejun Heo 344009c5b480SGabriele Mazzotta link->last_lpm_change = jiffies; 344109c5b480SGabriele Mazzotta link->flags |= ATA_LFLAG_CHANGED; 344209c5b480SGabriele Mazzotta 34436b7ae954STejun Heo return 0; 34446b7ae954STejun Heo 34456b7ae954STejun Heo fail: 3446e5005b15STejun Heo /* restore the old policy */ 3447e5005b15STejun Heo link->lpm_policy = old_policy; 3448e5005b15STejun Heo if (ap && ap->slave_link) 3449e5005b15STejun Heo ap->slave_link->lpm_policy = old_policy; 3450e5005b15STejun Heo 34516b7ae954STejun Heo /* if no device or only one more chance is left, disable LPM */ 34526b7ae954STejun Heo if (!dev || ehc->tries[dev->devno] <= 2) { 3453a9a79dfeSJoe Perches ata_link_warn(link, "disabling LPM on the link\n"); 34546b7ae954STejun Heo link->flags |= ATA_LFLAG_NO_LPM; 34556b7ae954STejun Heo } 34566b7ae954STejun Heo if (r_failed_dev) 34576b7ae954STejun Heo *r_failed_dev = dev; 34586b7ae954STejun Heo return rc; 34596b7ae954STejun Heo } 34606b7ae954STejun Heo 34618a745f1fSKristen Carlson Accardi int ata_link_nr_enabled(struct ata_link *link) 3462c6fd2807SJeff Garzik { 3463f58229f8STejun Heo struct ata_device *dev; 3464f58229f8STejun Heo int cnt = 0; 3465c6fd2807SJeff Garzik 34661eca4365STejun Heo ata_for_each_dev(dev, link, ENABLED) 3467c6fd2807SJeff Garzik cnt++; 3468c6fd2807SJeff Garzik return cnt; 3469c6fd2807SJeff Garzik } 3470c6fd2807SJeff Garzik 34710260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 3472c6fd2807SJeff Garzik { 3473f58229f8STejun Heo struct ata_device *dev; 3474f58229f8STejun Heo int cnt = 0; 3475c6fd2807SJeff Garzik 34761eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3477f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 3478c6fd2807SJeff Garzik cnt++; 3479c6fd2807SJeff Garzik return cnt; 3480c6fd2807SJeff Garzik } 3481c6fd2807SJeff Garzik 34820260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 3483c6fd2807SJeff Garzik { 3484672b2d65STejun Heo struct ata_port *ap = link->ap; 34850260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 3486f58229f8STejun Heo struct ata_device *dev; 3487c6fd2807SJeff Garzik 3488f9df58cbSTejun Heo /* skip disabled links */ 3489f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 3490f9df58cbSTejun Heo return 1; 3491f9df58cbSTejun Heo 3492e2f3d75fSTejun Heo /* skip if explicitly requested */ 3493e2f3d75fSTejun Heo if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3494e2f3d75fSTejun Heo return 1; 3495e2f3d75fSTejun Heo 3496672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 34974cb7c6f1SNiklas Cassel if 
	    (ata_port_is_frozen(ap) || ata_link_nr_enabled(link))
3498672b2d65STejun Heo 		return 0;
3499672b2d65STejun Heo 
3500672b2d65STejun Heo 	/* reset at least once if reset is requested */
3501672b2d65STejun Heo 	if ((ehc->i.action & ATA_EH_RESET) &&
3502672b2d65STejun Heo 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3503c6fd2807SJeff Garzik 		return 0;
3504c6fd2807SJeff Garzik 
3505c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
35061eca4365STejun Heo 	ata_for_each_dev(dev, link, ALL) {
3507c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
3508c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3509c6fd2807SJeff Garzik 			return 0;
3510c6fd2807SJeff Garzik 	}
3511c6fd2807SJeff Garzik 
3512c6fd2807SJeff Garzik 	return 1;
3513c6fd2807SJeff Garzik }
3514c6fd2807SJeff Garzik 
3515c2c7a89cSTejun Heo static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3516c2c7a89cSTejun Heo {
3517c2c7a89cSTejun Heo 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3518c2c7a89cSTejun Heo 	u64 now = get_jiffies_64();
3519c2c7a89cSTejun Heo 	int *trials = void_arg;
3520c2c7a89cSTejun Heo 
35216868225eSLin Ming 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
35226868225eSLin Ming 	    (ent->timestamp < now - min(now, interval)))
3523c2c7a89cSTejun Heo 		return -1;
3524c2c7a89cSTejun Heo 
3525c2c7a89cSTejun Heo 	(*trials)++;
3526c2c7a89cSTejun Heo 	return 0;
3527c2c7a89cSTejun Heo }
3528c2c7a89cSTejun Heo 
352902c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev)
353002c05a27STejun Heo {
353102c05a27STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3532c2c7a89cSTejun Heo 	struct ata_link *link = ata_dev_phys_link(dev);
3533c2c7a89cSTejun Heo 	int trials = 0;
353402c05a27STejun Heo 
353502c05a27STejun Heo 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
353602c05a27STejun Heo 	    (ehc->did_probe_mask & (1 << dev->devno)))
353702c05a27STejun Heo 		return 0;
353802c05a27STejun Heo 
353902c05a27STejun Heo 	ata_eh_detach_dev(dev);
354002c05a27STejun Heo 	ata_dev_init(dev);
354102c05a27STejun Heo 	ehc->did_probe_mask |= (1 << dev->devno);
3542cf480626STejun Heo 	ehc->i.action |= ATA_EH_RESET;
354300115e0fSTejun Heo 	ehc->saved_xfer_mode[dev->devno] = 0;
354400115e0fSTejun Heo 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
354502c05a27STejun Heo 
35466b7ae954STejun Heo 	/* the link may be in a deep sleep, wake it up */
35476c8ea89cSTejun Heo 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
35486c8ea89cSTejun Heo 		if (ata_is_host_link(link))
35496c8ea89cSTejun Heo 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
35506c8ea89cSTejun Heo 					       ATA_LPM_EMPTY);
35516c8ea89cSTejun Heo 		else
35526c8ea89cSTejun Heo 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
35536c8ea89cSTejun Heo 					 ATA_LPM_EMPTY);
35546c8ea89cSTejun Heo 	}
35556b7ae954STejun Heo 
3556c2c7a89cSTejun Heo 	/* Record and count probe trials on the ering. The specific
3557c2c7a89cSTejun Heo 	 * error mask used is irrelevant. Because a successful device
3558c2c7a89cSTejun Heo 	 * detection clears the ering, this count accumulates only if
3559c2c7a89cSTejun Heo 	 * there are consecutive failed probes.
3560c2c7a89cSTejun Heo 	 *
3561c2c7a89cSTejun Heo 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3562c2c7a89cSTejun Heo 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3563c2c7a89cSTejun Heo 	 * forced to 1.5Gbps.
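	 * (ata_ering_record() and ata_ering_map() below perform the recording
	 * and counting; sata_down_spd_limit() applies the 1.5Gbps cap once the
	 * ATA_EH_PROBE_TRIALS check fires.)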
3564c2c7a89cSTejun Heo 	 *
3565c2c7a89cSTejun Heo 	 * This is to work around cases where failed link speed
3566c2c7a89cSTejun Heo 	 * negotiation results in device misdetection leading to
3567c2c7a89cSTejun Heo 	 * infinite DEVXCHG or PHRDY CHG events.
3568c2c7a89cSTejun Heo 	 */
3569c2c7a89cSTejun Heo 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3570c2c7a89cSTejun Heo 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3571c2c7a89cSTejun Heo 
3572c2c7a89cSTejun Heo 	if (trials > ATA_EH_PROBE_TRIALS)
3573c2c7a89cSTejun Heo 		sata_down_spd_limit(link, 1);
3574c2c7a89cSTejun Heo 
357502c05a27STejun Heo 	return 1;
357602c05a27STejun Heo }
357702c05a27STejun Heo 
35789b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3579fee7ca72STejun Heo {
35809af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3581fee7ca72STejun Heo 
3582cf9a590aSTejun Heo 	/* -EAGAIN from EH routine indicates retry without prejudice.
3583cf9a590aSTejun Heo 	 * The requester is responsible for ensuring forward progress.
3584cf9a590aSTejun Heo 	 */
3585cf9a590aSTejun Heo 	if (err != -EAGAIN)
3586fee7ca72STejun Heo 		ehc->tries[dev->devno]--;
3587fee7ca72STejun Heo 
3588fee7ca72STejun Heo 	switch (err) {
3589fee7ca72STejun Heo 	case -ENODEV:
3590fee7ca72STejun Heo 		/* device missing or wrong IDENTIFY data, schedule probing */
3591fee7ca72STejun Heo 		ehc->i.probe_mask |= (1 << dev->devno);
3592df561f66SGustavo A. R. Silva 		fallthrough;
3593fee7ca72STejun Heo 	case -EINVAL:
3594fee7ca72STejun Heo 		/* give it just one more chance */
3595fee7ca72STejun Heo 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3596df561f66SGustavo A. R. Silva 		fallthrough;
3597fee7ca72STejun Heo 	case -EIO:
3598d89293abSTejun Heo 		if (ehc->tries[dev->devno] == 1) {
3599fee7ca72STejun Heo 			/* This is the last chance, better to slow
3600fee7ca72STejun Heo 			 * down than lose it.
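			 * (Both knobs are turned below: sata_down_spd_limit()
			 * lowers the SATA link speed, and
			 * ata_down_xfermask_limit() further restricts the
			 * device transfer mode.)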
3601fee7ca72STejun Heo */ 3602a07d499bSTejun Heo sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3603d89293abSTejun Heo if (dev->pio_mode > XFER_PIO_0) 3604fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3605fee7ca72STejun Heo } 3606fee7ca72STejun Heo } 3607fee7ca72STejun Heo 3608fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3609fee7ca72STejun Heo /* disable device if it has used up all its chances */ 3610fee7ca72STejun Heo ata_dev_disable(dev); 3611fee7ca72STejun Heo 3612fee7ca72STejun Heo /* detach if offline */ 3613b1c72916STejun Heo if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3614fee7ca72STejun Heo ata_eh_detach_dev(dev); 3615fee7ca72STejun Heo 361602c05a27STejun Heo /* schedule probe if necessary */ 361787fbc5a0STejun Heo if (ata_eh_schedule_probe(dev)) { 3618fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 361987fbc5a0STejun Heo memset(ehc->cmd_timeout_idx[dev->devno], 0, 362087fbc5a0STejun Heo sizeof(ehc->cmd_timeout_idx[dev->devno])); 362187fbc5a0STejun Heo } 36229b1e2658STejun Heo 36239b1e2658STejun Heo return 1; 3624fee7ca72STejun Heo } else { 3625cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 36269b1e2658STejun Heo return 0; 3627fee7ca72STejun Heo } 3628fee7ca72STejun Heo } 3629fee7ca72STejun Heo 3630c6fd2807SJeff Garzik /** 3631c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 3632c6fd2807SJeff Garzik * @ap: host port to recover 3633c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3634c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3635c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3636c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 36379b1e2658STejun Heo * @r_failed_link: out parameter for failed link 3638c6fd2807SJeff Garzik * 3639c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 3640c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 36419b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 36429b1e2658STejun Heo * link's eh_context. This function executes all the operations 36439b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 3644c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 3645c6fd2807SJeff Garzik * 3646c6fd2807SJeff Garzik * LOCKING: 3647c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3648c6fd2807SJeff Garzik * 3649c6fd2807SJeff Garzik * RETURNS: 3650c6fd2807SJeff Garzik * 0 on success, -errno on failure. 3651c6fd2807SJeff Garzik */ 3652fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3653c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 36549b1e2658STejun Heo ata_postreset_fn_t postreset, 36559b1e2658STejun Heo struct ata_link **r_failed_link) 3656c6fd2807SJeff Garzik { 36579b1e2658STejun Heo struct ata_link *link; 3658c6fd2807SJeff Garzik struct ata_device *dev; 36596b7ae954STejun Heo int rc, nr_fails; 366045fabbb7SElias Oltmanns unsigned long flags, deadline; 3661c6fd2807SJeff Garzik 3662c6fd2807SJeff Garzik /* prep for recovery */ 36631eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 36649b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 36659b1e2658STejun Heo 3666f9df58cbSTejun Heo /* re-enable link? 
*/ 3667f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3668f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3669f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 3670f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 3671f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 3672f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3673f9df58cbSTejun Heo } 3674f9df58cbSTejun Heo 36751eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 3676fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 3677fd995f70STejun Heo ehc->tries[dev->devno] = 1; 3678fd995f70STejun Heo else 3679c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3680c6fd2807SJeff Garzik 368179a55b72STejun Heo /* collect port action mask recorded in dev actions */ 36829b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 36839b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 3684f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 368579a55b72STejun Heo 3686c6fd2807SJeff Garzik /* process hotplug request */ 3687c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 3688c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 3689c6fd2807SJeff Garzik 369002c05a27STejun Heo /* schedule probe if necessary */ 369102c05a27STejun Heo if (!ata_dev_enabled(dev)) 369202c05a27STejun Heo ata_eh_schedule_probe(dev); 3693c6fd2807SJeff Garzik } 36949b1e2658STejun Heo } 3695c6fd2807SJeff Garzik 3696c6fd2807SJeff Garzik retry: 3697c6fd2807SJeff Garzik rc = 0; 3698c6fd2807SJeff Garzik 3699c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 3700c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 3701c6fd2807SJeff Garzik goto out; 3702c6fd2807SJeff Garzik 37039b1e2658STejun Heo /* prep for EH */ 37041eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37059b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37069b1e2658STejun Heo 3707c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 37080260731fSTejun Heo if (ata_eh_skip_recovery(link)) 3709c6fd2807SJeff Garzik ehc->i.action = 0; 3710c6fd2807SJeff Garzik 37111eca4365STejun Heo ata_for_each_dev(dev, link, ALL) 3712f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 37139b1e2658STejun Heo } 3714c6fd2807SJeff Garzik 3715c6fd2807SJeff Garzik /* reset */ 37161eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37179b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37189b1e2658STejun Heo 3719cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 37209b1e2658STejun Heo continue; 37219b1e2658STejun Heo 37229b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3723dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 3724c6fd2807SJeff Garzik if (rc) { 3725a9a79dfeSJoe Perches ata_link_err(link, "reset failed, giving up\n"); 3726c6fd2807SJeff Garzik goto out; 3727c6fd2807SJeff Garzik } 37289b1e2658STejun Heo } 3729c6fd2807SJeff Garzik 373045fabbb7SElias Oltmanns do { 373145fabbb7SElias Oltmanns unsigned long now; 373245fabbb7SElias Oltmanns 373345fabbb7SElias Oltmanns /* 373445fabbb7SElias Oltmanns * clears ATA_EH_PARK in eh_info and resets 373545fabbb7SElias Oltmanns * ap->park_req_pending 373645fabbb7SElias Oltmanns */ 373745fabbb7SElias Oltmanns ata_eh_pull_park_action(ap); 373845fabbb7SElias Oltmanns 373945fabbb7SElias Oltmanns deadline = jiffies; 37401eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37411eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 374245fabbb7SElias Oltmanns struct ata_eh_context *ehc = &link->eh_context; 374345fabbb7SElias Oltmanns unsigned long tmp; 374445fabbb7SElias Oltmanns 37459162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 37469162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 374745fabbb7SElias Oltmanns continue; 374845fabbb7SElias Oltmanns if (!(ehc->i.dev_action[dev->devno] & 374945fabbb7SElias Oltmanns ATA_EH_PARK)) 375045fabbb7SElias Oltmanns continue; 375145fabbb7SElias Oltmanns tmp = dev->unpark_deadline; 375245fabbb7SElias Oltmanns if (time_before(deadline, tmp)) 375345fabbb7SElias Oltmanns deadline = tmp; 375445fabbb7SElias Oltmanns else if (time_before_eq(tmp, jiffies)) 375545fabbb7SElias Oltmanns continue; 375645fabbb7SElias Oltmanns if (ehc->unloaded_mask & (1 << dev->devno)) 375745fabbb7SElias Oltmanns continue; 375845fabbb7SElias Oltmanns 375945fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 1); 376045fabbb7SElias Oltmanns } 376145fabbb7SElias Oltmanns } 376245fabbb7SElias Oltmanns 376345fabbb7SElias Oltmanns now = jiffies; 376445fabbb7SElias Oltmanns if (time_before_eq(deadline, now)) 376545fabbb7SElias Oltmanns break; 376645fabbb7SElias Oltmanns 3767c0c362b6STejun Heo ata_eh_release(ap); 376845fabbb7SElias Oltmanns deadline = wait_for_completion_timeout(&ap->park_req_pending, 376945fabbb7SElias Oltmanns deadline - now); 3770c0c362b6STejun Heo ata_eh_acquire(ap); 377145fabbb7SElias Oltmanns } while (deadline); 37721eca4365STejun Heo ata_for_each_link(link, ap, EDGE) { 37731eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 377445fabbb7SElias Oltmanns if (!(link->eh_context.unloaded_mask & 377545fabbb7SElias Oltmanns (1 << dev->devno))) 377645fabbb7SElias Oltmanns continue; 377745fabbb7SElias Oltmanns 377845fabbb7SElias Oltmanns ata_eh_park_issue_cmd(dev, 0); 377945fabbb7SElias Oltmanns ata_eh_done(link, dev, ATA_EH_PARK); 378045fabbb7SElias Oltmanns } 378145fabbb7SElias Oltmanns } 378245fabbb7SElias Oltmanns 37839b1e2658STejun Heo /* the rest */ 37846b7ae954STejun Heo nr_fails = 0; 37856b7ae954STejun 
Heo ata_for_each_link(link, ap, PMP_FIRST) { 37869b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 37879b1e2658STejun Heo 37886b7ae954STejun Heo if (sata_pmp_attached(ap) && ata_is_host_link(link)) 37896b7ae954STejun Heo goto config_lpm; 37906b7ae954STejun Heo 3791c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 37920260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 3793c6fd2807SJeff Garzik if (rc) 37946b7ae954STejun Heo goto rest_fail; 3795c6fd2807SJeff Garzik 3796633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 3797633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 3798633273a3STejun Heo ehc->i.action = 0; 3799633273a3STejun Heo return 0; 3800633273a3STejun Heo } 3801633273a3STejun Heo 3802baa1e78aSTejun Heo /* configure transfer mode if necessary */ 3803baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 38040260731fSTejun Heo rc = ata_set_mode(link, &dev); 38054ae72a1eSTejun Heo if (rc) 38066b7ae954STejun Heo goto rest_fail; 3807baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 3808c6fd2807SJeff Garzik } 3809c6fd2807SJeff Garzik 381011fc33daSTejun Heo /* If reset has been issued, clear UA to avoid 381111fc33daSTejun Heo * disrupting the current users of the device. 381211fc33daSTejun Heo */ 381311fc33daSTejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) { 38141eca4365STejun Heo ata_for_each_dev(dev, link, ALL) { 381511fc33daSTejun Heo if (dev->class != ATA_DEV_ATAPI) 381611fc33daSTejun Heo continue; 381711fc33daSTejun Heo rc = atapi_eh_clear_ua(dev); 381811fc33daSTejun Heo if (rc) 38196b7ae954STejun Heo goto rest_fail; 382021334205SAaron Lu if (zpodd_dev_enabled(dev)) 382121334205SAaron Lu zpodd_post_poweron(dev); 382211fc33daSTejun Heo } 382311fc33daSTejun Heo } 382411fc33daSTejun Heo 38256013efd8STejun Heo /* retry flush if necessary */ 38266013efd8STejun Heo ata_for_each_dev(dev, link, ALL) { 38279162c657SHannes Reinecke if (dev->class != ATA_DEV_ATA && 38289162c657SHannes Reinecke dev->class != ATA_DEV_ZAC) 38296013efd8STejun Heo continue; 38306013efd8STejun Heo rc = ata_eh_maybe_retry_flush(dev); 38316013efd8STejun Heo if (rc) 38326b7ae954STejun Heo goto rest_fail; 38336013efd8STejun Heo } 38346013efd8STejun Heo 38356b7ae954STejun Heo config_lpm: 383611fc33daSTejun Heo /* configure link power saving */ 38376b7ae954STejun Heo if (link->lpm_policy != ap->target_lpm_policy) { 38386b7ae954STejun Heo rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 38396b7ae954STejun Heo if (rc) 38406b7ae954STejun Heo goto rest_fail; 38416b7ae954STejun Heo } 3842ca77329fSKristen Carlson Accardi 38439b1e2658STejun Heo /* this link is okay now */ 38449b1e2658STejun Heo ehc->i.flags = 0; 38459b1e2658STejun Heo continue; 3846c6fd2807SJeff Garzik 38476b7ae954STejun Heo rest_fail: 38486b7ae954STejun Heo nr_fails++; 38496b7ae954STejun Heo if (dev) 38500a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 3851c6fd2807SJeff Garzik 38524cb7c6f1SNiklas Cassel if (ata_port_is_frozen(ap)) { 3853b06ce3e5STejun Heo /* PMP reset requires working host port. 3854b06ce3e5STejun Heo * Can't retry if it's frozen. 
3855b06ce3e5STejun Heo */ 3856071f44b1STejun Heo if (sata_pmp_attached(ap)) 3857b06ce3e5STejun Heo goto out; 38589b1e2658STejun Heo break; 38599b1e2658STejun Heo } 3860b06ce3e5STejun Heo } 38619b1e2658STejun Heo 38626b7ae954STejun Heo if (nr_fails) 3863c6fd2807SJeff Garzik goto retry; 3864c6fd2807SJeff Garzik 3865c6fd2807SJeff Garzik out: 38669b1e2658STejun Heo if (rc && r_failed_link) 38679b1e2658STejun Heo *r_failed_link = link; 3868c6fd2807SJeff Garzik 3869c6fd2807SJeff Garzik return rc; 3870c6fd2807SJeff Garzik } 3871c6fd2807SJeff Garzik 3872c6fd2807SJeff Garzik /** 3873c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 3874c6fd2807SJeff Garzik * @ap: host port to finish EH for 3875c6fd2807SJeff Garzik * 3876c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 3877c6fd2807SJeff Garzik * failed qcs. 3878c6fd2807SJeff Garzik * 3879c6fd2807SJeff Garzik * LOCKING: 3880c6fd2807SJeff Garzik * None. 3881c6fd2807SJeff Garzik */ 3882fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 3883c6fd2807SJeff Garzik { 3884258c4e5cSJens Axboe struct ata_queued_cmd *qc; 3885c6fd2807SJeff Garzik int tag; 3886c6fd2807SJeff Garzik 3887c6fd2807SJeff Garzik /* retry or finish qcs */ 3888258c4e5cSJens Axboe ata_qc_for_each_raw(ap, qc, tag) { 388987629312SNiklas Cassel if (!(qc->flags & ATA_QCFLAG_EH)) 3890c6fd2807SJeff Garzik continue; 3891c6fd2807SJeff Garzik 3892c6fd2807SJeff Garzik if (qc->err_mask) { 3893c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 3894c6fd2807SJeff Garzik * generate sense data in this function, 3895c6fd2807SJeff Garzik * considering both err_mask and tf. 3896c6fd2807SJeff Garzik */ 3897e4c26a1bSNiklas Cassel if (qc->flags & ATA_QCFLAG_RETRY) { 3898e4c26a1bSNiklas Cassel /* 3899e4c26a1bSNiklas Cassel * Since qc->err_mask is set, ata_eh_qc_retry() 3900e4c26a1bSNiklas Cassel * will not increment scmd->allowed, so upper 3901e4c26a1bSNiklas Cassel * layer will only retry the command if it has 3902e4c26a1bSNiklas Cassel * not already been retried too many times. 3903e4c26a1bSNiklas Cassel */ 3904c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3905e4c26a1bSNiklas Cassel } else { 390603faab78STejun Heo ata_eh_qc_complete(qc); 3907e4c26a1bSNiklas Cassel } 3908c6fd2807SJeff Garzik } else { 390918bd7718SNiklas Cassel if (qc->flags & ATA_QCFLAG_SENSE_VALID || 391018bd7718SNiklas Cassel qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) { 3911c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 3912c6fd2807SJeff Garzik } else { 3913c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 3914c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3915e4c26a1bSNiklas Cassel /* 3916e4c26a1bSNiklas Cassel * Since qc->err_mask is not set, 3917e4c26a1bSNiklas Cassel * ata_eh_qc_retry() will increment 3918e4c26a1bSNiklas Cassel * scmd->allowed, so upper layer is guaranteed 3919e4c26a1bSNiklas Cassel * to retry the command. 
3920e4c26a1bSNiklas Cassel */ 3921c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 3922c6fd2807SJeff Garzik } 3923c6fd2807SJeff Garzik } 3924c6fd2807SJeff Garzik } 3925da917d69STejun Heo 3926da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 3927da917d69STejun Heo WARN_ON(ap->nr_active_links); 3928da917d69STejun Heo ap->nr_active_links = 0; 3929c6fd2807SJeff Garzik } 3930c6fd2807SJeff Garzik 3931c6fd2807SJeff Garzik /** 3932c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 3933c6fd2807SJeff Garzik * @ap: host port to handle error for 3934a1efdabaSTejun Heo * 3935c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 3936c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 3937c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 3938c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 3939c6fd2807SJeff Garzik * 3940c6fd2807SJeff Garzik * Perform standard error handling sequence. 3941c6fd2807SJeff Garzik * 3942c6fd2807SJeff Garzik * LOCKING: 3943c6fd2807SJeff Garzik * Kernel thread context (may sleep). 3944c6fd2807SJeff Garzik */ 3945c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3946c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3947c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 3948c6fd2807SJeff Garzik { 39499b1e2658STejun Heo struct ata_device *dev; 39509b1e2658STejun Heo int rc; 39519b1e2658STejun Heo 39529b1e2658STejun Heo ata_eh_autopsy(ap); 39539b1e2658STejun Heo ata_eh_report(ap); 39549b1e2658STejun Heo 39559b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 39569b1e2658STejun Heo NULL); 39579b1e2658STejun Heo if (rc) { 39581eca4365STejun Heo ata_for_each_dev(dev, &ap->link, ALL) 39599b1e2658STejun Heo ata_dev_disable(dev); 39609b1e2658STejun Heo } 39619b1e2658STejun Heo 3962c6fd2807SJeff Garzik ata_eh_finish(ap); 3963c6fd2807SJeff Garzik } 3964c6fd2807SJeff Garzik 3965a1efdabaSTejun Heo /** 3966a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 3967a1efdabaSTejun Heo * @ap: host port to handle error for 3968a1efdabaSTejun Heo * 3969a1efdabaSTejun Heo * Standard error handler 3970a1efdabaSTejun Heo * 3971a1efdabaSTejun Heo * LOCKING: 3972a1efdabaSTejun Heo * Kernel thread context (may sleep). 3973a1efdabaSTejun Heo */ 3974a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 3975a1efdabaSTejun Heo { 3976a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 3977a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 3978a1efdabaSTejun Heo 397957c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 3980fe06e5f9STejun Heo if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3981a1efdabaSTejun Heo hardreset = NULL; 3982a1efdabaSTejun Heo 3983a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3984a1efdabaSTejun Heo } 3985a52fbcfcSBartlomiej Zolnierkiewicz EXPORT_SYMBOL_GPL(ata_std_error_handler); 3986a1efdabaSTejun Heo 39876ffa01d8STejun Heo #ifdef CONFIG_PM 3988c6fd2807SJeff Garzik /** 3989c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 3990c6fd2807SJeff Garzik * @ap: port to suspend 3991c6fd2807SJeff Garzik * 3992c6fd2807SJeff Garzik * Suspend @ap. 3993c6fd2807SJeff Garzik * 3994c6fd2807SJeff Garzik * LOCKING: 3995c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
3996c6fd2807SJeff Garzik */ 3997c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 3998c6fd2807SJeff Garzik { 3999c6fd2807SJeff Garzik unsigned long flags; 4000c6fd2807SJeff Garzik int rc = 0; 40013dc67440SAaron Lu struct ata_device *dev; 4002c6fd2807SJeff Garzik 4003c6fd2807SJeff Garzik /* are we suspending? */ 4004c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4005c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 4006a7ff60dbSAaron Lu ap->pm_mesg.event & PM_EVENT_RESUME) { 4007c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4008c6fd2807SJeff Garzik return; 4009c6fd2807SJeff Garzik } 4010c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4011c6fd2807SJeff Garzik 4012c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 4013c6fd2807SJeff Garzik 40143dc67440SAaron Lu /* 40153dc67440SAaron Lu * If we have a ZPODD attached, check its zero 40163dc67440SAaron Lu * power ready status before the port is frozen. 4017a7ff60dbSAaron Lu * Only needed for runtime suspend. 40183dc67440SAaron Lu */ 4019a7ff60dbSAaron Lu if (PMSG_IS_AUTO(ap->pm_mesg)) { 40203dc67440SAaron Lu ata_for_each_dev(dev, &ap->link, ENABLED) { 40213dc67440SAaron Lu if (zpodd_dev_enabled(dev)) 40223dc67440SAaron Lu zpodd_on_suspend(dev); 40233dc67440SAaron Lu } 4024a7ff60dbSAaron Lu } 40253dc67440SAaron Lu 4026c6fd2807SJeff Garzik /* suspend */ 4027c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 4028c6fd2807SJeff Garzik 4029c6fd2807SJeff Garzik if (ap->ops->port_suspend) 4030c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 4031c6fd2807SJeff Garzik 4032a7ff60dbSAaron Lu ata_acpi_set_state(ap, ap->pm_mesg); 40332a7b02eaSSergey Shtylyov 4034bc6e7c4bSDan Williams /* update the flags */ 4035c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 4036c6fd2807SJeff Garzik 4037c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 4038c6fd2807SJeff Garzik if (rc == 0) 4039c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 40404cb7c6f1SNiklas Cassel else if (ata_port_is_frozen(ap)) 4041c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 4042c6fd2807SJeff Garzik 4043c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 4044c6fd2807SJeff Garzik 4045c6fd2807SJeff Garzik return; 4046c6fd2807SJeff Garzik } 4047c6fd2807SJeff Garzik 4048c6fd2807SJeff Garzik /** 4049c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 4050c6fd2807SJeff Garzik * @ap: port to resume 4051c6fd2807SJeff Garzik * 4052c6fd2807SJeff Garzik * Resume @ap. 4053c6fd2807SJeff Garzik * 4054c6fd2807SJeff Garzik * LOCKING: 4055c6fd2807SJeff Garzik * Kernel thread context (may sleep). 4056c6fd2807SJeff Garzik */ 4057c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 4058c6fd2807SJeff Garzik { 40596f9c1ea2STejun Heo struct ata_link *link; 40606f9c1ea2STejun Heo struct ata_device *dev; 4061c6fd2807SJeff Garzik unsigned long flags; 4062c6fd2807SJeff Garzik 4063c6fd2807SJeff Garzik /* are we resuming? 
 */
4064c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4065c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4066a7ff60dbSAaron Lu 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4067c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
4068c6fd2807SJeff Garzik 		return;
4069c6fd2807SJeff Garzik 	}
4070c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4071c6fd2807SJeff Garzik 
40729666f400STejun Heo 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4073c6fd2807SJeff Garzik 
40746f9c1ea2STejun Heo 	/*
40756f9c1ea2STejun Heo 	 * Error timestamps are in jiffies, which don't advance while
40766f9c1ea2STejun Heo 	 * suspended, and PHY events during resume aren't too uncommon.
40776f9c1ea2STejun Heo 	 * When the two are combined, it can lead to unnecessary speed
40786f9c1ea2STejun Heo 	 * downs if the machine is suspended and resumed repeatedly.
40796f9c1ea2STejun Heo 	 * Clear error history.
40806f9c1ea2STejun Heo 	 */
40816f9c1ea2STejun Heo 	ata_for_each_link(link, ap, HOST_FIRST)
40826f9c1ea2STejun Heo 		ata_for_each_dev(dev, link, ALL)
40836f9c1ea2STejun Heo 			ata_ering_clear(&dev->ering);
40846f9c1ea2STejun Heo 
4085a7ff60dbSAaron Lu 	ata_acpi_set_state(ap, ap->pm_mesg);
4086bd3adca5SShaohua Li 
4087c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
4088ae867937SKefeng Wang 		ap->ops->port_resume(ap);
4089c6fd2807SJeff Garzik 
40906746544cSTejun Heo 	/* tell ACPI that we're resuming */
40916746544cSTejun Heo 	ata_acpi_on_resume(ap);
40926746544cSTejun Heo 
4093bc6e7c4bSDan Williams 	/* update the flags */
4094c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
4095c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4096c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
4097c6fd2807SJeff Garzik }
40986ffa01d8STejun Heo #endif /* CONFIG_PM */
4099
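/*
 * Editor's note: illustrative sketch only, not part of libata itself.  A
 * low-level driver typically reuses the standard EH entry point above by
 * pointing its ata_port_operations at ata_std_error_handler() and supplying
 * reset methods.  The "foo_" names below are hypothetical, and sata_port_ops
 * is assumed to be the SATA base ops exported by the libata core:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= sata_std_hardreset,
 *		.error_handler	= ata_std_error_handler,
 *	};
 */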