/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

73341c2c95STejun Heo /* Waiting in ->prereset can never be reliable. It's 74341c2c95STejun Heo * sometimes nice to wait there but it can't be depended upon; 75341c2c95STejun Heo * otherwise, we wouldn't be resetting. Just give it enough 76341c2c95STejun Heo * time for most drives to spin up. 7731daabdaSTejun Heo */ 78341c2c95STejun Heo ATA_EH_PRERESET_TIMEOUT = 10000, 79341c2c95STejun Heo ATA_EH_FASTDRAIN_INTERVAL = 3000, 8031daabdaSTejun Heo }; 8131daabdaSTejun Heo 8231daabdaSTejun Heo /* The following table determines how we sequence resets. Each entry 8331daabdaSTejun Heo * represents timeout for that try. The first try can be soft or 8431daabdaSTejun Heo * hardreset. All others are hardreset if available. In most cases 8531daabdaSTejun Heo * the first reset w/ 10sec timeout should succeed. Following entries 8631daabdaSTejun Heo * are mostly for error handling, hotplug and retarded devices. 8731daabdaSTejun Heo */ 8831daabdaSTejun Heo static const unsigned long ata_eh_reset_timeouts[] = { 89341c2c95STejun Heo 10000, /* most drives spin up by 10sec */ 90341c2c95STejun Heo 10000, /* > 99% working drives spin up before 20sec */ 91341c2c95STejun Heo 35000, /* give > 30 secs of idleness for retarded devices */ 92341c2c95STejun Heo 5000, /* and sweet one last chance */ 9331daabdaSTejun Heo /* > 1 min has elapsed, give up */ 9431daabdaSTejun Heo }; 9531daabdaSTejun Heo 96c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap); 976ffa01d8STejun Heo #ifdef CONFIG_PM 98c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap); 99c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap); 1006ffa01d8STejun Heo #else /* CONFIG_PM */ 1016ffa01d8STejun Heo static void ata_eh_handle_port_suspend(struct ata_port *ap) 1026ffa01d8STejun Heo { } 1036ffa01d8STejun Heo 1046ffa01d8STejun Heo static void ata_eh_handle_port_resume(struct ata_port *ap) 1056ffa01d8STejun Heo { } 1066ffa01d8STejun Heo #endif /* CONFIG_PM */ 107c6fd2807SJeff Garzik 108b64bbc39STejun Heo static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 109b64bbc39STejun Heo va_list args) 110b64bbc39STejun Heo { 111b64bbc39STejun Heo ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 112b64bbc39STejun Heo ATA_EH_DESC_LEN - ehi->desc_len, 113b64bbc39STejun Heo fmt, args); 114b64bbc39STejun Heo } 115b64bbc39STejun Heo 116b64bbc39STejun Heo /** 117b64bbc39STejun Heo * __ata_ehi_push_desc - push error description without adding separator 118b64bbc39STejun Heo * @ehi: target EHI 119b64bbc39STejun Heo * @fmt: printf format string 120b64bbc39STejun Heo * 121b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 122b64bbc39STejun Heo * 123b64bbc39STejun Heo * LOCKING: 124b64bbc39STejun Heo * spin_lock_irqsave(host lock) 125b64bbc39STejun Heo */ 126b64bbc39STejun Heo void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 127b64bbc39STejun Heo { 128b64bbc39STejun Heo va_list args; 129b64bbc39STejun Heo 130b64bbc39STejun Heo va_start(args, fmt); 131b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 132b64bbc39STejun Heo va_end(args); 133b64bbc39STejun Heo } 134b64bbc39STejun Heo 135b64bbc39STejun Heo /** 136b64bbc39STejun Heo * ata_ehi_push_desc - push error description with separator 137b64bbc39STejun Heo * @ehi: target EHI 138b64bbc39STejun Heo * @fmt: printf format string 139b64bbc39STejun Heo * 140b64bbc39STejun Heo * Format string according to @fmt and append it to @ehi->desc. 
141b64bbc39STejun Heo * If @ehi->desc is not empty, ", " is added in-between. 142b64bbc39STejun Heo * 143b64bbc39STejun Heo * LOCKING: 144b64bbc39STejun Heo * spin_lock_irqsave(host lock) 145b64bbc39STejun Heo */ 146b64bbc39STejun Heo void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 147b64bbc39STejun Heo { 148b64bbc39STejun Heo va_list args; 149b64bbc39STejun Heo 150b64bbc39STejun Heo if (ehi->desc_len) 151b64bbc39STejun Heo __ata_ehi_push_desc(ehi, ", "); 152b64bbc39STejun Heo 153b64bbc39STejun Heo va_start(args, fmt); 154b64bbc39STejun Heo __ata_ehi_pushv_desc(ehi, fmt, args); 155b64bbc39STejun Heo va_end(args); 156b64bbc39STejun Heo } 157b64bbc39STejun Heo 158b64bbc39STejun Heo /** 159b64bbc39STejun Heo * ata_ehi_clear_desc - clean error description 160b64bbc39STejun Heo * @ehi: target EHI 161b64bbc39STejun Heo * 162b64bbc39STejun Heo * Clear @ehi->desc. 163b64bbc39STejun Heo * 164b64bbc39STejun Heo * LOCKING: 165b64bbc39STejun Heo * spin_lock_irqsave(host lock) 166b64bbc39STejun Heo */ 167b64bbc39STejun Heo void ata_ehi_clear_desc(struct ata_eh_info *ehi) 168b64bbc39STejun Heo { 169b64bbc39STejun Heo ehi->desc[0] = '\0'; 170b64bbc39STejun Heo ehi->desc_len = 0; 171b64bbc39STejun Heo } 172b64bbc39STejun Heo 173cbcdd875STejun Heo /** 174cbcdd875STejun Heo * ata_port_desc - append port description 175cbcdd875STejun Heo * @ap: target ATA port 176cbcdd875STejun Heo * @fmt: printf format string 177cbcdd875STejun Heo * 178cbcdd875STejun Heo * Format string according to @fmt and append it to port 179cbcdd875STejun Heo * description. If port description is not empty, " " is added 180cbcdd875STejun Heo * in-between. This function is to be used while initializing 181cbcdd875STejun Heo * ata_host. The description is printed on host registration. 182cbcdd875STejun Heo * 183cbcdd875STejun Heo * LOCKING: 184cbcdd875STejun Heo * None. 185cbcdd875STejun Heo */ 186cbcdd875STejun Heo void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 187cbcdd875STejun Heo { 188cbcdd875STejun Heo va_list args; 189cbcdd875STejun Heo 190cbcdd875STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 191cbcdd875STejun Heo 192cbcdd875STejun Heo if (ap->link.eh_info.desc_len) 193cbcdd875STejun Heo __ata_ehi_push_desc(&ap->link.eh_info, " "); 194cbcdd875STejun Heo 195cbcdd875STejun Heo va_start(args, fmt); 196cbcdd875STejun Heo __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 197cbcdd875STejun Heo va_end(args); 198cbcdd875STejun Heo } 199cbcdd875STejun Heo 200cbcdd875STejun Heo #ifdef CONFIG_PCI 201cbcdd875STejun Heo 202cbcdd875STejun Heo /** 203cbcdd875STejun Heo * ata_port_pbar_desc - append PCI BAR description 204cbcdd875STejun Heo * @ap: target ATA port 205cbcdd875STejun Heo * @bar: target PCI BAR 206cbcdd875STejun Heo * @offset: offset into PCI BAR 207cbcdd875STejun Heo * @name: name of the area 208cbcdd875STejun Heo * 209cbcdd875STejun Heo * If @offset is negative, this function formats a string which 210cbcdd875STejun Heo * contains the name, address, size and type of the BAR and 211cbcdd875STejun Heo * appends it to the port description. If @offset is zero or 212cbcdd875STejun Heo * positive, only name and offsetted address is appended. 213cbcdd875STejun Heo * 214cbcdd875STejun Heo * LOCKING: 215cbcdd875STejun Heo * None. 
216cbcdd875STejun Heo */ 217cbcdd875STejun Heo void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 218cbcdd875STejun Heo const char *name) 219cbcdd875STejun Heo { 220cbcdd875STejun Heo struct pci_dev *pdev = to_pci_dev(ap->host->dev); 221cbcdd875STejun Heo char *type = ""; 222cbcdd875STejun Heo unsigned long long start, len; 223cbcdd875STejun Heo 224cbcdd875STejun Heo if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 225cbcdd875STejun Heo type = "m"; 226cbcdd875STejun Heo else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 227cbcdd875STejun Heo type = "i"; 228cbcdd875STejun Heo 229cbcdd875STejun Heo start = (unsigned long long)pci_resource_start(pdev, bar); 230cbcdd875STejun Heo len = (unsigned long long)pci_resource_len(pdev, bar); 231cbcdd875STejun Heo 232cbcdd875STejun Heo if (offset < 0) 233cbcdd875STejun Heo ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 234cbcdd875STejun Heo else 235e6a73ab1SAndrew Morton ata_port_desc(ap, "%s 0x%llx", name, 236e6a73ab1SAndrew Morton start + (unsigned long long)offset); 237cbcdd875STejun Heo } 238cbcdd875STejun Heo 239cbcdd875STejun Heo #endif /* CONFIG_PCI */ 240cbcdd875STejun Heo 2413884f7b0STejun Heo static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 242c6fd2807SJeff Garzik unsigned int err_mask) 243c6fd2807SJeff Garzik { 244c6fd2807SJeff Garzik struct ata_ering_entry *ent; 245c6fd2807SJeff Garzik 246c6fd2807SJeff Garzik WARN_ON(!err_mask); 247c6fd2807SJeff Garzik 248c6fd2807SJeff Garzik ering->cursor++; 249c6fd2807SJeff Garzik ering->cursor %= ATA_ERING_SIZE; 250c6fd2807SJeff Garzik 251c6fd2807SJeff Garzik ent = &ering->ring[ering->cursor]; 2523884f7b0STejun Heo ent->eflags = eflags; 253c6fd2807SJeff Garzik ent->err_mask = err_mask; 254c6fd2807SJeff Garzik ent->timestamp = get_jiffies_64(); 255c6fd2807SJeff Garzik } 256c6fd2807SJeff Garzik 25776326ac1STejun Heo static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 25876326ac1STejun Heo { 25976326ac1STejun Heo struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 26076326ac1STejun Heo 26176326ac1STejun Heo if (ent->err_mask) 26276326ac1STejun Heo return ent; 26376326ac1STejun Heo return NULL; 26476326ac1STejun Heo } 26576326ac1STejun Heo 2667d47e8d4STejun Heo static void ata_ering_clear(struct ata_ering *ering) 267c6fd2807SJeff Garzik { 2687d47e8d4STejun Heo memset(ering, 0, sizeof(*ering)); 269c6fd2807SJeff Garzik } 270c6fd2807SJeff Garzik 271c6fd2807SJeff Garzik static int ata_ering_map(struct ata_ering *ering, 272c6fd2807SJeff Garzik int (*map_fn)(struct ata_ering_entry *, void *), 273c6fd2807SJeff Garzik void *arg) 274c6fd2807SJeff Garzik { 275c6fd2807SJeff Garzik int idx, rc = 0; 276c6fd2807SJeff Garzik struct ata_ering_entry *ent; 277c6fd2807SJeff Garzik 278c6fd2807SJeff Garzik idx = ering->cursor; 279c6fd2807SJeff Garzik do { 280c6fd2807SJeff Garzik ent = &ering->ring[idx]; 281c6fd2807SJeff Garzik if (!ent->err_mask) 282c6fd2807SJeff Garzik break; 283c6fd2807SJeff Garzik rc = map_fn(ent, arg); 284c6fd2807SJeff Garzik if (rc) 285c6fd2807SJeff Garzik break; 286c6fd2807SJeff Garzik idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 287c6fd2807SJeff Garzik } while (idx != ering->cursor); 288c6fd2807SJeff Garzik 289c6fd2807SJeff Garzik return rc; 290c6fd2807SJeff Garzik } 291c6fd2807SJeff Garzik 292c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev) 293c6fd2807SJeff Garzik { 2949af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 295c6fd2807SJeff Garzik 
296c6fd2807SJeff Garzik return ehc->i.action | ehc->i.dev_action[dev->devno]; 297c6fd2807SJeff Garzik } 298c6fd2807SJeff Garzik 299f58229f8STejun Heo static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 300c6fd2807SJeff Garzik struct ata_eh_info *ehi, unsigned int action) 301c6fd2807SJeff Garzik { 302f58229f8STejun Heo struct ata_device *tdev; 303c6fd2807SJeff Garzik 304c6fd2807SJeff Garzik if (!dev) { 305c6fd2807SJeff Garzik ehi->action &= ~action; 306f58229f8STejun Heo ata_link_for_each_dev(tdev, link) 307f58229f8STejun Heo ehi->dev_action[tdev->devno] &= ~action; 308c6fd2807SJeff Garzik } else { 309c6fd2807SJeff Garzik /* doesn't make sense for port-wide EH actions */ 310c6fd2807SJeff Garzik WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 311c6fd2807SJeff Garzik 312c6fd2807SJeff Garzik /* break ehi->action into ehi->dev_action */ 313c6fd2807SJeff Garzik if (ehi->action & action) { 314f58229f8STejun Heo ata_link_for_each_dev(tdev, link) 315f58229f8STejun Heo ehi->dev_action[tdev->devno] |= 316f58229f8STejun Heo ehi->action & action; 317c6fd2807SJeff Garzik ehi->action &= ~action; 318c6fd2807SJeff Garzik } 319c6fd2807SJeff Garzik 320c6fd2807SJeff Garzik /* turn off the specified per-dev action */ 321c6fd2807SJeff Garzik ehi->dev_action[dev->devno] &= ~action; 322c6fd2807SJeff Garzik } 323c6fd2807SJeff Garzik } 324c6fd2807SJeff Garzik 325c6fd2807SJeff Garzik /** 326c6fd2807SJeff Garzik * ata_scsi_timed_out - SCSI layer time out callback 327c6fd2807SJeff Garzik * @cmd: timed out SCSI command 328c6fd2807SJeff Garzik * 329c6fd2807SJeff Garzik * Handles SCSI layer timeout. We race with normal completion of 330c6fd2807SJeff Garzik * the qc for @cmd. If the qc is already gone, we lose and let 331c6fd2807SJeff Garzik * the scsi command finish (EH_HANDLED). Otherwise, the qc has 332c6fd2807SJeff Garzik * timed out and EH should be invoked. Prevent ata_qc_complete() 333c6fd2807SJeff Garzik * from finishing it by setting EH_SCHEDULED and return 334c6fd2807SJeff Garzik * EH_NOT_HANDLED. 335c6fd2807SJeff Garzik * 336c6fd2807SJeff Garzik * TODO: kill this function once old EH is gone. 
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		__ata_port_for_each_link(link, ap) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_link_for_each_dev(dev, link) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}

			/* set last reset timestamp to some time in the past */
			ehc->last_reset = jiffies - 60 * HZ;
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
514c6fd2807SJeff Garzik */ 515c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 516c6fd2807SJeff Garzik 517c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_EH_PENDING) { 518a1e10f7eSTejun Heo if (--ap->eh_tries) { 519c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 520c6fd2807SJeff Garzik goto repeat; 521c6fd2807SJeff Garzik } 522c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "EH pending after %d " 523a1e10f7eSTejun Heo "tries, giving up\n", ATA_EH_MAX_TRIES); 524914616a3STejun Heo ap->pflags &= ~ATA_PFLAG_EH_PENDING; 525c6fd2807SJeff Garzik } 526c6fd2807SJeff Garzik 527c6fd2807SJeff Garzik /* this run is complete, make sure EH info is clear */ 528cf1b86c8STejun Heo __ata_port_for_each_link(link, ap) 529cf1b86c8STejun Heo memset(&link->eh_info, 0, sizeof(link->eh_info)); 530c6fd2807SJeff Garzik 531c6fd2807SJeff Garzik /* Clear host_eh_scheduled while holding ap->lock such 532c6fd2807SJeff Garzik * that if exception occurs after this point but 533c6fd2807SJeff Garzik * before EH completion, SCSI midlayer will 534c6fd2807SJeff Garzik * re-initiate EH. 535c6fd2807SJeff Garzik */ 536c6fd2807SJeff Garzik host->host_eh_scheduled = 0; 537c6fd2807SJeff Garzik 538c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 539c6fd2807SJeff Garzik } else { 5409af5c9c9STejun Heo WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 541c6fd2807SJeff Garzik ap->ops->eng_timeout(ap); 542c6fd2807SJeff Garzik } 543c6fd2807SJeff Garzik 544c6fd2807SJeff Garzik /* finish or retry handled scmd's and clean up */ 545c6fd2807SJeff Garzik WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); 546c6fd2807SJeff Garzik 547c6fd2807SJeff Garzik scsi_eh_flush_done_q(&ap->eh_done_q); 548c6fd2807SJeff Garzik 549c6fd2807SJeff Garzik /* clean up */ 550c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 551c6fd2807SJeff Garzik 552c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_LOADING) 553c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_LOADING; 554c6fd2807SJeff Garzik else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 55552bad64dSDavid Howells queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0); 556c6fd2807SJeff Garzik 557c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_RECOVERED) 558c6fd2807SJeff Garzik ata_port_printk(ap, KERN_INFO, "EH complete\n"); 559c6fd2807SJeff Garzik 560c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 561c6fd2807SJeff Garzik 562c6fd2807SJeff Garzik /* tell wait_eh that we're done */ 563c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 564c6fd2807SJeff Garzik wake_up_all(&ap->eh_wait_q); 565c6fd2807SJeff Garzik 566c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 567c6fd2807SJeff Garzik 568c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 569c6fd2807SJeff Garzik } 570c6fd2807SJeff Garzik 571c6fd2807SJeff Garzik /** 572c6fd2807SJeff Garzik * ata_port_wait_eh - Wait for the currently pending EH to complete 573c6fd2807SJeff Garzik * @ap: Port to wait EH for 574c6fd2807SJeff Garzik * 575c6fd2807SJeff Garzik * Wait until the currently pending EH is complete. 576c6fd2807SJeff Garzik * 577c6fd2807SJeff Garzik * LOCKING: 578c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
579c6fd2807SJeff Garzik */ 580c6fd2807SJeff Garzik void ata_port_wait_eh(struct ata_port *ap) 581c6fd2807SJeff Garzik { 582c6fd2807SJeff Garzik unsigned long flags; 583c6fd2807SJeff Garzik DEFINE_WAIT(wait); 584c6fd2807SJeff Garzik 585c6fd2807SJeff Garzik retry: 586c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 587c6fd2807SJeff Garzik 588c6fd2807SJeff Garzik while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 589c6fd2807SJeff Garzik prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 590c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 591c6fd2807SJeff Garzik schedule(); 592c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 593c6fd2807SJeff Garzik } 594c6fd2807SJeff Garzik finish_wait(&ap->eh_wait_q, &wait); 595c6fd2807SJeff Garzik 596c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 597c6fd2807SJeff Garzik 598c6fd2807SJeff Garzik /* make sure SCSI EH is complete */ 599cca3974eSJeff Garzik if (scsi_host_in_recovery(ap->scsi_host)) { 600c6fd2807SJeff Garzik msleep(10); 601c6fd2807SJeff Garzik goto retry; 602c6fd2807SJeff Garzik } 603c6fd2807SJeff Garzik } 604c6fd2807SJeff Garzik 6055ddf24c5STejun Heo static int ata_eh_nr_in_flight(struct ata_port *ap) 6065ddf24c5STejun Heo { 6075ddf24c5STejun Heo unsigned int tag; 6085ddf24c5STejun Heo int nr = 0; 6095ddf24c5STejun Heo 6105ddf24c5STejun Heo /* count only non-internal commands */ 6115ddf24c5STejun Heo for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 6125ddf24c5STejun Heo if (ata_qc_from_tag(ap, tag)) 6135ddf24c5STejun Heo nr++; 6145ddf24c5STejun Heo 6155ddf24c5STejun Heo return nr; 6165ddf24c5STejun Heo } 6175ddf24c5STejun Heo 6185ddf24c5STejun Heo void ata_eh_fastdrain_timerfn(unsigned long arg) 6195ddf24c5STejun Heo { 6205ddf24c5STejun Heo struct ata_port *ap = (void *)arg; 6215ddf24c5STejun Heo unsigned long flags; 6225ddf24c5STejun Heo int cnt; 6235ddf24c5STejun Heo 6245ddf24c5STejun Heo spin_lock_irqsave(ap->lock, flags); 6255ddf24c5STejun Heo 6265ddf24c5STejun Heo cnt = ata_eh_nr_in_flight(ap); 6275ddf24c5STejun Heo 6285ddf24c5STejun Heo /* are we done? */ 6295ddf24c5STejun Heo if (!cnt) 6305ddf24c5STejun Heo goto out_unlock; 6315ddf24c5STejun Heo 6325ddf24c5STejun Heo if (cnt == ap->fastdrain_cnt) { 6335ddf24c5STejun Heo unsigned int tag; 6345ddf24c5STejun Heo 6355ddf24c5STejun Heo /* No progress during the last interval, tag all 6365ddf24c5STejun Heo * in-flight qcs as timed out and freeze the port. 
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
715c6fd2807SJeff Garzik * Note that ATA_QCFLAG_FAILED is unconditionally set after 716c6fd2807SJeff Garzik * this function completes. 717c6fd2807SJeff Garzik */ 718c6fd2807SJeff Garzik scsi_req_abort_cmd(qc->scsicmd); 719c6fd2807SJeff Garzik } 720c6fd2807SJeff Garzik 721c6fd2807SJeff Garzik /** 722c6fd2807SJeff Garzik * ata_port_schedule_eh - schedule error handling without a qc 723c6fd2807SJeff Garzik * @ap: ATA port to schedule EH for 724c6fd2807SJeff Garzik * 725c6fd2807SJeff Garzik * Schedule error handling for @ap. EH will kick in as soon as 726c6fd2807SJeff Garzik * all commands are drained. 727c6fd2807SJeff Garzik * 728c6fd2807SJeff Garzik * LOCKING: 729cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 730c6fd2807SJeff Garzik */ 731c6fd2807SJeff Garzik void ata_port_schedule_eh(struct ata_port *ap) 732c6fd2807SJeff Garzik { 733c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 734c6fd2807SJeff Garzik 735f4d6d004STejun Heo if (ap->pflags & ATA_PFLAG_INITIALIZING) 736f4d6d004STejun Heo return; 737f4d6d004STejun Heo 7385ddf24c5STejun Heo ata_eh_set_pending(ap, 1); 739cca3974eSJeff Garzik scsi_schedule_eh(ap->scsi_host); 740c6fd2807SJeff Garzik 741c6fd2807SJeff Garzik DPRINTK("port EH scheduled\n"); 742c6fd2807SJeff Garzik } 743c6fd2807SJeff Garzik 744dbd82616STejun Heo static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 745c6fd2807SJeff Garzik { 746c6fd2807SJeff Garzik int tag, nr_aborted = 0; 747c6fd2807SJeff Garzik 748c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 749c6fd2807SJeff Garzik 7505ddf24c5STejun Heo /* we're gonna abort all commands, no need for fast drain */ 7515ddf24c5STejun Heo ata_eh_set_pending(ap, 0); 7525ddf24c5STejun Heo 753c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 754c6fd2807SJeff Garzik struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 755c6fd2807SJeff Garzik 756dbd82616STejun Heo if (qc && (!link || qc->dev->link == link)) { 757c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_FAILED; 758c6fd2807SJeff Garzik ata_qc_complete(qc); 759c6fd2807SJeff Garzik nr_aborted++; 760c6fd2807SJeff Garzik } 761c6fd2807SJeff Garzik } 762c6fd2807SJeff Garzik 763c6fd2807SJeff Garzik if (!nr_aborted) 764c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 765c6fd2807SJeff Garzik 766c6fd2807SJeff Garzik return nr_aborted; 767c6fd2807SJeff Garzik } 768c6fd2807SJeff Garzik 769c6fd2807SJeff Garzik /** 770dbd82616STejun Heo * ata_link_abort - abort all qc's on the link 771dbd82616STejun Heo * @link: ATA link to abort qc's for 772dbd82616STejun Heo * 773dbd82616STejun Heo * Abort all active qc's active on @link and schedule EH. 774dbd82616STejun Heo * 775dbd82616STejun Heo * LOCKING: 776dbd82616STejun Heo * spin_lock_irqsave(host lock) 777dbd82616STejun Heo * 778dbd82616STejun Heo * RETURNS: 779dbd82616STejun Heo * Number of aborted qc's. 780dbd82616STejun Heo */ 781dbd82616STejun Heo int ata_link_abort(struct ata_link *link) 782dbd82616STejun Heo { 783dbd82616STejun Heo return ata_do_link_abort(link->ap, link); 784dbd82616STejun Heo } 785dbd82616STejun Heo 786dbd82616STejun Heo /** 787dbd82616STejun Heo * ata_port_abort - abort all qc's on the port 788dbd82616STejun Heo * @ap: ATA port to abort qc's for 789dbd82616STejun Heo * 790dbd82616STejun Heo * Abort all active qc's of @ap and schedule EH. 791dbd82616STejun Heo * 792dbd82616STejun Heo * LOCKING: 793dbd82616STejun Heo * spin_lock_irqsave(host_set lock) 794dbd82616STejun Heo * 795dbd82616STejun Heo * RETURNS: 796dbd82616STejun Heo * Number of aborted qc's. 
797dbd82616STejun Heo */ 798dbd82616STejun Heo int ata_port_abort(struct ata_port *ap) 799dbd82616STejun Heo { 800dbd82616STejun Heo return ata_do_link_abort(ap, NULL); 801dbd82616STejun Heo } 802dbd82616STejun Heo 803dbd82616STejun Heo /** 804c6fd2807SJeff Garzik * __ata_port_freeze - freeze port 805c6fd2807SJeff Garzik * @ap: ATA port to freeze 806c6fd2807SJeff Garzik * 807c6fd2807SJeff Garzik * This function is called when HSM violation or some other 808c6fd2807SJeff Garzik * condition disrupts normal operation of the port. Frozen port 809c6fd2807SJeff Garzik * is not allowed to perform any operation until the port is 810c6fd2807SJeff Garzik * thawed, which usually follows a successful reset. 811c6fd2807SJeff Garzik * 812c6fd2807SJeff Garzik * ap->ops->freeze() callback can be used for freezing the port 813c6fd2807SJeff Garzik * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 814c6fd2807SJeff Garzik * port cannot be frozen hardware-wise, the interrupt handler 815c6fd2807SJeff Garzik * must ack and clear interrupts unconditionally while the port 816c6fd2807SJeff Garzik * is frozen. 817c6fd2807SJeff Garzik * 818c6fd2807SJeff Garzik * LOCKING: 819cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 820c6fd2807SJeff Garzik */ 821c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap) 822c6fd2807SJeff Garzik { 823c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 824c6fd2807SJeff Garzik 825c6fd2807SJeff Garzik if (ap->ops->freeze) 826c6fd2807SJeff Garzik ap->ops->freeze(ap); 827c6fd2807SJeff Garzik 828c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_FROZEN; 829c6fd2807SJeff Garzik 83044877b4eSTejun Heo DPRINTK("ata%u port frozen\n", ap->print_id); 831c6fd2807SJeff Garzik } 832c6fd2807SJeff Garzik 833c6fd2807SJeff Garzik /** 834c6fd2807SJeff Garzik * ata_port_freeze - abort & freeze port 835c6fd2807SJeff Garzik * @ap: ATA port to freeze 836c6fd2807SJeff Garzik * 837c6fd2807SJeff Garzik * Abort and freeze @ap. 838c6fd2807SJeff Garzik * 839c6fd2807SJeff Garzik * LOCKING: 840cca3974eSJeff Garzik * spin_lock_irqsave(host lock) 841c6fd2807SJeff Garzik * 842c6fd2807SJeff Garzik * RETURNS: 843c6fd2807SJeff Garzik * Number of aborted commands. 844c6fd2807SJeff Garzik */ 845c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap) 846c6fd2807SJeff Garzik { 847c6fd2807SJeff Garzik int nr_aborted; 848c6fd2807SJeff Garzik 849c6fd2807SJeff Garzik WARN_ON(!ap->ops->error_handler); 850c6fd2807SJeff Garzik 851c6fd2807SJeff Garzik nr_aborted = ata_port_abort(ap); 852c6fd2807SJeff Garzik __ata_port_freeze(ap); 853c6fd2807SJeff Garzik 854c6fd2807SJeff Garzik return nr_aborted; 855c6fd2807SJeff Garzik } 856c6fd2807SJeff Garzik 857c6fd2807SJeff Garzik /** 8587d77b247STejun Heo * sata_async_notification - SATA async notification handler 8597d77b247STejun Heo * @ap: ATA port where async notification is received 8607d77b247STejun Heo * 8617d77b247STejun Heo * Handler to be called when async notification via SDB FIS is 8627d77b247STejun Heo * received. This function schedules EH if necessary. 8637d77b247STejun Heo * 8647d77b247STejun Heo * LOCKING: 8657d77b247STejun Heo * spin_lock_irqsave(host lock) 8667d77b247STejun Heo * 8677d77b247STejun Heo * RETURNS: 8687d77b247STejun Heo * 1 if EH is scheduled, 0 otherwise. 
8697d77b247STejun Heo */ 8707d77b247STejun Heo int sata_async_notification(struct ata_port *ap) 8717d77b247STejun Heo { 8727d77b247STejun Heo u32 sntf; 8737d77b247STejun Heo int rc; 8747d77b247STejun Heo 8757d77b247STejun Heo if (!(ap->flags & ATA_FLAG_AN)) 8767d77b247STejun Heo return 0; 8777d77b247STejun Heo 8787d77b247STejun Heo rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 8797d77b247STejun Heo if (rc == 0) 8807d77b247STejun Heo sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 8817d77b247STejun Heo 882071f44b1STejun Heo if (!sata_pmp_attached(ap) || rc) { 8837d77b247STejun Heo /* PMP is not attached or SNTF is not available */ 884071f44b1STejun Heo if (!sata_pmp_attached(ap)) { 8857d77b247STejun Heo /* PMP is not attached. Check whether ATAPI 8867d77b247STejun Heo * AN is configured. If so, notify media 8877d77b247STejun Heo * change. 8887d77b247STejun Heo */ 8897d77b247STejun Heo struct ata_device *dev = ap->link.device; 8907d77b247STejun Heo 8917d77b247STejun Heo if ((dev->class == ATA_DEV_ATAPI) && 8927d77b247STejun Heo (dev->flags & ATA_DFLAG_AN)) 8937d77b247STejun Heo ata_scsi_media_change_notify(dev); 8947d77b247STejun Heo return 0; 8957d77b247STejun Heo } else { 8967d77b247STejun Heo /* PMP is attached but SNTF is not available. 8977d77b247STejun Heo * ATAPI async media change notification is 8987d77b247STejun Heo * not used. The PMP must be reporting PHY 8997d77b247STejun Heo * status change, schedule EH. 9007d77b247STejun Heo */ 9017d77b247STejun Heo ata_port_schedule_eh(ap); 9027d77b247STejun Heo return 1; 9037d77b247STejun Heo } 9047d77b247STejun Heo } else { 9057d77b247STejun Heo /* PMP is attached and SNTF is available */ 9067d77b247STejun Heo struct ata_link *link; 9077d77b247STejun Heo 9087d77b247STejun Heo /* check and notify ATAPI AN */ 9097d77b247STejun Heo ata_port_for_each_link(link, ap) { 9107d77b247STejun Heo if (!(sntf & (1 << link->pmp))) 9117d77b247STejun Heo continue; 9127d77b247STejun Heo 9137d77b247STejun Heo if ((link->device->class == ATA_DEV_ATAPI) && 9147d77b247STejun Heo (link->device->flags & ATA_DFLAG_AN)) 9157d77b247STejun Heo ata_scsi_media_change_notify(link->device); 9167d77b247STejun Heo } 9177d77b247STejun Heo 9187d77b247STejun Heo /* If PMP is reporting that PHY status of some 9197d77b247STejun Heo * downstream ports has changed, schedule EH. 9207d77b247STejun Heo */ 9217d77b247STejun Heo if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 9227d77b247STejun Heo ata_port_schedule_eh(ap); 9237d77b247STejun Heo return 1; 9247d77b247STejun Heo } 9257d77b247STejun Heo 9267d77b247STejun Heo return 0; 9277d77b247STejun Heo } 9287d77b247STejun Heo } 9297d77b247STejun Heo 9307d77b247STejun Heo /** 931c6fd2807SJeff Garzik * ata_eh_freeze_port - EH helper to freeze port 932c6fd2807SJeff Garzik * @ap: ATA port to freeze 933c6fd2807SJeff Garzik * 934c6fd2807SJeff Garzik * Freeze @ap. 935c6fd2807SJeff Garzik * 936c6fd2807SJeff Garzik * LOCKING: 937c6fd2807SJeff Garzik * None. 
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.
To be used from EH. 1019c6fd2807SJeff Garzik * 1020c6fd2807SJeff Garzik * SCSI midlayer limits the number of retries to scmd->allowed. 1021c6fd2807SJeff Garzik * scmd->retries is decremented for commands which get retried 1022c6fd2807SJeff Garzik * due to unrelated failures (qc->err_mask is zero). 1023c6fd2807SJeff Garzik */ 1024c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1025c6fd2807SJeff Garzik { 1026c6fd2807SJeff Garzik struct scsi_cmnd *scmd = qc->scsicmd; 1027c6fd2807SJeff Garzik if (!qc->err_mask && scmd->retries) 1028c6fd2807SJeff Garzik scmd->retries--; 1029c6fd2807SJeff Garzik __ata_eh_qc_complete(qc); 1030c6fd2807SJeff Garzik } 1031c6fd2807SJeff Garzik 1032c6fd2807SJeff Garzik /** 1033c6fd2807SJeff Garzik * ata_eh_detach_dev - detach ATA device 1034c6fd2807SJeff Garzik * @dev: ATA device to detach 1035c6fd2807SJeff Garzik * 1036c6fd2807SJeff Garzik * Detach @dev. 1037c6fd2807SJeff Garzik * 1038c6fd2807SJeff Garzik * LOCKING: 1039c6fd2807SJeff Garzik * None. 1040c6fd2807SJeff Garzik */ 1041fb7fd614STejun Heo void ata_eh_detach_dev(struct ata_device *dev) 1042c6fd2807SJeff Garzik { 1043f58229f8STejun Heo struct ata_link *link = dev->link; 1044f58229f8STejun Heo struct ata_port *ap = link->ap; 1045c6fd2807SJeff Garzik unsigned long flags; 1046c6fd2807SJeff Garzik 1047c6fd2807SJeff Garzik ata_dev_disable(dev); 1048c6fd2807SJeff Garzik 1049c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1050c6fd2807SJeff Garzik 1051c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_DETACH; 1052c6fd2807SJeff Garzik 1053c6fd2807SJeff Garzik if (ata_scsi_offline_dev(dev)) { 1054c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_DETACHED; 1055c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1056c6fd2807SJeff Garzik } 1057c6fd2807SJeff Garzik 1058c6fd2807SJeff Garzik /* clear per-dev EH actions */ 1059f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1060f58229f8STejun Heo ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 1061c6fd2807SJeff Garzik 1062c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1063c6fd2807SJeff Garzik } 1064c6fd2807SJeff Garzik 1065c6fd2807SJeff Garzik /** 1066c6fd2807SJeff Garzik * ata_eh_about_to_do - about to perform eh_action 1067955e57dfSTejun Heo * @link: target ATA link 1068c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1069c6fd2807SJeff Garzik * @action: action about to be performed 1070c6fd2807SJeff Garzik * 1071c6fd2807SJeff Garzik * Called just before performing EH actions to clear related bits 1072955e57dfSTejun Heo * in @link->eh_info such that eh actions are not unnecessarily 1073955e57dfSTejun Heo * repeated. 1074c6fd2807SJeff Garzik * 1075c6fd2807SJeff Garzik * LOCKING: 1076c6fd2807SJeff Garzik * None. 
1077c6fd2807SJeff Garzik */ 1078fb7fd614STejun Heo void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1079c6fd2807SJeff Garzik unsigned int action) 1080c6fd2807SJeff Garzik { 1081955e57dfSTejun Heo struct ata_port *ap = link->ap; 1082955e57dfSTejun Heo struct ata_eh_info *ehi = &link->eh_info; 1083955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1084c6fd2807SJeff Garzik unsigned long flags; 1085c6fd2807SJeff Garzik 1086c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1087c6fd2807SJeff Garzik 1088955e57dfSTejun Heo ata_eh_clear_action(link, dev, ehi, action); 1089c6fd2807SJeff Garzik 1090c6fd2807SJeff Garzik if (!(ehc->i.flags & ATA_EHI_QUIET)) 1091c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_RECOVERED; 1092c6fd2807SJeff Garzik 1093c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1094c6fd2807SJeff Garzik } 1095c6fd2807SJeff Garzik 1096c6fd2807SJeff Garzik /** 1097c6fd2807SJeff Garzik * ata_eh_done - EH action complete 1098c6fd2807SJeff Garzik * @ap: target ATA port 1099c6fd2807SJeff Garzik * @dev: target ATA dev for per-dev action (can be NULL) 1100c6fd2807SJeff Garzik * @action: action just completed 1101c6fd2807SJeff Garzik * 1102c6fd2807SJeff Garzik * Called right after performing EH actions to clear related bits 1103955e57dfSTejun Heo * in @link->eh_context. 1104c6fd2807SJeff Garzik * 1105c6fd2807SJeff Garzik * LOCKING: 1106c6fd2807SJeff Garzik * None. 1107c6fd2807SJeff Garzik */ 1108fb7fd614STejun Heo void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1109c6fd2807SJeff Garzik unsigned int action) 1110c6fd2807SJeff Garzik { 1111955e57dfSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 11129af5c9c9STejun Heo 1113955e57dfSTejun Heo ata_eh_clear_action(link, dev, &ehc->i, action); 1114c6fd2807SJeff Garzik } 1115c6fd2807SJeff Garzik 1116c6fd2807SJeff Garzik /** 1117c6fd2807SJeff Garzik * ata_err_string - convert err_mask to descriptive string 1118c6fd2807SJeff Garzik * @err_mask: error mask to convert to string 1119c6fd2807SJeff Garzik * 1120c6fd2807SJeff Garzik * Convert @err_mask to descriptive string. Errors are 1121c6fd2807SJeff Garzik * prioritized according to severity and only the most severe 1122c6fd2807SJeff Garzik * error is reported. 1123c6fd2807SJeff Garzik * 1124c6fd2807SJeff Garzik * LOCKING: 1125c6fd2807SJeff Garzik * None. 
1126c6fd2807SJeff Garzik * 1127c6fd2807SJeff Garzik * RETURNS: 1128c6fd2807SJeff Garzik * Descriptive string for @err_mask 1129c6fd2807SJeff Garzik */ 1130c6fd2807SJeff Garzik static const char *ata_err_string(unsigned int err_mask) 1131c6fd2807SJeff Garzik { 1132c6fd2807SJeff Garzik if (err_mask & AC_ERR_HOST_BUS) 1133c6fd2807SJeff Garzik return "host bus error"; 1134c6fd2807SJeff Garzik if (err_mask & AC_ERR_ATA_BUS) 1135c6fd2807SJeff Garzik return "ATA bus error"; 1136c6fd2807SJeff Garzik if (err_mask & AC_ERR_TIMEOUT) 1137c6fd2807SJeff Garzik return "timeout"; 1138c6fd2807SJeff Garzik if (err_mask & AC_ERR_HSM) 1139c6fd2807SJeff Garzik return "HSM violation"; 1140c6fd2807SJeff Garzik if (err_mask & AC_ERR_SYSTEM) 1141c6fd2807SJeff Garzik return "internal error"; 1142c6fd2807SJeff Garzik if (err_mask & AC_ERR_MEDIA) 1143c6fd2807SJeff Garzik return "media error"; 1144c6fd2807SJeff Garzik if (err_mask & AC_ERR_INVALID) 1145c6fd2807SJeff Garzik return "invalid argument"; 1146c6fd2807SJeff Garzik if (err_mask & AC_ERR_DEV) 1147c6fd2807SJeff Garzik return "device error"; 1148c6fd2807SJeff Garzik return "unknown error"; 1149c6fd2807SJeff Garzik } 1150c6fd2807SJeff Garzik 1151c6fd2807SJeff Garzik /** 1152c6fd2807SJeff Garzik * ata_read_log_page - read a specific log page 1153c6fd2807SJeff Garzik * @dev: target device 1154c6fd2807SJeff Garzik * @page: page to read 1155c6fd2807SJeff Garzik * @buf: buffer to store read page 1156c6fd2807SJeff Garzik * @sectors: number of sectors to read 1157c6fd2807SJeff Garzik * 1158c6fd2807SJeff Garzik * Read log page using READ_LOG_EXT command. 1159c6fd2807SJeff Garzik * 1160c6fd2807SJeff Garzik * LOCKING: 1161c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1162c6fd2807SJeff Garzik * 1163c6fd2807SJeff Garzik * RETURNS: 1164c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask otherwise. 1165c6fd2807SJeff Garzik */ 1166c6fd2807SJeff Garzik static unsigned int ata_read_log_page(struct ata_device *dev, 1167c6fd2807SJeff Garzik u8 page, void *buf, unsigned int sectors) 1168c6fd2807SJeff Garzik { 1169c6fd2807SJeff Garzik struct ata_taskfile tf; 1170c6fd2807SJeff Garzik unsigned int err_mask; 1171c6fd2807SJeff Garzik 1172c6fd2807SJeff Garzik DPRINTK("read log page - page %d\n", page); 1173c6fd2807SJeff Garzik 1174c6fd2807SJeff Garzik ata_tf_init(dev, &tf); 1175c6fd2807SJeff Garzik tf.command = ATA_CMD_READ_LOG_EXT; 1176c6fd2807SJeff Garzik tf.lbal = page; 1177c6fd2807SJeff Garzik tf.nsect = sectors; 1178c6fd2807SJeff Garzik tf.hob_nsect = sectors >> 8; 1179c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1180c6fd2807SJeff Garzik tf.protocol = ATA_PROT_PIO; 1181c6fd2807SJeff Garzik 1182c6fd2807SJeff Garzik err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 11832b789108STejun Heo buf, sectors * ATA_SECT_SIZE, 0); 1184c6fd2807SJeff Garzik 1185c6fd2807SJeff Garzik DPRINTK("EXIT, err_mask=%x\n", err_mask); 1186c6fd2807SJeff Garzik return err_mask; 1187c6fd2807SJeff Garzik } 1188c6fd2807SJeff Garzik 1189c6fd2807SJeff Garzik /** 1190c6fd2807SJeff Garzik * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1191c6fd2807SJeff Garzik * @dev: Device to read log page 10h from 1192c6fd2807SJeff Garzik * @tag: Resulting tag of the failed command 1193c6fd2807SJeff Garzik * @tf: Resulting taskfile registers of the failed command 1194c6fd2807SJeff Garzik * 1195c6fd2807SJeff Garzik * Read log page 10h to obtain NCQ error details and clear error 1196c6fd2807SJeff Garzik * condition. 
1197c6fd2807SJeff Garzik * 1198c6fd2807SJeff Garzik * LOCKING: 1199c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1200c6fd2807SJeff Garzik * 1201c6fd2807SJeff Garzik * RETURNS: 1202c6fd2807SJeff Garzik * 0 on success, -errno otherwise. 1203c6fd2807SJeff Garzik */ 1204c6fd2807SJeff Garzik static int ata_eh_read_log_10h(struct ata_device *dev, 1205c6fd2807SJeff Garzik int *tag, struct ata_taskfile *tf) 1206c6fd2807SJeff Garzik { 12079af5c9c9STejun Heo u8 *buf = dev->link->ap->sector_buf; 1208c6fd2807SJeff Garzik unsigned int err_mask; 1209c6fd2807SJeff Garzik u8 csum; 1210c6fd2807SJeff Garzik int i; 1211c6fd2807SJeff Garzik 1212c6fd2807SJeff Garzik err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1213c6fd2807SJeff Garzik if (err_mask) 1214c6fd2807SJeff Garzik return -EIO; 1215c6fd2807SJeff Garzik 1216c6fd2807SJeff Garzik csum = 0; 1217c6fd2807SJeff Garzik for (i = 0; i < ATA_SECT_SIZE; i++) 1218c6fd2807SJeff Garzik csum += buf[i]; 1219c6fd2807SJeff Garzik if (csum) 1220c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_WARNING, 1221c6fd2807SJeff Garzik "invalid checksum 0x%x on log page 10h\n", csum); 1222c6fd2807SJeff Garzik 1223c6fd2807SJeff Garzik if (buf[0] & 0x80) 1224c6fd2807SJeff Garzik return -ENOENT; 1225c6fd2807SJeff Garzik 1226c6fd2807SJeff Garzik *tag = buf[0] & 0x1f; 1227c6fd2807SJeff Garzik 1228c6fd2807SJeff Garzik tf->command = buf[2]; 1229c6fd2807SJeff Garzik tf->feature = buf[3]; 1230c6fd2807SJeff Garzik tf->lbal = buf[4]; 1231c6fd2807SJeff Garzik tf->lbam = buf[5]; 1232c6fd2807SJeff Garzik tf->lbah = buf[6]; 1233c6fd2807SJeff Garzik tf->device = buf[7]; 1234c6fd2807SJeff Garzik tf->hob_lbal = buf[8]; 1235c6fd2807SJeff Garzik tf->hob_lbam = buf[9]; 1236c6fd2807SJeff Garzik tf->hob_lbah = buf[10]; 1237c6fd2807SJeff Garzik tf->nsect = buf[12]; 1238c6fd2807SJeff Garzik tf->hob_nsect = buf[13]; 1239c6fd2807SJeff Garzik 1240c6fd2807SJeff Garzik return 0; 1241c6fd2807SJeff Garzik } 1242c6fd2807SJeff Garzik 1243c6fd2807SJeff Garzik /** 1244c6fd2807SJeff Garzik * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1245c6fd2807SJeff Garzik * @dev: device to perform REQUEST_SENSE to 1246c6fd2807SJeff Garzik * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1247c6fd2807SJeff Garzik * 1248c6fd2807SJeff Garzik * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1249c6fd2807SJeff Garzik * SENSE. This function is EH helper. 1250c6fd2807SJeff Garzik * 1251c6fd2807SJeff Garzik * LOCKING: 1252c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1253c6fd2807SJeff Garzik * 1254c6fd2807SJeff Garzik * RETURNS: 1255c6fd2807SJeff Garzik * 0 on success, AC_ERR_* mask on failure 1256c6fd2807SJeff Garzik */ 125756287768SAlbert Lee static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc) 1258c6fd2807SJeff Garzik { 125956287768SAlbert Lee struct ata_device *dev = qc->dev; 126056287768SAlbert Lee unsigned char *sense_buf = qc->scsicmd->sense_buffer; 12619af5c9c9STejun Heo struct ata_port *ap = dev->link->ap; 1262c6fd2807SJeff Garzik struct ata_taskfile tf; 1263c6fd2807SJeff Garzik u8 cdb[ATAPI_CDB_LEN]; 1264c6fd2807SJeff Garzik 1265c6fd2807SJeff Garzik DPRINTK("ATAPI request sense\n"); 1266c6fd2807SJeff Garzik 1267c6fd2807SJeff Garzik /* FIXME: is this needed? 
*/ 1268c6fd2807SJeff Garzik memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1269c6fd2807SJeff Garzik 127056287768SAlbert Lee /* initialize sense_buf with the error register, 127156287768SAlbert Lee * for the case where they are -not- overwritten 127256287768SAlbert Lee */ 1273c6fd2807SJeff Garzik sense_buf[0] = 0x70; 127456287768SAlbert Lee sense_buf[2] = qc->result_tf.feature >> 4; 127556287768SAlbert Lee 127656287768SAlbert Lee /* some devices time out if garbage left in tf */ 127756287768SAlbert Lee ata_tf_init(dev, &tf); 1278c6fd2807SJeff Garzik 1279c6fd2807SJeff Garzik memset(cdb, 0, ATAPI_CDB_LEN); 1280c6fd2807SJeff Garzik cdb[0] = REQUEST_SENSE; 1281c6fd2807SJeff Garzik cdb[4] = SCSI_SENSE_BUFFERSIZE; 1282c6fd2807SJeff Garzik 1283c6fd2807SJeff Garzik tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1284c6fd2807SJeff Garzik tf.command = ATA_CMD_PACKET; 1285c6fd2807SJeff Garzik 1286c6fd2807SJeff Garzik /* is it pointless to prefer PIO for "safety reasons"? */ 1287c6fd2807SJeff Garzik if (ap->flags & ATA_FLAG_PIO_DMA) { 12880dc36888STejun Heo tf.protocol = ATAPI_PROT_DMA; 1289c6fd2807SJeff Garzik tf.feature |= ATAPI_PKT_DMA; 1290c6fd2807SJeff Garzik } else { 12910dc36888STejun Heo tf.protocol = ATAPI_PROT_PIO; 1292f2dfc1a1STejun Heo tf.lbam = SCSI_SENSE_BUFFERSIZE; 1293f2dfc1a1STejun Heo tf.lbah = 0; 1294c6fd2807SJeff Garzik } 1295c6fd2807SJeff Garzik 1296c6fd2807SJeff Garzik return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 12972b789108STejun Heo sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1298c6fd2807SJeff Garzik } 1299c6fd2807SJeff Garzik 1300c6fd2807SJeff Garzik /** 1301c6fd2807SJeff Garzik * ata_eh_analyze_serror - analyze SError for a failed port 13020260731fSTejun Heo * @link: ATA link to analyze SError for 1303c6fd2807SJeff Garzik * 1304c6fd2807SJeff Garzik * Analyze SError if available and further determine cause of 1305c6fd2807SJeff Garzik * failure. 1306c6fd2807SJeff Garzik * 1307c6fd2807SJeff Garzik * LOCKING: 1308c6fd2807SJeff Garzik * None. 1309c6fd2807SJeff Garzik */ 13100260731fSTejun Heo static void ata_eh_analyze_serror(struct ata_link *link) 1311c6fd2807SJeff Garzik { 13120260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1313c6fd2807SJeff Garzik u32 serror = ehc->i.serror; 1314c6fd2807SJeff Garzik unsigned int err_mask = 0, action = 0; 1315f9df58cbSTejun Heo u32 hotplug_mask; 1316c6fd2807SJeff Garzik 1317e0614db2STejun Heo if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1318c6fd2807SJeff Garzik err_mask |= AC_ERR_ATA_BUS; 1319cf480626STejun Heo action |= ATA_EH_RESET; 1320c6fd2807SJeff Garzik } 1321c6fd2807SJeff Garzik if (serror & SERR_PROTOCOL) { 1322c6fd2807SJeff Garzik err_mask |= AC_ERR_HSM; 1323cf480626STejun Heo action |= ATA_EH_RESET; 1324c6fd2807SJeff Garzik } 1325c6fd2807SJeff Garzik if (serror & SERR_INTERNAL) { 1326c6fd2807SJeff Garzik err_mask |= AC_ERR_SYSTEM; 1327cf480626STejun Heo action |= ATA_EH_RESET; 1328c6fd2807SJeff Garzik } 1329f9df58cbSTejun Heo 1330f9df58cbSTejun Heo /* Determine whether a hotplug event has occurred. Both 1331f9df58cbSTejun Heo * SError.N/X are considered hotplug events for enabled or 1332f9df58cbSTejun Heo * host links. For disabled PMP links, only N bit is 1333f9df58cbSTejun Heo * considered as X bit is left at 1 for link plugging. 
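 * For example, a disabled PMP link reporting only SError.X (DevExch)
 * is not flagged as hotplugged here; SError.N (PhyRdyChg) is required
 * for such links.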
1334f9df58cbSTejun Heo */ 1335f9df58cbSTejun Heo hotplug_mask = 0; 1336f9df58cbSTejun Heo 1337f9df58cbSTejun Heo if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1338f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1339f9df58cbSTejun Heo else 1340f9df58cbSTejun Heo hotplug_mask = SERR_PHYRDY_CHG; 1341f9df58cbSTejun Heo 1342f9df58cbSTejun Heo if (serror & hotplug_mask) 1343c6fd2807SJeff Garzik ata_ehi_hotplugged(&ehc->i); 1344c6fd2807SJeff Garzik 1345c6fd2807SJeff Garzik ehc->i.err_mask |= err_mask; 1346c6fd2807SJeff Garzik ehc->i.action |= action; 1347c6fd2807SJeff Garzik } 1348c6fd2807SJeff Garzik 1349c6fd2807SJeff Garzik /** 1350c6fd2807SJeff Garzik * ata_eh_analyze_ncq_error - analyze NCQ error 13510260731fSTejun Heo * @link: ATA link to analyze NCQ error for 1352c6fd2807SJeff Garzik * 1353c6fd2807SJeff Garzik * Read log page 10h, determine the offending qc and acquire 1354c6fd2807SJeff Garzik * error status TF. For NCQ device errors, all LLDDs have to do 1355c6fd2807SJeff Garzik * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1356c6fd2807SJeff Garzik * care of the rest. 1357c6fd2807SJeff Garzik * 1358c6fd2807SJeff Garzik * LOCKING: 1359c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1360c6fd2807SJeff Garzik */ 136110acf3b0SMark Lord void ata_eh_analyze_ncq_error(struct ata_link *link) 1362c6fd2807SJeff Garzik { 13630260731fSTejun Heo struct ata_port *ap = link->ap; 13640260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 13650260731fSTejun Heo struct ata_device *dev = link->device; 1366c6fd2807SJeff Garzik struct ata_queued_cmd *qc; 1367c6fd2807SJeff Garzik struct ata_taskfile tf; 1368c6fd2807SJeff Garzik int tag, rc; 1369c6fd2807SJeff Garzik 1370c6fd2807SJeff Garzik /* if frozen, we can't do much */ 1371c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1372c6fd2807SJeff Garzik return; 1373c6fd2807SJeff Garzik 1374c6fd2807SJeff Garzik /* is it NCQ device error? */ 13750260731fSTejun Heo if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1376c6fd2807SJeff Garzik return; 1377c6fd2807SJeff Garzik 1378c6fd2807SJeff Garzik /* has LLDD analyzed already? 
*/ 1379c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1380c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1381c6fd2807SJeff Garzik 1382c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1383c6fd2807SJeff Garzik continue; 1384c6fd2807SJeff Garzik 1385c6fd2807SJeff Garzik if (qc->err_mask) 1386c6fd2807SJeff Garzik return; 1387c6fd2807SJeff Garzik } 1388c6fd2807SJeff Garzik 1389c6fd2807SJeff Garzik /* okay, this error is ours */ 1390c6fd2807SJeff Garzik rc = ata_eh_read_log_10h(dev, &tag, &tf); 1391c6fd2807SJeff Garzik if (rc) { 13920260731fSTejun Heo ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1393c6fd2807SJeff Garzik "(errno=%d)\n", rc); 1394c6fd2807SJeff Garzik return; 1395c6fd2807SJeff Garzik } 1396c6fd2807SJeff Garzik 13970260731fSTejun Heo if (!(link->sactive & (1 << tag))) { 13980260731fSTejun Heo ata_link_printk(link, KERN_ERR, "log page 10h reported " 1399c6fd2807SJeff Garzik "inactive tag %d\n", tag); 1400c6fd2807SJeff Garzik return; 1401c6fd2807SJeff Garzik } 1402c6fd2807SJeff Garzik 1403c6fd2807SJeff Garzik /* we've got the perpetrator, condemn it */ 1404c6fd2807SJeff Garzik qc = __ata_qc_from_tag(ap, tag); 1405c6fd2807SJeff Garzik memcpy(&qc->result_tf, &tf, sizeof(tf)); 1406a6116c9eSMark Lord qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 14075335b729STejun Heo qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1408c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_DEV; 1409c6fd2807SJeff Garzik } 1410c6fd2807SJeff Garzik 1411c6fd2807SJeff Garzik /** 1412c6fd2807SJeff Garzik * ata_eh_analyze_tf - analyze taskfile of a failed qc 1413c6fd2807SJeff Garzik * @qc: qc to analyze 1414c6fd2807SJeff Garzik * @tf: Taskfile registers to analyze 1415c6fd2807SJeff Garzik * 1416c6fd2807SJeff Garzik * Analyze taskfile of @qc and further determine cause of 1417c6fd2807SJeff Garzik * failure. This function also requests ATAPI sense data if 1418c6fd2807SJeff Garzik * avaliable. 1419c6fd2807SJeff Garzik * 1420c6fd2807SJeff Garzik * LOCKING: 1421c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
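 *
 * Any status other than plain DRDY (i.e. BSY or DRQ still set) is
 * treated as an HSM violation and forces a reset; otherwise ERR/DF
 * and the error register bits (ICRC, UNC, IDNF) refine err_mask for
 * ATA devices, while ATAPI devices get a REQUEST SENSE issued.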
1422c6fd2807SJeff Garzik * 1423c6fd2807SJeff Garzik * RETURNS: 1424c6fd2807SJeff Garzik * Determined recovery action 1425c6fd2807SJeff Garzik */ 1426c6fd2807SJeff Garzik static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1427c6fd2807SJeff Garzik const struct ata_taskfile *tf) 1428c6fd2807SJeff Garzik { 1429c6fd2807SJeff Garzik unsigned int tmp, action = 0; 1430c6fd2807SJeff Garzik u8 stat = tf->command, err = tf->feature; 1431c6fd2807SJeff Garzik 1432c6fd2807SJeff Garzik if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1433c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_HSM; 1434cf480626STejun Heo return ATA_EH_RESET; 1435c6fd2807SJeff Garzik } 1436c6fd2807SJeff Garzik 1437a51d644aSTejun Heo if (stat & (ATA_ERR | ATA_DF)) 1438a51d644aSTejun Heo qc->err_mask |= AC_ERR_DEV; 1439a51d644aSTejun Heo else 1440c6fd2807SJeff Garzik return 0; 1441c6fd2807SJeff Garzik 1442c6fd2807SJeff Garzik switch (qc->dev->class) { 1443c6fd2807SJeff Garzik case ATA_DEV_ATA: 1444c6fd2807SJeff Garzik if (err & ATA_ICRC) 1445c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_ATA_BUS; 1446c6fd2807SJeff Garzik if (err & ATA_UNC) 1447c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_MEDIA; 1448c6fd2807SJeff Garzik if (err & ATA_IDNF) 1449c6fd2807SJeff Garzik qc->err_mask |= AC_ERR_INVALID; 1450c6fd2807SJeff Garzik break; 1451c6fd2807SJeff Garzik 1452c6fd2807SJeff Garzik case ATA_DEV_ATAPI: 1453a569a30dSTejun Heo if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 145456287768SAlbert Lee tmp = atapi_eh_request_sense(qc); 1455c6fd2807SJeff Garzik if (!tmp) { 1456a569a30dSTejun Heo /* ATA_QCFLAG_SENSE_VALID is used to 1457a569a30dSTejun Heo * tell atapi_qc_complete() that sense 1458a569a30dSTejun Heo * data is already valid. 1459c6fd2807SJeff Garzik * 1460c6fd2807SJeff Garzik * TODO: interpret sense data and set 1461c6fd2807SJeff Garzik * appropriate err_mask. 
1462c6fd2807SJeff Garzik */ 1463c6fd2807SJeff Garzik qc->flags |= ATA_QCFLAG_SENSE_VALID; 1464c6fd2807SJeff Garzik } else 1465c6fd2807SJeff Garzik qc->err_mask |= tmp; 1466c6fd2807SJeff Garzik } 1467a569a30dSTejun Heo } 1468c6fd2807SJeff Garzik 1469c6fd2807SJeff Garzik if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1470cf480626STejun Heo action |= ATA_EH_RESET; 1471c6fd2807SJeff Garzik 1472c6fd2807SJeff Garzik return action; 1473c6fd2807SJeff Garzik } 1474c6fd2807SJeff Garzik 147576326ac1STejun Heo static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 147676326ac1STejun Heo int *xfer_ok) 1477c6fd2807SJeff Garzik { 147876326ac1STejun Heo int base = 0; 147976326ac1STejun Heo 148076326ac1STejun Heo if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 148176326ac1STejun Heo *xfer_ok = 1; 148276326ac1STejun Heo 148376326ac1STejun Heo if (!*xfer_ok) 148475f9cafcSTejun Heo base = ATA_ECAT_DUBIOUS_NONE; 148576326ac1STejun Heo 14867d47e8d4STejun Heo if (err_mask & AC_ERR_ATA_BUS) 148776326ac1STejun Heo return base + ATA_ECAT_ATA_BUS; 1488c6fd2807SJeff Garzik 14897d47e8d4STejun Heo if (err_mask & AC_ERR_TIMEOUT) 149076326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 14917d47e8d4STejun Heo 14923884f7b0STejun Heo if (eflags & ATA_EFLAG_IS_IO) { 14937d47e8d4STejun Heo if (err_mask & AC_ERR_HSM) 149476326ac1STejun Heo return base + ATA_ECAT_TOUT_HSM; 14957d47e8d4STejun Heo if ((err_mask & 14967d47e8d4STejun Heo (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 149776326ac1STejun Heo return base + ATA_ECAT_UNK_DEV; 1498c6fd2807SJeff Garzik } 1499c6fd2807SJeff Garzik 1500c6fd2807SJeff Garzik return 0; 1501c6fd2807SJeff Garzik } 1502c6fd2807SJeff Garzik 15037d47e8d4STejun Heo struct speed_down_verdict_arg { 1504c6fd2807SJeff Garzik u64 since; 150576326ac1STejun Heo int xfer_ok; 15063884f7b0STejun Heo int nr_errors[ATA_ECAT_NR]; 1507c6fd2807SJeff Garzik }; 1508c6fd2807SJeff Garzik 15097d47e8d4STejun Heo static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1510c6fd2807SJeff Garzik { 15117d47e8d4STejun Heo struct speed_down_verdict_arg *arg = void_arg; 151276326ac1STejun Heo int cat; 1513c6fd2807SJeff Garzik 1514c6fd2807SJeff Garzik if (ent->timestamp < arg->since) 1515c6fd2807SJeff Garzik return -1; 1516c6fd2807SJeff Garzik 151776326ac1STejun Heo cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 151876326ac1STejun Heo &arg->xfer_ok); 15197d47e8d4STejun Heo arg->nr_errors[cat]++; 152076326ac1STejun Heo 1521c6fd2807SJeff Garzik return 0; 1522c6fd2807SJeff Garzik } 1523c6fd2807SJeff Garzik 1524c6fd2807SJeff Garzik /** 15257d47e8d4STejun Heo * ata_eh_speed_down_verdict - Determine speed down verdict 1526c6fd2807SJeff Garzik * @dev: Device of interest 1527c6fd2807SJeff Garzik * 1528c6fd2807SJeff Garzik * This function examines error ring of @dev and determines 15297d47e8d4STejun Heo * whether NCQ needs to be turned off, transfer speed should be 15307d47e8d4STejun Heo * stepped down, or falling back to PIO is necessary. 1531c6fd2807SJeff Garzik * 15323884f7b0STejun Heo * ECAT_ATA_BUS : ATA_BUS error for any command 1533c6fd2807SJeff Garzik * 15343884f7b0STejun Heo * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 15353884f7b0STejun Heo * IO commands 15367d47e8d4STejun Heo * 15373884f7b0STejun Heo * ECAT_UNK_DEV : Unknown DEV error for IO commands 1538c6fd2807SJeff Garzik * 153976326ac1STejun Heo * ECAT_DUBIOUS_* : Identical to above three but occurred while 154076326ac1STejun Heo * data transfer hasn't been verified. 
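 * (An error is counted as DUBIOUS when it was recorded while the
 * current transfer mode had not yet been verified by a successful
 * transfer, i.e. with ATA_EFLAG_DUBIOUS_XFER set on the ring entry.)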
154176326ac1STejun Heo * 15423884f7b0STejun Heo * Verdicts are 15437d47e8d4STejun Heo * 15443884f7b0STejun Heo * NCQ_OFF : Turn off NCQ. 15457d47e8d4STejun Heo * 15463884f7b0STejun Heo * SPEED_DOWN : Speed down transfer speed but don't fall back 15473884f7b0STejun Heo * to PIO. 15483884f7b0STejun Heo * 15493884f7b0STejun Heo * FALLBACK_TO_PIO : Fall back to PIO. 15503884f7b0STejun Heo * 15513884f7b0STejun Heo * Even if multiple verdicts are returned, only one action is 155276326ac1STejun Heo * taken per error. An action triggered by non-DUBIOUS errors 155376326ac1STejun Heo * clears ering, while one triggered by DUBIOUS_* errors doesn't. 155476326ac1STejun Heo * This is to expedite speed down decisions right after device is 155576326ac1STejun Heo * initially configured. 15563884f7b0STejun Heo * 155776326ac1STejun Heo * The following are speed down rules. #1 and #2 deal with 155876326ac1STejun Heo * DUBIOUS errors. 155976326ac1STejun Heo * 156076326ac1STejun Heo * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 156176326ac1STejun Heo * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 156276326ac1STejun Heo * 156376326ac1STejun Heo * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 156476326ac1STejun Heo * occurred during last 5 mins, NCQ_OFF. 156576326ac1STejun Heo * 156676326ac1STejun Heo * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 15673884f7b0STejun Heo * occurred during last 5 mins, FALLBACK_TO_PIO. 15683884f7b0STejun Heo * 156976326ac1STejun Heo * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 15703884f7b0STejun Heo * during last 10 mins, NCQ_OFF. 15713884f7b0STejun Heo * 157276326ac1STejun Heo * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 15733884f7b0STejun Heo * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 15747d47e8d4STejun Heo * 1575c6fd2807SJeff Garzik * LOCKING: 1576c6fd2807SJeff Garzik * Inherited from caller. 1577c6fd2807SJeff Garzik * 1578c6fd2807SJeff Garzik * RETURNS: 15797d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
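 *
 * For example, two DUBIOUS_UNK_DEV errors logged within the last
 * five minutes trip only rule #2 above and yield NCQ_OFF (plus
 * KEEP_ERRORS, so the error ring is preserved), while four ATA_BUS
 * errors logged between five and ten minutes ago trip only rule #5
 * and yield SPEED_DOWN.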
1580c6fd2807SJeff Garzik */ 15817d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1582c6fd2807SJeff Garzik { 15837d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 15847d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 15857d47e8d4STejun Heo struct speed_down_verdict_arg arg; 15867d47e8d4STejun Heo unsigned int verdict = 0; 1587c6fd2807SJeff Garzik 15883884f7b0STejun Heo /* scan past 5 mins of error history */ 15893884f7b0STejun Heo memset(&arg, 0, sizeof(arg)); 15903884f7b0STejun Heo arg.since = j64 - min(j64, j5mins); 15913884f7b0STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 15923884f7b0STejun Heo 159376326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 159476326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 159576326ac1STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN | 159676326ac1STejun Heo ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 159776326ac1STejun Heo 159876326ac1STejun Heo if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 159976326ac1STejun Heo arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 160076326ac1STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 160176326ac1STejun Heo 16023884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 16033884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1604663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 16053884f7b0STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 16063884f7b0STejun Heo 16077d47e8d4STejun Heo /* scan past 10 mins of error history */ 1608c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 16097d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 16107d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1611c6fd2807SJeff Garzik 16123884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 16133884f7b0STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 16147d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 16153884f7b0STejun Heo 16163884f7b0STejun Heo if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 16173884f7b0STejun Heo arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1618663f99b8STejun Heo arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 16197d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1620c6fd2807SJeff Garzik 16217d47e8d4STejun Heo return verdict; 1622c6fd2807SJeff Garzik } 1623c6fd2807SJeff Garzik 1624c6fd2807SJeff Garzik /** 1625c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1626c6fd2807SJeff Garzik * @dev: Failed device 16273884f7b0STejun Heo * @eflags: mask of ATA_EFLAG_* flags 1628c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1629c6fd2807SJeff Garzik * 1630c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1631c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1632c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1633c6fd2807SJeff Garzik * necessary. 1634c6fd2807SJeff Garzik * 1635c6fd2807SJeff Garzik * LOCKING: 1636c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1637c6fd2807SJeff Garzik * 1638c6fd2807SJeff Garzik * RETURNS: 16397d47e8d4STejun Heo * Determined recovery action. 
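 * ATA_EH_RESET is returned when the link speed or transfer mode was
 * actually lowered, 0 otherwise (including the case where only NCQ
 * was turned off).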
1640c6fd2807SJeff Garzik */ 16413884f7b0STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, 16423884f7b0STejun Heo unsigned int eflags, unsigned int err_mask) 1643c6fd2807SJeff Garzik { 16443884f7b0STejun Heo struct ata_link *link = dev->link; 164576326ac1STejun Heo int xfer_ok = 0; 16467d47e8d4STejun Heo unsigned int verdict; 16477d47e8d4STejun Heo unsigned int action = 0; 16487d47e8d4STejun Heo 16497d47e8d4STejun Heo /* don't bother if Cat-0 error */ 165076326ac1STejun Heo if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1651c6fd2807SJeff Garzik return 0; 1652c6fd2807SJeff Garzik 1653c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 16543884f7b0STejun Heo ata_ering_record(&dev->ering, eflags, err_mask); 16557d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1656c6fd2807SJeff Garzik 16577d47e8d4STejun Heo /* turn off NCQ? */ 16587d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 16597d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 16607d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 16617d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 16627d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 16637d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 16647d47e8d4STejun Heo goto done; 16657d47e8d4STejun Heo } 1666c6fd2807SJeff Garzik 16677d47e8d4STejun Heo /* speed down? */ 16687d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1669c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 16703884f7b0STejun Heo if (sata_down_spd_limit(link) == 0) { 1671cf480626STejun Heo action |= ATA_EH_RESET; 16727d47e8d4STejun Heo goto done; 16737d47e8d4STejun Heo } 1674c6fd2807SJeff Garzik 1675c6fd2807SJeff Garzik /* lower transfer mode */ 16767d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 16777d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 16787d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 16797d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 16807d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 16817d47e8d4STejun Heo int sel; 1682c6fd2807SJeff Garzik 16837d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 16847d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 16857d47e8d4STejun Heo else 16867d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 16877d47e8d4STejun Heo 16887d47e8d4STejun Heo dev->spdn_cnt++; 16897d47e8d4STejun Heo 16907d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1691cf480626STejun Heo action |= ATA_EH_RESET; 16927d47e8d4STejun Heo goto done; 16937d47e8d4STejun Heo } 16947d47e8d4STejun Heo } 16957d47e8d4STejun Heo } 16967d47e8d4STejun Heo 16977d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1698663f99b8STejun Heo * SATA ATA devices. Consider it only for PATA and SATAPI. 
16997d47e8d4STejun Heo */ 17007d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1701663f99b8STejun Heo (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 17027d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 17037d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 17047d47e8d4STejun Heo dev->spdn_cnt = 0; 1705cf480626STejun Heo action |= ATA_EH_RESET; 17067d47e8d4STejun Heo goto done; 17077d47e8d4STejun Heo } 17087d47e8d4STejun Heo } 17097d47e8d4STejun Heo 1710c6fd2807SJeff Garzik return 0; 17117d47e8d4STejun Heo done: 17127d47e8d4STejun Heo /* device has been slowed down, blow error history */ 171376326ac1STejun Heo if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 17147d47e8d4STejun Heo ata_ering_clear(&dev->ering); 17157d47e8d4STejun Heo return action; 1716c6fd2807SJeff Garzik } 1717c6fd2807SJeff Garzik 1718c6fd2807SJeff Garzik /** 17199b1e2658STejun Heo * ata_eh_link_autopsy - analyze error and determine recovery action 17209b1e2658STejun Heo * @link: host link to perform autopsy on 1721c6fd2807SJeff Garzik * 17220260731fSTejun Heo * Analyze why @link failed and determine which recovery actions 17230260731fSTejun Heo * are needed. This function also sets more detailed AC_ERR_* 17240260731fSTejun Heo * values and fills sense data for ATAPI CHECK SENSE. 1725c6fd2807SJeff Garzik * 1726c6fd2807SJeff Garzik * LOCKING: 1727c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1728c6fd2807SJeff Garzik */ 17299b1e2658STejun Heo static void ata_eh_link_autopsy(struct ata_link *link) 1730c6fd2807SJeff Garzik { 17310260731fSTejun Heo struct ata_port *ap = link->ap; 1732936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 1733dfcc173dSTejun Heo struct ata_device *dev; 17343884f7b0STejun Heo unsigned int all_err_mask = 0, eflags = 0; 17353884f7b0STejun Heo int tag; 1736c6fd2807SJeff Garzik u32 serror; 1737c6fd2807SJeff Garzik int rc; 1738c6fd2807SJeff Garzik 1739c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1740c6fd2807SJeff Garzik 1741c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1742c6fd2807SJeff Garzik return; 1743c6fd2807SJeff Garzik 1744c6fd2807SJeff Garzik /* obtain and analyze SError */ 1745936fd732STejun Heo rc = sata_scr_read(link, SCR_ERROR, &serror); 1746c6fd2807SJeff Garzik if (rc == 0) { 1747c6fd2807SJeff Garzik ehc->i.serror |= serror; 17480260731fSTejun Heo ata_eh_analyze_serror(link); 17494e57c517STejun Heo } else if (rc != -EOPNOTSUPP) { 1750cf480626STejun Heo /* SError read failed, force reset and probing */ 1751b558edddSTejun Heo ehc->i.probe_mask |= ATA_ALL_DEVICES; 1752cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 17534e57c517STejun Heo ehc->i.err_mask |= AC_ERR_OTHER; 17544e57c517STejun Heo } 1755c6fd2807SJeff Garzik 1756c6fd2807SJeff Garzik /* analyze NCQ failure */ 17570260731fSTejun Heo ata_eh_analyze_ncq_error(link); 1758c6fd2807SJeff Garzik 1759c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1760c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1761c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1762c6fd2807SJeff Garzik 1763c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1764c6fd2807SJeff Garzik 1765c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1766c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1767c6fd2807SJeff Garzik 17680260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link) 1769c6fd2807SJeff Garzik continue; 1770c6fd2807SJeff Garzik 
1771c6fd2807SJeff Garzik /* inherit upper level err_mask */ 1772c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 1773c6fd2807SJeff Garzik 1774c6fd2807SJeff Garzik /* analyze TF */ 1775c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1776c6fd2807SJeff Garzik 1777c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 1778c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 1779c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 1780c6fd2807SJeff Garzik AC_ERR_INVALID); 1781c6fd2807SJeff Garzik 1782c6fd2807SJeff Garzik /* any real error trumps unknown error */ 1783c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 1784c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 1785c6fd2807SJeff Garzik 1786c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 1787f90f0828STejun Heo if (qc->flags & ATA_QCFLAG_SENSE_VALID) 1788c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1789c6fd2807SJeff Garzik 179003faab78STejun Heo /* determine whether the command is worth retrying */ 179103faab78STejun Heo if (!(qc->err_mask & AC_ERR_INVALID) && 179203faab78STejun Heo ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) 179303faab78STejun Heo qc->flags |= ATA_QCFLAG_RETRY; 179403faab78STejun Heo 1795c6fd2807SJeff Garzik /* accumulate error info */ 1796c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 1797c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 1798c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 17993884f7b0STejun Heo eflags |= ATA_EFLAG_IS_IO; 1800c6fd2807SJeff Garzik } 1801c6fd2807SJeff Garzik 1802c6fd2807SJeff Garzik /* enforce default EH actions */ 1803c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 1804c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1805cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 18063884f7b0STejun Heo else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 18073884f7b0STejun Heo (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 1808c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 1809c6fd2807SJeff Garzik 1810dfcc173dSTejun Heo /* If we have offending qcs and the associated failed device, 1811dfcc173dSTejun Heo * perform per-dev EH action only on the offending device. 
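 * Actions in ATA_EH_PERDEV_MASK are moved from the link-wide
 * ehc->i.action into ehc->i.dev_action[] of that device below.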
1812dfcc173dSTejun Heo */ 1813c6fd2807SJeff Garzik if (ehc->i.dev) { 1814c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 1815c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 1816c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 1817c6fd2807SJeff Garzik } 1818c6fd2807SJeff Garzik 18192695e366STejun Heo /* propagate timeout to host link */ 18202695e366STejun Heo if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 18212695e366STejun Heo ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 18222695e366STejun Heo 18232695e366STejun Heo /* record error and consider speeding down */ 1824dfcc173dSTejun Heo dev = ehc->i.dev; 18252695e366STejun Heo if (!dev && ((ata_link_max_devices(link) == 1 && 18262695e366STejun Heo ata_dev_enabled(link->device)))) 1827dfcc173dSTejun Heo dev = link->device; 1828dfcc173dSTejun Heo 182976326ac1STejun Heo if (dev) { 183076326ac1STejun Heo if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 183176326ac1STejun Heo eflags |= ATA_EFLAG_DUBIOUS_XFER; 18323884f7b0STejun Heo ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 183376326ac1STejun Heo } 1834dfcc173dSTejun Heo 1835c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 1836c6fd2807SJeff Garzik } 1837c6fd2807SJeff Garzik 1838c6fd2807SJeff Garzik /** 18399b1e2658STejun Heo * ata_eh_autopsy - analyze error and determine recovery action 18409b1e2658STejun Heo * @ap: host port to perform autopsy on 18419b1e2658STejun Heo * 18429b1e2658STejun Heo * Analyze all links of @ap and determine why they failed and 18439b1e2658STejun Heo * which recovery actions are needed. 18449b1e2658STejun Heo * 18459b1e2658STejun Heo * LOCKING: 18469b1e2658STejun Heo * Kernel thread context (may sleep). 18479b1e2658STejun Heo */ 1848fb7fd614STejun Heo void ata_eh_autopsy(struct ata_port *ap) 18499b1e2658STejun Heo { 18509b1e2658STejun Heo struct ata_link *link; 18519b1e2658STejun Heo 18522695e366STejun Heo ata_port_for_each_link(link, ap) 18539b1e2658STejun Heo ata_eh_link_autopsy(link); 18542695e366STejun Heo 18552695e366STejun Heo /* Autopsy of fanout ports can affect host link autopsy. 18562695e366STejun Heo * Perform host link autopsy last. 18572695e366STejun Heo */ 1858071f44b1STejun Heo if (sata_pmp_attached(ap)) 18592695e366STejun Heo ata_eh_link_autopsy(&ap->link); 18609b1e2658STejun Heo } 18619b1e2658STejun Heo 18629b1e2658STejun Heo /** 18639b1e2658STejun Heo * ata_eh_link_report - report error handling to user 18640260731fSTejun Heo * @link: ATA link EH is going on 1865c6fd2807SJeff Garzik * 1866c6fd2807SJeff Garzik * Report EH to user. 1867c6fd2807SJeff Garzik * 1868c6fd2807SJeff Garzik * LOCKING: 1869c6fd2807SJeff Garzik * None. 
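 *
 * The report consists of the exception summary (Emask, SAct, SErr,
 * action), the decoded SError bits and, for each failed qc on @link,
 * the issued command, the result taskfile and the decoded status and
 * error register bits.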
1870c6fd2807SJeff Garzik */ 18719b1e2658STejun Heo static void ata_eh_link_report(struct ata_link *link) 1872c6fd2807SJeff Garzik { 18730260731fSTejun Heo struct ata_port *ap = link->ap; 18740260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 1875c6fd2807SJeff Garzik const char *frozen, *desc; 1876a1e10f7eSTejun Heo char tries_buf[6]; 1877c6fd2807SJeff Garzik int tag, nr_failed = 0; 1878c6fd2807SJeff Garzik 187994ff3d54STejun Heo if (ehc->i.flags & ATA_EHI_QUIET) 188094ff3d54STejun Heo return; 188194ff3d54STejun Heo 1882c6fd2807SJeff Garzik desc = NULL; 1883c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 1884c6fd2807SJeff Garzik desc = ehc->i.desc; 1885c6fd2807SJeff Garzik 1886c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1887c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1888c6fd2807SJeff Garzik 1889e027bd36STejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || qc->dev->link != link || 1890e027bd36STejun Heo ((qc->flags & ATA_QCFLAG_QUIET) && 1891e027bd36STejun Heo qc->err_mask == AC_ERR_DEV)) 1892c6fd2807SJeff Garzik continue; 1893c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 1894c6fd2807SJeff Garzik continue; 1895c6fd2807SJeff Garzik 1896c6fd2807SJeff Garzik nr_failed++; 1897c6fd2807SJeff Garzik } 1898c6fd2807SJeff Garzik 1899c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 1900c6fd2807SJeff Garzik return; 1901c6fd2807SJeff Garzik 1902c6fd2807SJeff Garzik frozen = ""; 1903c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1904c6fd2807SJeff Garzik frozen = " frozen"; 1905c6fd2807SJeff Garzik 1906a1e10f7eSTejun Heo memset(tries_buf, 0, sizeof(tries_buf)); 1907a1e10f7eSTejun Heo if (ap->eh_tries < ATA_EH_MAX_TRIES) 1908a1e10f7eSTejun Heo snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 1909a1e10f7eSTejun Heo ap->eh_tries); 1910a1e10f7eSTejun Heo 1911c6fd2807SJeff Garzik if (ehc->i.dev) { 1912c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 1913a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 1914a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 1915a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 1916c6fd2807SJeff Garzik if (desc) 1917b64bbc39STejun Heo ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 1918c6fd2807SJeff Garzik } else { 19190260731fSTejun Heo ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 1920a1e10f7eSTejun Heo "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 1921a1e10f7eSTejun Heo ehc->i.err_mask, link->sactive, ehc->i.serror, 1922a1e10f7eSTejun Heo ehc->i.action, frozen, tries_buf); 1923c6fd2807SJeff Garzik if (desc) 19240260731fSTejun Heo ata_link_printk(link, KERN_ERR, "%s\n", desc); 1925c6fd2807SJeff Garzik } 1926c6fd2807SJeff Garzik 19271333e194SRobert Hancock if (ehc->i.serror) 19281333e194SRobert Hancock ata_port_printk(ap, KERN_ERR, 19291333e194SRobert Hancock "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 19301333e194SRobert Hancock ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 19311333e194SRobert Hancock ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 19321333e194SRobert Hancock ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 19331333e194SRobert Hancock ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 19341333e194SRobert Hancock ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 19351333e194SRobert Hancock ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 19361333e194SRobert Hancock ehc->i.serror & SERR_PHYRDY_CHG ? 
"PHYRdyChg " : "", 19371333e194SRobert Hancock ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 19381333e194SRobert Hancock ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 19391333e194SRobert Hancock ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 19401333e194SRobert Hancock ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 19411333e194SRobert Hancock ehc->i.serror & SERR_CRC ? "BadCRC " : "", 19421333e194SRobert Hancock ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 19431333e194SRobert Hancock ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 19441333e194SRobert Hancock ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 19451333e194SRobert Hancock ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 19461333e194SRobert Hancock ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 19471333e194SRobert Hancock 1948c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1949c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 19508a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 1951abb6a889STejun Heo const u8 *cdb = qc->cdb; 1952abb6a889STejun Heo char data_buf[20] = ""; 1953abb6a889STejun Heo char cdb_buf[70] = ""; 1954c6fd2807SJeff Garzik 19550260731fSTejun Heo if (!(qc->flags & ATA_QCFLAG_FAILED) || 19560260731fSTejun Heo qc->dev->link != link || !qc->err_mask) 1957c6fd2807SJeff Garzik continue; 1958c6fd2807SJeff Garzik 1959abb6a889STejun Heo if (qc->dma_dir != DMA_NONE) { 1960abb6a889STejun Heo static const char *dma_str[] = { 1961abb6a889STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 1962abb6a889STejun Heo [DMA_TO_DEVICE] = "out", 1963abb6a889STejun Heo [DMA_FROM_DEVICE] = "in", 1964abb6a889STejun Heo }; 1965abb6a889STejun Heo static const char *prot_str[] = { 1966abb6a889STejun Heo [ATA_PROT_PIO] = "pio", 1967abb6a889STejun Heo [ATA_PROT_DMA] = "dma", 1968abb6a889STejun Heo [ATA_PROT_NCQ] = "ncq", 19690dc36888STejun Heo [ATAPI_PROT_PIO] = "pio", 19700dc36888STejun Heo [ATAPI_PROT_DMA] = "dma", 1971abb6a889STejun Heo }; 1972abb6a889STejun Heo 1973abb6a889STejun Heo snprintf(data_buf, sizeof(data_buf), " %s %u %s", 1974abb6a889STejun Heo prot_str[qc->tf.protocol], qc->nbytes, 1975abb6a889STejun Heo dma_str[qc->dma_dir]); 1976abb6a889STejun Heo } 1977abb6a889STejun Heo 1978e39eec13SJeff Garzik if (ata_is_atapi(qc->tf.protocol)) 1979abb6a889STejun Heo snprintf(cdb_buf, sizeof(cdb_buf), 1980abb6a889STejun Heo "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 1981abb6a889STejun Heo "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 1982abb6a889STejun Heo cdb[0], cdb[1], cdb[2], cdb[3], 1983abb6a889STejun Heo cdb[4], cdb[5], cdb[6], cdb[7], 1984abb6a889STejun Heo cdb[8], cdb[9], cdb[10], cdb[11], 1985abb6a889STejun Heo cdb[12], cdb[13], cdb[14], cdb[15]); 1986abb6a889STejun Heo 19878a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 19888a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 1989abb6a889STejun Heo "tag %d%s\n %s" 19908a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 19915335b729STejun Heo "Emask 0x%x (%s)%s\n", 19928a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 19938a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 19948a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 19958a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 1996abb6a889STejun Heo cmd->device, qc->tag, data_buf, cdb_buf, 19978a937581STejun Heo res->command, res->feature, res->nsect, 19988a937581STejun Heo res->lbal, res->lbam, res->lbah, 19998a937581STejun 
Heo res->hob_feature, res->hob_nsect, 20008a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 20015335b729STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask), 20025335b729STejun Heo qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 20031333e194SRobert Hancock 20041333e194SRobert Hancock if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 20051333e194SRobert Hancock ATA_ERR)) { 20061333e194SRobert Hancock if (res->command & ATA_BUSY) 20071333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 20081333e194SRobert Hancock "status: { Busy }\n"); 20091333e194SRobert Hancock else 20101333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 20111333e194SRobert Hancock "status: { %s%s%s%s}\n", 20121333e194SRobert Hancock res->command & ATA_DRDY ? "DRDY " : "", 20131333e194SRobert Hancock res->command & ATA_DF ? "DF " : "", 20141333e194SRobert Hancock res->command & ATA_DRQ ? "DRQ " : "", 20151333e194SRobert Hancock res->command & ATA_ERR ? "ERR " : ""); 20161333e194SRobert Hancock } 20171333e194SRobert Hancock 20181333e194SRobert Hancock if (cmd->command != ATA_CMD_PACKET && 20191333e194SRobert Hancock (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 20201333e194SRobert Hancock ATA_ABORTED))) 20211333e194SRobert Hancock ata_dev_printk(qc->dev, KERN_ERR, 20221333e194SRobert Hancock "error: { %s%s%s%s}\n", 20231333e194SRobert Hancock res->feature & ATA_ICRC ? "ICRC " : "", 20241333e194SRobert Hancock res->feature & ATA_UNC ? "UNC " : "", 20251333e194SRobert Hancock res->feature & ATA_IDNF ? "IDNF " : "", 20261333e194SRobert Hancock res->feature & ATA_ABORTED ? "ABRT " : ""); 2027c6fd2807SJeff Garzik } 2028c6fd2807SJeff Garzik } 2029c6fd2807SJeff Garzik 20309b1e2658STejun Heo /** 20319b1e2658STejun Heo * ata_eh_report - report error handling to user 20329b1e2658STejun Heo * @ap: ATA port to report EH about 20339b1e2658STejun Heo * 20349b1e2658STejun Heo * Report EH to user. 20359b1e2658STejun Heo * 20369b1e2658STejun Heo * LOCKING: 20379b1e2658STejun Heo * None. 
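 *
 * Simply iterates over every link of @ap and calls
 * ata_eh_link_report() on each.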
20389b1e2658STejun Heo */ 2039fb7fd614STejun Heo void ata_eh_report(struct ata_port *ap) 20409b1e2658STejun Heo { 20419b1e2658STejun Heo struct ata_link *link; 20429b1e2658STejun Heo 20439b1e2658STejun Heo __ata_port_for_each_link(link, ap) 20449b1e2658STejun Heo ata_eh_link_report(link); 20459b1e2658STejun Heo } 20469b1e2658STejun Heo 2047cc0680a5STejun Heo static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2048d4b2bab4STejun Heo unsigned int *classes, unsigned long deadline) 2049c6fd2807SJeff Garzik { 2050f58229f8STejun Heo struct ata_device *dev; 2051c6fd2807SJeff Garzik 2052cc0680a5STejun Heo ata_link_for_each_dev(dev, link) 2053f58229f8STejun Heo classes[dev->devno] = ATA_DEV_UNKNOWN; 2054c6fd2807SJeff Garzik 2055f046519fSTejun Heo return reset(link, classes, deadline); 2056c6fd2807SJeff Garzik } 2057c6fd2807SJeff Garzik 2058ae791c05STejun Heo static int ata_eh_followup_srst_needed(struct ata_link *link, 2059ae791c05STejun Heo int rc, int classify, 2060c6fd2807SJeff Garzik const unsigned int *classes) 2061c6fd2807SJeff Garzik { 206245db2f6cSTejun Heo if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2063ae791c05STejun Heo return 0; 2064305d2a1aSTejun Heo if (rc == -EAGAIN) { 2065305d2a1aSTejun Heo if (classify) 2066c6fd2807SJeff Garzik return 1; 2067305d2a1aSTejun Heo rc = 0; 2068305d2a1aSTejun Heo } 2069c6fd2807SJeff Garzik if (rc != 0) 2070c6fd2807SJeff Garzik return 0; 2071071f44b1STejun Heo if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 20723495de73STejun Heo return 1; 2073c6fd2807SJeff Garzik return 0; 2074c6fd2807SJeff Garzik } 2075c6fd2807SJeff Garzik 2076fb7fd614STejun Heo int ata_eh_reset(struct ata_link *link, int classify, 2077c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2078c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2079c6fd2807SJeff Garzik { 2080416dc9edSTejun Heo const int max_tries = ARRAY_SIZE(ata_eh_reset_timeouts); 2081afaa5c37STejun Heo struct ata_port *ap = link->ap; 2082936fd732STejun Heo struct ata_eh_context *ehc = &link->eh_context; 2083c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 2084416dc9edSTejun Heo unsigned int lflags = link->flags; 2085c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 208631daabdaSTejun Heo int try = 0; 2087f58229f8STejun Heo struct ata_device *dev; 2088416dc9edSTejun Heo unsigned long deadline, now; 2089c6fd2807SJeff Garzik ata_reset_fn_t reset; 2090afaa5c37STejun Heo unsigned long flags; 2091416dc9edSTejun Heo u32 sstatus; 2092f046519fSTejun Heo int nr_known, rc; 2093c6fd2807SJeff Garzik 2094932648b0STejun Heo /* 2095932648b0STejun Heo * Prepare to reset 2096932648b0STejun Heo */ 2097*0a2c0f56STejun Heo now = jiffies; 2098*0a2c0f56STejun Heo deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN); 2099*0a2c0f56STejun Heo if (time_before(now, deadline)) 2100*0a2c0f56STejun Heo schedule_timeout_uninterruptible(deadline - now); 2101*0a2c0f56STejun Heo 2102afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2103afaa5c37STejun Heo ap->pflags |= ATA_PFLAG_RESETTING; 2104afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2105afaa5c37STejun Heo 2106cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2107*0a2c0f56STejun Heo ehc->last_reset = jiffies; 2108c6fd2807SJeff Garzik 2109cdeab114STejun Heo ata_link_for_each_dev(dev, link) { 2110cdeab114STejun Heo /* If we issue an SRST then an ATA drive (not ATAPI) 2111cdeab114STejun Heo * may change configuration and be in PIO0 
timing. If 2112cdeab114STejun Heo * we do a hard reset (or are coming from power on) 2113cdeab114STejun Heo * this is true for ATA or ATAPI. Until we've set a 2114cdeab114STejun Heo * suitable controller mode we should not touch the 2115cdeab114STejun Heo * bus as we may be talking too fast. 2116cdeab114STejun Heo */ 2117cdeab114STejun Heo dev->pio_mode = XFER_PIO_0; 2118cdeab114STejun Heo 2119cdeab114STejun Heo /* If the controller has a pio mode setup function 2120cdeab114STejun Heo * then use it to set the chipset to rights. Don't 2121cdeab114STejun Heo * touch the DMA setup as that will be dealt with when 2122cdeab114STejun Heo * configuring devices. 2123cdeab114STejun Heo */ 2124cdeab114STejun Heo if (ap->ops->set_piomode) 2125cdeab114STejun Heo ap->ops->set_piomode(ap, dev); 2126cdeab114STejun Heo } 2127cdeab114STejun Heo 2128cf480626STejun Heo /* prefer hardreset */ 2129932648b0STejun Heo reset = NULL; 2130cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 2131cf480626STejun Heo if (hardreset) { 2132cf480626STejun Heo reset = hardreset; 2133cf480626STejun Heo ehc->i.action = ATA_EH_HARDRESET; 21344f7faa3fSTejun Heo } else if (softreset) { 2135cf480626STejun Heo reset = softreset; 2136cf480626STejun Heo ehc->i.action = ATA_EH_SOFTRESET; 2137cf480626STejun Heo } 2138c6fd2807SJeff Garzik 2139c6fd2807SJeff Garzik if (prereset) { 2140341c2c95STejun Heo rc = prereset(link, 2141341c2c95STejun Heo ata_deadline(jiffies, ATA_EH_PRERESET_TIMEOUT)); 2142c6fd2807SJeff Garzik if (rc) { 2143c961922bSAlan Cox if (rc == -ENOENT) { 2144cc0680a5STejun Heo ata_link_printk(link, KERN_DEBUG, 21454aa9ab67STejun Heo "port disabled. ignoring.\n"); 2146cf480626STejun Heo ehc->i.action &= ~ATA_EH_RESET; 21474aa9ab67STejun Heo 2148936fd732STejun Heo ata_link_for_each_dev(dev, link) 2149f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 21504aa9ab67STejun Heo 21514aa9ab67STejun Heo rc = 0; 2152c961922bSAlan Cox } else 2153cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2154c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 2155fccb6ea5STejun Heo goto out; 2156c6fd2807SJeff Garzik } 2157c6fd2807SJeff Garzik 2158932648b0STejun Heo /* prereset() might have cleared ATA_EH_RESET. If so, 2159932648b0STejun Heo * bang classes and return. 2160932648b0STejun Heo */ 2161932648b0STejun Heo if (reset && !(ehc->i.action & ATA_EH_RESET)) { 2162936fd732STejun Heo ata_link_for_each_dev(dev, link) 2163f58229f8STejun Heo classes[dev->devno] = ATA_DEV_NONE; 2164fccb6ea5STejun Heo rc = 0; 2165fccb6ea5STejun Heo goto out; 2166c6fd2807SJeff Garzik } 2167932648b0STejun Heo } 2168c6fd2807SJeff Garzik 2169c6fd2807SJeff Garzik retry: 2170932648b0STejun Heo /* 2171932648b0STejun Heo * Perform reset 2172932648b0STejun Heo */ 2173*0a2c0f56STejun Heo ehc->last_reset = jiffies; 2174dc98c32cSTejun Heo if (ata_is_host_link(link)) 2175dc98c32cSTejun Heo ata_eh_freeze_port(ap); 2176dc98c32cSTejun Heo 2177341c2c95STejun Heo deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 217831daabdaSTejun Heo 2179932648b0STejun Heo if (reset) { 2180c6fd2807SJeff Garzik if (verbose) 2181cc0680a5STejun Heo ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2182c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 2183c6fd2807SJeff Garzik 2184c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 21850d64a233STejun Heo if (reset == hardreset) 21860d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 21870d64a233STejun Heo else 21880d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2189c6fd2807SJeff Garzik 2190cc0680a5STejun Heo rc = ata_do_reset(link, reset, classes, deadline); 2191c6fd2807SJeff Garzik 2192c6fd2807SJeff Garzik if (reset == hardreset && 2193ae791c05STejun Heo ata_eh_followup_srst_needed(link, rc, classify, classes)) { 2194c6fd2807SJeff Garzik /* okay, let's do follow-up softreset */ 2195c6fd2807SJeff Garzik reset = softreset; 2196c6fd2807SJeff Garzik 2197c6fd2807SJeff Garzik if (!reset) { 2198cc0680a5STejun Heo ata_link_printk(link, KERN_ERR, 2199c6fd2807SJeff Garzik "follow-up softreset required " 2200c6fd2807SJeff Garzik "but no softreset avaliable\n"); 2201fccb6ea5STejun Heo rc = -EINVAL; 220208cf69d0STejun Heo goto fail; 2203c6fd2807SJeff Garzik } 2204c6fd2807SJeff Garzik 2205cf480626STejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2206cc0680a5STejun Heo rc = ata_do_reset(link, reset, classes, deadline); 2207c6fd2807SJeff Garzik } 2208c6fd2807SJeff Garzik 2209416dc9edSTejun Heo /* -EAGAIN can happen if we skipped followup SRST */ 2210416dc9edSTejun Heo if (rc && rc != -EAGAIN) 2211416dc9edSTejun Heo goto fail; 2212932648b0STejun Heo } else { 2213932648b0STejun Heo if (verbose) 2214932648b0STejun Heo ata_link_printk(link, KERN_INFO, "no reset method " 2215932648b0STejun Heo "available, skipping reset\n"); 2216932648b0STejun Heo if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2217932648b0STejun Heo lflags |= ATA_LFLAG_ASSUME_ATA; 2218932648b0STejun Heo } 2219008a7896STejun Heo 2220932648b0STejun Heo /* 2221932648b0STejun Heo * Post-reset processing 2222932648b0STejun Heo */ 2223ae791c05STejun Heo ata_link_for_each_dev(dev, link) { 2224416dc9edSTejun Heo /* After the reset, the device state is PIO 0 and the 2225416dc9edSTejun Heo * controller state is undefined. Reset also wakes up 2226416dc9edSTejun Heo * drives from sleeping mode. 2227c6fd2807SJeff Garzik */ 2228f58229f8STejun Heo dev->pio_mode = XFER_PIO_0; 2229054a5fbaSTejun Heo dev->flags &= ~ATA_DFLAG_SLEEPING; 2230c6fd2807SJeff Garzik 2231ae791c05STejun Heo if (ata_link_offline(link)) 2232ae791c05STejun Heo continue; 2233ae791c05STejun Heo 22344ccd3329STejun Heo /* apply class override */ 2235416dc9edSTejun Heo if (lflags & ATA_LFLAG_ASSUME_ATA) 2236ae791c05STejun Heo classes[dev->devno] = ATA_DEV_ATA; 2237416dc9edSTejun Heo else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2238ae791c05STejun Heo classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */ 2239ae791c05STejun Heo } 2240ae791c05STejun Heo 2241008a7896STejun Heo /* record current link speed */ 2242936fd732STejun Heo if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2243936fd732STejun Heo link->sata_spd = (sstatus >> 4) & 0xf; 2244008a7896STejun Heo 2245dc98c32cSTejun Heo /* thaw the port */ 2246dc98c32cSTejun Heo if (ata_is_host_link(link)) 2247dc98c32cSTejun Heo ata_eh_thaw_port(ap); 2248dc98c32cSTejun Heo 2249f046519fSTejun Heo /* postreset() should clear hardware SError. Although SError 2250f046519fSTejun Heo * is cleared during link resume, clearing SError here is 2251f046519fSTejun Heo * necessary as some PHYs raise hotplug events after SRST. 2252f046519fSTejun Heo * This introduces race condition where hotplug occurs between 2253f046519fSTejun Heo * reset and here. 
This race is mediated by cross checking 2254f046519fSTejun Heo * link onlineness and classification result later. 2255f046519fSTejun Heo */ 2256c6fd2807SJeff Garzik if (postreset) 2257cc0680a5STejun Heo postreset(link, classes); 2258c6fd2807SJeff Garzik 2259f046519fSTejun Heo /* clear cached SError */ 2260f046519fSTejun Heo spin_lock_irqsave(link->ap->lock, flags); 2261f046519fSTejun Heo link->eh_info.serror = 0; 2262f046519fSTejun Heo spin_unlock_irqrestore(link->ap->lock, flags); 2263f046519fSTejun Heo 2264f046519fSTejun Heo /* Make sure onlineness and classification result correspond. 2265f046519fSTejun Heo * Hotplug could have happened during reset and some 2266f046519fSTejun Heo * controllers fail to wait while a drive is spinning up after 2267f046519fSTejun Heo * being hotplugged causing misdetection. By cross checking 2268f046519fSTejun Heo * link onlineness and classification result, those conditions 2269f046519fSTejun Heo * can be reliably detected and retried. 2270f046519fSTejun Heo */ 2271f046519fSTejun Heo nr_known = 0; 2272f046519fSTejun Heo ata_link_for_each_dev(dev, link) { 2273f046519fSTejun Heo /* convert all ATA_DEV_UNKNOWN to ATA_DEV_NONE */ 2274f046519fSTejun Heo if (classes[dev->devno] == ATA_DEV_UNKNOWN) 2275f046519fSTejun Heo classes[dev->devno] = ATA_DEV_NONE; 2276f046519fSTejun Heo else 2277f046519fSTejun Heo nr_known++; 2278f046519fSTejun Heo } 2279f046519fSTejun Heo 2280f046519fSTejun Heo if (classify && !nr_known && ata_link_online(link)) { 2281f046519fSTejun Heo if (try < max_tries) { 2282f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, "link online but " 2283f046519fSTejun Heo "device misclassified, retrying\n"); 2284f046519fSTejun Heo rc = -EAGAIN; 2285f046519fSTejun Heo goto fail; 2286f046519fSTejun Heo } 2287f046519fSTejun Heo ata_link_printk(link, KERN_WARNING, 2288f046519fSTejun Heo "link online but device misclassified, " 2289f046519fSTejun Heo "device detection might fail\n"); 2290f046519fSTejun Heo } 2291f046519fSTejun Heo 2292c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 2293cf480626STejun Heo ata_eh_done(link, NULL, ATA_EH_RESET); 2294*0a2c0f56STejun Heo ehc->last_reset = jiffies; 2295c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 2296416dc9edSTejun Heo 2297416dc9edSTejun Heo rc = 0; 2298fccb6ea5STejun Heo out: 2299fccb6ea5STejun Heo /* clear hotplug flag */ 2300fccb6ea5STejun Heo ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2301afaa5c37STejun Heo 2302afaa5c37STejun Heo spin_lock_irqsave(ap->lock, flags); 2303afaa5c37STejun Heo ap->pflags &= ~ATA_PFLAG_RESETTING; 2304afaa5c37STejun Heo spin_unlock_irqrestore(ap->lock, flags); 2305afaa5c37STejun Heo 2306c6fd2807SJeff Garzik return rc; 2307416dc9edSTejun Heo 2308416dc9edSTejun Heo fail: 23095958e302STejun Heo /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 23105958e302STejun Heo if (!ata_is_host_link(link) && 23115958e302STejun Heo sata_scr_read(link, SCR_STATUS, &sstatus)) 23125958e302STejun Heo rc = -ERESTART; 23135958e302STejun Heo 2314416dc9edSTejun Heo if (rc == -ERESTART || try >= max_tries) 2315416dc9edSTejun Heo goto out; 2316416dc9edSTejun Heo 2317416dc9edSTejun Heo now = jiffies; 2318416dc9edSTejun Heo if (time_before(now, deadline)) { 2319416dc9edSTejun Heo unsigned long delta = deadline - now; 2320416dc9edSTejun Heo 2321*0a2c0f56STejun Heo ata_link_printk(link, KERN_WARNING, 2322*0a2c0f56STejun Heo "reset failed (errno=%d), retrying in %u secs\n", 2323*0a2c0f56STejun Heo rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2324416dc9edSTejun 
Heo 2325416dc9edSTejun Heo while (delta) 2326416dc9edSTejun Heo delta = schedule_timeout_uninterruptible(delta); 2327416dc9edSTejun Heo } 2328416dc9edSTejun Heo 2329416dc9edSTejun Heo if (rc == -EPIPE || try == max_tries - 1) 2330416dc9edSTejun Heo sata_down_spd_limit(link); 2331416dc9edSTejun Heo if (hardreset) 2332416dc9edSTejun Heo reset = hardreset; 2333416dc9edSTejun Heo goto retry; 2334c6fd2807SJeff Garzik } 2335c6fd2807SJeff Garzik 23360260731fSTejun Heo static int ata_eh_revalidate_and_attach(struct ata_link *link, 2337c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 2338c6fd2807SJeff Garzik { 23390260731fSTejun Heo struct ata_port *ap = link->ap; 23400260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2341c6fd2807SJeff Garzik struct ata_device *dev; 23428c3c52a8STejun Heo unsigned int new_mask = 0; 2343c6fd2807SJeff Garzik unsigned long flags; 2344f58229f8STejun Heo int rc = 0; 2345c6fd2807SJeff Garzik 2346c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2347c6fd2807SJeff Garzik 23488c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 23498c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 23508c3c52a8STejun Heo * device before the master device is identified. 23518c3c52a8STejun Heo */ 23520260731fSTejun Heo ata_link_for_each_dev_reverse(dev, link) { 2353f58229f8STejun Heo unsigned int action = ata_eh_dev_action(dev); 2354f58229f8STejun Heo unsigned int readid_flags = 0; 2355c6fd2807SJeff Garzik 2356bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 2357bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 2358bff04647STejun Heo 23599666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2360633273a3STejun Heo WARN_ON(dev->class == ATA_DEV_PMP); 2361633273a3STejun Heo 23620260731fSTejun Heo if (ata_link_offline(link)) { 2363c6fd2807SJeff Garzik rc = -EIO; 23648c3c52a8STejun Heo goto err; 2365c6fd2807SJeff Garzik } 2366c6fd2807SJeff Garzik 23670260731fSTejun Heo ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2368422c9daaSTejun Heo rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2369422c9daaSTejun Heo readid_flags); 2370c6fd2807SJeff Garzik if (rc) 23718c3c52a8STejun Heo goto err; 2372c6fd2807SJeff Garzik 23730260731fSTejun Heo ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2374c6fd2807SJeff Garzik 2375baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 2376baa1e78aSTejun Heo * transfer mode. 2377baa1e78aSTejun Heo */ 2378baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2379baa1e78aSTejun Heo 2380c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 2381c6fd2807SJeff Garzik queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 2382c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 2383c6fd2807SJeff Garzik ehc->tries[dev->devno] && 2384c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 2385c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 2386c6fd2807SJeff Garzik 2387633273a3STejun Heo if (dev->class == ATA_DEV_PMP) 2388633273a3STejun Heo rc = sata_pmp_attach(dev); 2389633273a3STejun Heo else 2390633273a3STejun Heo rc = ata_dev_read_id(dev, &dev->class, 2391633273a3STejun Heo readid_flags, dev->id); 23928c3c52a8STejun Heo switch (rc) { 23938c3c52a8STejun Heo case 0: 2394f58229f8STejun Heo new_mask |= 1 << dev->devno; 23958c3c52a8STejun Heo break; 23968c3c52a8STejun Heo case -ENOENT: 239755a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 239855a8e2c8STejun Heo * device. No need to reset. 
Just 239955a8e2c8STejun Heo * thaw and kill the device. 240055a8e2c8STejun Heo */ 240155a8e2c8STejun Heo ata_eh_thaw_port(ap); 240255a8e2c8STejun Heo dev->class = ATA_DEV_UNKNOWN; 2403c6fd2807SJeff Garzik break; 24048c3c52a8STejun Heo default: 24058c3c52a8STejun Heo dev->class = ATA_DEV_UNKNOWN; 24068c3c52a8STejun Heo goto err; 24078c3c52a8STejun Heo } 24088c3c52a8STejun Heo } 2409c6fd2807SJeff Garzik } 2410c6fd2807SJeff Garzik 2411c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 241233267325STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 241333267325STejun Heo if (ap->ops->cable_detect) 2414c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 241533267325STejun Heo ata_force_cbl(ap); 241633267325STejun Heo } 2417c1c4e8d5STejun Heo 24188c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 24198c3c52a8STejun Heo * device detection messages backwards. 24208c3c52a8STejun Heo */ 24210260731fSTejun Heo ata_link_for_each_dev(dev, link) { 2422633273a3STejun Heo if (!(new_mask & (1 << dev->devno)) || 2423633273a3STejun Heo dev->class == ATA_DEV_PMP) 24248c3c52a8STejun Heo continue; 24258c3c52a8STejun Heo 24268c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 24278c3c52a8STejun Heo rc = ata_dev_configure(dev); 24288c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 24298c3c52a8STejun Heo if (rc) 24308c3c52a8STejun Heo goto err; 24318c3c52a8STejun Heo 2432c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2433c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 2434c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2435baa1e78aSTejun Heo 243655a8e2c8STejun Heo /* new device discovered, configure xfermode */ 2437baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 2438c6fd2807SJeff Garzik } 2439c6fd2807SJeff Garzik 24408c3c52a8STejun Heo return 0; 24418c3c52a8STejun Heo 24428c3c52a8STejun Heo err: 2443c6fd2807SJeff Garzik *r_failed_dev = dev; 24448c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 2445c6fd2807SJeff Garzik return rc; 2446c6fd2807SJeff Garzik } 2447c6fd2807SJeff Garzik 24486f1d1e3aSTejun Heo /** 24496f1d1e3aSTejun Heo * ata_set_mode - Program timings and issue SET FEATURES - XFER 24506f1d1e3aSTejun Heo * @link: link on which timings will be programmed 24516f1d1e3aSTejun Heo * @r_failed_dev: out paramter for failed device 24526f1d1e3aSTejun Heo * 24536f1d1e3aSTejun Heo * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 24546f1d1e3aSTejun Heo * ata_set_mode() fails, pointer to the failing device is 24556f1d1e3aSTejun Heo * returned in @r_failed_dev. 24566f1d1e3aSTejun Heo * 24576f1d1e3aSTejun Heo * LOCKING: 24586f1d1e3aSTejun Heo * PCI/etc. bus probe sem. 
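 *
 * DUBIOUS_XFER bookkeeping is also done here: a verified transfer
 * clears ATA_EFLAG_DUBIOUS_XFER on the newest error ring entry, and
 * any device whose transfer mode or NCQ setting changed gets
 * ATA_DFLAG_DUBIOUS_XFER set.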
24596f1d1e3aSTejun Heo * 24606f1d1e3aSTejun Heo * RETURNS: 24616f1d1e3aSTejun Heo * 0 on success, negative errno otherwise 24626f1d1e3aSTejun Heo */ 24636f1d1e3aSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 24646f1d1e3aSTejun Heo { 24656f1d1e3aSTejun Heo struct ata_port *ap = link->ap; 246600115e0fSTejun Heo struct ata_device *dev; 246700115e0fSTejun Heo int rc; 24686f1d1e3aSTejun Heo 246976326ac1STejun Heo /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 247076326ac1STejun Heo ata_link_for_each_dev(dev, link) { 247176326ac1STejun Heo if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 247276326ac1STejun Heo struct ata_ering_entry *ent; 247376326ac1STejun Heo 247476326ac1STejun Heo ent = ata_ering_top(&dev->ering); 247576326ac1STejun Heo if (ent) 247676326ac1STejun Heo ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 247776326ac1STejun Heo } 247876326ac1STejun Heo } 247976326ac1STejun Heo 24806f1d1e3aSTejun Heo /* has private set_mode? */ 24816f1d1e3aSTejun Heo if (ap->ops->set_mode) 248200115e0fSTejun Heo rc = ap->ops->set_mode(link, r_failed_dev); 248300115e0fSTejun Heo else 248400115e0fSTejun Heo rc = ata_do_set_mode(link, r_failed_dev); 248500115e0fSTejun Heo 248600115e0fSTejun Heo /* if transfer mode has changed, set DUBIOUS_XFER on device */ 248700115e0fSTejun Heo ata_link_for_each_dev(dev, link) { 248800115e0fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 248900115e0fSTejun Heo u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 249000115e0fSTejun Heo u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 249100115e0fSTejun Heo 249200115e0fSTejun Heo if (dev->xfer_mode != saved_xfer_mode || 249300115e0fSTejun Heo ata_ncq_enabled(dev) != saved_ncq) 249400115e0fSTejun Heo dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 249500115e0fSTejun Heo } 249600115e0fSTejun Heo 249700115e0fSTejun Heo return rc; 24986f1d1e3aSTejun Heo } 24996f1d1e3aSTejun Heo 25000260731fSTejun Heo static int ata_link_nr_enabled(struct ata_link *link) 2501c6fd2807SJeff Garzik { 2502f58229f8STejun Heo struct ata_device *dev; 2503f58229f8STejun Heo int cnt = 0; 2504c6fd2807SJeff Garzik 25050260731fSTejun Heo ata_link_for_each_dev(dev, link) 2506f58229f8STejun Heo if (ata_dev_enabled(dev)) 2507c6fd2807SJeff Garzik cnt++; 2508c6fd2807SJeff Garzik return cnt; 2509c6fd2807SJeff Garzik } 2510c6fd2807SJeff Garzik 25110260731fSTejun Heo static int ata_link_nr_vacant(struct ata_link *link) 2512c6fd2807SJeff Garzik { 2513f58229f8STejun Heo struct ata_device *dev; 2514f58229f8STejun Heo int cnt = 0; 2515c6fd2807SJeff Garzik 25160260731fSTejun Heo ata_link_for_each_dev(dev, link) 2517f58229f8STejun Heo if (dev->class == ATA_DEV_UNKNOWN) 2518c6fd2807SJeff Garzik cnt++; 2519c6fd2807SJeff Garzik return cnt; 2520c6fd2807SJeff Garzik } 2521c6fd2807SJeff Garzik 25220260731fSTejun Heo static int ata_eh_skip_recovery(struct ata_link *link) 2523c6fd2807SJeff Garzik { 2524672b2d65STejun Heo struct ata_port *ap = link->ap; 25250260731fSTejun Heo struct ata_eh_context *ehc = &link->eh_context; 2526f58229f8STejun Heo struct ata_device *dev; 2527c6fd2807SJeff Garzik 2528f9df58cbSTejun Heo /* skip disabled links */ 2529f9df58cbSTejun Heo if (link->flags & ATA_LFLAG_DISABLED) 2530f9df58cbSTejun Heo return 1; 2531f9df58cbSTejun Heo 2532672b2d65STejun Heo /* thaw frozen port and recover failed devices */ 2533672b2d65STejun Heo if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 2534672b2d65STejun Heo return 0; 2535672b2d65STejun Heo 2536672b2d65STejun Heo /* reset at least 
once if reset is requested */ 2537672b2d65STejun Heo if ((ehc->i.action & ATA_EH_RESET) && 2538672b2d65STejun Heo !(ehc->i.flags & ATA_EHI_DID_RESET)) 2539c6fd2807SJeff Garzik return 0; 2540c6fd2807SJeff Garzik 2541c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 25420260731fSTejun Heo ata_link_for_each_dev(dev, link) { 2543c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 2544c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 2545c6fd2807SJeff Garzik return 0; 2546c6fd2807SJeff Garzik } 2547c6fd2807SJeff Garzik 2548c6fd2807SJeff Garzik return 1; 2549c6fd2807SJeff Garzik } 2550c6fd2807SJeff Garzik 255102c05a27STejun Heo static int ata_eh_schedule_probe(struct ata_device *dev) 255202c05a27STejun Heo { 255302c05a27STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 255402c05a27STejun Heo 255502c05a27STejun Heo if (!(ehc->i.probe_mask & (1 << dev->devno)) || 255602c05a27STejun Heo (ehc->did_probe_mask & (1 << dev->devno))) 255702c05a27STejun Heo return 0; 255802c05a27STejun Heo 255902c05a27STejun Heo ata_eh_detach_dev(dev); 256002c05a27STejun Heo ata_dev_init(dev); 256102c05a27STejun Heo ehc->did_probe_mask |= (1 << dev->devno); 2562cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 256300115e0fSTejun Heo ehc->saved_xfer_mode[dev->devno] = 0; 256400115e0fSTejun Heo ehc->saved_ncq_enabled &= ~(1 << dev->devno); 256502c05a27STejun Heo 256602c05a27STejun Heo return 1; 256702c05a27STejun Heo } 256802c05a27STejun Heo 25699b1e2658STejun Heo static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 2570fee7ca72STejun Heo { 25719af5c9c9STejun Heo struct ata_eh_context *ehc = &dev->link->eh_context; 2572fee7ca72STejun Heo 2573fee7ca72STejun Heo ehc->tries[dev->devno]--; 2574fee7ca72STejun Heo 2575fee7ca72STejun Heo switch (err) { 2576fee7ca72STejun Heo case -ENODEV: 2577fee7ca72STejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 2578fee7ca72STejun Heo ehc->i.probe_mask |= (1 << dev->devno); 2579fee7ca72STejun Heo case -EINVAL: 2580fee7ca72STejun Heo /* give it just one more chance */ 2581fee7ca72STejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 2582fee7ca72STejun Heo case -EIO: 25834fb4615bSTejun Heo if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) { 2584fee7ca72STejun Heo /* This is the last chance, better to slow 2585fee7ca72STejun Heo * down than lose it. 
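Drop the SATA link speed one step and limit the transfer mode to PIO before the final attempt.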
2586fee7ca72STejun Heo */ 2587936fd732STejun Heo sata_down_spd_limit(dev->link); 2588fee7ca72STejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2589fee7ca72STejun Heo } 2590fee7ca72STejun Heo } 2591fee7ca72STejun Heo 2592fee7ca72STejun Heo if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 2593fee7ca72STejun Heo /* disable device if it has used up all its chances */ 2594fee7ca72STejun Heo ata_dev_disable(dev); 2595fee7ca72STejun Heo 2596fee7ca72STejun Heo /* detach if offline */ 2597936fd732STejun Heo if (ata_link_offline(dev->link)) 2598fee7ca72STejun Heo ata_eh_detach_dev(dev); 2599fee7ca72STejun Heo 260002c05a27STejun Heo /* schedule probe if necessary */ 260102c05a27STejun Heo if (ata_eh_schedule_probe(dev)) 2602fee7ca72STejun Heo ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 26039b1e2658STejun Heo 26049b1e2658STejun Heo return 1; 2605fee7ca72STejun Heo } else { 2606cf480626STejun Heo ehc->i.action |= ATA_EH_RESET; 26079b1e2658STejun Heo return 0; 2608fee7ca72STejun Heo } 2609fee7ca72STejun Heo } 2610fee7ca72STejun Heo 2611c6fd2807SJeff Garzik /** 2612c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 2613c6fd2807SJeff Garzik * @ap: host port to recover 2614c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 2615c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 2616c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 2617c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 26189b1e2658STejun Heo * @r_failed_link: out parameter for failed link 2619c6fd2807SJeff Garzik * 2620c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 2621c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 26229b1e2658STejun Heo * recover each link and hotplug requests are recorded in the 26239b1e2658STejun Heo * link's eh_context. This function executes all the operations 26249b1e2658STejun Heo * with appropriate retrials and fallbacks to resurrect failed 2625c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 2626c6fd2807SJeff Garzik * 2627c6fd2807SJeff Garzik * LOCKING: 2628c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2629c6fd2807SJeff Garzik * 2630c6fd2807SJeff Garzik * RETURNS: 2631c6fd2807SJeff Garzik * 0 on success, -errno on failure. 2632c6fd2807SJeff Garzik */ 2633fb7fd614STejun Heo int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 2634c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 26359b1e2658STejun Heo ata_postreset_fn_t postreset, 26369b1e2658STejun Heo struct ata_link **r_failed_link) 2637c6fd2807SJeff Garzik { 26389b1e2658STejun Heo struct ata_link *link; 2639c6fd2807SJeff Garzik struct ata_device *dev; 2640*0a2c0f56STejun Heo int nr_failed_devs; 2641dc98c32cSTejun Heo int rc; 2642f9df58cbSTejun Heo unsigned long flags; 2643c6fd2807SJeff Garzik 2644c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2645c6fd2807SJeff Garzik 2646c6fd2807SJeff Garzik /* prep for recovery */ 26479b1e2658STejun Heo ata_port_for_each_link(link, ap) { 26489b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 26499b1e2658STejun Heo 2650f9df58cbSTejun Heo /* re-enable link? 
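If ATA_EH_ENABLE_LINK was requested, clear ATA_LFLAG_DISABLED under the port lock so the link's devices can be recovered below.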
*/ 2651f9df58cbSTejun Heo if (ehc->i.action & ATA_EH_ENABLE_LINK) { 2652f9df58cbSTejun Heo ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 2653f9df58cbSTejun Heo spin_lock_irqsave(ap->lock, flags); 2654f9df58cbSTejun Heo link->flags &= ~ATA_LFLAG_DISABLED; 2655f9df58cbSTejun Heo spin_unlock_irqrestore(ap->lock, flags); 2656f9df58cbSTejun Heo ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 2657f9df58cbSTejun Heo } 2658f9df58cbSTejun Heo 26590260731fSTejun Heo ata_link_for_each_dev(dev, link) { 2660fd995f70STejun Heo if (link->flags & ATA_LFLAG_NO_RETRY) 2661fd995f70STejun Heo ehc->tries[dev->devno] = 1; 2662fd995f70STejun Heo else 2663c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2664c6fd2807SJeff Garzik 266579a55b72STejun Heo /* collect port action mask recorded in dev actions */ 26669b1e2658STejun Heo ehc->i.action |= ehc->i.dev_action[dev->devno] & 26679b1e2658STejun Heo ~ATA_EH_PERDEV_MASK; 2668f58229f8STejun Heo ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 266979a55b72STejun Heo 2670c6fd2807SJeff Garzik /* process hotplug request */ 2671c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 2672c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2673c6fd2807SJeff Garzik 267402c05a27STejun Heo /* schedule probe if necessary */ 267502c05a27STejun Heo if (!ata_dev_enabled(dev)) 267602c05a27STejun Heo ata_eh_schedule_probe(dev); 2677c6fd2807SJeff Garzik } 26789b1e2658STejun Heo } 2679c6fd2807SJeff Garzik 2680c6fd2807SJeff Garzik retry: 2681c6fd2807SJeff Garzik rc = 0; 26829b1e2658STejun Heo nr_failed_devs = 0; 2683c6fd2807SJeff Garzik 2684c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 2685c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 2686c6fd2807SJeff Garzik goto out; 2687c6fd2807SJeff Garzik 26889b1e2658STejun Heo /* prep for EH */ 26899b1e2658STejun Heo ata_port_for_each_link(link, ap) { 26909b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 26919b1e2658STejun Heo 2692c6fd2807SJeff Garzik /* skip EH if possible. 
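Recovery is skipped for disabled links, and for links where the port isn't frozen, no device is enabled, no requested reset is still outstanding and every vacant slot is classed ATA_DEV_NONE.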
*/ 26930260731fSTejun Heo if (ata_eh_skip_recovery(link)) 2694c6fd2807SJeff Garzik ehc->i.action = 0; 2695c6fd2807SJeff Garzik 26960260731fSTejun Heo ata_link_for_each_dev(dev, link) 2697f58229f8STejun Heo ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 26989b1e2658STejun Heo } 2699c6fd2807SJeff Garzik 2700c6fd2807SJeff Garzik /* reset */ 27019b1e2658STejun Heo ata_port_for_each_link(link, ap) { 27029b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 27039b1e2658STejun Heo 2704cf480626STejun Heo if (!(ehc->i.action & ATA_EH_RESET)) 27059b1e2658STejun Heo continue; 27069b1e2658STejun Heo 27079b1e2658STejun Heo rc = ata_eh_reset(link, ata_link_nr_vacant(link), 2708dc98c32cSTejun Heo prereset, softreset, hardreset, postreset); 2709c6fd2807SJeff Garzik if (rc) { 27100260731fSTejun Heo ata_link_printk(link, KERN_ERR, 2711c6fd2807SJeff Garzik "reset failed, giving up\n"); 2712c6fd2807SJeff Garzik goto out; 2713c6fd2807SJeff Garzik } 27149b1e2658STejun Heo } 2715c6fd2807SJeff Garzik 27169b1e2658STejun Heo /* the rest */ 27179b1e2658STejun Heo ata_port_for_each_link(link, ap) { 27189b1e2658STejun Heo struct ata_eh_context *ehc = &link->eh_context; 27199b1e2658STejun Heo 2720c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 27210260731fSTejun Heo rc = ata_eh_revalidate_and_attach(link, &dev); 2722c6fd2807SJeff Garzik if (rc) 2723c6fd2807SJeff Garzik goto dev_fail; 2724c6fd2807SJeff Garzik 2725633273a3STejun Heo /* if PMP got attached, return, pmp EH will take care of it */ 2726633273a3STejun Heo if (link->device->class == ATA_DEV_PMP) { 2727633273a3STejun Heo ehc->i.action = 0; 2728633273a3STejun Heo return 0; 2729633273a3STejun Heo } 2730633273a3STejun Heo 2731baa1e78aSTejun Heo /* configure transfer mode if necessary */ 2732baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 27330260731fSTejun Heo rc = ata_set_mode(link, &dev); 27344ae72a1eSTejun Heo if (rc) 2735c6fd2807SJeff Garzik goto dev_fail; 2736baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 2737c6fd2807SJeff Garzik } 2738c6fd2807SJeff Garzik 27393ec25ebdSTejun Heo if (ehc->i.action & ATA_EH_LPM) 2740ca77329fSKristen Carlson Accardi ata_link_for_each_dev(dev, link) 2741ca77329fSKristen Carlson Accardi ata_dev_enable_pm(dev, ap->pm_policy); 2742ca77329fSKristen Carlson Accardi 27439b1e2658STejun Heo /* this link is okay now */ 27449b1e2658STejun Heo ehc->i.flags = 0; 27459b1e2658STejun Heo continue; 2746c6fd2807SJeff Garzik 2747c6fd2807SJeff Garzik dev_fail: 27489b1e2658STejun Heo nr_failed_devs++; 2749*0a2c0f56STejun Heo ata_eh_handle_dev_fail(dev, rc); 2750c6fd2807SJeff Garzik 2751b06ce3e5STejun Heo if (ap->pflags & ATA_PFLAG_FROZEN) { 2752b06ce3e5STejun Heo /* PMP reset requires working host port. 2753b06ce3e5STejun Heo * Can't retry if it's frozen. 
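Give up on the whole port if a PMP is attached; otherwise stop handling the remaining links and let the failed-device count trigger a retry.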
2754b06ce3e5STejun Heo */ 2755071f44b1STejun Heo if (sata_pmp_attached(ap)) 2756b06ce3e5STejun Heo goto out; 27579b1e2658STejun Heo break; 27589b1e2658STejun Heo } 2759b06ce3e5STejun Heo } 27609b1e2658STejun Heo 2761*0a2c0f56STejun Heo if (nr_failed_devs) 2762c6fd2807SJeff Garzik goto retry; 2763c6fd2807SJeff Garzik 2764c6fd2807SJeff Garzik out: 27659b1e2658STejun Heo if (rc && r_failed_link) 27669b1e2658STejun Heo *r_failed_link = link; 2767c6fd2807SJeff Garzik 2768c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 2769c6fd2807SJeff Garzik return rc; 2770c6fd2807SJeff Garzik } 2771c6fd2807SJeff Garzik 2772c6fd2807SJeff Garzik /** 2773c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 2774c6fd2807SJeff Garzik * @ap: host port to finish EH for 2775c6fd2807SJeff Garzik * 2776c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 2777c6fd2807SJeff Garzik * failed qcs. 2778c6fd2807SJeff Garzik * 2779c6fd2807SJeff Garzik * LOCKING: 2780c6fd2807SJeff Garzik * None. 2781c6fd2807SJeff Garzik */ 2782fb7fd614STejun Heo void ata_eh_finish(struct ata_port *ap) 2783c6fd2807SJeff Garzik { 2784c6fd2807SJeff Garzik int tag; 2785c6fd2807SJeff Garzik 2786c6fd2807SJeff Garzik /* retry or finish qcs */ 2787c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2788c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2789c6fd2807SJeff Garzik 2790c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 2791c6fd2807SJeff Garzik continue; 2792c6fd2807SJeff Garzik 2793c6fd2807SJeff Garzik if (qc->err_mask) { 2794c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 2795c6fd2807SJeff Garzik * generate sense data in this function, 2796c6fd2807SJeff Garzik * considering both err_mask and tf. 2797c6fd2807SJeff Garzik */ 279803faab78STejun Heo if (qc->flags & ATA_QCFLAG_RETRY) 2799c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 280003faab78STejun Heo else 280103faab78STejun Heo ata_eh_qc_complete(qc); 2802c6fd2807SJeff Garzik } else { 2803c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 2804c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 2805c6fd2807SJeff Garzik } else { 2806c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 2807c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 2808c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 2809c6fd2807SJeff Garzik } 2810c6fd2807SJeff Garzik } 2811c6fd2807SJeff Garzik } 2812da917d69STejun Heo 2813da917d69STejun Heo /* make sure nr_active_links is zero after EH */ 2814da917d69STejun Heo WARN_ON(ap->nr_active_links); 2815da917d69STejun Heo ap->nr_active_links = 0; 2816c6fd2807SJeff Garzik } 2817c6fd2807SJeff Garzik 2818c6fd2807SJeff Garzik /** 2819c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 2820c6fd2807SJeff Garzik * @ap: host port to handle error for 2821a1efdabaSTejun Heo * 2822c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 2823c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 2824c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 2825c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 2826c6fd2807SJeff Garzik * 2827c6fd2807SJeff Garzik * Perform standard error handling sequence. 2828c6fd2807SJeff Garzik * 2829c6fd2807SJeff Garzik * LOCKING: 2830c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
2831c6fd2807SJeff Garzik */ 2832c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 2833c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2834c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 2835c6fd2807SJeff Garzik { 28369b1e2658STejun Heo struct ata_device *dev; 28379b1e2658STejun Heo int rc; 28389b1e2658STejun Heo 28399b1e2658STejun Heo ata_eh_autopsy(ap); 28409b1e2658STejun Heo ata_eh_report(ap); 28419b1e2658STejun Heo 28429b1e2658STejun Heo rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 28439b1e2658STejun Heo NULL); 28449b1e2658STejun Heo if (rc) { 28459b1e2658STejun Heo ata_link_for_each_dev(dev, &ap->link) 28469b1e2658STejun Heo ata_dev_disable(dev); 28479b1e2658STejun Heo } 28489b1e2658STejun Heo 2849c6fd2807SJeff Garzik ata_eh_finish(ap); 2850c6fd2807SJeff Garzik } 2851c6fd2807SJeff Garzik 2852a1efdabaSTejun Heo /** 2853a1efdabaSTejun Heo * ata_std_error_handler - standard error handler 2854a1efdabaSTejun Heo * @ap: host port to handle error for 2855a1efdabaSTejun Heo * 2856a1efdabaSTejun Heo * Standard error handler 2857a1efdabaSTejun Heo * 2858a1efdabaSTejun Heo * LOCKING: 2859a1efdabaSTejun Heo * Kernel thread context (may sleep). 2860a1efdabaSTejun Heo */ 2861a1efdabaSTejun Heo void ata_std_error_handler(struct ata_port *ap) 2862a1efdabaSTejun Heo { 2863a1efdabaSTejun Heo struct ata_port_operations *ops = ap->ops; 2864a1efdabaSTejun Heo ata_reset_fn_t hardreset = ops->hardreset; 2865a1efdabaSTejun Heo 286657c9efdfSTejun Heo /* ignore built-in hardreset if SCR access is not available */ 286757c9efdfSTejun Heo if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link)) 2868a1efdabaSTejun Heo hardreset = NULL; 2869a1efdabaSTejun Heo 2870a1efdabaSTejun Heo ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 2871a1efdabaSTejun Heo } 2872a1efdabaSTejun Heo 28736ffa01d8STejun Heo #ifdef CONFIG_PM 2874c6fd2807SJeff Garzik /** 2875c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 2876c6fd2807SJeff Garzik * @ap: port to suspend 2877c6fd2807SJeff Garzik * 2878c6fd2807SJeff Garzik * Suspend @ap. 2879c6fd2807SJeff Garzik * 2880c6fd2807SJeff Garzik * LOCKING: 2881c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2882c6fd2807SJeff Garzik */ 2883c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 2884c6fd2807SJeff Garzik { 2885c6fd2807SJeff Garzik unsigned long flags; 2886c6fd2807SJeff Garzik int rc = 0; 2887c6fd2807SJeff Garzik 2888c6fd2807SJeff Garzik /* are we suspending? 
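Bail out unless a PM request is pending and the requested event is a suspend rather than PM_EVENT_ON.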
*/ 2889c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2890c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 2891c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) { 2892c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2893c6fd2807SJeff Garzik return; 2894c6fd2807SJeff Garzik } 2895c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2896c6fd2807SJeff Garzik 2897c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 2898c6fd2807SJeff Garzik 289964578a3dSTejun Heo /* tell ACPI we're suspending */ 290064578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 290164578a3dSTejun Heo if (rc) 290264578a3dSTejun Heo goto out; 290364578a3dSTejun Heo 2904c6fd2807SJeff Garzik /* suspend */ 2905c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 2906c6fd2807SJeff Garzik 2907c6fd2807SJeff Garzik if (ap->ops->port_suspend) 2908c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 2909c6fd2807SJeff Garzik 2910bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_SUSPEND); 291164578a3dSTejun Heo out: 2912c6fd2807SJeff Garzik /* report result */ 2913c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2914c6fd2807SJeff Garzik 2915c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 2916c6fd2807SJeff Garzik if (rc == 0) 2917c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 291864578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 2919c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 2920c6fd2807SJeff Garzik 2921c6fd2807SJeff Garzik if (ap->pm_result) { 2922c6fd2807SJeff Garzik *ap->pm_result = rc; 2923c6fd2807SJeff Garzik ap->pm_result = NULL; 2924c6fd2807SJeff Garzik } 2925c6fd2807SJeff Garzik 2926c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2927c6fd2807SJeff Garzik 2928c6fd2807SJeff Garzik return; 2929c6fd2807SJeff Garzik } 2930c6fd2807SJeff Garzik 2931c6fd2807SJeff Garzik /** 2932c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 2933c6fd2807SJeff Garzik * @ap: port to resume 2934c6fd2807SJeff Garzik * 2935c6fd2807SJeff Garzik * Resume @ap. 2936c6fd2807SJeff Garzik * 2937c6fd2807SJeff Garzik * LOCKING: 2938c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2939c6fd2807SJeff Garzik */ 2940c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 2941c6fd2807SJeff Garzik { 2942c6fd2807SJeff Garzik unsigned long flags; 29439666f400STejun Heo int rc = 0; 2944c6fd2807SJeff Garzik 2945c6fd2807SJeff Garzik /* are we resuming? 
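Bail out unless a PM request is pending and the requested event is PM_EVENT_ON.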
*/ 2946c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2947c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 2948c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) { 2949c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2950c6fd2807SJeff Garzik return; 2951c6fd2807SJeff Garzik } 2952c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2953c6fd2807SJeff Garzik 29549666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 2955c6fd2807SJeff Garzik 2956bd3adca5SShaohua Li ata_acpi_set_state(ap, PMSG_ON); 2957bd3adca5SShaohua Li 2958c6fd2807SJeff Garzik if (ap->ops->port_resume) 2959c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 2960c6fd2807SJeff Garzik 29616746544cSTejun Heo /* tell ACPI that we're resuming */ 29626746544cSTejun Heo ata_acpi_on_resume(ap); 29636746544cSTejun Heo 29649666f400STejun Heo /* report result */ 2965c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2966c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 2967c6fd2807SJeff Garzik if (ap->pm_result) { 2968c6fd2807SJeff Garzik *ap->pm_result = rc; 2969c6fd2807SJeff Garzik ap->pm_result = NULL; 2970c6fd2807SJeff Garzik } 2971c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2972c6fd2807SJeff Garzik } 29736ffa01d8STejun Heo #endif /* CONFIG_PM */ 2974
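/*
 * Illustrative sketch only, not part of libata-eh.c: how a low-level driver
 * typically hooks the EH entry points above.  A driver with no special needs
 * just points ->error_handler at ata_std_error_handler; one that only needs a
 * custom prereset can wrap ata_do_eh() with the stock reset methods instead.
 * foo_prereset(), foo_error_handler() and foo_port_ops are hypothetical
 * names; ata_std_prereset(), sata_std_hardreset(), ata_std_postreset() and
 * sata_port_ops are the regular libata helpers assumed to be available via
 * <linux/libata.h> and this generation's ops-inheritance scheme.
 */
static int foo_prereset(struct ata_link *link, unsigned long deadline)
{
	/* driver-specific quirk handling would go here */
	return ata_std_prereset(link, deadline);
}

static void foo_error_handler(struct ata_port *ap)
{
	/* standard EH flow: no softreset, stock hardreset/postreset */
	ata_do_eh(ap, foo_prereset, NULL, sata_std_hardreset,
		  ata_std_postreset);
}

static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
	.error_handler	= foo_error_handler,
};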