/*
 * libata-eh.c - libata error handling
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 * Please ALWAYS copy linux-ide@vger.kernel.org
 * on emails.
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
};

/* Waiting in ->prereset can never be reliable.  It's sometimes nice
 * to wait there but it can't be depended upon; otherwise, we wouldn't
 * be resetting.  Just give it enough time for most drives to spin up.
 */
enum {
	ATA_EH_PRERESET_TIMEOUT		= 10 * HZ,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
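 * The entries below sum to roughly one minute of reset attempts;
 * after the last one more than a minute has elapsed and EH gives up.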
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10 * HZ,	/* most drives spin up by 10sec */
	10 * HZ,	/* > 99% working drives spin up before 20sec */
	35 * HZ,	/* give > 30 secs of idleness for retarded devices */
	 5 * HZ,	/* and sweet one last chance */
	/* > 1 min has elapsed, give up */
};

static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void ata_ering_record(struct ata_ering *ering, int is_io,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->is_io = is_io;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	int i;

	if (!dev) {
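		/* port-wide action: clear it for the port and for
		 * every device on it
		 */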
		ehi->action &= ~action;
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ehi->dev_action[i] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				ehi->dev_action[i] |= ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles
 *	SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.
		 * Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);
	} else
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
		ap->eh_context.i = ap->eh_info;
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--repeat_cnt) {
				ata_port_printk(ap, KERN_INFO,
					"EH pending after completion, "
					"repeating EH (cnt=%d)\n", repeat_cnt);
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_REPEAT);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on the port has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(ap->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
			       "stat 0x%x host_stat 0x%x\n",
			       qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= AC_ERR_TIMEOUT;
		break;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}

/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

	DPRINTK("EXIT\n");
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
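 *	A return value of zero means no qc was active; in that case
 *	port-wide EH is scheduled instead.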
 */
int ata_port_abort(struct ata_port *ap)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	nr_aborted = ata_port_abort(ap);
	__ata_port_freeze(ap);

	return nr_aborted;
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and
 *	upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @ap->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
			       unsigned int action)
{
	unsigned long flags;
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_eh_context *ehc = &ap->eh_context;

	spin_lock_irqsave(ap->lock, flags);

	/* Reset is represented by combination of actions and EHI
	 * flags.  Suck in all related bits before clearing eh_info to
	 * avoid losing requested action.
	 */
	if (action & ATA_EH_RESET_MASK) {
		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

		/* make sure all reset actions are cleared & clear EHI flags */
		action |= ATA_EH_RESET_MASK;
		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, ehi, action);

	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @ap->eh_context.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
			unsigned int action)
{
	/* if reset is complete, clear all reset actions & reset modifier */
	if (action & ATA_EH_RESET_MASK) {
		action |= ATA_EH_RESET_MASK;
		ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, &ap->eh_context.i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char * ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@qc: qc to perform REQUEST_SENSE to (sense data is stored in
 *	qc->scsicmd->sense_buffer, SCSI_SENSE_BUFFERSIZE bytes long)
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	unsigned char *sense_buf = qc->scsicmd->sense_buffer;
	struct ata_port *ap = dev->ap;
	struct ata_taskfile tf;
	u8 cdb[ATAPI_CDB_LEN];

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed?
	 */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = qc->result_tf.feature >> 4;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	memset(cdb, 0, ATAPI_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = SCSI_SENSE_BUFFERSIZE;

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATA_PROT_ATAPI_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATA_PROT_ATAPI;
		tf.lbam = (8 * 1024) & 0xff;
		tf.lbah = (8 * 1024) >> 8;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@ap: ATA port to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;

	if (serror & SERR_PERSISTENT) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
	}
	if (serror &
	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_HARDRESET;
	}
	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@ap: ATA port to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev = ap->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already?
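	 * If any failed qc already carries an err_mask set by the
	 * LLDD, trust that analysis and skip reading log page 10h.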
	 */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(ap->sactive & (1 << tag))) {
		ata_port_printk(ap, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->err_mask |= AC_ERR_DEV;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_SOFTRESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_SOFTRESET;

	return action;
}

static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
{
	if (err_mask & AC_ERR_ATA_BUS)
		return 1;

	if (err_mask & AC_ERR_TIMEOUT)
		return 2;

	if (is_io) {
		if (err_mask & AC_ERR_HSM)
			return 2;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return 3;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int nr_errors[4];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat = ata_eh_categorize_error(ent->is_io, ent->err_mask);

	if (ent->timestamp < arg->since)
		return -1;

	arg->nr_errors[cat]++;
	return 0;
}

/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	Cat-1 is ATA_BUS error for any command.
 *
 *	Cat-2 is TIMEOUT for any command or HSM violation for known
 *	supported commands.
 *
 *	Cat-3 is unclassified DEV error for known supported
 *	commands.
 *
 *	NCQ needs to be turned off if there have been more than 3
 *	Cat-2 + Cat-3 errors during last 10 minutes.
 *
 *	Speed down is necessary if there have been more than 3 Cat-1 +
 *	Cat-2 errors or 10 Cat-3 errors during last 10 minutes.
 *
 *	Falling back to PIO mode is necessary if there have been more
 *	than 10 Cat-1 + Cat-2 + Cat-3 errors during last 5 minutes.
 *
 *	LOCKING:
 *	Inherited from caller.
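 *
 *	For example, a device that logged four command timeouts (Cat-2)
 *	within the last ten minutes gets both ATA_EH_SPDN_NCQ_OFF and
 *	ATA_EH_SPDN_SPEED_DOWN, but not ATA_EH_SPDN_FALLBACK_TO_PIO
 *	unless more than ten Cat-1/2/3 errors occurred within the last
 *	five minutes.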
1264c6fd2807SJeff Garzik * 1265c6fd2807SJeff Garzik * RETURNS: 12667d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags. 1267c6fd2807SJeff Garzik */ 12687d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1269c6fd2807SJeff Garzik { 12707d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 12717d47e8d4STejun Heo u64 j64 = get_jiffies_64(); 12727d47e8d4STejun Heo struct speed_down_verdict_arg arg; 12737d47e8d4STejun Heo unsigned int verdict = 0; 1274c6fd2807SJeff Garzik 12757d47e8d4STejun Heo /* scan past 10 mins of error history */ 1276c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg)); 12777d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins); 12787d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1279c6fd2807SJeff Garzik 12807d47e8d4STejun Heo if (arg.nr_errors[2] + arg.nr_errors[3] > 3) 12817d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF; 12827d47e8d4STejun Heo if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10) 12837d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN; 1284c6fd2807SJeff Garzik 12857d47e8d4STejun Heo /* scan past 5 mins of error history */ 12867d47e8d4STejun Heo memset(&arg, 0, sizeof(arg)); 12877d47e8d4STejun Heo arg.since = j64 - min(j64, j5mins); 12887d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1289c6fd2807SJeff Garzik 12907d47e8d4STejun Heo if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10) 12917d47e8d4STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 12927d47e8d4STejun Heo 12937d47e8d4STejun Heo return verdict; 1294c6fd2807SJeff Garzik } 1295c6fd2807SJeff Garzik 1296c6fd2807SJeff Garzik /** 1297c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary 1298c6fd2807SJeff Garzik * @dev: Failed device 1299c6fd2807SJeff Garzik * @is_io: Did the device fail during normal IO? 1300c6fd2807SJeff Garzik * @err_mask: err_mask of the error 1301c6fd2807SJeff Garzik * 1302c6fd2807SJeff Garzik * Record error and examine error history to determine whether 1303c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets 1304c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is 1305c6fd2807SJeff Garzik * necessary. 1306c6fd2807SJeff Garzik * 1307c6fd2807SJeff Garzik * LOCKING: 1308c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1309c6fd2807SJeff Garzik * 1310c6fd2807SJeff Garzik * RETURNS: 13117d47e8d4STejun Heo * Determined recovery action. 1312c6fd2807SJeff Garzik */ 13137d47e8d4STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io, 1314c6fd2807SJeff Garzik unsigned int err_mask) 1315c6fd2807SJeff Garzik { 13167d47e8d4STejun Heo unsigned int verdict; 13177d47e8d4STejun Heo unsigned int action = 0; 13187d47e8d4STejun Heo 13197d47e8d4STejun Heo /* don't bother if Cat-0 error */ 13207d47e8d4STejun Heo if (ata_eh_categorize_error(is_io, err_mask) == 0) 1321c6fd2807SJeff Garzik return 0; 1322c6fd2807SJeff Garzik 1323c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */ 1324c6fd2807SJeff Garzik ata_ering_record(&dev->ering, is_io, err_mask); 13257d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev); 1326c6fd2807SJeff Garzik 13277d47e8d4STejun Heo /* turn off NCQ?
*/ 13287d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 13297d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 13307d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 13317d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 13327d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 13337d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 13347d47e8d4STejun Heo goto done; 13357d47e8d4STejun Heo } 1336c6fd2807SJeff Garzik 13377d47e8d4STejun Heo /* speed down? */ 13387d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1339c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 13407d47e8d4STejun Heo if (sata_down_spd_limit(dev->ap) == 0) { 13417d47e8d4STejun Heo action |= ATA_EH_HARDRESET; 13427d47e8d4STejun Heo goto done; 13437d47e8d4STejun Heo } 1344c6fd2807SJeff Garzik 1345c6fd2807SJeff Garzik /* lower transfer mode */ 13467d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 13477d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 13487d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 13497d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 13507d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 13517d47e8d4STejun Heo int sel; 1352c6fd2807SJeff Garzik 13537d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 13547d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 13557d47e8d4STejun Heo else 13567d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 13577d47e8d4STejun Heo 13587d47e8d4STejun Heo dev->spdn_cnt++; 13597d47e8d4STejun Heo 13607d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 13617d47e8d4STejun Heo action |= ATA_EH_SOFTRESET; 13627d47e8d4STejun Heo goto done; 13637d47e8d4STejun Heo } 13647d47e8d4STejun Heo } 13657d47e8d4STejun Heo } 13667d47e8d4STejun Heo 13677d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 13687d47e8d4STejun Heo * SATA. Consider it only for PATA. 13697d47e8d4STejun Heo */ 13707d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 13717d47e8d4STejun Heo (dev->ap->cbl != ATA_CBL_SATA) && 13727d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 13737d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 13747d47e8d4STejun Heo dev->spdn_cnt = 0; 13757d47e8d4STejun Heo action |= ATA_EH_SOFTRESET; 13767d47e8d4STejun Heo goto done; 13777d47e8d4STejun Heo } 13787d47e8d4STejun Heo } 13797d47e8d4STejun Heo 1380c6fd2807SJeff Garzik return 0; 13817d47e8d4STejun Heo done: 13827d47e8d4STejun Heo /* device has been slowed down, blow error history */ 13837d47e8d4STejun Heo ata_ering_clear(&dev->ering); 13847d47e8d4STejun Heo return action; 1385c6fd2807SJeff Garzik } 1386c6fd2807SJeff Garzik 1387c6fd2807SJeff Garzik /** 1388c6fd2807SJeff Garzik * ata_eh_autopsy - analyze error and determine recovery action 1389c6fd2807SJeff Garzik * @ap: ATA port to perform autopsy on 1390c6fd2807SJeff Garzik * 1391c6fd2807SJeff Garzik * Analyze why @ap failed and determine which recovery action is 1392c6fd2807SJeff Garzik * needed. This function also sets more detailed AC_ERR_* values 1393c6fd2807SJeff Garzik * and fills sense data for ATAPI CHECK SENSE. 1394c6fd2807SJeff Garzik * 1395c6fd2807SJeff Garzik * LOCKING: 1396c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1397c6fd2807SJeff Garzik */ 1398c6fd2807SJeff Garzik static void ata_eh_autopsy(struct ata_port *ap) 1399c6fd2807SJeff Garzik { 1400c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1401c6fd2807SJeff Garzik unsigned int all_err_mask = 0; 1402c6fd2807SJeff Garzik int tag, is_io = 0; 1403c6fd2807SJeff Garzik u32 serror; 1404c6fd2807SJeff Garzik int rc; 1405c6fd2807SJeff Garzik 1406c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1407c6fd2807SJeff Garzik 1408c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1409c6fd2807SJeff Garzik return; 1410c6fd2807SJeff Garzik 1411c6fd2807SJeff Garzik /* obtain and analyze SError */ 1412c6fd2807SJeff Garzik rc = sata_scr_read(ap, SCR_ERROR, &serror); 1413c6fd2807SJeff Garzik if (rc == 0) { 1414c6fd2807SJeff Garzik ehc->i.serror |= serror; 1415c6fd2807SJeff Garzik ata_eh_analyze_serror(ap); 1416c6fd2807SJeff Garzik } else if (rc != -EOPNOTSUPP) 1417c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_HARDRESET; 1418c6fd2807SJeff Garzik 1419c6fd2807SJeff Garzik /* analyze NCQ failure */ 1420c6fd2807SJeff Garzik ata_eh_analyze_ncq_error(ap); 1421c6fd2807SJeff Garzik 1422c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1423c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1424c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1425c6fd2807SJeff Garzik 1426c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1427c6fd2807SJeff Garzik 1428c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1429c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1430c6fd2807SJeff Garzik 1431c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1432c6fd2807SJeff Garzik continue; 1433c6fd2807SJeff Garzik 1434c6fd2807SJeff Garzik /* inherit upper level err_mask */ 1435c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 1436c6fd2807SJeff Garzik 1437c6fd2807SJeff Garzik /* analyze TF */ 1438c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1439c6fd2807SJeff Garzik 1440c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 1441c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 1442c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 1443c6fd2807SJeff Garzik AC_ERR_INVALID); 1444c6fd2807SJeff Garzik 1445c6fd2807SJeff Garzik /* any real error trumps unknown error */ 1446c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 1447c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 1448c6fd2807SJeff Garzik 1449c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 1450c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1451c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1452c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_REVALIDATE; 1453c6fd2807SJeff Garzik } 1454c6fd2807SJeff Garzik 1455c6fd2807SJeff Garzik /* accumulate error info */ 1456c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 1457c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 1458c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 1459c6fd2807SJeff Garzik is_io = 1; 1460c6fd2807SJeff Garzik } 1461c6fd2807SJeff Garzik 1462c6fd2807SJeff Garzik /* enforce default EH actions */ 1463c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 1464c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1465c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 1466c6fd2807SJeff Garzik else if (all_err_mask) 1467c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 1468c6fd2807SJeff Garzik 1469c6fd2807SJeff Garzik /* 
if we have offending qcs and the associated failed device */ 1470c6fd2807SJeff Garzik if (ehc->i.dev) { 1471c6fd2807SJeff Garzik /* speed down */ 1472c6fd2807SJeff Garzik ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io, 1473c6fd2807SJeff Garzik all_err_mask); 1474c6fd2807SJeff Garzik 1475c6fd2807SJeff Garzik /* perform per-dev EH action only on the offending device */ 1476c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 1477c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 1478c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 1479c6fd2807SJeff Garzik } 1480c6fd2807SJeff Garzik 1481c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 1482c6fd2807SJeff Garzik } 1483c6fd2807SJeff Garzik 1484c6fd2807SJeff Garzik /** 1485c6fd2807SJeff Garzik * ata_eh_report - report error handling to user 1486c6fd2807SJeff Garzik * @ap: ATA port EH is going on 1487c6fd2807SJeff Garzik * 1488c6fd2807SJeff Garzik * Report EH to user. 1489c6fd2807SJeff Garzik * 1490c6fd2807SJeff Garzik * LOCKING: 1491c6fd2807SJeff Garzik * None. 1492c6fd2807SJeff Garzik */ 1493c6fd2807SJeff Garzik static void ata_eh_report(struct ata_port *ap) 1494c6fd2807SJeff Garzik { 1495c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1496c6fd2807SJeff Garzik const char *frozen, *desc; 1497c6fd2807SJeff Garzik int tag, nr_failed = 0; 1498c6fd2807SJeff Garzik 1499c6fd2807SJeff Garzik desc = NULL; 1500c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 1501c6fd2807SJeff Garzik desc = ehc->i.desc; 1502c6fd2807SJeff Garzik 1503c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1504c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1505c6fd2807SJeff Garzik 1506c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1507c6fd2807SJeff Garzik continue; 1508c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 1509c6fd2807SJeff Garzik continue; 1510c6fd2807SJeff Garzik 1511c6fd2807SJeff Garzik nr_failed++; 1512c6fd2807SJeff Garzik } 1513c6fd2807SJeff Garzik 1514c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 1515c6fd2807SJeff Garzik return; 1516c6fd2807SJeff Garzik 1517c6fd2807SJeff Garzik frozen = ""; 1518c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1519c6fd2807SJeff Garzik frozen = " frozen"; 1520c6fd2807SJeff Garzik 1521c6fd2807SJeff Garzik if (ehc->i.dev) { 1522c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 1523c6fd2807SJeff Garzik "SAct 0x%x SErr 0x%x action 0x%x%s\n", 1524c6fd2807SJeff Garzik ehc->i.err_mask, ap->sactive, ehc->i.serror, 1525c6fd2807SJeff Garzik ehc->i.action, frozen); 1526c6fd2807SJeff Garzik if (desc) 1527c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc); 1528c6fd2807SJeff Garzik } else { 1529c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x " 1530c6fd2807SJeff Garzik "SAct 0x%x SErr 0x%x action 0x%x%s\n", 1531c6fd2807SJeff Garzik ehc->i.err_mask, ap->sactive, ehc->i.serror, 1532c6fd2807SJeff Garzik ehc->i.action, frozen); 1533c6fd2807SJeff Garzik if (desc) 1534c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "(%s)\n", desc); 1535c6fd2807SJeff Garzik } 1536c6fd2807SJeff Garzik 1537c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 15388a937581STejun Heo static const char *dma_str[] = { 15398a937581STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 15408a937581STejun Heo [DMA_TO_DEVICE] = "out", 15418a937581STejun Heo [DMA_FROM_DEVICE] = "in", 15428a937581STejun Heo [DMA_NONE] = "", 15438a937581STejun Heo }; 
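/* For each failed command, the report below dumps the issued taskfile (cmd) and the result taskfile (res): command/feature/nsect and the LBA bytes, their LBA48 hob_* counterparts and the device register, plus the tag, CDB opcode, transfer size and direction, and err_mask together with its decoded string. */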
1544c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 15458a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 1546c6fd2807SJeff Garzik 1547c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) 1548c6fd2807SJeff Garzik continue; 1549c6fd2807SJeff Garzik 15508a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 15518a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 1552664e8503STejun Heo "tag %d cdb 0x%x data %u %s\n " 15538a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 15548a937581STejun Heo "Emask 0x%x (%s)\n", 15558a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 15568a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 15578a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 15588a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 1559726f0785STejun Heo cmd->device, qc->tag, qc->cdb[0], qc->nbytes, 1560664e8503STejun Heo dma_str[qc->dma_dir], 15618a937581STejun Heo res->command, res->feature, res->nsect, 15628a937581STejun Heo res->lbal, res->lbam, res->lbah, 15638a937581STejun Heo res->hob_feature, res->hob_nsect, 15648a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 15658a937581STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask)); 1566c6fd2807SJeff Garzik } 1567c6fd2807SJeff Garzik } 1568c6fd2807SJeff Garzik 1569c6fd2807SJeff Garzik static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 1570d4b2bab4STejun Heo unsigned int *classes, unsigned long deadline) 1571c6fd2807SJeff Garzik { 1572c6fd2807SJeff Garzik int i, rc; 1573c6fd2807SJeff Garzik 1574c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1575c6fd2807SJeff Garzik classes[i] = ATA_DEV_UNKNOWN; 1576c6fd2807SJeff Garzik 1577d4b2bab4STejun Heo rc = reset(ap, classes, deadline); 1578c6fd2807SJeff Garzik if (rc) 1579c6fd2807SJeff Garzik return rc; 1580c6fd2807SJeff Garzik 1581c6fd2807SJeff Garzik /* If any class isn't ATA_DEV_UNKNOWN, consider classification 1582c6fd2807SJeff Garzik * is complete and convert all ATA_DEV_UNKNOWN to 1583c6fd2807SJeff Garzik * ATA_DEV_NONE. 
1584c6fd2807SJeff Garzik */ 1585c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1586c6fd2807SJeff Garzik if (classes[i] != ATA_DEV_UNKNOWN) 1587c6fd2807SJeff Garzik break; 1588c6fd2807SJeff Garzik 1589c6fd2807SJeff Garzik if (i < ATA_MAX_DEVICES) 1590c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1591c6fd2807SJeff Garzik if (classes[i] == ATA_DEV_UNKNOWN) 1592c6fd2807SJeff Garzik classes[i] = ATA_DEV_NONE; 1593c6fd2807SJeff Garzik 1594c6fd2807SJeff Garzik return 0; 1595c6fd2807SJeff Garzik } 1596c6fd2807SJeff Garzik 1597c6fd2807SJeff Garzik static int ata_eh_followup_srst_needed(int rc, int classify, 1598c6fd2807SJeff Garzik const unsigned int *classes) 1599c6fd2807SJeff Garzik { 1600c6fd2807SJeff Garzik if (rc == -EAGAIN) 1601c6fd2807SJeff Garzik return 1; 1602c6fd2807SJeff Garzik if (rc != 0) 1603c6fd2807SJeff Garzik return 0; 1604c6fd2807SJeff Garzik if (classify && classes[0] == ATA_DEV_UNKNOWN) 1605c6fd2807SJeff Garzik return 1; 1606c6fd2807SJeff Garzik return 0; 1607c6fd2807SJeff Garzik } 1608c6fd2807SJeff Garzik 1609c6fd2807SJeff Garzik static int ata_eh_reset(struct ata_port *ap, int classify, 1610c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 1611c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 1612c6fd2807SJeff Garzik { 1613c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1614c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 1615c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 161631daabdaSTejun Heo int try = 0; 161731daabdaSTejun Heo unsigned long deadline; 1618c6fd2807SJeff Garzik unsigned int action; 1619c6fd2807SJeff Garzik ata_reset_fn_t reset; 16208b5bb2faSTejun Heo int i, rc; 1621c6fd2807SJeff Garzik 1622c6fd2807SJeff Garzik /* about to reset */ 1623c6fd2807SJeff Garzik ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); 1624c6fd2807SJeff Garzik 1625c6fd2807SJeff Garzik /* Determine which reset to use and record in ehc->i.action. 1626c6fd2807SJeff Garzik * prereset() may examine and modify it. 1627c6fd2807SJeff Garzik */ 1628c6fd2807SJeff Garzik action = ehc->i.action; 1629c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_RESET_MASK; 1630c6fd2807SJeff Garzik if (softreset && (!hardreset || (!sata_set_spd_needed(ap) && 1631c6fd2807SJeff Garzik !(action & ATA_EH_HARDRESET)))) 1632c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 1633c6fd2807SJeff Garzik else 1634c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_HARDRESET; 1635c6fd2807SJeff Garzik 1636c6fd2807SJeff Garzik if (prereset) { 163731daabdaSTejun Heo rc = prereset(ap, jiffies + ATA_EH_PRERESET_TIMEOUT); 1638c6fd2807SJeff Garzik if (rc) { 1639c961922bSAlan Cox if (rc == -ENOENT) { 16404aa9ab67STejun Heo ata_port_printk(ap, KERN_DEBUG, 16414aa9ab67STejun Heo "port disabled. 
ignoring.\n"); 1642c961922bSAlan Cox ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 16434aa9ab67STejun Heo 16444aa9ab67STejun Heo for (i = 0; i < ATA_MAX_DEVICES; i++) 16454aa9ab67STejun Heo classes[i] = ATA_DEV_NONE; 16464aa9ab67STejun Heo 16474aa9ab67STejun Heo rc = 0; 1648c961922bSAlan Cox } else 1649c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, 1650c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc); 1651c6fd2807SJeff Garzik return rc; 1652c6fd2807SJeff Garzik } 1653c6fd2807SJeff Garzik } 1654c6fd2807SJeff Garzik 1655c6fd2807SJeff Garzik /* prereset() might have modified ehc->i.action */ 1656c6fd2807SJeff Garzik if (ehc->i.action & ATA_EH_HARDRESET) 1657c6fd2807SJeff Garzik reset = hardreset; 1658c6fd2807SJeff Garzik else if (ehc->i.action & ATA_EH_SOFTRESET) 1659c6fd2807SJeff Garzik reset = softreset; 1660c6fd2807SJeff Garzik else { 1661c6fd2807SJeff Garzik /* prereset told us not to reset, bang classes and return */ 1662c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1663c6fd2807SJeff Garzik classes[i] = ATA_DEV_NONE; 1664c6fd2807SJeff Garzik return 0; 1665c6fd2807SJeff Garzik } 1666c6fd2807SJeff Garzik 1667c6fd2807SJeff Garzik /* did prereset() screw up? if so, fix up to avoid oopsing */ 1668c6fd2807SJeff Garzik if (!reset) { 1669c6fd2807SJeff Garzik if (softreset) 1670c6fd2807SJeff Garzik reset = softreset; 1671c6fd2807SJeff Garzik else 1672c6fd2807SJeff Garzik reset = hardreset; 1673c6fd2807SJeff Garzik } 1674c6fd2807SJeff Garzik 1675c6fd2807SJeff Garzik retry: 167631daabdaSTejun Heo deadline = jiffies + ata_eh_reset_timeouts[try++]; 167731daabdaSTejun Heo 1678c6fd2807SJeff Garzik /* shut up during boot probing */ 1679c6fd2807SJeff Garzik if (verbose) 1680c6fd2807SJeff Garzik ata_port_printk(ap, KERN_INFO, "%s resetting port\n", 1681c6fd2807SJeff Garzik reset == softreset ? 
"soft" : "hard"); 1682c6fd2807SJeff Garzik 1683c6fd2807SJeff Garzik /* mark that this EH session started with reset */ 16840d64a233STejun Heo if (reset == hardreset) 16850d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_HARDRESET; 16860d64a233STejun Heo else 16870d64a233STejun Heo ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 1688c6fd2807SJeff Garzik 168931daabdaSTejun Heo rc = ata_do_reset(ap, reset, classes, deadline); 1690c6fd2807SJeff Garzik 1691c6fd2807SJeff Garzik if (reset == hardreset && 1692c6fd2807SJeff Garzik ata_eh_followup_srst_needed(rc, classify, classes)) { 1693c6fd2807SJeff Garzik /* okay, let's do follow-up softreset */ 1694c6fd2807SJeff Garzik reset = softreset; 1695c6fd2807SJeff Garzik 1696c6fd2807SJeff Garzik if (!reset) { 1697c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, 1698c6fd2807SJeff Garzik "follow-up softreset required " 1699c6fd2807SJeff Garzik "but no softreset avaliable\n"); 1700c6fd2807SJeff Garzik return -EINVAL; 1701c6fd2807SJeff Garzik } 1702c6fd2807SJeff Garzik 1703c6fd2807SJeff Garzik ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); 170431daabdaSTejun Heo rc = ata_do_reset(ap, reset, classes, deadline); 1705c6fd2807SJeff Garzik 1706c6fd2807SJeff Garzik if (rc == 0 && classify && 1707c6fd2807SJeff Garzik classes[0] == ATA_DEV_UNKNOWN) { 1708c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, 1709c6fd2807SJeff Garzik "classification failed\n"); 1710c6fd2807SJeff Garzik return -EINVAL; 1711c6fd2807SJeff Garzik } 1712c6fd2807SJeff Garzik } 1713c6fd2807SJeff Garzik 171431daabdaSTejun Heo if (rc && try < ARRAY_SIZE(ata_eh_reset_timeouts)) { 171531daabdaSTejun Heo unsigned long now = jiffies; 1716c6fd2807SJeff Garzik 171731daabdaSTejun Heo if (time_before(now, deadline)) { 171831daabdaSTejun Heo unsigned long delta = deadline - jiffies; 1719c6fd2807SJeff Garzik 172031daabdaSTejun Heo ata_port_printk(ap, KERN_WARNING, "reset failed " 172131daabdaSTejun Heo "(errno=%d), retrying in %u secs\n", 172231daabdaSTejun Heo rc, (jiffies_to_msecs(delta) + 999) / 1000); 1723c6fd2807SJeff Garzik 172431daabdaSTejun Heo schedule_timeout_uninterruptible(delta); 172531daabdaSTejun Heo } 172631daabdaSTejun Heo 172731daabdaSTejun Heo if (reset == hardreset && 172831daabdaSTejun Heo try == ARRAY_SIZE(ata_eh_reset_timeouts) - 1) 1729c6fd2807SJeff Garzik sata_down_spd_limit(ap); 1730c6fd2807SJeff Garzik if (hardreset) 1731c6fd2807SJeff Garzik reset = hardreset; 1732c6fd2807SJeff Garzik goto retry; 1733c6fd2807SJeff Garzik } 1734c6fd2807SJeff Garzik 1735c6fd2807SJeff Garzik if (rc == 0) { 1736c6fd2807SJeff Garzik /* After the reset, the device state is PIO 0 and the 1737c6fd2807SJeff Garzik * controller state is undefined. Record the mode. 
1738c6fd2807SJeff Garzik */ 1739c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1740c6fd2807SJeff Garzik ap->device[i].pio_mode = XFER_PIO_0; 1741c6fd2807SJeff Garzik 1742c6fd2807SJeff Garzik if (postreset) 1743c6fd2807SJeff Garzik postreset(ap, classes); 1744c6fd2807SJeff Garzik 1745c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 1746c6fd2807SJeff Garzik ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); 1747c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 1748c6fd2807SJeff Garzik } 1749c6fd2807SJeff Garzik 1750c6fd2807SJeff Garzik return rc; 1751c6fd2807SJeff Garzik } 1752c6fd2807SJeff Garzik 1753c6fd2807SJeff Garzik static int ata_eh_revalidate_and_attach(struct ata_port *ap, 1754c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 1755c6fd2807SJeff Garzik { 1756c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1757c6fd2807SJeff Garzik struct ata_device *dev; 17588c3c52a8STejun Heo unsigned int new_mask = 0; 1759c6fd2807SJeff Garzik unsigned long flags; 1760c6fd2807SJeff Garzik int i, rc = 0; 1761c6fd2807SJeff Garzik 1762c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1763c6fd2807SJeff Garzik 17648c3c52a8STejun Heo /* For PATA drive side cable detection to work, IDENTIFY must 17658c3c52a8STejun Heo * be done backwards such that PDIAG- is released by the slave 17668c3c52a8STejun Heo * device before the master device is identified. 17678c3c52a8STejun Heo */ 17688c3c52a8STejun Heo for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) { 1769bff04647STejun Heo unsigned int action, readid_flags = 0; 1770c6fd2807SJeff Garzik 1771c6fd2807SJeff Garzik dev = &ap->device[i]; 1772c6fd2807SJeff Garzik action = ata_eh_dev_action(dev); 1773c6fd2807SJeff Garzik 1774bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 1775bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 1776bff04647STejun Heo 17779666f400STejun Heo if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 1778c6fd2807SJeff Garzik if (ata_port_offline(ap)) { 1779c6fd2807SJeff Garzik rc = -EIO; 17808c3c52a8STejun Heo goto err; 1781c6fd2807SJeff Garzik } 1782c6fd2807SJeff Garzik 1783c6fd2807SJeff Garzik ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); 1784bff04647STejun Heo rc = ata_dev_revalidate(dev, readid_flags); 1785c6fd2807SJeff Garzik if (rc) 17868c3c52a8STejun Heo goto err; 1787c6fd2807SJeff Garzik 1788c6fd2807SJeff Garzik ata_eh_done(ap, dev, ATA_EH_REVALIDATE); 1789c6fd2807SJeff Garzik 1790baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 1791baa1e78aSTejun Heo * transfer mode. 1792baa1e78aSTejun Heo */ 1793baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 1794baa1e78aSTejun Heo 1795c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */ 1796c6fd2807SJeff Garzik queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); 1797c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN && 1798c6fd2807SJeff Garzik ehc->tries[dev->devno] && 1799c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) { 1800c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno]; 1801c6fd2807SJeff Garzik 1802bff04647STejun Heo rc = ata_dev_read_id(dev, &dev->class, readid_flags, 1803bff04647STejun Heo dev->id); 18048c3c52a8STejun Heo switch (rc) { 18058c3c52a8STejun Heo case 0: 18068c3c52a8STejun Heo new_mask |= 1 << i; 18078c3c52a8STejun Heo break; 18088c3c52a8STejun Heo case -ENOENT: 180955a8e2c8STejun Heo /* IDENTIFY was issued to non-existent 181055a8e2c8STejun Heo * device. No need to reset. Just 181155a8e2c8STejun Heo * thaw and kill the device. 
181255a8e2c8STejun Heo */ 181355a8e2c8STejun Heo ata_eh_thaw_port(ap); 181455a8e2c8STejun Heo dev->class = ATA_DEV_UNKNOWN; 1815c6fd2807SJeff Garzik break; 18168c3c52a8STejun Heo default: 18178c3c52a8STejun Heo dev->class = ATA_DEV_UNKNOWN; 18188c3c52a8STejun Heo goto err; 18198c3c52a8STejun Heo } 18208c3c52a8STejun Heo } 1821c6fd2807SJeff Garzik } 1822c6fd2807SJeff Garzik 1823c1c4e8d5STejun Heo /* PDIAG- should have been released, ask cable type if post-reset */ 1824c1c4e8d5STejun Heo if ((ehc->i.flags & ATA_EHI_DID_RESET) && ap->ops->cable_detect) 1825c1c4e8d5STejun Heo ap->cbl = ap->ops->cable_detect(ap); 1826c1c4e8d5STejun Heo 18278c3c52a8STejun Heo /* Configure new devices forward such that user doesn't see 18288c3c52a8STejun Heo * device detection messages backwards. 18298c3c52a8STejun Heo */ 18308c3c52a8STejun Heo for (i = 0; i < ATA_MAX_DEVICES; i++) { 18318c3c52a8STejun Heo dev = &ap->device[i]; 18328c3c52a8STejun Heo 18338c3c52a8STejun Heo if (!(new_mask & (1 << i))) 18348c3c52a8STejun Heo continue; 18358c3c52a8STejun Heo 18368c3c52a8STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO; 18378c3c52a8STejun Heo rc = ata_dev_configure(dev); 18388c3c52a8STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO; 18398c3c52a8STejun Heo if (rc) 18408c3c52a8STejun Heo goto err; 18418c3c52a8STejun Heo 1842c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1843c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1844c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1845baa1e78aSTejun Heo 184655a8e2c8STejun Heo /* new device discovered, configure xfermode */ 1847baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE; 1848c6fd2807SJeff Garzik } 1849c6fd2807SJeff Garzik 18508c3c52a8STejun Heo return 0; 18518c3c52a8STejun Heo 18528c3c52a8STejun Heo err: 1853c6fd2807SJeff Garzik *r_failed_dev = dev; 18548c3c52a8STejun Heo DPRINTK("EXIT rc=%d\n", rc); 1855c6fd2807SJeff Garzik return rc; 1856c6fd2807SJeff Garzik } 1857c6fd2807SJeff Garzik 1858c6fd2807SJeff Garzik static int ata_port_nr_enabled(struct ata_port *ap) 1859c6fd2807SJeff Garzik { 1860c6fd2807SJeff Garzik int i, cnt = 0; 1861c6fd2807SJeff Garzik 1862c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1863c6fd2807SJeff Garzik if (ata_dev_enabled(&ap->device[i])) 1864c6fd2807SJeff Garzik cnt++; 1865c6fd2807SJeff Garzik return cnt; 1866c6fd2807SJeff Garzik } 1867c6fd2807SJeff Garzik 1868c6fd2807SJeff Garzik static int ata_port_nr_vacant(struct ata_port *ap) 1869c6fd2807SJeff Garzik { 1870c6fd2807SJeff Garzik int i, cnt = 0; 1871c6fd2807SJeff Garzik 1872c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1873c6fd2807SJeff Garzik if (ap->device[i].class == ATA_DEV_UNKNOWN) 1874c6fd2807SJeff Garzik cnt++; 1875c6fd2807SJeff Garzik return cnt; 1876c6fd2807SJeff Garzik } 1877c6fd2807SJeff Garzik 1878c6fd2807SJeff Garzik static int ata_eh_skip_recovery(struct ata_port *ap) 1879c6fd2807SJeff Garzik { 1880c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1881c6fd2807SJeff Garzik int i; 1882c6fd2807SJeff Garzik 1883c6fd2807SJeff Garzik /* thaw frozen port, resume link and recover failed devices */ 1884c6fd2807SJeff Garzik if ((ap->pflags & ATA_PFLAG_FROZEN) || 1885c6fd2807SJeff Garzik (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap)) 1886c6fd2807SJeff Garzik return 0; 1887c6fd2807SJeff Garzik 1888c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 1889c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 1890c6fd2807SJeff Garzik struct ata_device *dev = 
&ap->device[i]; 1891c6fd2807SJeff Garzik 1892c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN && 1893c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE) 1894c6fd2807SJeff Garzik return 0; 1895c6fd2807SJeff Garzik } 1896c6fd2807SJeff Garzik 1897c6fd2807SJeff Garzik return 1; 1898c6fd2807SJeff Garzik } 1899c6fd2807SJeff Garzik 1900c6fd2807SJeff Garzik /** 1901c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error 1902c6fd2807SJeff Garzik * @ap: host port to recover 1903c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 1904c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 1905c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 1906c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 1907c6fd2807SJeff Garzik * 1908c6fd2807SJeff Garzik * This is the alpha and omega, eum and yang, heart and soul of 1909c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to 1910c6fd2807SJeff Garzik * recover the port and hotplug requests are recorded in 1911c6fd2807SJeff Garzik * eh_context. This function executes all the operations with 1912c6fd2807SJeff Garzik * appropriate retrials and fallbacks to resurrect failed 1913c6fd2807SJeff Garzik * devices, detach goners and greet newcomers. 1914c6fd2807SJeff Garzik * 1915c6fd2807SJeff Garzik * LOCKING: 1916c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1917c6fd2807SJeff Garzik * 1918c6fd2807SJeff Garzik * RETURNS: 1919c6fd2807SJeff Garzik * 0 on success, -errno on failure. 1920c6fd2807SJeff Garzik */ 1921c6fd2807SJeff Garzik static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 1922c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 1923c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 1924c6fd2807SJeff Garzik { 1925c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1926c6fd2807SJeff Garzik struct ata_device *dev; 19274ae72a1eSTejun Heo int i, rc; 1928c6fd2807SJeff Garzik 1929c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1930c6fd2807SJeff Garzik 1931c6fd2807SJeff Garzik /* prep for recovery */ 1932c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 1933c6fd2807SJeff Garzik dev = &ap->device[i]; 1934c6fd2807SJeff Garzik 1935c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 1936c6fd2807SJeff Garzik 193779a55b72STejun Heo /* collect port action mask recorded in dev actions */ 193879a55b72STejun Heo ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK; 193979a55b72STejun Heo ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK; 194079a55b72STejun Heo 1941c6fd2807SJeff Garzik /* process hotplug request */ 1942c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 1943c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 1944c6fd2807SJeff Garzik 1945c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) && 1946c6fd2807SJeff Garzik ((ehc->i.probe_mask & (1 << dev->devno)) && 1947c6fd2807SJeff Garzik !(ehc->did_probe_mask & (1 << dev->devno)))) { 1948c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 1949c6fd2807SJeff Garzik ata_dev_init(dev); 1950c6fd2807SJeff Garzik ehc->did_probe_mask |= (1 << dev->devno); 1951c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 1952c6fd2807SJeff Garzik } 1953c6fd2807SJeff Garzik } 1954c6fd2807SJeff Garzik 1955c6fd2807SJeff Garzik retry: 1956c6fd2807SJeff Garzik rc = 0; 1957c6fd2807SJeff Garzik 1958c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 1959c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 1960c6fd2807SJeff Garzik goto out; 
1961c6fd2807SJeff Garzik 1962c6fd2807SJeff Garzik /* skip EH if possible. */ 1963c6fd2807SJeff Garzik if (ata_eh_skip_recovery(ap)) 1964c6fd2807SJeff Garzik ehc->i.action = 0; 1965c6fd2807SJeff Garzik 1966c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1967c6fd2807SJeff Garzik ehc->classes[i] = ATA_DEV_UNKNOWN; 1968c6fd2807SJeff Garzik 1969c6fd2807SJeff Garzik /* reset */ 1970c6fd2807SJeff Garzik if (ehc->i.action & ATA_EH_RESET_MASK) { 1971c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 1972c6fd2807SJeff Garzik 1973c6fd2807SJeff Garzik rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset, 1974c6fd2807SJeff Garzik softreset, hardreset, postreset); 1975c6fd2807SJeff Garzik if (rc) { 1976c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, 1977c6fd2807SJeff Garzik "reset failed, giving up\n"); 1978c6fd2807SJeff Garzik goto out; 1979c6fd2807SJeff Garzik } 1980c6fd2807SJeff Garzik 1981c6fd2807SJeff Garzik ata_eh_thaw_port(ap); 1982c6fd2807SJeff Garzik } 1983c6fd2807SJeff Garzik 1984c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 1985c6fd2807SJeff Garzik rc = ata_eh_revalidate_and_attach(ap, &dev); 1986c6fd2807SJeff Garzik if (rc) 1987c6fd2807SJeff Garzik goto dev_fail; 1988c6fd2807SJeff Garzik 1989baa1e78aSTejun Heo /* configure transfer mode if necessary */ 1990baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 1991c6fd2807SJeff Garzik rc = ata_set_mode(ap, &dev); 19924ae72a1eSTejun Heo if (rc) 1993c6fd2807SJeff Garzik goto dev_fail; 1994baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 1995c6fd2807SJeff Garzik } 1996c6fd2807SJeff Garzik 1997c6fd2807SJeff Garzik goto out; 1998c6fd2807SJeff Garzik 1999c6fd2807SJeff Garzik dev_fail: 20004ae72a1eSTejun Heo ehc->tries[dev->devno]--; 20014ae72a1eSTejun Heo 2002c6fd2807SJeff Garzik switch (rc) { 20034ae72a1eSTejun Heo case -ENODEV: 20044ae72a1eSTejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 20054ae72a1eSTejun Heo ehc->i.probe_mask |= (1 << dev->devno); 20068575b814STejun Heo case -EINVAL: 20074ae72a1eSTejun Heo /* give it just one more chance */ 20084ae72a1eSTejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 2009c6fd2807SJeff Garzik case -EIO: 20104ae72a1eSTejun Heo if (ehc->tries[dev->devno] == 1) { 20114ae72a1eSTejun Heo /* This is the last chance, better to slow 20124ae72a1eSTejun Heo * down than lose it. 
20134ae72a1eSTejun Heo */ 2014c6fd2807SJeff Garzik sata_down_spd_limit(ap); 20154ae72a1eSTejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 20164ae72a1eSTejun Heo } 2017c6fd2807SJeff Garzik } 2018c6fd2807SJeff Garzik 2019c6fd2807SJeff Garzik if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 2020c6fd2807SJeff Garzik /* disable device if it has used up all its chances */ 2021c6fd2807SJeff Garzik ata_dev_disable(dev); 2022c6fd2807SJeff Garzik 2023c6fd2807SJeff Garzik /* detach if offline */ 2024c6fd2807SJeff Garzik if (ata_port_offline(ap)) 2025c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2026c6fd2807SJeff Garzik 2027c6fd2807SJeff Garzik /* probe if requested */ 2028c6fd2807SJeff Garzik if ((ehc->i.probe_mask & (1 << dev->devno)) && 2029c6fd2807SJeff Garzik !(ehc->did_probe_mask & (1 << dev->devno))) { 2030c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2031c6fd2807SJeff Garzik ata_dev_init(dev); 2032c6fd2807SJeff Garzik 2033c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2034c6fd2807SJeff Garzik ehc->did_probe_mask |= (1 << dev->devno); 2035c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 2036c6fd2807SJeff Garzik } 2037c6fd2807SJeff Garzik } else { 2038c6fd2807SJeff Garzik /* soft didn't work? be haaaaard */ 2039c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_DID_RESET) 2040c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_HARDRESET; 2041c6fd2807SJeff Garzik else 2042c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 2043c6fd2807SJeff Garzik } 2044c6fd2807SJeff Garzik 2045c6fd2807SJeff Garzik if (ata_port_nr_enabled(ap)) { 2046c6fd2807SJeff Garzik ata_port_printk(ap, KERN_WARNING, "failed to recover some " 2047c6fd2807SJeff Garzik "devices, retrying in 5 secs\n"); 2048c6fd2807SJeff Garzik ssleep(5); 2049c6fd2807SJeff Garzik } else { 2050c6fd2807SJeff Garzik /* no device left, repeat fast */ 2051c6fd2807SJeff Garzik msleep(500); 2052c6fd2807SJeff Garzik } 2053c6fd2807SJeff Garzik 2054c6fd2807SJeff Garzik goto retry; 2055c6fd2807SJeff Garzik 2056c6fd2807SJeff Garzik out: 2057c6fd2807SJeff Garzik if (rc) { 2058c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 2059c6fd2807SJeff Garzik ata_dev_disable(&ap->device[i]); 2060c6fd2807SJeff Garzik } 2061c6fd2807SJeff Garzik 2062c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 2063c6fd2807SJeff Garzik return rc; 2064c6fd2807SJeff Garzik } 2065c6fd2807SJeff Garzik 2066c6fd2807SJeff Garzik /** 2067c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 2068c6fd2807SJeff Garzik * @ap: host port to finish EH for 2069c6fd2807SJeff Garzik * 2070c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 2071c6fd2807SJeff Garzik * failed qcs. 2072c6fd2807SJeff Garzik * 2073c6fd2807SJeff Garzik * LOCKING: 2074c6fd2807SJeff Garzik * None. 2075c6fd2807SJeff Garzik */ 2076c6fd2807SJeff Garzik static void ata_eh_finish(struct ata_port *ap) 2077c6fd2807SJeff Garzik { 2078c6fd2807SJeff Garzik int tag; 2079c6fd2807SJeff Garzik 2080c6fd2807SJeff Garzik /* retry or finish qcs */ 2081c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2082c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2083c6fd2807SJeff Garzik 2084c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 2085c6fd2807SJeff Garzik continue; 2086c6fd2807SJeff Garzik 2087c6fd2807SJeff Garzik if (qc->err_mask) { 2088c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 2089c6fd2807SJeff Garzik * generate sense data in this function, 2090c6fd2807SJeff Garzik * considering both err_mask and tf. 
2091c6fd2807SJeff Garzik */ 2092c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_INVALID) 2093c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 2094c6fd2807SJeff Garzik else 2095c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 2096c6fd2807SJeff Garzik } else { 2097c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 2098c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 2099c6fd2807SJeff Garzik } else { 2100c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 2101c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 2102c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 2103c6fd2807SJeff Garzik } 2104c6fd2807SJeff Garzik } 2105c6fd2807SJeff Garzik } 2106c6fd2807SJeff Garzik } 2107c6fd2807SJeff Garzik 2108c6fd2807SJeff Garzik /** 2109c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 2110c6fd2807SJeff Garzik * @ap: host port to handle error for 2111c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 2112c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 2113c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 2114c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 2115c6fd2807SJeff Garzik * 2116c6fd2807SJeff Garzik * Perform standard error handling sequence. 2117c6fd2807SJeff Garzik * 2118c6fd2807SJeff Garzik * LOCKING: 2119c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2120c6fd2807SJeff Garzik */ 2121c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 2122c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2123c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 2124c6fd2807SJeff Garzik { 2125c6fd2807SJeff Garzik ata_eh_autopsy(ap); 2126c6fd2807SJeff Garzik ata_eh_report(ap); 2127c6fd2807SJeff Garzik ata_eh_recover(ap, prereset, softreset, hardreset, postreset); 2128c6fd2807SJeff Garzik ata_eh_finish(ap); 2129c6fd2807SJeff Garzik } 2130c6fd2807SJeff Garzik 21316ffa01d8STejun Heo #ifdef CONFIG_PM 2132c6fd2807SJeff Garzik /** 2133c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 2134c6fd2807SJeff Garzik * @ap: port to suspend 2135c6fd2807SJeff Garzik * 2136c6fd2807SJeff Garzik * Suspend @ap. 2137c6fd2807SJeff Garzik * 2138c6fd2807SJeff Garzik * LOCKING: 2139c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2140c6fd2807SJeff Garzik */ 2141c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 2142c6fd2807SJeff Garzik { 2143c6fd2807SJeff Garzik unsigned long flags; 2144c6fd2807SJeff Garzik int rc = 0; 2145c6fd2807SJeff Garzik 2146c6fd2807SJeff Garzik /* are we suspending? 
*/ 2147c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2148c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 2149c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) { 2150c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2151c6fd2807SJeff Garzik return; 2152c6fd2807SJeff Garzik } 2153c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2154c6fd2807SJeff Garzik 2155c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 2156c6fd2807SJeff Garzik 2157*64578a3dSTejun Heo /* tell ACPI we're suspending */ 2158*64578a3dSTejun Heo rc = ata_acpi_on_suspend(ap); 2159*64578a3dSTejun Heo if (rc) 2160*64578a3dSTejun Heo goto out; 2161*64578a3dSTejun Heo 2162c6fd2807SJeff Garzik /* suspend */ 2163c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 2164c6fd2807SJeff Garzik 2165c6fd2807SJeff Garzik if (ap->ops->port_suspend) 2166c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg); 2167c6fd2807SJeff Garzik 2168*64578a3dSTejun Heo out: 2169c6fd2807SJeff Garzik /* report result */ 2170c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2171c6fd2807SJeff Garzik 2172c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING; 2173c6fd2807SJeff Garzik if (rc == 0) 2174c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED; 2175*64578a3dSTejun Heo else if (ap->pflags & ATA_PFLAG_FROZEN) 2176c6fd2807SJeff Garzik ata_port_schedule_eh(ap); 2177c6fd2807SJeff Garzik 2178c6fd2807SJeff Garzik if (ap->pm_result) { 2179c6fd2807SJeff Garzik *ap->pm_result = rc; 2180c6fd2807SJeff Garzik ap->pm_result = NULL; 2181c6fd2807SJeff Garzik } 2182c6fd2807SJeff Garzik 2183c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2184c6fd2807SJeff Garzik 2185c6fd2807SJeff Garzik return; 2186c6fd2807SJeff Garzik } 2187c6fd2807SJeff Garzik 2188c6fd2807SJeff Garzik /** 2189c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation 2190c6fd2807SJeff Garzik * @ap: port to resume 2191c6fd2807SJeff Garzik * 2192c6fd2807SJeff Garzik * Resume @ap. 2193c6fd2807SJeff Garzik * 2194c6fd2807SJeff Garzik * LOCKING: 2195c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2196c6fd2807SJeff Garzik */ 2197c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap) 2198c6fd2807SJeff Garzik { 2199c6fd2807SJeff Garzik unsigned long flags; 22009666f400STejun Heo int rc = 0; 2201c6fd2807SJeff Garzik 2202c6fd2807SJeff Garzik /* are we resuming? 
*/ 2203c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2204c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 2205c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) { 2206c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2207c6fd2807SJeff Garzik return; 2208c6fd2807SJeff Garzik } 2209c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2210c6fd2807SJeff Garzik 22119666f400STejun Heo WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 2212c6fd2807SJeff Garzik 2213c6fd2807SJeff Garzik if (ap->ops->port_resume) 2214c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 2215c6fd2807SJeff Garzik 22166746544cSTejun Heo /* tell ACPI that we're resuming */ 22176746544cSTejun Heo ata_acpi_on_resume(ap); 22186746544cSTejun Heo 22199666f400STejun Heo /* report result */ 2220c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2221c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 2222c6fd2807SJeff Garzik if (ap->pm_result) { 2223c6fd2807SJeff Garzik *ap->pm_result = rc; 2224c6fd2807SJeff Garzik ap->pm_result = NULL; 2225c6fd2807SJeff Garzik } 2226c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2227c6fd2807SJeff Garzik } 22286ffa01d8STejun Heo #endif /* CONFIG_PM */ 2229