/*
 * libata-eh.c - libata error handling
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *		  Please ALWAYS copy linux-ide@vger.kernel.org
 *		  on emails.
 *
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
};

static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);

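/* The error ring (struct ata_ering) keeps a short, fixed-size history of
 * recent errors for a device: ata_ering_record() advances ->cursor around
 * the ATA_ERING_SIZE-entry ring and stamps the new entry with is_io,
 * err_mask and the current jiffies.  ata_ering_map() walks the entries
 * newest-first and stops at the first empty slot or when the callback
 * returns non-zero.  ata_eh_speed_down_verdict() below uses this history
 * to decide whether to disable NCQ, lower the transfer mode or fall back
 * to PIO.
 */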
static void ata_ering_record(struct ata_ering *ering, int is_io,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->is_io = is_io;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

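/* EH actions are tracked in two places: ehi->action holds port-wide bits
 * and ehi->dev_action[] holds per-device bits.  ata_eh_dev_action()
 * returns the union of the two for @dev.  ata_eh_clear_action() clears
 * @action bits; when called for a single device it first splits any
 * matching port-wide bits into per-device bits so that the other devices
 * keep theirs.
 */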
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	int i;

	if (!dev) {
		ehi->action &= ~action;
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ehi->dev_action[i] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				ehi->dev_action[i] |= ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point on, but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);
	} else
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
		ap->eh_context.i = ap->eh_info;
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* An exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in that case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--repeat_cnt) {
				ata_port_printk(ap, KERN_INFO,
					"EH pending after completion, "
					"repeating EH (cnt=%d)\n", repeat_cnt);
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_REPEAT);
		}

		/* this run is complete, make sure EH info is clear */
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if an exception occurs after this point but
		 * before EH completion, the SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

/**
 * ata_qc_timeout - Handle timeout of queued command
 * @qc: Command that timed out
 *
 * Some part of the kernel (currently, only the SCSI layer)
 * has noticed that the active command on the port has not
 * completed after a specified length of time.  Handle this
 * condition by disabling DMA (if necessary) and completing
 * transactions, with error if necessary.
 *
 * This also handles the case of the "lost interrupt", where
 * for some reason (possibly hardware bug, possibly driver bug)
 * an interrupt was not delivered to the driver, even though the
 * transaction completed successfully.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(ap->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
			       "stat 0x%x host_stat 0x%x\n",
			       qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= AC_ERR_TIMEOUT;
		break;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}

/**
 * ata_eng_timeout - Handle timeout of queued command
 * @ap: Port on which timed-out command is active
 *
 * Some part of the kernel (currently, only the SCSI layer)
 * has noticed that the active command on port @ap has not
 * completed after a specified length of time.  Handle this
 * condition by disabling DMA (if necessary) and completing
 * transactions, with error if necessary.
 *
 * This also handles the case of the "lost interrupt", where
 * for some reason (possibly hardware bug, possibly driver bug)
 * an interrupt was not delivered to the driver, even though the
 * transaction completed successfully.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

	DPRINTK("EXIT\n");
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	scsi_req_abort_cmd(qc->scsicmd);
}

/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	ap->pflags |= ATA_PFLAG_EH_PENDING;
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when an HSM violation or some other
 * condition disrupts normal operation of the port.  A frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * The ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	nr_aborted = ata_port_abort(ap);
	__ata_port_freeze(ap);

	return nr_aborted;
}

/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->id);
}

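/* Completion path used from EH: commands completed here bypass the normal
 * ->scsi_done() callback (ata_eh_scsidone is a no-op) and are instead
 * queued on ap->eh_done_q via scsi_eh_finish_cmd(), to be flushed by
 * ata_scsi_error() once EH is done.  ata_eh_qc_complete() sets
 * scmd->retries to scmd->allowed so the midlayer finishes the command
 * instead of retrying it, while ata_eh_qc_retry() lets it be retried and,
 * for failures unrelated to the command itself (qc->err_mask == 0), gives
 * a retry back by decrementing scmd->retries.
 */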
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_about_to_do - about to perform eh_action
 * @ap: target ATA port
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @ap->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
			       unsigned int action)
{
	unsigned long flags;
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_eh_context *ehc = &ap->eh_context;

	spin_lock_irqsave(ap->lock, flags);

	/* Reset is represented by combination of actions and EHI
	 * flags.  Suck in all related bits before clearing eh_info to
	 * avoid losing requested action.
	 */
	if (action & ATA_EH_RESET_MASK) {
		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

		/* make sure all reset actions are cleared & clear EHI flags */
		action |= ATA_EH_RESET_MASK;
		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, ehi, action);

	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_done - EH action complete
 * @ap: target ATA port
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @ap->eh_context.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
			unsigned int action)
{
	/* if reset is complete, clear all reset actions & reset modifier */
	if (action & ATA_EH_RESET_MASK) {
		action |= ATA_EH_RESET_MASK;
		ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	ata_eh_clear_action(dev, &ap->eh_context.i, action);
}

/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string.  Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * CONDITION.  This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   unsigned char *sense_buf)
{
	struct ata_port *ap = dev->ap;
	struct ata_taskfile tf;
	u8 cdb[ATAPI_CDB_LEN];

	DPRINTK("ATAPI request sense\n");

	ata_tf_init(dev, &tf);

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* XXX: why tf_read here? */
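	/* Reading the current taskfile captures the Error register in
	 * tf.feature; its high nibble (the ATAPI sense key) seeds
	 * sense_buf[2] below in case the REQUEST SENSE data is never
	 * filled in.
	 */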
	ap->ops->tf_read(ap, &tf);

	/* fill these in, for the case where they are -not- overwritten */
	sense_buf[0] = 0x70;
	sense_buf[2] = tf.feature >> 4;

	memset(cdb, 0, ATAPI_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = SCSI_SENSE_BUFFERSIZE;

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATA_PROT_ATAPI_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATA_PROT_ATAPI;
		tf.lbam = (8 * 1024) & 0xff;
		tf.lbah = (8 * 1024) >> 8;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE);
}

/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @ap: ATA port to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;

	if (serror & SERR_PERSISTENT) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
	}
	if (serror &
	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @ap: ATA port to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all LLDDs have to do
 * is set AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev = ap->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(ap->sactive & (1 << tag))) {
		ata_port_printk(ap, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->err_mask |= AC_ERR_DEV;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze the taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_SOFTRESET;
	}

	if (!(qc->err_mask & AC_ERR_DEV))
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						     qc->scsicmd->sense_buffer);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_SOFTRESET;

	return action;
}

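/* Classify an error for the speed-down logic below.  The categories match
 * the ones described in ata_eh_speed_down_verdict():
 *
 *	0 - error that does not count (e.g. media errors, or device
 *	    errors outside of normal I/O)
 *	1 - ATA bus error for any command
 *	2 - timeout for any command, or HSM violation during normal I/O
 *	3 - unclassified device error during normal I/O
 */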
static int ata_eh_categorize_error(int is_io, unsigned int err_mask)
{
	if (err_mask & AC_ERR_ATA_BUS)
		return 1;

	if (err_mask & AC_ERR_TIMEOUT)
		return 2;

	if (is_io) {
		if (err_mask & AC_ERR_HSM)
			return 2;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return 3;
	}

	return 0;
}

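/* Argument block for speed_down_verdict_cb(): @since is the cutoff
 * timestamp and nr_errors[] counts ring entries per category.  Returning
 * -1 makes ata_ering_map() stop at the first entry older than @since.
 */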
1228c6fd2807SJeff Garzik *
1229c6fd2807SJeff Garzik * RETURNS:
1230*7d47e8d4STejun Heo * OR of ATA_EH_SPDN_* flags.
1231c6fd2807SJeff Garzik */
1232*7d47e8d4STejun Heo static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1233c6fd2807SJeff Garzik {
1234*7d47e8d4STejun Heo const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1235*7d47e8d4STejun Heo u64 j64 = get_jiffies_64();
1236*7d47e8d4STejun Heo struct speed_down_verdict_arg arg;
1237*7d47e8d4STejun Heo unsigned int verdict = 0;
1238c6fd2807SJeff Garzik 
1239*7d47e8d4STejun Heo /* scan past 10 mins of error history */
1240c6fd2807SJeff Garzik memset(&arg, 0, sizeof(arg));
1241*7d47e8d4STejun Heo arg.since = j64 - min(j64, j10mins);
1242*7d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1243c6fd2807SJeff Garzik 
1244*7d47e8d4STejun Heo if (arg.nr_errors[2] + arg.nr_errors[3] > 3)
1245*7d47e8d4STejun Heo verdict |= ATA_EH_SPDN_NCQ_OFF;
1246*7d47e8d4STejun Heo if (arg.nr_errors[1] + arg.nr_errors[2] > 3 || arg.nr_errors[3] > 10)
1247*7d47e8d4STejun Heo verdict |= ATA_EH_SPDN_SPEED_DOWN;
1248c6fd2807SJeff Garzik 
1249*7d47e8d4STejun Heo /* scan past 5 mins of error history */
1250*7d47e8d4STejun Heo memset(&arg, 0, sizeof(arg));
1251*7d47e8d4STejun Heo arg.since = j64 - min(j64, j5mins);
1252*7d47e8d4STejun Heo ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1253c6fd2807SJeff Garzik 
1254*7d47e8d4STejun Heo if (arg.nr_errors[1] + arg.nr_errors[2] + arg.nr_errors[3] > 10)
1255*7d47e8d4STejun Heo verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1256*7d47e8d4STejun Heo 
1257*7d47e8d4STejun Heo return verdict;
1258c6fd2807SJeff Garzik }
1259c6fd2807SJeff Garzik 
1260c6fd2807SJeff Garzik /**
1261c6fd2807SJeff Garzik * ata_eh_speed_down - record error and speed down if necessary
1262c6fd2807SJeff Garzik * @dev: Failed device
1263c6fd2807SJeff Garzik * @is_io: Did the device fail during normal IO?
1264c6fd2807SJeff Garzik * @err_mask: err_mask of the error
1265c6fd2807SJeff Garzik *
1266c6fd2807SJeff Garzik * Record error and examine error history to determine whether
1267c6fd2807SJeff Garzik * adjusting transmission speed is necessary. It also sets
1268c6fd2807SJeff Garzik * transmission limits appropriately if such adjustment is
1269c6fd2807SJeff Garzik * necessary.
1270c6fd2807SJeff Garzik *
1271c6fd2807SJeff Garzik * LOCKING:
1272c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1273c6fd2807SJeff Garzik *
1274c6fd2807SJeff Garzik * RETURNS:
1275*7d47e8d4STejun Heo * Determined recovery action.
1276c6fd2807SJeff Garzik */
1277*7d47e8d4STejun Heo static unsigned int ata_eh_speed_down(struct ata_device *dev, int is_io,
1278c6fd2807SJeff Garzik unsigned int err_mask)
1279c6fd2807SJeff Garzik {
1280*7d47e8d4STejun Heo unsigned int verdict;
1281*7d47e8d4STejun Heo unsigned int action = 0;
1282*7d47e8d4STejun Heo 
1283*7d47e8d4STejun Heo /* don't bother if Cat-0 error */
1284*7d47e8d4STejun Heo if (ata_eh_categorize_error(is_io, err_mask) == 0)
1285c6fd2807SJeff Garzik return 0;
1286c6fd2807SJeff Garzik 
1287c6fd2807SJeff Garzik /* record error and determine whether speed down is necessary */
1288c6fd2807SJeff Garzik ata_ering_record(&dev->ering, is_io, err_mask);
1289*7d47e8d4STejun Heo verdict = ata_eh_speed_down_verdict(dev);
1290c6fd2807SJeff Garzik 
1291*7d47e8d4STejun Heo /* turn off NCQ?
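Disable it only when the verdict requests it and the device is actually running NCQ, i.e. ATA_DFLAG_NCQ is set while ATA_DFLAG_PIO and ATA_DFLAG_NCQ_OFF are both clear.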
*/ 1292*7d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 1293*7d47e8d4STejun Heo (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 1294*7d47e8d4STejun Heo ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 1295*7d47e8d4STejun Heo dev->flags |= ATA_DFLAG_NCQ_OFF; 1296*7d47e8d4STejun Heo ata_dev_printk(dev, KERN_WARNING, 1297*7d47e8d4STejun Heo "NCQ disabled due to excessive errors\n"); 1298*7d47e8d4STejun Heo goto done; 1299*7d47e8d4STejun Heo } 1300c6fd2807SJeff Garzik 1301*7d47e8d4STejun Heo /* speed down? */ 1302*7d47e8d4STejun Heo if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1303c6fd2807SJeff Garzik /* speed down SATA link speed if possible */ 1304*7d47e8d4STejun Heo if (sata_down_spd_limit(dev->ap) == 0) { 1305*7d47e8d4STejun Heo action |= ATA_EH_HARDRESET; 1306*7d47e8d4STejun Heo goto done; 1307*7d47e8d4STejun Heo } 1308c6fd2807SJeff Garzik 1309c6fd2807SJeff Garzik /* lower transfer mode */ 1310*7d47e8d4STejun Heo if (dev->spdn_cnt < 2) { 1311*7d47e8d4STejun Heo static const int dma_dnxfer_sel[] = 1312*7d47e8d4STejun Heo { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 1313*7d47e8d4STejun Heo static const int pio_dnxfer_sel[] = 1314*7d47e8d4STejun Heo { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 1315*7d47e8d4STejun Heo int sel; 1316c6fd2807SJeff Garzik 1317*7d47e8d4STejun Heo if (dev->xfer_shift != ATA_SHIFT_PIO) 1318*7d47e8d4STejun Heo sel = dma_dnxfer_sel[dev->spdn_cnt]; 1319*7d47e8d4STejun Heo else 1320*7d47e8d4STejun Heo sel = pio_dnxfer_sel[dev->spdn_cnt]; 1321*7d47e8d4STejun Heo 1322*7d47e8d4STejun Heo dev->spdn_cnt++; 1323*7d47e8d4STejun Heo 1324*7d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, sel) == 0) { 1325*7d47e8d4STejun Heo action |= ATA_EH_SOFTRESET; 1326*7d47e8d4STejun Heo goto done; 1327*7d47e8d4STejun Heo } 1328*7d47e8d4STejun Heo } 1329*7d47e8d4STejun Heo } 1330*7d47e8d4STejun Heo 1331*7d47e8d4STejun Heo /* Fall back to PIO? Slowing down to PIO is meaningless for 1332*7d47e8d4STejun Heo * SATA. Consider it only for PATA. 1333*7d47e8d4STejun Heo */ 1334*7d47e8d4STejun Heo if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1335*7d47e8d4STejun Heo (dev->ap->cbl != ATA_CBL_SATA) && 1336*7d47e8d4STejun Heo (dev->xfer_shift != ATA_SHIFT_PIO)) { 1337*7d47e8d4STejun Heo if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 1338*7d47e8d4STejun Heo dev->spdn_cnt = 0; 1339*7d47e8d4STejun Heo action |= ATA_EH_SOFTRESET; 1340*7d47e8d4STejun Heo goto done; 1341*7d47e8d4STejun Heo } 1342*7d47e8d4STejun Heo } 1343*7d47e8d4STejun Heo 1344c6fd2807SJeff Garzik return 0; 1345*7d47e8d4STejun Heo done: 1346*7d47e8d4STejun Heo /* device has been slowed down, blow error history */ 1347*7d47e8d4STejun Heo ata_ering_clear(&dev->ering); 1348*7d47e8d4STejun Heo return action; 1349c6fd2807SJeff Garzik } 1350c6fd2807SJeff Garzik 1351c6fd2807SJeff Garzik /** 1352c6fd2807SJeff Garzik * ata_eh_autopsy - analyze error and determine recovery action 1353c6fd2807SJeff Garzik * @ap: ATA port to perform autopsy on 1354c6fd2807SJeff Garzik * 1355c6fd2807SJeff Garzik * Analyze why @ap failed and determine which recovery action is 1356c6fd2807SJeff Garzik * needed. This function also sets more detailed AC_ERR_* values 1357c6fd2807SJeff Garzik * and fills sense data for ATAPI CHECK SENSE. 1358c6fd2807SJeff Garzik * 1359c6fd2807SJeff Garzik * LOCKING: 1360c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1361c6fd2807SJeff Garzik */ 1362c6fd2807SJeff Garzik static void ata_eh_autopsy(struct ata_port *ap) 1363c6fd2807SJeff Garzik { 1364c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1365c6fd2807SJeff Garzik unsigned int all_err_mask = 0; 1366c6fd2807SJeff Garzik int tag, is_io = 0; 1367c6fd2807SJeff Garzik u32 serror; 1368c6fd2807SJeff Garzik int rc; 1369c6fd2807SJeff Garzik 1370c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1371c6fd2807SJeff Garzik 1372c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1373c6fd2807SJeff Garzik return; 1374c6fd2807SJeff Garzik 1375c6fd2807SJeff Garzik /* obtain and analyze SError */ 1376c6fd2807SJeff Garzik rc = sata_scr_read(ap, SCR_ERROR, &serror); 1377c6fd2807SJeff Garzik if (rc == 0) { 1378c6fd2807SJeff Garzik ehc->i.serror |= serror; 1379c6fd2807SJeff Garzik ata_eh_analyze_serror(ap); 1380c6fd2807SJeff Garzik } else if (rc != -EOPNOTSUPP) 1381c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_HARDRESET; 1382c6fd2807SJeff Garzik 1383c6fd2807SJeff Garzik /* analyze NCQ failure */ 1384c6fd2807SJeff Garzik ata_eh_analyze_ncq_error(ap); 1385c6fd2807SJeff Garzik 1386c6fd2807SJeff Garzik /* any real error trumps AC_ERR_OTHER */ 1387c6fd2807SJeff Garzik if (ehc->i.err_mask & ~AC_ERR_OTHER) 1388c6fd2807SJeff Garzik ehc->i.err_mask &= ~AC_ERR_OTHER; 1389c6fd2807SJeff Garzik 1390c6fd2807SJeff Garzik all_err_mask |= ehc->i.err_mask; 1391c6fd2807SJeff Garzik 1392c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1393c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1394c6fd2807SJeff Garzik 1395c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1396c6fd2807SJeff Garzik continue; 1397c6fd2807SJeff Garzik 1398c6fd2807SJeff Garzik /* inherit upper level err_mask */ 1399c6fd2807SJeff Garzik qc->err_mask |= ehc->i.err_mask; 1400c6fd2807SJeff Garzik 1401c6fd2807SJeff Garzik /* analyze TF */ 1402c6fd2807SJeff Garzik ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 1403c6fd2807SJeff Garzik 1404c6fd2807SJeff Garzik /* DEV errors are probably spurious in case of ATA_BUS error */ 1405c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_ATA_BUS) 1406c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 1407c6fd2807SJeff Garzik AC_ERR_INVALID); 1408c6fd2807SJeff Garzik 1409c6fd2807SJeff Garzik /* any real error trumps unknown error */ 1410c6fd2807SJeff Garzik if (qc->err_mask & ~AC_ERR_OTHER) 1411c6fd2807SJeff Garzik qc->err_mask &= ~AC_ERR_OTHER; 1412c6fd2807SJeff Garzik 1413c6fd2807SJeff Garzik /* SENSE_VALID trumps dev/unknown error and revalidation */ 1414c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1415c6fd2807SJeff Garzik qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 1416c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_REVALIDATE; 1417c6fd2807SJeff Garzik } 1418c6fd2807SJeff Garzik 1419c6fd2807SJeff Garzik /* accumulate error info */ 1420c6fd2807SJeff Garzik ehc->i.dev = qc->dev; 1421c6fd2807SJeff Garzik all_err_mask |= qc->err_mask; 1422c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_IO) 1423c6fd2807SJeff Garzik is_io = 1; 1424c6fd2807SJeff Garzik } 1425c6fd2807SJeff Garzik 1426c6fd2807SJeff Garzik /* enforce default EH actions */ 1427c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN || 1428c6fd2807SJeff Garzik all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 1429c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 1430c6fd2807SJeff Garzik else if (all_err_mask) 1431c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 1432c6fd2807SJeff Garzik 1433c6fd2807SJeff Garzik /* 
if we have offending qcs and the associated failed device */ 1434c6fd2807SJeff Garzik if (ehc->i.dev) { 1435c6fd2807SJeff Garzik /* speed down */ 1436c6fd2807SJeff Garzik ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io, 1437c6fd2807SJeff Garzik all_err_mask); 1438c6fd2807SJeff Garzik 1439c6fd2807SJeff Garzik /* perform per-dev EH action only on the offending device */ 1440c6fd2807SJeff Garzik ehc->i.dev_action[ehc->i.dev->devno] |= 1441c6fd2807SJeff Garzik ehc->i.action & ATA_EH_PERDEV_MASK; 1442c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_PERDEV_MASK; 1443c6fd2807SJeff Garzik } 1444c6fd2807SJeff Garzik 1445c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 1446c6fd2807SJeff Garzik } 1447c6fd2807SJeff Garzik 1448c6fd2807SJeff Garzik /** 1449c6fd2807SJeff Garzik * ata_eh_report - report error handling to user 1450c6fd2807SJeff Garzik * @ap: ATA port EH is going on 1451c6fd2807SJeff Garzik * 1452c6fd2807SJeff Garzik * Report EH to user. 1453c6fd2807SJeff Garzik * 1454c6fd2807SJeff Garzik * LOCKING: 1455c6fd2807SJeff Garzik * None. 1456c6fd2807SJeff Garzik */ 1457c6fd2807SJeff Garzik static void ata_eh_report(struct ata_port *ap) 1458c6fd2807SJeff Garzik { 1459c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1460c6fd2807SJeff Garzik const char *frozen, *desc; 1461c6fd2807SJeff Garzik int tag, nr_failed = 0; 1462c6fd2807SJeff Garzik 1463c6fd2807SJeff Garzik desc = NULL; 1464c6fd2807SJeff Garzik if (ehc->i.desc[0] != '\0') 1465c6fd2807SJeff Garzik desc = ehc->i.desc; 1466c6fd2807SJeff Garzik 1467c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1468c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1469c6fd2807SJeff Garzik 1470c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 1471c6fd2807SJeff Garzik continue; 1472c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 1473c6fd2807SJeff Garzik continue; 1474c6fd2807SJeff Garzik 1475c6fd2807SJeff Garzik nr_failed++; 1476c6fd2807SJeff Garzik } 1477c6fd2807SJeff Garzik 1478c6fd2807SJeff Garzik if (!nr_failed && !ehc->i.err_mask) 1479c6fd2807SJeff Garzik return; 1480c6fd2807SJeff Garzik 1481c6fd2807SJeff Garzik frozen = ""; 1482c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_FROZEN) 1483c6fd2807SJeff Garzik frozen = " frozen"; 1484c6fd2807SJeff Garzik 1485c6fd2807SJeff Garzik if (ehc->i.dev) { 1486c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 1487c6fd2807SJeff Garzik "SAct 0x%x SErr 0x%x action 0x%x%s\n", 1488c6fd2807SJeff Garzik ehc->i.err_mask, ap->sactive, ehc->i.serror, 1489c6fd2807SJeff Garzik ehc->i.action, frozen); 1490c6fd2807SJeff Garzik if (desc) 1491c6fd2807SJeff Garzik ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc); 1492c6fd2807SJeff Garzik } else { 1493c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x " 1494c6fd2807SJeff Garzik "SAct 0x%x SErr 0x%x action 0x%x%s\n", 1495c6fd2807SJeff Garzik ehc->i.err_mask, ap->sactive, ehc->i.serror, 1496c6fd2807SJeff Garzik ehc->i.action, frozen); 1497c6fd2807SJeff Garzik if (desc) 1498c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "(%s)\n", desc); 1499c6fd2807SJeff Garzik } 1500c6fd2807SJeff Garzik 1501c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 15028a937581STejun Heo static const char *dma_str[] = { 15038a937581STejun Heo [DMA_BIDIRECTIONAL] = "bidi", 15048a937581STejun Heo [DMA_TO_DEVICE] = "out", 15058a937581STejun Heo [DMA_FROM_DEVICE] = "in", 15068a937581STejun Heo [DMA_NONE] = "", 15078a937581STejun Heo }; 
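/* For each failed command, log the issued taskfile (cmd), the tag, the
 * first CDB byte, the transfer size and direction, the result taskfile
 * (res), and the decoded err_mask.
 */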
1508c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 15098a937581STejun Heo struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 1510c6fd2807SJeff Garzik 1511c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) 1512c6fd2807SJeff Garzik continue; 1513c6fd2807SJeff Garzik 15148a937581STejun Heo ata_dev_printk(qc->dev, KERN_ERR, 15158a937581STejun Heo "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 1516664e8503STejun Heo "tag %d cdb 0x%x data %u %s\n " 15178a937581STejun Heo "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 15188a937581STejun Heo "Emask 0x%x (%s)\n", 15198a937581STejun Heo cmd->command, cmd->feature, cmd->nsect, 15208a937581STejun Heo cmd->lbal, cmd->lbam, cmd->lbah, 15218a937581STejun Heo cmd->hob_feature, cmd->hob_nsect, 15228a937581STejun Heo cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 1523726f0785STejun Heo cmd->device, qc->tag, qc->cdb[0], qc->nbytes, 1524664e8503STejun Heo dma_str[qc->dma_dir], 15258a937581STejun Heo res->command, res->feature, res->nsect, 15268a937581STejun Heo res->lbal, res->lbam, res->lbah, 15278a937581STejun Heo res->hob_feature, res->hob_nsect, 15288a937581STejun Heo res->hob_lbal, res->hob_lbam, res->hob_lbah, 15298a937581STejun Heo res->device, qc->err_mask, ata_err_string(qc->err_mask)); 1530c6fd2807SJeff Garzik } 1531c6fd2807SJeff Garzik } 1532c6fd2807SJeff Garzik 1533c6fd2807SJeff Garzik static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, 1534c6fd2807SJeff Garzik unsigned int *classes) 1535c6fd2807SJeff Garzik { 1536c6fd2807SJeff Garzik int i, rc; 1537c6fd2807SJeff Garzik 1538c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1539c6fd2807SJeff Garzik classes[i] = ATA_DEV_UNKNOWN; 1540c6fd2807SJeff Garzik 1541c6fd2807SJeff Garzik rc = reset(ap, classes); 1542c6fd2807SJeff Garzik if (rc) 1543c6fd2807SJeff Garzik return rc; 1544c6fd2807SJeff Garzik 1545c6fd2807SJeff Garzik /* If any class isn't ATA_DEV_UNKNOWN, consider classification 1546c6fd2807SJeff Garzik * is complete and convert all ATA_DEV_UNKNOWN to 1547c6fd2807SJeff Garzik * ATA_DEV_NONE. 
1548c6fd2807SJeff Garzik */ 1549c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1550c6fd2807SJeff Garzik if (classes[i] != ATA_DEV_UNKNOWN) 1551c6fd2807SJeff Garzik break; 1552c6fd2807SJeff Garzik 1553c6fd2807SJeff Garzik if (i < ATA_MAX_DEVICES) 1554c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1555c6fd2807SJeff Garzik if (classes[i] == ATA_DEV_UNKNOWN) 1556c6fd2807SJeff Garzik classes[i] = ATA_DEV_NONE; 1557c6fd2807SJeff Garzik 1558c6fd2807SJeff Garzik return 0; 1559c6fd2807SJeff Garzik } 1560c6fd2807SJeff Garzik 1561c6fd2807SJeff Garzik static int ata_eh_followup_srst_needed(int rc, int classify, 1562c6fd2807SJeff Garzik const unsigned int *classes) 1563c6fd2807SJeff Garzik { 1564c6fd2807SJeff Garzik if (rc == -EAGAIN) 1565c6fd2807SJeff Garzik return 1; 1566c6fd2807SJeff Garzik if (rc != 0) 1567c6fd2807SJeff Garzik return 0; 1568c6fd2807SJeff Garzik if (classify && classes[0] == ATA_DEV_UNKNOWN) 1569c6fd2807SJeff Garzik return 1; 1570c6fd2807SJeff Garzik return 0; 1571c6fd2807SJeff Garzik } 1572c6fd2807SJeff Garzik 1573c6fd2807SJeff Garzik static int ata_eh_reset(struct ata_port *ap, int classify, 1574c6fd2807SJeff Garzik ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 1575c6fd2807SJeff Garzik ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 1576c6fd2807SJeff Garzik { 1577c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1578c6fd2807SJeff Garzik unsigned int *classes = ehc->classes; 1579c6fd2807SJeff Garzik int tries = ATA_EH_RESET_TRIES; 1580c6fd2807SJeff Garzik int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 1581c6fd2807SJeff Garzik unsigned int action; 1582c6fd2807SJeff Garzik ata_reset_fn_t reset; 1583c6fd2807SJeff Garzik int i, did_followup_srst, rc; 1584c6fd2807SJeff Garzik 1585c6fd2807SJeff Garzik /* about to reset */ 1586c6fd2807SJeff Garzik ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); 1587c6fd2807SJeff Garzik 1588c6fd2807SJeff Garzik /* Determine which reset to use and record in ehc->i.action. 1589c6fd2807SJeff Garzik * prereset() may examine and modify it. 1590c6fd2807SJeff Garzik */ 1591c6fd2807SJeff Garzik action = ehc->i.action; 1592c6fd2807SJeff Garzik ehc->i.action &= ~ATA_EH_RESET_MASK; 1593c6fd2807SJeff Garzik if (softreset && (!hardreset || (!sata_set_spd_needed(ap) && 1594c6fd2807SJeff Garzik !(action & ATA_EH_HARDRESET)))) 1595c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 1596c6fd2807SJeff Garzik else 1597c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_HARDRESET; 1598c6fd2807SJeff Garzik 1599c6fd2807SJeff Garzik if (prereset) { 1600c6fd2807SJeff Garzik rc = prereset(ap); 1601c6fd2807SJeff Garzik if (rc) { 1602c961922bSAlan Cox if (rc == -ENOENT) { 1603c961922bSAlan Cox ata_port_printk(ap, KERN_DEBUG, "port disabled. 
ignoring.\n");
1604c961922bSAlan Cox ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
1605c961922bSAlan Cox } else
1606c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR,
1607c6fd2807SJeff Garzik "prereset failed (errno=%d)\n", rc);
1608c6fd2807SJeff Garzik return rc;
1609c6fd2807SJeff Garzik }
1610c6fd2807SJeff Garzik }
1611c6fd2807SJeff Garzik 
1612c6fd2807SJeff Garzik /* prereset() might have modified ehc->i.action */
1613c6fd2807SJeff Garzik if (ehc->i.action & ATA_EH_HARDRESET)
1614c6fd2807SJeff Garzik reset = hardreset;
1615c6fd2807SJeff Garzik else if (ehc->i.action & ATA_EH_SOFTRESET)
1616c6fd2807SJeff Garzik reset = softreset;
1617c6fd2807SJeff Garzik else {
1618c6fd2807SJeff Garzik /* prereset told us not to reset, bang classes and return */
1619c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++)
1620c6fd2807SJeff Garzik classes[i] = ATA_DEV_NONE;
1621c6fd2807SJeff Garzik return 0;
1622c6fd2807SJeff Garzik }
1623c6fd2807SJeff Garzik 
1624c6fd2807SJeff Garzik /* did prereset() screw up? if so, fix up to avoid oopsing */
1625c6fd2807SJeff Garzik if (!reset) {
1626c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1627c6fd2807SJeff Garzik "invalid reset type\n");
1628c6fd2807SJeff Garzik if (softreset)
1629c6fd2807SJeff Garzik reset = softreset;
1630c6fd2807SJeff Garzik else
1631c6fd2807SJeff Garzik reset = hardreset;
1632c6fd2807SJeff Garzik }
1633c6fd2807SJeff Garzik 
1634c6fd2807SJeff Garzik retry:
1635c6fd2807SJeff Garzik /* shut up during boot probing */
1636c6fd2807SJeff Garzik if (verbose)
1637c6fd2807SJeff Garzik ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1638c6fd2807SJeff Garzik reset == softreset ? "soft" : "hard");
1639c6fd2807SJeff Garzik 
1640c6fd2807SJeff Garzik /* mark that this EH session started with reset */
1641c6fd2807SJeff Garzik ehc->i.flags |= ATA_EHI_DID_RESET;
1642c6fd2807SJeff Garzik 
1643c6fd2807SJeff Garzik rc = ata_do_reset(ap, reset, classes);
1644c6fd2807SJeff Garzik 
1645c6fd2807SJeff Garzik did_followup_srst = 0;
1646c6fd2807SJeff Garzik if (reset == hardreset &&
1647c6fd2807SJeff Garzik ata_eh_followup_srst_needed(rc, classify, classes)) {
1648c6fd2807SJeff Garzik /* okay, let's do follow-up softreset */
1649c6fd2807SJeff Garzik did_followup_srst = 1;
1650c6fd2807SJeff Garzik reset = softreset;
1651c6fd2807SJeff Garzik 
1652c6fd2807SJeff Garzik if (!reset) {
1653c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR,
1654c6fd2807SJeff Garzik "follow-up softreset required "
1655c6fd2807SJeff Garzik "but no softreset available\n");
1656c6fd2807SJeff Garzik return -EINVAL;
1657c6fd2807SJeff Garzik }
1658c6fd2807SJeff Garzik 
1659c6fd2807SJeff Garzik ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1660c6fd2807SJeff Garzik rc = ata_do_reset(ap, reset, classes);
1661c6fd2807SJeff Garzik 
1662c6fd2807SJeff Garzik if (rc == 0 && classify &&
1663c6fd2807SJeff Garzik classes[0] == ATA_DEV_UNKNOWN) {
1664c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR,
1665c6fd2807SJeff Garzik "classification failed\n");
1666c6fd2807SJeff Garzik return -EINVAL;
1667c6fd2807SJeff Garzik }
1668c6fd2807SJeff Garzik }
1669c6fd2807SJeff Garzik 
1670c6fd2807SJeff Garzik if (rc && --tries) {
1671c6fd2807SJeff Garzik const char *type;
1672c6fd2807SJeff Garzik 
1673c6fd2807SJeff Garzik if (reset == softreset) {
1674c6fd2807SJeff Garzik if (did_followup_srst)
1675c6fd2807SJeff Garzik type = "follow-up soft";
1676c6fd2807SJeff Garzik else
1677c6fd2807SJeff Garzik type = "soft";
1678c6fd2807SJeff Garzik } else
1679c6fd2807SJeff Garzik type = "hard";
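/* Reset failed but retries remain: warn, wait five seconds, drop the
 * SATA link speed if the failed attempt was a hardreset, then retry,
 * preferring hardreset when one is available.
 */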
1680c6fd2807SJeff Garzik 1681c6fd2807SJeff Garzik ata_port_printk(ap, KERN_WARNING, 1682c6fd2807SJeff Garzik "%sreset failed, retrying in 5 secs\n", type); 1683c6fd2807SJeff Garzik ssleep(5); 1684c6fd2807SJeff Garzik 1685c6fd2807SJeff Garzik if (reset == hardreset) 1686c6fd2807SJeff Garzik sata_down_spd_limit(ap); 1687c6fd2807SJeff Garzik if (hardreset) 1688c6fd2807SJeff Garzik reset = hardreset; 1689c6fd2807SJeff Garzik goto retry; 1690c6fd2807SJeff Garzik } 1691c6fd2807SJeff Garzik 1692c6fd2807SJeff Garzik if (rc == 0) { 1693c6fd2807SJeff Garzik /* After the reset, the device state is PIO 0 and the 1694c6fd2807SJeff Garzik * controller state is undefined. Record the mode. 1695c6fd2807SJeff Garzik */ 1696c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 1697c6fd2807SJeff Garzik ap->device[i].pio_mode = XFER_PIO_0; 1698c6fd2807SJeff Garzik 1699c6fd2807SJeff Garzik if (postreset) 1700c6fd2807SJeff Garzik postreset(ap, classes); 1701c6fd2807SJeff Garzik 1702c6fd2807SJeff Garzik /* reset successful, schedule revalidation */ 1703c6fd2807SJeff Garzik ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK); 1704c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_REVALIDATE; 1705c6fd2807SJeff Garzik } 1706c6fd2807SJeff Garzik 1707c6fd2807SJeff Garzik return rc; 1708c6fd2807SJeff Garzik } 1709c6fd2807SJeff Garzik 1710c6fd2807SJeff Garzik static int ata_eh_revalidate_and_attach(struct ata_port *ap, 1711c6fd2807SJeff Garzik struct ata_device **r_failed_dev) 1712c6fd2807SJeff Garzik { 1713c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 1714c6fd2807SJeff Garzik struct ata_device *dev; 1715c6fd2807SJeff Garzik unsigned long flags; 1716c6fd2807SJeff Garzik int i, rc = 0; 1717c6fd2807SJeff Garzik 1718c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1719c6fd2807SJeff Garzik 1720c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 1721bff04647STejun Heo unsigned int action, readid_flags = 0; 1722c6fd2807SJeff Garzik 1723c6fd2807SJeff Garzik dev = &ap->device[i]; 1724c6fd2807SJeff Garzik action = ata_eh_dev_action(dev); 1725c6fd2807SJeff Garzik 1726bff04647STejun Heo if (ehc->i.flags & ATA_EHI_DID_RESET) 1727bff04647STejun Heo readid_flags |= ATA_READID_POSTRESET; 1728bff04647STejun Heo 1729c6fd2807SJeff Garzik if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) { 1730c6fd2807SJeff Garzik if (ata_port_offline(ap)) { 1731c6fd2807SJeff Garzik rc = -EIO; 1732c6fd2807SJeff Garzik break; 1733c6fd2807SJeff Garzik } 1734c6fd2807SJeff Garzik 1735c6fd2807SJeff Garzik ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); 1736bff04647STejun Heo rc = ata_dev_revalidate(dev, readid_flags); 1737c6fd2807SJeff Garzik if (rc) 1738c6fd2807SJeff Garzik break; 1739c6fd2807SJeff Garzik 1740c6fd2807SJeff Garzik ata_eh_done(ap, dev, ATA_EH_REVALIDATE); 1741c6fd2807SJeff Garzik 1742baa1e78aSTejun Heo /* Configuration may have changed, reconfigure 1743baa1e78aSTejun Heo * transfer mode. 
1744baa1e78aSTejun Heo */
1745baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE;
1746baa1e78aSTejun Heo 
1747c6fd2807SJeff Garzik /* schedule the scsi_rescan_device() here */
1748c6fd2807SJeff Garzik queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1749c6fd2807SJeff Garzik } else if (dev->class == ATA_DEV_UNKNOWN &&
1750c6fd2807SJeff Garzik ehc->tries[dev->devno] &&
1751c6fd2807SJeff Garzik ata_class_enabled(ehc->classes[dev->devno])) {
1752c6fd2807SJeff Garzik dev->class = ehc->classes[dev->devno];
1753c6fd2807SJeff Garzik 
1754bff04647STejun Heo rc = ata_dev_read_id(dev, &dev->class, readid_flags,
1755bff04647STejun Heo dev->id);
1756efdaedc4STejun Heo if (rc == 0) {
1757efdaedc4STejun Heo ehc->i.flags |= ATA_EHI_PRINTINFO;
1758efdaedc4STejun Heo rc = ata_dev_configure(dev);
1759efdaedc4STejun Heo ehc->i.flags &= ~ATA_EHI_PRINTINFO;
176055a8e2c8STejun Heo } else if (rc == -ENOENT) {
176155a8e2c8STejun Heo /* IDENTIFY was issued to non-existent
176255a8e2c8STejun Heo * device. No need to reset. Just
176355a8e2c8STejun Heo * thaw and kill the device.
176455a8e2c8STejun Heo */
176555a8e2c8STejun Heo ata_eh_thaw_port(ap);
176655a8e2c8STejun Heo dev->class = ATA_DEV_UNKNOWN;
176755a8e2c8STejun Heo rc = 0;
1768efdaedc4STejun Heo }
1769c6fd2807SJeff Garzik 
1770c6fd2807SJeff Garzik if (rc) {
1771c6fd2807SJeff Garzik dev->class = ATA_DEV_UNKNOWN;
1772c6fd2807SJeff Garzik break;
1773c6fd2807SJeff Garzik }
1774c6fd2807SJeff Garzik 
177555a8e2c8STejun Heo if (ata_dev_enabled(dev)) {
1776c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
1777c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1778c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
1779baa1e78aSTejun Heo 
178055a8e2c8STejun Heo /* new device discovered, configure xfermode */
1781baa1e78aSTejun Heo ehc->i.flags |= ATA_EHI_SETMODE;
1782c6fd2807SJeff Garzik }
1783c6fd2807SJeff Garzik }
1784c6fd2807SJeff Garzik }
1785c6fd2807SJeff Garzik 
1786c6fd2807SJeff Garzik if (rc)
1787c6fd2807SJeff Garzik *r_failed_dev = dev;
1788c6fd2807SJeff Garzik 
1789c6fd2807SJeff Garzik DPRINTK("EXIT\n");
1790c6fd2807SJeff Garzik return rc;
1791c6fd2807SJeff Garzik }
1792c6fd2807SJeff Garzik 
1793c6fd2807SJeff Garzik /**
1794c6fd2807SJeff Garzik * ata_eh_suspend - handle suspend EH action
1795c6fd2807SJeff Garzik * @ap: target host port
1796c6fd2807SJeff Garzik * @r_failed_dev: result parameter to indicate failing device
1797c6fd2807SJeff Garzik *
1798c6fd2807SJeff Garzik * Handle suspend EH action. Disk devices are spun down and
1799c6fd2807SJeff Garzik * other types of devices are just marked suspended. Once
1800c6fd2807SJeff Garzik * suspended, no EH action to the device is allowed until it is
1801c6fd2807SJeff Garzik * resumed.
1802c6fd2807SJeff Garzik *
1803c6fd2807SJeff Garzik * LOCKING:
1804c6fd2807SJeff Garzik * Kernel thread context (may sleep).
1805c6fd2807SJeff Garzik * 1806c6fd2807SJeff Garzik * RETURNS: 1807c6fd2807SJeff Garzik * 0 on success, -errno otherwise 1808c6fd2807SJeff Garzik */ 1809c6fd2807SJeff Garzik static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev) 1810c6fd2807SJeff Garzik { 1811c6fd2807SJeff Garzik struct ata_device *dev; 1812c6fd2807SJeff Garzik int i, rc = 0; 1813c6fd2807SJeff Garzik 1814c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1815c6fd2807SJeff Garzik 1816c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 1817c6fd2807SJeff Garzik unsigned long flags; 1818c6fd2807SJeff Garzik unsigned int action, err_mask; 1819c6fd2807SJeff Garzik 1820c6fd2807SJeff Garzik dev = &ap->device[i]; 1821c6fd2807SJeff Garzik action = ata_eh_dev_action(dev); 1822c6fd2807SJeff Garzik 1823c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND)) 1824c6fd2807SJeff Garzik continue; 1825c6fd2807SJeff Garzik 1826c6fd2807SJeff Garzik WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED); 1827c6fd2807SJeff Garzik 1828c6fd2807SJeff Garzik ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND); 1829c6fd2807SJeff Garzik 1830c6fd2807SJeff Garzik if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { 1831c6fd2807SJeff Garzik /* flush cache */ 1832c6fd2807SJeff Garzik rc = ata_flush_cache(dev); 1833c6fd2807SJeff Garzik if (rc) 1834c6fd2807SJeff Garzik break; 1835c6fd2807SJeff Garzik 1836c6fd2807SJeff Garzik /* spin down */ 1837c6fd2807SJeff Garzik err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1); 1838c6fd2807SJeff Garzik if (err_mask) { 1839c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_ERR, "failed to " 1840c6fd2807SJeff Garzik "spin down (err_mask=0x%x)\n", 1841c6fd2807SJeff Garzik err_mask); 1842c6fd2807SJeff Garzik rc = -EIO; 1843c6fd2807SJeff Garzik break; 1844c6fd2807SJeff Garzik } 1845c6fd2807SJeff Garzik } 1846c6fd2807SJeff Garzik 1847c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1848c6fd2807SJeff Garzik dev->flags |= ATA_DFLAG_SUSPENDED; 1849c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1850c6fd2807SJeff Garzik 1851c6fd2807SJeff Garzik ata_eh_done(ap, dev, ATA_EH_SUSPEND); 1852c6fd2807SJeff Garzik } 1853c6fd2807SJeff Garzik 1854c6fd2807SJeff Garzik if (rc) 1855c6fd2807SJeff Garzik *r_failed_dev = dev; 1856c6fd2807SJeff Garzik 1857c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 185803ee5b1cSTejun Heo return rc; 1859c6fd2807SJeff Garzik } 1860c6fd2807SJeff Garzik 1861c6fd2807SJeff Garzik /** 1862c6fd2807SJeff Garzik * ata_eh_prep_resume - prep for resume EH action 1863c6fd2807SJeff Garzik * @ap: target host port 1864c6fd2807SJeff Garzik * 1865c6fd2807SJeff Garzik * Clear SUSPENDED in preparation for scheduled resume actions. 1866c6fd2807SJeff Garzik * This allows other parts of EH to access the devices being 1867c6fd2807SJeff Garzik * resumed. 1868c6fd2807SJeff Garzik * 1869c6fd2807SJeff Garzik * LOCKING: 1870c6fd2807SJeff Garzik * Kernel thread context (may sleep). 
1871c6fd2807SJeff Garzik */ 1872c6fd2807SJeff Garzik static void ata_eh_prep_resume(struct ata_port *ap) 1873c6fd2807SJeff Garzik { 1874c6fd2807SJeff Garzik struct ata_device *dev; 1875c6fd2807SJeff Garzik unsigned long flags; 1876c6fd2807SJeff Garzik int i; 1877c6fd2807SJeff Garzik 1878c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1879c6fd2807SJeff Garzik 1880c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 1881c6fd2807SJeff Garzik unsigned int action; 1882c6fd2807SJeff Garzik 1883c6fd2807SJeff Garzik dev = &ap->device[i]; 1884c6fd2807SJeff Garzik action = ata_eh_dev_action(dev); 1885c6fd2807SJeff Garzik 1886c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) 1887c6fd2807SJeff Garzik continue; 1888c6fd2807SJeff Garzik 1889c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 1890c6fd2807SJeff Garzik dev->flags &= ~ATA_DFLAG_SUSPENDED; 1891c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 1892c6fd2807SJeff Garzik } 1893c6fd2807SJeff Garzik 1894c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 1895c6fd2807SJeff Garzik } 1896c6fd2807SJeff Garzik 1897c6fd2807SJeff Garzik /** 1898c6fd2807SJeff Garzik * ata_eh_resume - handle resume EH action 1899c6fd2807SJeff Garzik * @ap: target host port 1900c6fd2807SJeff Garzik * @r_failed_dev: result parameter to indicate failing device 1901c6fd2807SJeff Garzik * 1902c6fd2807SJeff Garzik * Handle resume EH action. Target devices are already reset and 1903c6fd2807SJeff Garzik * revalidated. Spinning up is the only operation left. 1904c6fd2807SJeff Garzik * 1905c6fd2807SJeff Garzik * LOCKING: 1906c6fd2807SJeff Garzik * Kernel thread context (may sleep). 1907c6fd2807SJeff Garzik * 1908c6fd2807SJeff Garzik * RETURNS: 1909c6fd2807SJeff Garzik * 0 on success, -errno otherwise 1910c6fd2807SJeff Garzik */ 1911c6fd2807SJeff Garzik static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev) 1912c6fd2807SJeff Garzik { 1913c6fd2807SJeff Garzik struct ata_device *dev; 1914c6fd2807SJeff Garzik int i, rc = 0; 1915c6fd2807SJeff Garzik 1916c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 1917c6fd2807SJeff Garzik 1918c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 1919c6fd2807SJeff Garzik unsigned int action, err_mask; 1920c6fd2807SJeff Garzik 1921c6fd2807SJeff Garzik dev = &ap->device[i]; 1922c6fd2807SJeff Garzik action = ata_eh_dev_action(dev); 1923c6fd2807SJeff Garzik 1924c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME)) 1925c6fd2807SJeff Garzik continue; 1926c6fd2807SJeff Garzik 1927c6fd2807SJeff Garzik ata_eh_about_to_do(ap, dev, ATA_EH_RESUME); 1928c6fd2807SJeff Garzik 1929c6fd2807SJeff Garzik if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) { 1930c6fd2807SJeff Garzik err_mask = ata_do_simple_cmd(dev, 1931c6fd2807SJeff Garzik ATA_CMD_IDLEIMMEDIATE); 1932c6fd2807SJeff Garzik if (err_mask) { 1933c6fd2807SJeff Garzik ata_dev_printk(dev, KERN_ERR, "failed to " 1934c6fd2807SJeff Garzik "spin up (err_mask=0x%x)\n", 1935c6fd2807SJeff Garzik err_mask); 1936c6fd2807SJeff Garzik rc = -EIO; 1937c6fd2807SJeff Garzik break; 1938c6fd2807SJeff Garzik } 1939c6fd2807SJeff Garzik } 1940c6fd2807SJeff Garzik 1941c6fd2807SJeff Garzik ata_eh_done(ap, dev, ATA_EH_RESUME); 1942c6fd2807SJeff Garzik } 1943c6fd2807SJeff Garzik 1944c6fd2807SJeff Garzik if (rc) 1945c6fd2807SJeff Garzik *r_failed_dev = dev; 1946c6fd2807SJeff Garzik 1947c6fd2807SJeff Garzik DPRINTK("EXIT\n"); 1948c6fd2807SJeff Garzik return 0; 1949c6fd2807SJeff Garzik } 1950c6fd2807SJeff Garzik 1951c6fd2807SJeff Garzik static int 
ata_port_nr_enabled(struct ata_port *ap)
1952c6fd2807SJeff Garzik {
1953c6fd2807SJeff Garzik int i, cnt = 0;
1954c6fd2807SJeff Garzik 
1955c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++)
1956c6fd2807SJeff Garzik if (ata_dev_enabled(&ap->device[i]))
1957c6fd2807SJeff Garzik cnt++;
1958c6fd2807SJeff Garzik return cnt;
1959c6fd2807SJeff Garzik }
1960c6fd2807SJeff Garzik 
1961c6fd2807SJeff Garzik static int ata_port_nr_vacant(struct ata_port *ap)
1962c6fd2807SJeff Garzik {
1963c6fd2807SJeff Garzik int i, cnt = 0;
1964c6fd2807SJeff Garzik 
1965c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++)
1966c6fd2807SJeff Garzik if (ap->device[i].class == ATA_DEV_UNKNOWN)
1967c6fd2807SJeff Garzik cnt++;
1968c6fd2807SJeff Garzik return cnt;
1969c6fd2807SJeff Garzik }
1970c6fd2807SJeff Garzik 
1971c6fd2807SJeff Garzik static int ata_eh_skip_recovery(struct ata_port *ap)
1972c6fd2807SJeff Garzik {
1973c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context;
1974c6fd2807SJeff Garzik int i;
1975c6fd2807SJeff Garzik 
1976c6fd2807SJeff Garzik /* skip if all possible devices are suspended */
1977c6fd2807SJeff Garzik for (i = 0; i < ata_port_max_devices(ap); i++) {
1978c6fd2807SJeff Garzik struct ata_device *dev = &ap->device[i];
1979c6fd2807SJeff Garzik 
1980c6fd2807SJeff Garzik if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1981c6fd2807SJeff Garzik break;
1982c6fd2807SJeff Garzik }
1983c6fd2807SJeff Garzik 
1984c6fd2807SJeff Garzik if (i == ata_port_max_devices(ap))
1985c6fd2807SJeff Garzik return 1;
1986c6fd2807SJeff Garzik 
1987c6fd2807SJeff Garzik /* thaw frozen port, resume link and recover failed devices */
1988c6fd2807SJeff Garzik if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1989c6fd2807SJeff Garzik (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1990c6fd2807SJeff Garzik return 0;
1991c6fd2807SJeff Garzik 
1992c6fd2807SJeff Garzik /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1993c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) {
1994c6fd2807SJeff Garzik struct ata_device *dev = &ap->device[i];
1995c6fd2807SJeff Garzik 
1996c6fd2807SJeff Garzik if (dev->class == ATA_DEV_UNKNOWN &&
1997c6fd2807SJeff Garzik ehc->classes[dev->devno] != ATA_DEV_NONE)
1998c6fd2807SJeff Garzik return 0;
1999c6fd2807SJeff Garzik }
2000c6fd2807SJeff Garzik 
2001c6fd2807SJeff Garzik return 1;
2002c6fd2807SJeff Garzik }
2003c6fd2807SJeff Garzik 
2004c6fd2807SJeff Garzik /**
2005c6fd2807SJeff Garzik * ata_eh_recover - recover host port after error
2006c6fd2807SJeff Garzik * @ap: host port to recover
2007c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL)
2008c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL)
2009c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL)
2010c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL)
2011c6fd2807SJeff Garzik *
2012c6fd2807SJeff Garzik * This is the alpha and omega, yin and yang, heart and soul of
2013c6fd2807SJeff Garzik * libata exception handling. On entry, actions required to
2014c6fd2807SJeff Garzik * recover the port and hotplug requests are recorded in
2015c6fd2807SJeff Garzik * eh_context. This function executes all the operations with
2016c6fd2807SJeff Garzik * appropriate retries and fallbacks to resurrect failed
2017c6fd2807SJeff Garzik * devices, detach goners and greet newcomers.
2018c6fd2807SJeff Garzik *
2019c6fd2807SJeff Garzik * LOCKING:
2020c6fd2807SJeff Garzik * Kernel thread context (may sleep).
2021c6fd2807SJeff Garzik * 2022c6fd2807SJeff Garzik * RETURNS: 2023c6fd2807SJeff Garzik * 0 on success, -errno on failure. 2024c6fd2807SJeff Garzik */ 2025c6fd2807SJeff Garzik static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 2026c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2027c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 2028c6fd2807SJeff Garzik { 2029c6fd2807SJeff Garzik struct ata_eh_context *ehc = &ap->eh_context; 2030c6fd2807SJeff Garzik struct ata_device *dev; 20314ae72a1eSTejun Heo int i, rc; 2032c6fd2807SJeff Garzik 2033c6fd2807SJeff Garzik DPRINTK("ENTER\n"); 2034c6fd2807SJeff Garzik 2035c6fd2807SJeff Garzik /* prep for recovery */ 2036c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 2037c6fd2807SJeff Garzik dev = &ap->device[i]; 2038c6fd2807SJeff Garzik 2039c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2040c6fd2807SJeff Garzik 204179a55b72STejun Heo /* collect port action mask recorded in dev actions */ 204279a55b72STejun Heo ehc->i.action |= ehc->i.dev_action[i] & ~ATA_EH_PERDEV_MASK; 204379a55b72STejun Heo ehc->i.dev_action[i] &= ATA_EH_PERDEV_MASK; 204479a55b72STejun Heo 2045c6fd2807SJeff Garzik /* process hotplug request */ 2046c6fd2807SJeff Garzik if (dev->flags & ATA_DFLAG_DETACH) 2047c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2048c6fd2807SJeff Garzik 2049c6fd2807SJeff Garzik if (!ata_dev_enabled(dev) && 2050c6fd2807SJeff Garzik ((ehc->i.probe_mask & (1 << dev->devno)) && 2051c6fd2807SJeff Garzik !(ehc->did_probe_mask & (1 << dev->devno)))) { 2052c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2053c6fd2807SJeff Garzik ata_dev_init(dev); 2054c6fd2807SJeff Garzik ehc->did_probe_mask |= (1 << dev->devno); 2055c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 2056c6fd2807SJeff Garzik } 2057c6fd2807SJeff Garzik } 2058c6fd2807SJeff Garzik 2059c6fd2807SJeff Garzik retry: 2060c6fd2807SJeff Garzik rc = 0; 2061c6fd2807SJeff Garzik 2062c6fd2807SJeff Garzik /* if UNLOADING, finish immediately */ 2063c6fd2807SJeff Garzik if (ap->pflags & ATA_PFLAG_UNLOADING) 2064c6fd2807SJeff Garzik goto out; 2065c6fd2807SJeff Garzik 2066c6fd2807SJeff Garzik /* prep for resume */ 2067c6fd2807SJeff Garzik ata_eh_prep_resume(ap); 2068c6fd2807SJeff Garzik 2069c6fd2807SJeff Garzik /* skip EH if possible. 
*/ 2070c6fd2807SJeff Garzik if (ata_eh_skip_recovery(ap)) 2071c6fd2807SJeff Garzik ehc->i.action = 0; 2072c6fd2807SJeff Garzik 2073c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 2074c6fd2807SJeff Garzik ehc->classes[i] = ATA_DEV_UNKNOWN; 2075c6fd2807SJeff Garzik 2076c6fd2807SJeff Garzik /* reset */ 2077c6fd2807SJeff Garzik if (ehc->i.action & ATA_EH_RESET_MASK) { 2078c6fd2807SJeff Garzik ata_eh_freeze_port(ap); 2079c6fd2807SJeff Garzik 2080c6fd2807SJeff Garzik rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset, 2081c6fd2807SJeff Garzik softreset, hardreset, postreset); 2082c6fd2807SJeff Garzik if (rc) { 2083c6fd2807SJeff Garzik ata_port_printk(ap, KERN_ERR, 2084c6fd2807SJeff Garzik "reset failed, giving up\n"); 2085c6fd2807SJeff Garzik goto out; 2086c6fd2807SJeff Garzik } 2087c6fd2807SJeff Garzik 2088c6fd2807SJeff Garzik ata_eh_thaw_port(ap); 2089c6fd2807SJeff Garzik } 2090c6fd2807SJeff Garzik 2091c6fd2807SJeff Garzik /* revalidate existing devices and attach new ones */ 2092c6fd2807SJeff Garzik rc = ata_eh_revalidate_and_attach(ap, &dev); 2093c6fd2807SJeff Garzik if (rc) 2094c6fd2807SJeff Garzik goto dev_fail; 2095c6fd2807SJeff Garzik 2096c6fd2807SJeff Garzik /* resume devices */ 2097c6fd2807SJeff Garzik rc = ata_eh_resume(ap, &dev); 2098c6fd2807SJeff Garzik if (rc) 2099c6fd2807SJeff Garzik goto dev_fail; 2100c6fd2807SJeff Garzik 2101baa1e78aSTejun Heo /* configure transfer mode if necessary */ 2102baa1e78aSTejun Heo if (ehc->i.flags & ATA_EHI_SETMODE) { 2103c6fd2807SJeff Garzik rc = ata_set_mode(ap, &dev); 21044ae72a1eSTejun Heo if (rc) 2105c6fd2807SJeff Garzik goto dev_fail; 2106baa1e78aSTejun Heo ehc->i.flags &= ~ATA_EHI_SETMODE; 2107c6fd2807SJeff Garzik } 2108c6fd2807SJeff Garzik 2109c6fd2807SJeff Garzik /* suspend devices */ 2110c6fd2807SJeff Garzik rc = ata_eh_suspend(ap, &dev); 2111c6fd2807SJeff Garzik if (rc) 2112c6fd2807SJeff Garzik goto dev_fail; 2113c6fd2807SJeff Garzik 2114c6fd2807SJeff Garzik goto out; 2115c6fd2807SJeff Garzik 2116c6fd2807SJeff Garzik dev_fail: 21174ae72a1eSTejun Heo ehc->tries[dev->devno]--; 21184ae72a1eSTejun Heo 2119c6fd2807SJeff Garzik switch (rc) { 2120c6fd2807SJeff Garzik case -EINVAL: 21214ae72a1eSTejun Heo /* eeek, something went very wrong, give up */ 2122c6fd2807SJeff Garzik ehc->tries[dev->devno] = 0; 2123c6fd2807SJeff Garzik break; 21244ae72a1eSTejun Heo 21254ae72a1eSTejun Heo case -ENODEV: 21264ae72a1eSTejun Heo /* device missing or wrong IDENTIFY data, schedule probing */ 21274ae72a1eSTejun Heo ehc->i.probe_mask |= (1 << dev->devno); 21284ae72a1eSTejun Heo /* give it just one more chance */ 21294ae72a1eSTejun Heo ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 2130c6fd2807SJeff Garzik case -EIO: 21314ae72a1eSTejun Heo if (ehc->tries[dev->devno] == 1) { 21324ae72a1eSTejun Heo /* This is the last chance, better to slow 21334ae72a1eSTejun Heo * down than lose it. 
21344ae72a1eSTejun Heo */ 2135c6fd2807SJeff Garzik sata_down_spd_limit(ap); 21364ae72a1eSTejun Heo ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 21374ae72a1eSTejun Heo } 2138c6fd2807SJeff Garzik } 2139c6fd2807SJeff Garzik 2140c6fd2807SJeff Garzik if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 2141c6fd2807SJeff Garzik /* disable device if it has used up all its chances */ 2142c6fd2807SJeff Garzik ata_dev_disable(dev); 2143c6fd2807SJeff Garzik 2144c6fd2807SJeff Garzik /* detach if offline */ 2145c6fd2807SJeff Garzik if (ata_port_offline(ap)) 2146c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2147c6fd2807SJeff Garzik 2148c6fd2807SJeff Garzik /* probe if requested */ 2149c6fd2807SJeff Garzik if ((ehc->i.probe_mask & (1 << dev->devno)) && 2150c6fd2807SJeff Garzik !(ehc->did_probe_mask & (1 << dev->devno))) { 2151c6fd2807SJeff Garzik ata_eh_detach_dev(dev); 2152c6fd2807SJeff Garzik ata_dev_init(dev); 2153c6fd2807SJeff Garzik 2154c6fd2807SJeff Garzik ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 2155c6fd2807SJeff Garzik ehc->did_probe_mask |= (1 << dev->devno); 2156c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 2157c6fd2807SJeff Garzik } 2158c6fd2807SJeff Garzik } else { 2159c6fd2807SJeff Garzik /* soft didn't work? be haaaaard */ 2160c6fd2807SJeff Garzik if (ehc->i.flags & ATA_EHI_DID_RESET) 2161c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_HARDRESET; 2162c6fd2807SJeff Garzik else 2163c6fd2807SJeff Garzik ehc->i.action |= ATA_EH_SOFTRESET; 2164c6fd2807SJeff Garzik } 2165c6fd2807SJeff Garzik 2166c6fd2807SJeff Garzik if (ata_port_nr_enabled(ap)) { 2167c6fd2807SJeff Garzik ata_port_printk(ap, KERN_WARNING, "failed to recover some " 2168c6fd2807SJeff Garzik "devices, retrying in 5 secs\n"); 2169c6fd2807SJeff Garzik ssleep(5); 2170c6fd2807SJeff Garzik } else { 2171c6fd2807SJeff Garzik /* no device left, repeat fast */ 2172c6fd2807SJeff Garzik msleep(500); 2173c6fd2807SJeff Garzik } 2174c6fd2807SJeff Garzik 2175c6fd2807SJeff Garzik goto retry; 2176c6fd2807SJeff Garzik 2177c6fd2807SJeff Garzik out: 2178c6fd2807SJeff Garzik if (rc) { 2179c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) 2180c6fd2807SJeff Garzik ata_dev_disable(&ap->device[i]); 2181c6fd2807SJeff Garzik } 2182c6fd2807SJeff Garzik 2183c6fd2807SJeff Garzik DPRINTK("EXIT, rc=%d\n", rc); 2184c6fd2807SJeff Garzik return rc; 2185c6fd2807SJeff Garzik } 2186c6fd2807SJeff Garzik 2187c6fd2807SJeff Garzik /** 2188c6fd2807SJeff Garzik * ata_eh_finish - finish up EH 2189c6fd2807SJeff Garzik * @ap: host port to finish EH for 2190c6fd2807SJeff Garzik * 2191c6fd2807SJeff Garzik * Recovery is complete. Clean up EH states and retry or finish 2192c6fd2807SJeff Garzik * failed qcs. 2193c6fd2807SJeff Garzik * 2194c6fd2807SJeff Garzik * LOCKING: 2195c6fd2807SJeff Garzik * None. 2196c6fd2807SJeff Garzik */ 2197c6fd2807SJeff Garzik static void ata_eh_finish(struct ata_port *ap) 2198c6fd2807SJeff Garzik { 2199c6fd2807SJeff Garzik int tag; 2200c6fd2807SJeff Garzik 2201c6fd2807SJeff Garzik /* retry or finish qcs */ 2202c6fd2807SJeff Garzik for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2203c6fd2807SJeff Garzik struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2204c6fd2807SJeff Garzik 2205c6fd2807SJeff Garzik if (!(qc->flags & ATA_QCFLAG_FAILED)) 2206c6fd2807SJeff Garzik continue; 2207c6fd2807SJeff Garzik 2208c6fd2807SJeff Garzik if (qc->err_mask) { 2209c6fd2807SJeff Garzik /* FIXME: Once EH migration is complete, 2210c6fd2807SJeff Garzik * generate sense data in this function, 2211c6fd2807SJeff Garzik * considering both err_mask and tf. 
2212c6fd2807SJeff Garzik */ 2213c6fd2807SJeff Garzik if (qc->err_mask & AC_ERR_INVALID) 2214c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 2215c6fd2807SJeff Garzik else 2216c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 2217c6fd2807SJeff Garzik } else { 2218c6fd2807SJeff Garzik if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 2219c6fd2807SJeff Garzik ata_eh_qc_complete(qc); 2220c6fd2807SJeff Garzik } else { 2221c6fd2807SJeff Garzik /* feed zero TF to sense generation */ 2222c6fd2807SJeff Garzik memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 2223c6fd2807SJeff Garzik ata_eh_qc_retry(qc); 2224c6fd2807SJeff Garzik } 2225c6fd2807SJeff Garzik } 2226c6fd2807SJeff Garzik } 2227c6fd2807SJeff Garzik } 2228c6fd2807SJeff Garzik 2229c6fd2807SJeff Garzik /** 2230c6fd2807SJeff Garzik * ata_do_eh - do standard error handling 2231c6fd2807SJeff Garzik * @ap: host port to handle error for 2232c6fd2807SJeff Garzik * @prereset: prereset method (can be NULL) 2233c6fd2807SJeff Garzik * @softreset: softreset method (can be NULL) 2234c6fd2807SJeff Garzik * @hardreset: hardreset method (can be NULL) 2235c6fd2807SJeff Garzik * @postreset: postreset method (can be NULL) 2236c6fd2807SJeff Garzik * 2237c6fd2807SJeff Garzik * Perform standard error handling sequence. 2238c6fd2807SJeff Garzik * 2239c6fd2807SJeff Garzik * LOCKING: 2240c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2241c6fd2807SJeff Garzik */ 2242c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 2243c6fd2807SJeff Garzik ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 2244c6fd2807SJeff Garzik ata_postreset_fn_t postreset) 2245c6fd2807SJeff Garzik { 2246c6fd2807SJeff Garzik ata_eh_autopsy(ap); 2247c6fd2807SJeff Garzik ata_eh_report(ap); 2248c6fd2807SJeff Garzik ata_eh_recover(ap, prereset, softreset, hardreset, postreset); 2249c6fd2807SJeff Garzik ata_eh_finish(ap); 2250c6fd2807SJeff Garzik } 2251c6fd2807SJeff Garzik 2252c6fd2807SJeff Garzik /** 2253c6fd2807SJeff Garzik * ata_eh_handle_port_suspend - perform port suspend operation 2254c6fd2807SJeff Garzik * @ap: port to suspend 2255c6fd2807SJeff Garzik * 2256c6fd2807SJeff Garzik * Suspend @ap. 2257c6fd2807SJeff Garzik * 2258c6fd2807SJeff Garzik * LOCKING: 2259c6fd2807SJeff Garzik * Kernel thread context (may sleep). 2260c6fd2807SJeff Garzik */ 2261c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap) 2262c6fd2807SJeff Garzik { 2263c6fd2807SJeff Garzik unsigned long flags; 2264c6fd2807SJeff Garzik int rc = 0; 2265c6fd2807SJeff Garzik 2266c6fd2807SJeff Garzik /* are we suspending? 
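Bail out unless a PM request is pending and the event is an actual suspend rather than PM_EVENT_ON.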
*/
2267c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
2268c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2269c6fd2807SJeff Garzik ap->pm_mesg.event == PM_EVENT_ON) {
2270c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
2271c6fd2807SJeff Garzik return;
2272c6fd2807SJeff Garzik }
2273c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
2274c6fd2807SJeff Garzik 
2275c6fd2807SJeff Garzik WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
2276c6fd2807SJeff Garzik 
2277c6fd2807SJeff Garzik /* suspend */
2278c6fd2807SJeff Garzik ata_eh_freeze_port(ap);
2279c6fd2807SJeff Garzik 
2280c6fd2807SJeff Garzik if (ap->ops->port_suspend)
2281c6fd2807SJeff Garzik rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2282c6fd2807SJeff Garzik 
2283c6fd2807SJeff Garzik /* report result */
2284c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
2285c6fd2807SJeff Garzik 
2286c6fd2807SJeff Garzik ap->pflags &= ~ATA_PFLAG_PM_PENDING;
2287c6fd2807SJeff Garzik if (rc == 0)
2288c6fd2807SJeff Garzik ap->pflags |= ATA_PFLAG_SUSPENDED;
2289c6fd2807SJeff Garzik else
2290c6fd2807SJeff Garzik ata_port_schedule_eh(ap);
2291c6fd2807SJeff Garzik 
2292c6fd2807SJeff Garzik if (ap->pm_result) {
2293c6fd2807SJeff Garzik *ap->pm_result = rc;
2294c6fd2807SJeff Garzik ap->pm_result = NULL;
2295c6fd2807SJeff Garzik }
2296c6fd2807SJeff Garzik 
2297c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
2298c6fd2807SJeff Garzik 
2299c6fd2807SJeff Garzik return;
2300c6fd2807SJeff Garzik }
2301c6fd2807SJeff Garzik 
2302c6fd2807SJeff Garzik /**
2303c6fd2807SJeff Garzik * ata_eh_handle_port_resume - perform port resume operation
2304c6fd2807SJeff Garzik * @ap: port to resume
2305c6fd2807SJeff Garzik *
2306c6fd2807SJeff Garzik * Resume @ap.
2307c6fd2807SJeff Garzik *
2308c6fd2807SJeff Garzik * This function also waits up to one second until all devices
2309c6fd2807SJeff Garzik * hanging off this port request resume EH action. This is to
2310c6fd2807SJeff Garzik * prevent invoking EH and thus reset multiple times on resume.
2311c6fd2807SJeff Garzik *
2312c6fd2807SJeff Garzik * On DPM resume, where some of the devices might not be resumed
2313c6fd2807SJeff Garzik * together, this may delay port resume up to one second, but such
2314c6fd2807SJeff Garzik * DPM resumes are rare and 1 sec delay isn't too bad.
2315c6fd2807SJeff Garzik *
2316c6fd2807SJeff Garzik * LOCKING:
2317c6fd2807SJeff Garzik * Kernel thread context (may sleep).
2318c6fd2807SJeff Garzik */
2319c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
2320c6fd2807SJeff Garzik {
2321c6fd2807SJeff Garzik unsigned long timeout;
2322c6fd2807SJeff Garzik unsigned long flags;
2323c6fd2807SJeff Garzik int i, rc = 0;
2324c6fd2807SJeff Garzik 
2325c6fd2807SJeff Garzik /* are we resuming? */
2326c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags);
2327c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2328c6fd2807SJeff Garzik ap->pm_mesg.event != PM_EVENT_ON) {
2329c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
2330c6fd2807SJeff Garzik return;
2331c6fd2807SJeff Garzik }
2332c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags);
2333c6fd2807SJeff Garzik 
2334c6fd2807SJeff Garzik /* spurious?
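Nothing to do if the port was never actually marked suspended.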
*/ 2335c6fd2807SJeff Garzik if (!(ap->pflags & ATA_PFLAG_SUSPENDED)) 2336c6fd2807SJeff Garzik goto done; 2337c6fd2807SJeff Garzik 2338c6fd2807SJeff Garzik if (ap->ops->port_resume) 2339c6fd2807SJeff Garzik rc = ap->ops->port_resume(ap); 2340c6fd2807SJeff Garzik 2341c6fd2807SJeff Garzik /* give devices time to request EH */ 2342c6fd2807SJeff Garzik timeout = jiffies + HZ; /* 1s max */ 2343c6fd2807SJeff Garzik while (1) { 2344c6fd2807SJeff Garzik for (i = 0; i < ATA_MAX_DEVICES; i++) { 2345c6fd2807SJeff Garzik struct ata_device *dev = &ap->device[i]; 2346c6fd2807SJeff Garzik unsigned int action = ata_eh_dev_action(dev); 2347c6fd2807SJeff Garzik 2348c6fd2807SJeff Garzik if ((dev->flags & ATA_DFLAG_SUSPENDED) && 2349c6fd2807SJeff Garzik !(action & ATA_EH_RESUME)) 2350c6fd2807SJeff Garzik break; 2351c6fd2807SJeff Garzik } 2352c6fd2807SJeff Garzik 2353c6fd2807SJeff Garzik if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout)) 2354c6fd2807SJeff Garzik break; 2355c6fd2807SJeff Garzik msleep(10); 2356c6fd2807SJeff Garzik } 2357c6fd2807SJeff Garzik 2358c6fd2807SJeff Garzik done: 2359c6fd2807SJeff Garzik spin_lock_irqsave(ap->lock, flags); 2360c6fd2807SJeff Garzik ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 2361c6fd2807SJeff Garzik if (ap->pm_result) { 2362c6fd2807SJeff Garzik *ap->pm_result = rc; 2363c6fd2807SJeff Garzik ap->pm_result = NULL; 2364c6fd2807SJeff Garzik } 2365c6fd2807SJeff Garzik spin_unlock_irqrestore(ap->lock, flags); 2366c6fd2807SJeff Garzik } 2367
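ata_do_eh() above is the entry point a low-level driver's ->error_handler typically calls with whatever reset methods it implements. A minimal sketch of such a caller, assuming the generic ata_std_prereset()/ata_std_softreset()/sata_std_hardreset()/ata_std_postreset() helpers exported elsewhere in libata (the function name below is purely illustrative):

#include <linux/libata.h>

/* Illustrative sketch only: run the standard EH sequence with the
 * generic reset methods.  A driver passes NULL for any reset method
 * it does not implement; ata_eh_recover() accepts NULL methods.
 */
static void example_sata_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}

The driver would then point .error_handler in its struct ata_port_operations at a function like this, optionally substituting controller-specific softreset/hardreset routines for the generic ones.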