Lines matching "retry", "all", "errors" in libata-eh.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libata-eh.c - libata error handling
8 * as Documentation/driver-api/libata.rst
11 * http://www.sata-io.org/
59 /* Waiting in ->prereset can never be reliable. It's
76 * hardreset. All others are hardreset if available. In most cases
97 15000, /* Some drives are slow to read log pages when waking-up */
125 * On the retry after a command timed out, the next timeout value from
129 * ehc->cmd_timeout_idx keeps track of which timeout to use per
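The two hits above come from the comment block describing how libata escalates the timeout of an internal command: each time a command times out, a per-device index (ehc->cmd_timeout_idx) moves to the next, longer value in a timeout table, so the retry gets more time. Below is a minimal, self-contained userspace sketch of that escalation idea only; the table contents and the helper names (demo_timeouts, next_timeout, note_timed_out) are illustrative stand-ins, not the libata implementation.

#include <stdio.h>

/* Illustrative table of escalating timeouts (ms); a sentinel marks the end,
 * after which the last (largest) value keeps being used. */
#define TIMEOUT_END (~0UL)

static const unsigned long demo_timeouts[] = { 5000, 10000, 30000, TIMEOUT_END };

struct demo_eh_context {
	int cmd_timeout_idx;	/* index of the timeout to use next */
};

/* Return the timeout to use for the next attempt of this command. */
static unsigned long next_timeout(struct demo_eh_context *ehc)
{
	return demo_timeouts[ehc->cmd_timeout_idx];
}

/* Called when the command timed out: advance to the next (longer) timeout,
 * unless we already reached the last real entry of the table. */
static void note_timed_out(struct demo_eh_context *ehc)
{
	if (demo_timeouts[ehc->cmd_timeout_idx + 1] != TIMEOUT_END)
		ehc->cmd_timeout_idx++;
}

int main(void)
{
	struct demo_eh_context ehc = { .cmd_timeout_idx = 0 };

	for (int attempt = 1; attempt <= 5; attempt++) {
		printf("attempt %d: timeout %lums\n", attempt, next_timeout(&ehc));
		note_timed_out(&ehc);	/* pretend every attempt timed out */
	}
	return 0;
}

The effect is that a command which keeps timing out is retried with 5, then 10, then 30 seconds in this sketch, and stays at the largest value thereafter.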
172 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, in __ata_ehi_pushv_desc()
173 ATA_EH_DESC_LEN - ehi->desc_len, in __ata_ehi_pushv_desc()
178 * __ata_ehi_push_desc - push error description without adding separator
182 * Format string according to @fmt and append it to @ehi->desc.
198 * ata_ehi_push_desc - push error description with separator
202 * Format string according to @fmt and append it to @ehi->desc.
203 * If @ehi->desc is not empty, ", " is added in-between.
212 if (ehi->desc_len) in ata_ehi_push_desc()
222 * ata_ehi_clear_desc - clean error description
225 * Clear @ehi->desc.
232 ehi->desc[0] = '\0'; in ata_ehi_clear_desc()
233 ehi->desc_len = 0; in ata_ehi_clear_desc()
238 * ata_port_desc - append port description
244 * in-between. This function is to be used while initializing
254 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); in ata_port_desc()
256 if (ap->link.eh_info.desc_len) in ata_port_desc()
257 __ata_ehi_push_desc(&ap->link.eh_info, " "); in ata_port_desc()
260 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); in ata_port_desc()
267 * ata_port_pbar_desc - append PCI BAR description
284 struct pci_dev *pdev = to_pci_dev(ap->host->dev); in ata_port_pbar_desc()
317 return -1; in ata_lookup_timeout_table()
321 * ata_internal_cmd_timeout - determine timeout for an internal command
335 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timeout()
342 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timeout()
347 * ata_internal_cmd_timed_out - notification for internal command timeout
360 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_internal_cmd_timed_out()
367 idx = ehc->cmd_timeout_idx[dev->devno][ent]; in ata_internal_cmd_timed_out()
369 ehc->cmd_timeout_idx[dev->devno][ent]++; in ata_internal_cmd_timed_out()
379 ering->cursor++; in ata_ering_record()
380 ering->cursor %= ATA_ERING_SIZE; in ata_ering_record()
382 ent = &ering->ring[ering->cursor]; in ata_ering_record()
383 ent->eflags = eflags; in ata_ering_record()
384 ent->err_mask = err_mask; in ata_ering_record()
385 ent->timestamp = get_jiffies_64(); in ata_ering_record()
390 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; in ata_ering_top()
392 if (ent->err_mask) in ata_ering_top()
404 idx = ering->cursor; in ata_ering_map()
406 ent = &ering->ring[idx]; in ata_ering_map()
407 if (!ent->err_mask) in ata_ering_map()
412 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; in ata_ering_map()
413 } while (idx != ering->cursor); in ata_ering_map()
420 ent->eflags |= ATA_EFLAG_OLD_ER; in ata_ering_clear_cb()
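The ata_ering hits above show a small fixed-size ring that records recent errors (flags, mask, timestamp) and can be walked from the newest entry backwards. The following is a minimal userspace sketch of the same record/iterate pattern; ERING_SIZE, the entry fields and the callback are illustrative stand-ins, not the libata definitions.

#include <stdio.h>
#include <time.h>

#define ERING_SIZE 8	/* illustrative ring size */

struct ering_entry {
	unsigned int err_mask;	/* 0 means "slot unused" */
	time_t timestamp;
};

struct ering {
	int cursor;		/* most recently written slot */
	struct ering_entry ring[ERING_SIZE];
};

/* Record a new error in the next slot; the cursor wraps around. */
static void ering_record(struct ering *e, unsigned int err_mask)
{
	e->cursor = (e->cursor + 1) % ERING_SIZE;
	e->ring[e->cursor].err_mask = err_mask;
	e->ring[e->cursor].timestamp = time(NULL);
}

/* Walk entries from newest to oldest, stopping at the first unused slot
 * or when the callback returns non-zero. */
static int ering_map(struct ering *e,
		     int (*fn)(struct ering_entry *ent, void *arg), void *arg)
{
	int idx = e->cursor;
	int rc = 0;

	do {
		struct ering_entry *ent = &e->ring[idx];

		if (!ent->err_mask)
			break;
		rc = fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ERING_SIZE) % ERING_SIZE;
	} while (idx != e->cursor);

	return rc;
}

static int count_cb(struct ering_entry *ent, void *arg)
{
	(*(int *)arg)++;
	return 0;
}

int main(void)
{
	struct ering e = { 0 };
	int n = 0;

	ering_record(&e, 0x4);	/* mask values are illustrative only */
	ering_record(&e, 0x10);
	ering_map(&e, count_cb, &n);
	printf("%d recent errors recorded\n", n);	/* prints 2 */
	return 0;
}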
431 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_dev_action()
433 return ehc->i.action | ehc->i.dev_action[dev->devno]; in ata_eh_dev_action()
442 ehi->action &= ~action; in ata_eh_clear_action()
443 ata_for_each_dev(tdev, link, ALL) in ata_eh_clear_action()
444 ehi->dev_action[tdev->devno] &= ~action; in ata_eh_clear_action()
446 /* doesn't make sense for port-wide EH actions */ in ata_eh_clear_action()
449 /* break ehi->action into ehi->dev_action */ in ata_eh_clear_action()
450 if (ehi->action & action) { in ata_eh_clear_action()
451 ata_for_each_dev(tdev, link, ALL) in ata_eh_clear_action()
452 ehi->dev_action[tdev->devno] |= in ata_eh_clear_action()
453 ehi->action & action; in ata_eh_clear_action()
454 ehi->action &= ~action; in ata_eh_clear_action()
457 /* turn off the specified per-dev action */ in ata_eh_clear_action()
458 ehi->dev_action[dev->devno] &= ~action; in ata_eh_clear_action()
463 * ata_eh_acquire - acquire EH ownership
475 mutex_lock(&ap->host->eh_mutex); in ata_eh_acquire()
476 WARN_ON_ONCE(ap->host->eh_owner); in ata_eh_acquire()
477 ap->host->eh_owner = current; in ata_eh_acquire()
481 * ata_eh_release - release EH ownership
492 WARN_ON_ONCE(ap->host->eh_owner != current); in ata_eh_release()
493 ap->host->eh_owner = NULL; in ata_eh_release()
494 mutex_unlock(&ap->host->eh_mutex); in ata_eh_release()
504 * Unless we are restarting, transition all enabled devices to in ata_eh_unload()
519 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); in ata_eh_unload()
520 ata_for_each_dev(dev, link, ALL) in ata_eh_unload()
525 spin_lock_irqsave(ap->lock, flags); in ata_eh_unload()
528 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ in ata_eh_unload()
529 ap->pflags |= ATA_PFLAG_UNLOADED; in ata_eh_unload()
531 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_unload()
535 * ata_scsi_error - SCSI layer error handler callback
538 * Handles SCSI-layer-thrown error events.
552 spin_lock_irqsave(host->host_lock, flags); in ata_scsi_error()
553 list_splice_init(&host->eh_cmd_q, &eh_work_q); in ata_scsi_error()
554 spin_unlock_irqrestore(host->host_lock, flags); in ata_scsi_error()
562 /* finish or retry handled scmd's and clean up */ in ata_scsi_error()
568 * ata_scsi_cmd_error_handler - error callback for a list of commands
574 * ap->eh_done_q. This function is the first part of the libata error
591 * For EH, all qcs are finished in one of three ways - in ata_scsi_cmd_error_handler()
603 spin_lock_irqsave(ap->lock, flags); in ata_scsi_cmd_error_handler()
606 * This must occur under the ap->lock as we don't want in ata_scsi_cmd_error_handler()
610 * non-notified command and completes much like an IRQ handler. in ata_scsi_cmd_error_handler()
615 if (ap->ops->lost_interrupt) in ata_scsi_cmd_error_handler()
616 ap->ops->lost_interrupt(ap); in ata_scsi_cmd_error_handler()
622 * If the scmd was added to EH, via ata_qc_schedule_eh() -> in ata_scsi_cmd_error_handler()
623 * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will in ata_scsi_cmd_error_handler()
630 if (qc->flags & ATA_QCFLAG_ACTIVE && in ata_scsi_cmd_error_handler()
631 qc->scsicmd == scmd) in ata_scsi_cmd_error_handler()
637 if (!(qc->flags & ATA_QCFLAG_EH)) { in ata_scsi_cmd_error_handler()
640 qc->err_mask |= AC_ERR_TIMEOUT; in ata_scsi_cmd_error_handler()
641 qc->flags |= ATA_QCFLAG_EH; in ata_scsi_cmd_error_handler()
649 scmd->retries = scmd->allowed; in ata_scsi_cmd_error_handler()
650 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in ata_scsi_cmd_error_handler()
665 ap->eh_tries = ATA_EH_MAX_TRIES; in ata_scsi_cmd_error_handler()
667 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_cmd_error_handler()
672 * ata_scsi_port_error_handler - recover the port after the commands
676 * Handle the recovery of the port @ap after all the commands
688 del_timer_sync(&ap->fastdrain_timer); in ata_scsi_port_error_handler()
694 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
697 struct ata_eh_context *ehc = &link->eh_context; in ata_scsi_port_error_handler()
700 memset(&link->eh_context, 0, sizeof(link->eh_context)); in ata_scsi_port_error_handler()
701 link->eh_context.i = link->eh_info; in ata_scsi_port_error_handler()
702 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
705 int devno = dev->devno; in ata_scsi_port_error_handler()
707 ehc->saved_xfer_mode[devno] = dev->xfer_mode; in ata_scsi_port_error_handler()
709 ehc->saved_ncq_enabled |= 1 << devno; in ata_scsi_port_error_handler()
712 if (ap->pflags & ATA_PFLAG_RESUMING) { in ata_scsi_port_error_handler()
713 dev->flags |= ATA_DFLAG_RESUMING; in ata_scsi_port_error_handler()
714 ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE; in ata_scsi_port_error_handler()
719 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
720 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
721 ap->excl_link = NULL; /* don't maintain exclusion over EH */ in ata_scsi_port_error_handler()
723 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
726 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) in ata_scsi_port_error_handler()
727 ap->ops->error_handler(ap); in ata_scsi_port_error_handler()
730 if ((ap->pflags & ATA_PFLAG_UNLOADING) && in ata_scsi_port_error_handler()
731 !(ap->pflags & ATA_PFLAG_UNLOADED)) in ata_scsi_port_error_handler()
740 * Exception might have happened after ->error_handler recovered the in ata_scsi_port_error_handler()
743 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
745 if (ap->pflags & ATA_PFLAG_EH_PENDING) { in ata_scsi_port_error_handler()
746 if (--ap->eh_tries) { in ata_scsi_port_error_handler()
747 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
753 ap->pflags &= ~ATA_PFLAG_EH_PENDING; in ata_scsi_port_error_handler()
758 memset(&link->eh_info, 0, sizeof(link->eh_info)); in ata_scsi_port_error_handler()
761 * end eh (clear host_eh_scheduled) while holding ap->lock such that if in ata_scsi_port_error_handler()
763 * midlayer will re-initiate EH. in ata_scsi_port_error_handler()
765 ap->ops->end_eh(ap); in ata_scsi_port_error_handler()
767 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
770 scsi_eh_flush_done_q(&ap->eh_done_q); in ata_scsi_port_error_handler()
773 spin_lock_irqsave(ap->lock, flags); in ata_scsi_port_error_handler()
775 ap->pflags &= ~ATA_PFLAG_RESUMING; in ata_scsi_port_error_handler()
777 if (ap->pflags & ATA_PFLAG_LOADING) in ata_scsi_port_error_handler()
778 ap->pflags &= ~ATA_PFLAG_LOADING; in ata_scsi_port_error_handler()
779 else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && in ata_scsi_port_error_handler()
780 !(ap->flags & ATA_FLAG_SAS_HOST)) in ata_scsi_port_error_handler()
781 schedule_delayed_work(&ap->hotplug_task, 0); in ata_scsi_port_error_handler()
783 if (ap->pflags & ATA_PFLAG_RECOVERED) in ata_scsi_port_error_handler()
786 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); in ata_scsi_port_error_handler()
789 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; in ata_scsi_port_error_handler()
790 wake_up_all(&ap->eh_wait_q); in ata_scsi_port_error_handler()
792 spin_unlock_irqrestore(ap->lock, flags); in ata_scsi_port_error_handler()
797 * ata_port_wait_eh - Wait for the currently pending EH to complete
810 retry: in ata_port_wait_eh()
811 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
813 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { in ata_port_wait_eh()
814 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); in ata_port_wait_eh()
815 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
817 spin_lock_irqsave(ap->lock, flags); in ata_port_wait_eh()
819 finish_wait(&ap->eh_wait_q, &wait); in ata_port_wait_eh()
821 spin_unlock_irqrestore(ap->lock, flags); in ata_port_wait_eh()
824 if (scsi_host_in_recovery(ap->scsi_host)) { in ata_port_wait_eh()
826 goto retry; in ata_port_wait_eh()
837 /* count only non-internal commands */ in ata_eh_nr_in_flight()
852 spin_lock_irqsave(ap->lock, flags); in ata_eh_fastdrain_timerfn()
860 if (cnt == ap->fastdrain_cnt) { in ata_eh_fastdrain_timerfn()
864 /* No progress during the last interval, tag all in ata_eh_fastdrain_timerfn()
865 * in-flight qcs as timed out and freeze the port. in ata_eh_fastdrain_timerfn()
869 qc->err_mask |= AC_ERR_TIMEOUT; in ata_eh_fastdrain_timerfn()
875 ap->fastdrain_cnt = cnt; in ata_eh_fastdrain_timerfn()
876 ap->fastdrain_timer.expires = in ata_eh_fastdrain_timerfn()
878 add_timer(&ap->fastdrain_timer); in ata_eh_fastdrain_timerfn()
882 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_fastdrain_timerfn()
886 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
891 * is non-zero and EH wasn't pending before. Fast drain ensures
902 if (ap->pflags & ATA_PFLAG_EH_PENDING) in ata_eh_set_pending()
905 ap->pflags |= ATA_PFLAG_EH_PENDING; in ata_eh_set_pending()
910 /* do we have in-flight qcs? */ in ata_eh_set_pending()
916 ap->fastdrain_cnt = cnt; in ata_eh_set_pending()
917 ap->fastdrain_timer.expires = in ata_eh_set_pending()
919 add_timer(&ap->fastdrain_timer); in ata_eh_set_pending()
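The fast-drain hits above (ata_eh_fastdrain_timerfn and ata_eh_set_pending) describe the idea: when EH becomes pending, remember how many commands are in flight and arm a timer; if the count has not moved by the time the timer fires, the remaining commands are treated as stuck, marked timed out and the port is frozen, otherwise the timer is re-armed and draining continues. A compressed userspace sketch of that decision follows, with an invented port structure and a simulated timer tick rather than a real timer.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for the port state that fast drain looks at. */
struct port {
	int in_flight;		/* number of commands still active */
	int fastdrain_cnt;	/* in-flight count when the timer was armed */
	bool frozen;
};

/* Arm fast drain when error handling becomes pending. */
static void fastdrain_arm(struct port *ap)
{
	ap->fastdrain_cnt = ap->in_flight;
	/* a real implementation would start a timer here */
}

/* Timer callback: no progress since the last interval means the remaining
 * commands are stuck, so fail them and freeze; otherwise re-arm. */
static void fastdrain_tick(struct port *ap)
{
	if (ap->in_flight == 0)
		return;			/* everything drained, nothing to do */

	if (ap->in_flight == ap->fastdrain_cnt) {
		printf("no progress: timing out %d commands, freezing port\n",
		       ap->in_flight);
		ap->in_flight = 0;
		ap->frozen = true;
	} else {
		printf("progress made (%d -> %d), re-arming\n",
		       ap->fastdrain_cnt, ap->in_flight);
		fastdrain_arm(ap);
	}
}

int main(void)
{
	struct port ap = { .in_flight = 4 };

	fastdrain_arm(&ap);
	ap.in_flight = 2;	/* two commands completed during the interval */
	fastdrain_tick(&ap);	/* progress was made: re-arm */
	fastdrain_tick(&ap);	/* still 2 in flight: time out and freeze */
	return 0;
}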
923 * ata_qc_schedule_eh - schedule qc for error handling
934 struct ata_port *ap = qc->ap; in ata_qc_schedule_eh()
936 qc->flags |= ATA_QCFLAG_EH; in ata_qc_schedule_eh()
944 blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); in ata_qc_schedule_eh()
948 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
956 if (ap->pflags & ATA_PFLAG_INITIALIZING) in ata_std_sched_eh()
960 scsi_schedule_eh(ap->scsi_host); in ata_std_sched_eh()
967 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
971 * shost, so host fields can be directly manipulated under ap->lock, in
972 * the libsas case we need to hold a lock at the ha->level to coordinate
980 struct Scsi_Host *host = ap->scsi_host; in ata_std_end_eh()
982 host->host_eh_scheduled = 0; in ata_std_end_eh()
988 * ata_port_schedule_eh - schedule error handling without a qc
992 * all commands are drained.
1000 ap->ops->sched_eh(ap); in ata_port_schedule_eh()
1009 /* we're gonna abort all commands, no need for fast drain */ in ata_do_link_abort()
1014 if (qc && (!link || qc->dev->link == link)) { in ata_do_link_abort()
1015 qc->flags |= ATA_QCFLAG_EH; in ata_do_link_abort()
1028 * ata_link_abort - abort all qc's on the link
1031 * Abort all active qc's active on @link and schedule EH.
1041 return ata_do_link_abort(link->ap, link); in ata_link_abort()
1046 * ata_port_abort - abort all qc's on the port
1049 * Abort all active qc's of @ap and schedule EH.
1064 * __ata_port_freeze - freeze port
1072 * ap->ops->freeze() callback can be used for freezing the port
1073 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1074 * port cannot be frozen hardware-wise, the interrupt handler
1083 if (ap->ops->freeze) in __ata_port_freeze()
1084 ap->ops->freeze(ap); in __ata_port_freeze()
1086 ap->pflags |= ATA_PFLAG_FROZEN; in __ata_port_freeze()
1092 * ata_port_freeze - abort & freeze port
1114 * ata_eh_freeze_port - EH helper to freeze port
1126 spin_lock_irqsave(ap->lock, flags); in ata_eh_freeze_port()
1128 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_freeze_port()
1133 * ata_eh_thaw_port - EH helper to thaw port
1145 spin_lock_irqsave(ap->lock, flags); in ata_eh_thaw_port()
1147 ap->pflags &= ~ATA_PFLAG_FROZEN; in ata_eh_thaw_port()
1149 if (ap->ops->thaw) in ata_eh_thaw_port()
1150 ap->ops->thaw(ap); in ata_eh_thaw_port()
1152 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_thaw_port()
1164 struct ata_port *ap = qc->ap; in __ata_eh_qc_complete()
1165 struct scsi_cmnd *scmd = qc->scsicmd; in __ata_eh_qc_complete()
1168 spin_lock_irqsave(ap->lock, flags); in __ata_eh_qc_complete()
1169 qc->scsidone = ata_eh_scsidone; in __ata_eh_qc_complete()
1171 WARN_ON(ata_tag_valid(qc->tag)); in __ata_eh_qc_complete()
1172 spin_unlock_irqrestore(ap->lock, flags); in __ata_eh_qc_complete()
1174 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); in __ata_eh_qc_complete()
1178 * ata_eh_qc_complete - Complete an active ATA command from EH
1186 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_complete()
1187 scmd->retries = scmd->allowed; in ata_eh_qc_complete()
1192 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1193 * @qc: Command to retry
1198 * SCSI midlayer limits the number of retries to scmd->allowed.
1199 * scmd->allowed is incremented for commands which get retried
1200 * due to unrelated failures (qc->err_mask is zero).
1204 struct scsi_cmnd *scmd = qc->scsicmd; in ata_eh_qc_retry()
1205 if (!qc->err_mask) in ata_eh_qc_retry()
1206 scmd->allowed++; in ata_eh_qc_retry()
1211 * ata_dev_disable - disable ATA device
1227 dev->class++; in ata_dev_disable()
1232 ata_ering_clear(&dev->ering); in ata_dev_disable()
1237 * ata_eh_detach_dev - detach ATA device
1247 struct ata_link *link = dev->link; in ata_eh_detach_dev()
1248 struct ata_port *ap = link->ap; in ata_eh_detach_dev()
1249 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_detach_dev()
1261 spin_lock_irqsave(ap->lock, flags); in ata_eh_detach_dev()
1263 dev->flags &= ~ATA_DFLAG_DETACH; in ata_eh_detach_dev()
1266 dev->flags |= ATA_DFLAG_DETACHED; in ata_eh_detach_dev()
1267 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_detach_dev()
1270 /* clear per-dev EH info */ in ata_eh_detach_dev()
1271 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1272 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); in ata_eh_detach_dev()
1273 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_detach_dev()
1274 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_detach_dev()
1276 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_detach_dev()
1280 * ata_eh_about_to_do - about to perform eh_action
1282 * @dev: target ATA dev for per-dev action (can be NULL)
1286 * in @link->eh_info such that eh actions are not unnecessarily
1295 struct ata_port *ap = link->ap; in ata_eh_about_to_do()
1296 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_about_to_do()
1297 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_about_to_do()
1300 trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action); in ata_eh_about_to_do()
1302 spin_lock_irqsave(ap->lock, flags); in ata_eh_about_to_do()
1309 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) in ata_eh_about_to_do()
1310 ap->pflags |= ATA_PFLAG_RECOVERED; in ata_eh_about_to_do()
1312 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_about_to_do()
1316 * ata_eh_done - EH action complete
1318 * @dev: target ATA dev for per-dev action (can be NULL)
1322 * in @link->eh_context.
1330 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_done()
1332 trace_ata_eh_done(link, dev ? dev->devno : 0, action); in ata_eh_done()
1334 ata_eh_clear_action(link, dev, &ehc->i, action); in ata_eh_done()
1338 * ata_err_string - convert err_mask to descriptive string
1341 * Convert @err_mask to descriptive string. Errors are
1377 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1408 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1422 struct scsi_cmnd *cmd = qc->scsicmd; in ata_eh_request_sense()
1423 struct ata_device *dev = qc->dev; in ata_eh_request_sense()
1427 if (ata_port_is_frozen(qc->ap)) { in ata_eh_request_sense()
1432 if (!ata_id_sense_reporting_enabled(dev->id)) { in ata_eh_request_sense()
1433 ata_dev_warn(qc->dev, "sense data reporting disabled\n"); in ata_eh_request_sense()
1447 /* Set sense without also setting scsicmd->result */ in ata_eh_request_sense()
1448 scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE, in ata_eh_request_sense()
1449 cmd->sense_buffer, tf.lbah, in ata_eh_request_sense()
1451 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_request_sense()
1463 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1482 struct ata_port *ap = dev->link->ap; in atapi_eh_request_sense()
1488 * for the case where they are -not- overwritten in atapi_eh_request_sense()
1500 if (ap->flags & ATA_FLAG_PIO_DMA) { in atapi_eh_request_sense()
1514 * ata_eh_analyze_serror - analyze SError for a failed port
1525 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_analyze_serror()
1526 u32 serror = ehc->i.serror; in ata_eh_analyze_serror()
1548 if (link->lpm_policy > ATA_LPM_MAX_POWER) in ata_eh_analyze_serror()
1550 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) in ata_eh_analyze_serror()
1556 ata_ehi_hotplugged(&ehc->i); in ata_eh_analyze_serror()
1558 ehc->i.err_mask |= err_mask; in ata_eh_analyze_serror()
1559 ehc->i.action |= action; in ata_eh_analyze_serror()
1563 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1578 const struct ata_taskfile *tf = &qc->result_tf; in ata_eh_analyze_tf()
1580 u8 stat = tf->status, err = tf->error; in ata_eh_analyze_tf()
1583 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1588 qc->err_mask |= AC_ERR_DEV; in ata_eh_analyze_tf()
1599 switch (qc->dev->class) { in ata_eh_analyze_tf()
1604 * -It was a non-NCQ command that failed, or in ata_eh_analyze_tf()
1605 * -It was a NCQ command that failed, but the sense data in ata_eh_analyze_tf()
1609 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && in ata_eh_analyze_tf()
1611 set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION); in ata_eh_analyze_tf()
1613 qc->err_mask |= AC_ERR_ATA_BUS; in ata_eh_analyze_tf()
1615 qc->err_mask |= AC_ERR_MEDIA; in ata_eh_analyze_tf()
1617 qc->err_mask |= AC_ERR_INVALID; in ata_eh_analyze_tf()
1621 if (!ata_port_is_frozen(qc->ap)) { in ata_eh_analyze_tf()
1622 tmp = atapi_eh_request_sense(qc->dev, in ata_eh_analyze_tf()
1623 qc->scsicmd->sense_buffer, in ata_eh_analyze_tf()
1624 qc->result_tf.error >> 4); in ata_eh_analyze_tf()
1626 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_analyze_tf()
1628 qc->err_mask |= tmp; in ata_eh_analyze_tf()
1632 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { in ata_eh_analyze_tf()
1633 enum scsi_disposition ret = scsi_check_sense(qc->scsicmd); in ata_eh_analyze_tf()
1644 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_analyze_tf()
1645 qc->err_mask |= AC_ERR_OTHER; in ata_eh_analyze_tf()
1647 qc->err_mask |= AC_ERR_HSM; in ata_eh_analyze_tf()
1650 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) in ata_eh_analyze_tf()
1695 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) in speed_down_verdict_cb()
1696 return -1; in speed_down_verdict_cb()
1698 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, in speed_down_verdict_cb()
1699 &arg->xfer_ok); in speed_down_verdict_cb()
1700 arg->nr_errors[cat]++; in speed_down_verdict_cb()
1706 * ata_eh_speed_down_verdict - Determine speed down verdict
1733 * taken per error. An action triggered by non-DUBIOUS errors
1734 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1739 * DUBIOUS errors.
1741 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1744 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1747 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1750 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1753 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1754 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1771 arg.since = j64 - min(j64, j5mins); in ata_eh_speed_down_verdict()
1772 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
1790 arg.since = j64 - min(j64, j10mins); in ata_eh_speed_down_verdict()
1791 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); in ata_eh_speed_down_verdict()
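The verdict rules quoted above boil down to: walk the error ring, ignore entries that are marked old or fall outside a time window, bucket the rest by category, and compare the per-category counts against thresholds. A small userspace sketch of that windowed counting follows; the categories, thresholds and verdict bits are invented for illustration and are not the libata values.

#include <stdio.h>
#include <time.h>

/* Illustrative error categories and verdict bits. */
enum { CAT_ATA_BUS, CAT_TOUT_HSM, CAT_UNK_DEV, NR_CATS };
#define VERDICT_SPEED_DOWN	0x1
#define VERDICT_NCQ_OFF		0x2

struct err_event {
	int cat;
	time_t when;
};

/* Count events per category that happened within the last @window seconds
 * and derive a verdict from (made-up) thresholds. */
static unsigned int speed_down_verdict(const struct err_event *ev, int n,
				       time_t now, time_t window)
{
	int nr[NR_CATS] = { 0 };
	unsigned int verdict = 0;

	for (int i = 0; i < n; i++) {
		if (ev[i].when < now - window)
			continue;	/* too old, outside the window */
		nr[ev[i].cat]++;
	}

	if (nr[CAT_ATA_BUS] + nr[CAT_TOUT_HSM] > 3)
		verdict |= VERDICT_SPEED_DOWN;
	if (nr[CAT_TOUT_HSM] + nr[CAT_UNK_DEV] > 3)
		verdict |= VERDICT_NCQ_OFF;

	return verdict;
}

int main(void)
{
	time_t now = time(NULL);
	struct err_event ev[] = {
		{ CAT_TOUT_HSM, now - 10 },
		{ CAT_TOUT_HSM, now - 60 },
		{ CAT_TOUT_HSM, now - 90 },
		{ CAT_TOUT_HSM, now - 120 },
		{ CAT_UNK_DEV,  now - 7200 },	/* outside a 10 minute window */
	};
	unsigned int v = speed_down_verdict(ev, 5, now, 600);

	printf("verdict:%s%s\n",
	       v & VERDICT_SPEED_DOWN ? " SPEED_DOWN" : "",
	       v & VERDICT_NCQ_OFF ? " NCQ_OFF" : "");
	return 0;
}

In libata the analogous counting is done twice, over a short and a long window, which is why the real function maps the ring with two different cutoff times before deciding on NCQ_OFF, SPEED_DOWN or FALLBACK_TO_PIO.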
1806 * ata_eh_speed_down - record error and speed down if necessary
1830 /* don't bother if Cat-0 error */ in ata_eh_speed_down()
1835 ata_ering_record(&dev->ering, eflags, err_mask); in ata_eh_speed_down()
1840 dev->flags |= ATA_DFLAG_NCQ_OFF; in ata_eh_speed_down()
1841 ata_dev_warn(dev, "NCQ disabled due to excessive errors\n"); in ata_eh_speed_down()
1854 if (dev->spdn_cnt < 2) { in ata_eh_speed_down()
1861 if (dev->xfer_shift != ATA_SHIFT_PIO) in ata_eh_speed_down()
1862 sel = dma_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1864 sel = pio_dnxfer_sel[dev->spdn_cnt]; in ata_eh_speed_down()
1866 dev->spdn_cnt++; in ata_eh_speed_down()
1878 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && in ata_eh_speed_down()
1879 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && in ata_eh_speed_down()
1880 (dev->xfer_shift != ATA_SHIFT_PIO)) { in ata_eh_speed_down()
1882 dev->spdn_cnt = 0; in ata_eh_speed_down()
1892 ata_ering_clear(&dev->ering); in ata_eh_speed_down()
1897 * ata_eh_worth_retry - analyze error and decide whether to retry
1898 * @qc: qc to possibly retry
1900 * Look at the cause of the error and decide if a retry
1901 * might be useful or not. We don't want to retry media errors
1902 * because the drive itself has probably already taken 10-30 seconds
1907 if (qc->err_mask & AC_ERR_MEDIA) in ata_eh_worth_retry()
1908 return 0; /* don't retry media errors */ in ata_eh_worth_retry()
1909 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_worth_retry()
1910 return 1; /* otherwise retry anything from fs stack */ in ata_eh_worth_retry()
1911 if (qc->err_mask & AC_ERR_INVALID) in ata_eh_worth_retry()
1912 return 0; /* don't retry these */ in ata_eh_worth_retry()
1913 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */ in ata_eh_worth_retry()
1917 * ata_eh_quiet - check if we need to be quiet about a command error
1925 if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET) in ata_eh_quiet()
1926 qc->flags |= ATA_QCFLAG_QUIET; in ata_eh_quiet()
1927 return qc->flags & ATA_QCFLAG_QUIET; in ata_eh_quiet()
1932 struct ata_port *ap = link->ap; in ata_eh_read_sense_success_non_ncq()
1935 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_read_sense_success_non_ncq()
1937 return -EIO; in ata_eh_read_sense_success_non_ncq()
1939 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_read_sense_success_non_ncq()
1940 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_read_sense_success_non_ncq()
1941 qc->err_mask) in ata_eh_read_sense_success_non_ncq()
1942 return -EIO; in ata_eh_read_sense_success_non_ncq()
1945 return -EIO; in ata_eh_read_sense_success_non_ncq()
1952 scsi_check_sense(qc->scsicmd); in ata_eh_read_sense_success_non_ncq()
1959 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_get_success_sense()
1960 struct ata_device *dev = link->device; in ata_eh_get_success_sense()
1961 struct ata_port *ap = link->ap; in ata_eh_get_success_sense()
1965 if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE)) in ata_eh_get_success_sense()
1978 * data. Otherwise, we are dealing with a non-NCQ command and use in ata_eh_get_success_sense()
1981 if (link->sactive) in ata_eh_get_success_sense()
2002 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_get_success_sense()
2003 !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) || in ata_eh_get_success_sense()
2004 qc->err_mask || in ata_eh_get_success_sense()
2005 ata_dev_phys_link(qc->dev) != link) in ata_eh_get_success_sense()
2009 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_get_success_sense()
2013 if (!(qc->result_tf.status & ATA_SENSE)) in ata_eh_get_success_sense()
2017 ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0); in ata_eh_get_success_sense()
2018 qc->flags |= ATA_QCFLAG_SENSE_VALID; in ata_eh_get_success_sense()
2024 * ata_eh_link_autopsy - analyze error and determine recovery action
2036 struct ata_port *ap = link->ap; in ata_eh_link_autopsy()
2037 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_autopsy()
2045 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) in ata_eh_link_autopsy()
2051 ehc->i.serror |= serror; in ata_eh_link_autopsy()
2053 } else if (rc != -EOPNOTSUPP) { in ata_eh_link_autopsy()
2055 ehc->i.probe_mask |= ATA_ALL_DEVICES; in ata_eh_link_autopsy()
2056 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2057 ehc->i.err_mask |= AC_ERR_OTHER; in ata_eh_link_autopsy()
2072 if (ehc->i.err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2073 ehc->i.err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2075 all_err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2078 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_autopsy()
2079 qc->flags & ATA_QCFLAG_RETRY || in ata_eh_link_autopsy()
2080 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD || in ata_eh_link_autopsy()
2081 ata_dev_phys_link(qc->dev) != link) in ata_eh_link_autopsy()
2085 qc->err_mask |= ehc->i.err_mask; in ata_eh_link_autopsy()
2088 ehc->i.action |= ata_eh_analyze_tf(qc); in ata_eh_link_autopsy()
2090 /* DEV errors are probably spurious in case of ATA_BUS error */ in ata_eh_link_autopsy()
2091 if (qc->err_mask & AC_ERR_ATA_BUS) in ata_eh_link_autopsy()
2092 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | in ata_eh_link_autopsy()
2096 if (qc->err_mask & ~AC_ERR_OTHER) in ata_eh_link_autopsy()
2097 qc->err_mask &= ~AC_ERR_OTHER; in ata_eh_link_autopsy()
2106 if (qc->flags & ATA_QCFLAG_SENSE_VALID) in ata_eh_link_autopsy()
2107 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); in ata_eh_link_autopsy()
2109 qc->flags |= ATA_QCFLAG_RETRY; in ata_eh_link_autopsy()
2112 ehc->i.dev = qc->dev; in ata_eh_link_autopsy()
2113 all_err_mask |= qc->err_mask; in ata_eh_link_autopsy()
2114 if (qc->flags & ATA_QCFLAG_IO) in ata_eh_link_autopsy()
2118 /* Count quiet errors */ in ata_eh_link_autopsy()
2124 /* If all failed commands requested silence, then be quiet */ in ata_eh_link_autopsy()
2126 ehc->i.flags |= ATA_EHI_QUIET; in ata_eh_link_autopsy()
2131 ehc->i.action |= ATA_EH_RESET; in ata_eh_link_autopsy()
2134 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_link_autopsy()
2137 * perform per-dev EH action only on the offending device. in ata_eh_link_autopsy()
2139 if (ehc->i.dev) { in ata_eh_link_autopsy()
2140 ehc->i.dev_action[ehc->i.dev->devno] |= in ata_eh_link_autopsy()
2141 ehc->i.action & ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2142 ehc->i.action &= ~ATA_EH_PERDEV_MASK; in ata_eh_link_autopsy()
2147 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; in ata_eh_link_autopsy()
2150 dev = ehc->i.dev; in ata_eh_link_autopsy()
2152 ata_dev_enabled(link->device)))) in ata_eh_link_autopsy()
2153 dev = link->device; in ata_eh_link_autopsy()
2156 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) in ata_eh_link_autopsy()
2158 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); in ata_eh_link_autopsy()
2159 trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask); in ata_eh_link_autopsy()
2164 * ata_eh_autopsy - analyze error and determine recovery action
2167 * Analyze all links of @ap and determine why they failed and
2184 if (ap->slave_link) { in ata_eh_autopsy()
2185 struct ata_eh_context *mehc = &ap->link.eh_context; in ata_eh_autopsy()
2186 struct ata_eh_context *sehc = &ap->slave_link->eh_context; in ata_eh_autopsy()
2189 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; in ata_eh_autopsy()
2192 ata_eh_link_autopsy(ap->slave_link); in ata_eh_autopsy()
2195 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2196 mehc->i.action |= sehc->i.action; in ata_eh_autopsy()
2197 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; in ata_eh_autopsy()
2198 mehc->i.flags |= sehc->i.flags; in ata_eh_autopsy()
2199 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); in ata_eh_autopsy()
2206 ata_eh_link_autopsy(&ap->link); in ata_eh_autopsy()
2210 * ata_get_cmd_name - get name for ATA command
2253 { ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" }, in ata_get_cmd_name()
2282 { ATA_CMD_TRUSTED_NONDATA, "TRUSTED NON-DATA" }, in ata_get_cmd_name()
2332 * ata_eh_link_report - report error handling to user
2342 struct ata_port *ap = link->ap; in ata_eh_link_report()
2343 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_link_report()
2349 if (ehc->i.flags & ATA_EHI_QUIET) in ata_eh_link_report()
2353 if (ehc->i.desc[0] != '\0') in ata_eh_link_report()
2354 desc = ehc->i.desc; in ata_eh_link_report()
2357 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2358 ata_dev_phys_link(qc->dev) != link || in ata_eh_link_report()
2359 ((qc->flags & ATA_QCFLAG_QUIET) && in ata_eh_link_report()
2360 qc->err_mask == AC_ERR_DEV)) in ata_eh_link_report()
2362 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) in ata_eh_link_report()
2368 if (!nr_failed && !ehc->i.err_mask) in ata_eh_link_report()
2375 if (ap->eh_tries < ATA_EH_MAX_TRIES) in ata_eh_link_report()
2377 ap->eh_tries); in ata_eh_link_report()
2379 if (ehc->i.dev) { in ata_eh_link_report()
2380 ata_dev_err(ehc->i.dev, "exception Emask 0x%x " in ata_eh_link_report()
2382 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2383 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2385 ata_dev_err(ehc->i.dev, "%s\n", desc); in ata_eh_link_report()
2389 ehc->i.err_mask, link->sactive, ehc->i.serror, in ata_eh_link_report()
2390 ehc->i.action, frozen, tries_buf); in ata_eh_link_report()
2396 if (ehc->i.serror) in ata_eh_link_report()
2399 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", in ata_eh_link_report()
2400 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", in ata_eh_link_report()
2401 ehc->i.serror & SERR_DATA ? "UnrecovData " : "", in ata_eh_link_report()
2402 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", in ata_eh_link_report()
2403 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", in ata_eh_link_report()
2404 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", in ata_eh_link_report()
2405 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", in ata_eh_link_report()
2406 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", in ata_eh_link_report()
2407 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", in ata_eh_link_report()
2408 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", in ata_eh_link_report()
2409 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", in ata_eh_link_report()
2410 ehc->i.serror & SERR_CRC ? "BadCRC " : "", in ata_eh_link_report()
2411 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", in ata_eh_link_report()
2412 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", in ata_eh_link_report()
2413 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", in ata_eh_link_report()
2414 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", in ata_eh_link_report()
2415 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); in ata_eh_link_report()
2419 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; in ata_eh_link_report()
2423 if (!(qc->flags & ATA_QCFLAG_EH) || in ata_eh_link_report()
2424 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) in ata_eh_link_report()
2427 if (qc->dma_dir != DMA_NONE) { in ata_eh_link_report()
2435 switch (qc->tf.protocol) { in ata_eh_link_report()
2465 prot_str, qc->nbytes, dma_str[qc->dma_dir]); in ata_eh_link_report()
2468 if (ata_is_atapi(qc->tf.protocol)) { in ata_eh_link_report()
2469 const u8 *cdb = qc->cdb; in ata_eh_link_report()
2470 size_t cdb_len = qc->dev->cdb_len; in ata_eh_link_report()
2472 if (qc->scsicmd) { in ata_eh_link_report()
2473 cdb = qc->scsicmd->cmnd; in ata_eh_link_report()
2474 cdb_len = qc->scsicmd->cmd_len; in ata_eh_link_report()
2479 ata_dev_err(qc->dev, "failed command: %s\n", in ata_eh_link_report()
2480 ata_get_cmd_name(cmd->command)); in ata_eh_link_report()
2482 ata_dev_err(qc->dev, in ata_eh_link_report()
2487 cmd->command, cmd->feature, cmd->nsect, in ata_eh_link_report()
2488 cmd->lbal, cmd->lbam, cmd->lbah, in ata_eh_link_report()
2489 cmd->hob_feature, cmd->hob_nsect, in ata_eh_link_report()
2490 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, in ata_eh_link_report()
2491 cmd->device, qc->tag, data_buf, cdb_buf, in ata_eh_link_report()
2492 res->status, res->error, res->nsect, in ata_eh_link_report()
2493 res->lbal, res->lbam, res->lbah, in ata_eh_link_report()
2494 res->hob_feature, res->hob_nsect, in ata_eh_link_report()
2495 res->hob_lbal, res->hob_lbam, res->hob_lbah, in ata_eh_link_report()
2496 res->device, qc->err_mask, ata_err_string(qc->err_mask), in ata_eh_link_report()
2497 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); in ata_eh_link_report()
2500 if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | in ata_eh_link_report()
2502 if (res->status & ATA_BUSY) in ata_eh_link_report()
2503 ata_dev_err(qc->dev, "status: { Busy }\n"); in ata_eh_link_report()
2505 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", in ata_eh_link_report()
2506 res->status & ATA_DRDY ? "DRDY " : "", in ata_eh_link_report()
2507 res->status & ATA_DF ? "DF " : "", in ata_eh_link_report()
2508 res->status & ATA_DRQ ? "DRQ " : "", in ata_eh_link_report()
2509 res->status & ATA_SENSE ? "SENSE " : "", in ata_eh_link_report()
2510 res->status & ATA_ERR ? "ERR " : ""); in ata_eh_link_report()
2513 if (cmd->command != ATA_CMD_PACKET && in ata_eh_link_report()
2514 (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF | in ata_eh_link_report()
2516 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n", in ata_eh_link_report()
2517 res->error & ATA_ICRC ? "ICRC " : "", in ata_eh_link_report()
2518 res->error & ATA_UNC ? "UNC " : "", in ata_eh_link_report()
2519 res->error & ATA_AMNF ? "AMNF " : "", in ata_eh_link_report()
2520 res->error & ATA_IDNF ? "IDNF " : "", in ata_eh_link_report()
2521 res->error & ATA_ABORTED ? "ABRT " : ""); in ata_eh_link_report()
2527 * ata_eh_report - report error handling to user
2550 ata_for_each_dev(dev, link, ALL) in ata_do_reset()
2551 classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_do_reset()
2558 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) in ata_eh_followup_srst_needed()
2560 if (rc == -EAGAIN) in ata_eh_followup_srst_needed()
2562 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) in ata_eh_followup_srst_needed()
2571 struct ata_port *ap = link->ap; in ata_eh_reset()
2572 struct ata_link *slave = ap->slave_link; in ata_eh_reset()
2573 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_reset()
2574 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; in ata_eh_reset()
2575 unsigned int *classes = ehc->classes; in ata_eh_reset()
2576 unsigned int lflags = link->flags; in ata_eh_reset()
2577 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); in ata_eh_reset()
2592 if (link->flags & ATA_LFLAG_RST_ONCE) in ata_eh_reset()
2594 if (link->flags & ATA_LFLAG_NO_HRST) in ata_eh_reset()
2596 if (link->flags & ATA_LFLAG_NO_SRST) in ata_eh_reset()
2600 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_reset()
2602 WARN_ON(time_after(ehc->last_reset, now)); in ata_eh_reset()
2603 deadline = ata_deadline(ehc->last_reset, in ata_eh_reset()
2606 schedule_timeout_uninterruptible(deadline - now); in ata_eh_reset()
2609 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2610 ap->pflags |= ATA_PFLAG_RESETTING; in ata_eh_reset()
2611 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2615 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2623 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2624 dev->dma_mode = 0xff; in ata_eh_reset()
2631 if (ap->ops->set_piomode) in ata_eh_reset()
2632 ap->ops->set_piomode(ap, dev); in ata_eh_reset()
2637 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2640 ehc->i.action |= ATA_EH_HARDRESET; in ata_eh_reset()
2643 ehc->i.action |= ATA_EH_SOFTRESET; in ata_eh_reset()
2651 sehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2652 sehc->i.action |= ehc->i.action; in ata_eh_reset()
2659 * -ENOENT or clear ATA_EH_RESET. in ata_eh_reset()
2661 if (slave && (rc == 0 || rc == -ENOENT)) { in ata_eh_reset()
2665 if (tmp != -ENOENT) in ata_eh_reset()
2668 ehc->i.action |= sehc->i.action; in ata_eh_reset()
2672 if (rc == -ENOENT) { in ata_eh_reset()
2673 ata_link_dbg(link, "port disabled--ignoring\n"); in ata_eh_reset()
2674 ehc->i.action &= ~ATA_EH_RESET; in ata_eh_reset()
2676 ata_for_each_dev(dev, link, ALL) in ata_eh_reset()
2677 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2690 if (reset && !(ehc->i.action & ATA_EH_RESET)) { in ata_eh_reset()
2691 ata_for_each_dev(dev, link, ALL) in ata_eh_reset()
2692 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2700 retry: in ata_eh_reset()
2715 ehc->last_reset = jiffies; in ata_eh_reset()
2717 ehc->i.flags |= ATA_EHI_DID_HARDRESET; in ata_eh_reset()
2720 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; in ata_eh_reset()
2729 if (rc && rc != -EAGAIN) { in ata_eh_reset()
2748 case -EAGAIN: in ata_eh_reset()
2749 rc = -EAGAIN; in ata_eh_reset()
2760 /* perform follow-up SRST if necessary */ in ata_eh_reset()
2767 "follow-up softreset required but no softreset available\n"); in ata_eh_reset()
2769 rc = -EINVAL; in ata_eh_reset()
2791 * Post-reset processing in ata_eh_reset()
2793 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2798 dev->pio_mode = XFER_PIO_0; in ata_eh_reset()
2799 dev->flags &= ~ATA_DFLAG_SLEEPING; in ata_eh_reset()
2806 classes[dev->devno] = ATA_DEV_ATA; in ata_eh_reset()
2808 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; in ata_eh_reset()
2813 link->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
2815 slave->sata_spd = (sstatus >> 4) & 0xf; in ata_eh_reset()
2838 spin_lock_irqsave(link->ap->lock, flags); in ata_eh_reset()
2839 link->eh_info.serror = 0; in ata_eh_reset()
2841 slave->eh_info.serror = 0; in ata_eh_reset()
2842 spin_unlock_irqrestore(link->ap->lock, flags); in ata_eh_reset()
2853 ata_for_each_dev(dev, link, ALL) { in ata_eh_reset()
2855 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
2857 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2861 if (ata_class_enabled(classes[dev->devno])) in ata_eh_reset()
2864 classes[dev->devno]); in ata_eh_reset()
2865 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2866 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { in ata_eh_reset()
2869 classes[dev->devno] = ATA_DEV_NONE; in ata_eh_reset()
2879 rc = -EAGAIN; in ata_eh_reset()
2891 ehc->last_reset = jiffies; /* update to completion time */ in ata_eh_reset()
2892 ehc->i.action |= ATA_EH_REVALIDATE; in ata_eh_reset()
2893 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ in ata_eh_reset()
2898 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
2900 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; in ata_eh_reset()
2902 spin_lock_irqsave(ap->lock, flags); in ata_eh_reset()
2903 ap->pflags &= ~ATA_PFLAG_RESETTING; in ata_eh_reset()
2904 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_reset()
2909 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ in ata_eh_reset()
2912 rc = -ERESTART; in ata_eh_reset()
2928 unsigned long delta = deadline - now; in ata_eh_reset()
2942 * They need to be reset - as well as the PMP - before retrying. in ata_eh_reset()
2944 if (rc == -ERESTART) { in ata_eh_reset()
2950 if (try == max_tries - 1) { in ata_eh_reset()
2954 } else if (rc == -EPIPE) in ata_eh_reset()
2959 goto retry; in ata_eh_reset()
2980 * Additionally, all write accesses to &ap->park_req_pending in ata_eh_pull_park_action()
2985 * *all* devices on port ap have been pulled into the in ata_eh_pull_park_action()
2987 * park_req_pending.done is non-zero by the time we reach in ata_eh_pull_park_action()
2994 spin_lock_irqsave(ap->lock, flags); in ata_eh_pull_park_action()
2995 reinit_completion(&ap->park_req_pending); in ata_eh_pull_park_action()
2997 ata_for_each_dev(dev, link, ALL) { in ata_eh_pull_park_action()
2998 struct ata_eh_info *ehi = &link->eh_info; in ata_eh_pull_park_action()
3000 link->eh_context.i.dev_action[dev->devno] |= in ata_eh_pull_park_action()
3001 ehi->dev_action[dev->devno] & ATA_EH_PARK; in ata_eh_pull_park_action()
3005 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_pull_park_action()
3010 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_park_issue_cmd()
3016 ehc->unloaded_mask |= 1 << dev->devno; in ata_eh_park_issue_cmd()
3023 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3032 ehc->unloaded_mask &= ~(1 << dev->devno); in ata_eh_park_issue_cmd()
3039 struct ata_port *ap = link->ap; in ata_eh_revalidate_and_attach()
3040 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_revalidate_and_attach()
3047 * be done backwards such that PDIAG- is released by the slave in ata_eh_revalidate_and_attach()
3054 if (ehc->i.flags & ATA_EHI_DID_RESET) in ata_eh_revalidate_and_attach()
3067 WARN_ON(dev->class == ATA_DEV_PMP); in ata_eh_revalidate_and_attach()
3077 * to ap->target_lpm_policy after revalidation is done. in ata_eh_revalidate_and_attach()
3079 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_revalidate_and_attach()
3087 rc = -EIO; in ata_eh_revalidate_and_attach()
3092 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], in ata_eh_revalidate_and_attach()
3102 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3105 schedule_delayed_work(&ap->scsi_rescan_task, 0); in ata_eh_revalidate_and_attach()
3106 } else if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_revalidate_and_attach()
3107 ehc->tries[dev->devno] && in ata_eh_revalidate_and_attach()
3108 ata_class_enabled(ehc->classes[dev->devno])) { in ata_eh_revalidate_and_attach()
3109 /* Temporarily set dev->class, it will be in ata_eh_revalidate_and_attach()
3110 * permanently set once all configurations are in ata_eh_revalidate_and_attach()
3115 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3117 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3120 rc = ata_dev_read_id(dev, &dev->class, in ata_eh_revalidate_and_attach()
3121 readid_flags, dev->id); in ata_eh_revalidate_and_attach()
3124 ehc->classes[dev->devno] = dev->class; in ata_eh_revalidate_and_attach()
3125 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3130 ata_ering_clear(&dev->ering); in ata_eh_revalidate_and_attach()
3131 new_mask |= 1 << dev->devno; in ata_eh_revalidate_and_attach()
3133 case -ENOENT: in ata_eh_revalidate_and_attach()
3134 /* IDENTIFY was issued to non-existent in ata_eh_revalidate_and_attach()
3146 /* PDIAG- should have been released, ask cable type if post-reset */ in ata_eh_revalidate_and_attach()
3147 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { in ata_eh_revalidate_and_attach()
3148 if (ap->ops->cable_detect) in ata_eh_revalidate_and_attach()
3149 ap->cbl = ap->ops->cable_detect(ap); in ata_eh_revalidate_and_attach()
3156 ata_for_each_dev(dev, link, ALL) { in ata_eh_revalidate_and_attach()
3157 if (!(new_mask & (1 << dev->devno))) in ata_eh_revalidate_and_attach()
3160 dev->class = ehc->classes[dev->devno]; in ata_eh_revalidate_and_attach()
3162 if (dev->class == ATA_DEV_PMP) in ata_eh_revalidate_and_attach()
3165 ehc->i.flags |= ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3167 ehc->i.flags &= ~ATA_EHI_PRINTINFO; in ata_eh_revalidate_and_attach()
3169 dev->class = ATA_DEV_UNKNOWN; in ata_eh_revalidate_and_attach()
3173 spin_lock_irqsave(ap->lock, flags); in ata_eh_revalidate_and_attach()
3174 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; in ata_eh_revalidate_and_attach()
3175 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_revalidate_and_attach()
3178 ehc->i.flags |= ATA_EHI_SETMODE; in ata_eh_revalidate_and_attach()
3184 dev->flags &= ~ATA_DFLAG_RESUMING; in ata_eh_revalidate_and_attach()
3190 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3206 struct ata_port *ap = link->ap; in ata_set_mode()
3212 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { in ata_set_mode()
3215 ent = ata_ering_top(&dev->ering); in ata_set_mode()
3217 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; in ata_set_mode()
3222 if (ap->ops->set_mode) in ata_set_mode()
3223 rc = ap->ops->set_mode(link, r_failed_dev); in ata_set_mode()
3229 struct ata_eh_context *ehc = &link->eh_context; in ata_set_mode()
3230 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; in ata_set_mode()
3231 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); in ata_set_mode()
3233 if (dev->xfer_mode != saved_xfer_mode || in ata_set_mode()
3235 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; in ata_set_mode()
3242 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3253 * 0 on success, -errno on failure.
3260 u8 *sense_buffer = dev->link->ap->sector_buf; in atapi_eh_clear_ua()
3269 return -EIO; in atapi_eh_clear_ua()
3279 return -EIO; in atapi_eh_clear_ua()
3290 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3291 * @dev: ATA device which may need FLUSH retry
3300 * This function determines whether FLUSH failure retry is
3304 * 0 if EH can continue, -errno if EH needs to be repeated.
3308 struct ata_link *link = dev->link; in ata_eh_maybe_retry_flush()
3309 struct ata_port *ap = link->ap; in ata_eh_maybe_retry_flush()
3316 if (!ata_tag_valid(link->active_tag)) in ata_eh_maybe_retry_flush()
3319 qc = __ata_qc_from_tag(ap, link->active_tag); in ata_eh_maybe_retry_flush()
3320 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && in ata_eh_maybe_retry_flush()
3321 qc->tf.command != ATA_CMD_FLUSH)) in ata_eh_maybe_retry_flush()
3325 if (qc->err_mask & AC_ERR_DEV) in ata_eh_maybe_retry_flush()
3331 tf.command = qc->tf.command; in ata_eh_maybe_retry_flush()
3336 tf.command, qc->err_mask); in ata_eh_maybe_retry_flush()
3343 * Making sure retry is allowed at least once and in ata_eh_maybe_retry_flush()
3344 * retrying it should do the trick - whatever was in in ata_eh_maybe_retry_flush()
3348 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); in ata_eh_maybe_retry_flush()
3352 rc = -EIO; in ata_eh_maybe_retry_flush()
3356 qc->err_mask |= AC_ERR_DEV; in ata_eh_maybe_retry_flush()
3357 qc->result_tf = tf; in ata_eh_maybe_retry_flush()
3366 * ata_eh_set_lpm - configure SATA interface power management
3380 * 0 on success, -errno on failure.
3385 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; in ata_eh_set_lpm()
3386 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_set_lpm()
3388 enum ata_lpm_policy old_policy = link->lpm_policy; in ata_eh_set_lpm()
3389 bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; in ata_eh_set_lpm()
3396 (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) in ata_eh_set_lpm()
3406 bool hipm = ata_id_has_hipm(dev->id); in ata_eh_set_lpm()
3407 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm; in ata_eh_set_lpm()
3428 rc = -EIO; in ata_eh_set_lpm()
3435 rc = ap->ops->set_lpm(link, policy, hints); in ata_eh_set_lpm()
3436 if (!rc && ap->slave_link) in ata_eh_set_lpm()
3437 rc = ap->ops->set_lpm(ap->slave_link, policy, hints); in ata_eh_set_lpm()
3446 if (rc == -EOPNOTSUPP) { in ata_eh_set_lpm()
3447 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_set_lpm()
3458 link->lpm_policy = policy; in ata_eh_set_lpm()
3459 if (ap && ap->slave_link) in ata_eh_set_lpm()
3460 ap->slave_link->lpm_policy = policy; in ata_eh_set_lpm()
3465 ata_id_has_dipm(dev->id)) { in ata_eh_set_lpm()
3472 rc = -EIO; in ata_eh_set_lpm()
3478 link->last_lpm_change = jiffies; in ata_eh_set_lpm()
3479 link->flags |= ATA_LFLAG_CHANGED; in ata_eh_set_lpm()
3485 link->lpm_policy = old_policy; in ata_eh_set_lpm()
3486 if (ap && ap->slave_link) in ata_eh_set_lpm()
3487 ap->slave_link->lpm_policy = old_policy; in ata_eh_set_lpm()
3490 if (!dev || ehc->tries[dev->devno] <= 2) { in ata_eh_set_lpm()
3492 link->flags |= ATA_LFLAG_NO_LPM; in ata_eh_set_lpm()
3514 ata_for_each_dev(dev, link, ALL) in ata_link_nr_vacant()
3515 if (dev->class == ATA_DEV_UNKNOWN) in ata_link_nr_vacant()
3522 struct ata_port *ap = link->ap; in ata_eh_skip_recovery()
3523 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_skip_recovery()
3527 if (link->flags & ATA_LFLAG_DISABLED) in ata_eh_skip_recovery()
3531 if (ehc->i.flags & ATA_EHI_NO_RECOVERY) in ata_eh_skip_recovery()
3539 if ((ehc->i.action & ATA_EH_RESET) && in ata_eh_skip_recovery()
3540 !(ehc->i.flags & ATA_EHI_DID_RESET)) in ata_eh_skip_recovery()
3543 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ in ata_eh_skip_recovery()
3544 ata_for_each_dev(dev, link, ALL) { in ata_eh_skip_recovery()
3545 if (dev->class == ATA_DEV_UNKNOWN && in ata_eh_skip_recovery()
3546 ehc->classes[dev->devno] != ATA_DEV_NONE) in ata_eh_skip_recovery()
3559 if ((ent->eflags & ATA_EFLAG_OLD_ER) || in ata_count_probe_trials_cb()
3560 (ent->timestamp < now - min(now, interval))) in ata_count_probe_trials_cb()
3561 return -1; in ata_count_probe_trials_cb()
3569 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_schedule_probe()
3573 if (!(ehc->i.probe_mask & (1 << dev->devno)) || in ata_eh_schedule_probe()
3574 (ehc->did_probe_mask & (1 << dev->devno))) in ata_eh_schedule_probe()
3579 ehc->did_probe_mask |= (1 << dev->devno); in ata_eh_schedule_probe()
3580 ehc->i.action |= ATA_EH_RESET; in ata_eh_schedule_probe()
3581 ehc->saved_xfer_mode[dev->devno] = 0; in ata_eh_schedule_probe()
3582 ehc->saved_ncq_enabled &= ~(1 << dev->devno); in ata_eh_schedule_probe()
3585 if (link->lpm_policy > ATA_LPM_MAX_POWER) { in ata_eh_schedule_probe()
3587 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, in ata_eh_schedule_probe()
3607 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); in ata_eh_schedule_probe()
3608 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); in ata_eh_schedule_probe()
3618 struct ata_eh_context *ehc = &dev->link->eh_context; in ata_eh_handle_dev_fail()
3620 /* -EAGAIN from EH routine indicates retry without prejudice. in ata_eh_handle_dev_fail()
3623 if (err != -EAGAIN) in ata_eh_handle_dev_fail()
3624 ehc->tries[dev->devno]--; in ata_eh_handle_dev_fail()
3627 case -ENODEV: in ata_eh_handle_dev_fail()
3629 ehc->i.probe_mask |= (1 << dev->devno); in ata_eh_handle_dev_fail()
3631 case -EINVAL: in ata_eh_handle_dev_fail()
3633 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); in ata_eh_handle_dev_fail()
3635 case -EIO: in ata_eh_handle_dev_fail()
3636 if (ehc->tries[dev->devno] == 1) { in ata_eh_handle_dev_fail()
3641 if (dev->pio_mode > XFER_PIO_0) in ata_eh_handle_dev_fail()
3646 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { in ata_eh_handle_dev_fail()
3647 /* disable device if it has used up all its chances */ in ata_eh_handle_dev_fail()
3656 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_handle_dev_fail()
3657 memset(ehc->cmd_timeout_idx[dev->devno], 0, in ata_eh_handle_dev_fail()
3658 sizeof(ehc->cmd_timeout_idx[dev->devno])); in ata_eh_handle_dev_fail()
3663 ehc->i.action |= ATA_EH_RESET; in ata_eh_handle_dev_fail()
3669 * ata_eh_recover - recover host port after error
3680 * link's eh_context. This function executes all the operations
3688 * 0 on success, -errno on failure.
3702 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3704 /* re-enable link? */ in ata_eh_recover()
3705 if (ehc->i.action & ATA_EH_ENABLE_LINK) { in ata_eh_recover()
3707 spin_lock_irqsave(ap->lock, flags); in ata_eh_recover()
3708 link->flags &= ~ATA_LFLAG_DISABLED; in ata_eh_recover()
3709 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_recover()
3713 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3714 if (link->flags & ATA_LFLAG_NO_RETRY) in ata_eh_recover()
3715 ehc->tries[dev->devno] = 1; in ata_eh_recover()
3717 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; in ata_eh_recover()
3720 ehc->i.action |= ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3722 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; in ata_eh_recover()
3725 if (dev->flags & ATA_DFLAG_DETACH) in ata_eh_recover()
3734 retry: in ata_eh_recover()
3738 if (ap->pflags & ATA_PFLAG_UNLOADING) in ata_eh_recover()
3743 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3747 ehc->i.action = 0; in ata_eh_recover()
3749 ata_for_each_dev(dev, link, ALL) in ata_eh_recover()
3750 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; in ata_eh_recover()
3755 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3757 if (!(ehc->i.action & ATA_EH_RESET)) in ata_eh_recover()
3773 * ap->park_req_pending in ata_eh_recover()
3779 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3780 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3783 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3784 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3786 if (!(ehc->i.dev_action[dev->devno] & in ata_eh_recover()
3789 tmp = dev->unpark_deadline; in ata_eh_recover()
3794 if (ehc->unloaded_mask & (1 << dev->devno)) in ata_eh_recover()
3806 deadline = wait_for_completion_timeout(&ap->park_req_pending, in ata_eh_recover()
3807 deadline - now); in ata_eh_recover()
3811 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3812 if (!(link->eh_context.unloaded_mask & in ata_eh_recover()
3813 (1 << dev->devno))) in ata_eh_recover()
3824 struct ata_eh_context *ehc = &link->eh_context; in ata_eh_recover()
3835 if (link->device->class == ATA_DEV_PMP) { in ata_eh_recover()
3836 ehc->i.action = 0; in ata_eh_recover()
3841 if (ehc->i.flags & ATA_EHI_SETMODE) { in ata_eh_recover()
3845 ehc->i.flags &= ~ATA_EHI_SETMODE; in ata_eh_recover()
3851 if (ehc->i.flags & ATA_EHI_DID_RESET) { in ata_eh_recover()
3852 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3853 if (dev->class != ATA_DEV_ATAPI) in ata_eh_recover()
3863 /* retry flush if necessary */ in ata_eh_recover()
3864 ata_for_each_dev(dev, link, ALL) { in ata_eh_recover()
3865 if (dev->class != ATA_DEV_ATA && in ata_eh_recover()
3866 dev->class != ATA_DEV_ZAC) in ata_eh_recover()
3875 if (link->lpm_policy != ap->target_lpm_policy) { in ata_eh_recover()
3876 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); in ata_eh_recover()
3882 ehc->i.flags = 0; in ata_eh_recover()
3892 * Can't retry if it's frozen. in ata_eh_recover()
3901 goto retry; in ata_eh_recover()
3911 * ata_eh_finish - finish up EH
3914 * Recovery is complete. Clean up EH states and retry or finish
3925 /* retry or finish qcs */ in ata_eh_finish()
3927 if (!(qc->flags & ATA_QCFLAG_EH)) in ata_eh_finish()
3930 if (qc->err_mask) { in ata_eh_finish()
3935 if (qc->flags & ATA_QCFLAG_RETRY) { in ata_eh_finish()
3937 * Since qc->err_mask is set, ata_eh_qc_retry() in ata_eh_finish()
3938 * will not increment scmd->allowed, so upper in ata_eh_finish()
3939 * layer will only retry the command if it has in ata_eh_finish()
3947 if (qc->flags & ATA_QCFLAG_SENSE_VALID || in ata_eh_finish()
3948 qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) { in ata_eh_finish()
3952 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); in ata_eh_finish()
3954 * Since qc->err_mask is not set, in ata_eh_finish()
3956 * scmd->allowed, so upper layer is guaranteed in ata_eh_finish()
3957 * to retry the command. in ata_eh_finish()
3965 WARN_ON(ap->nr_active_links); in ata_eh_finish()
3966 ap->nr_active_links = 0; in ata_eh_finish()
3970 * ata_do_eh - do standard error handling
3996 ata_for_each_dev(dev, &ap->link, ALL) in ata_do_eh()
4004 * ata_std_error_handler - standard error handler
4014 struct ata_port_operations *ops = ap->ops; in ata_std_error_handler()
4015 ata_reset_fn_t hardreset = ops->hardreset; in ata_std_error_handler()
4017 /* ignore built-in hardreset if SCR access is not available */ in ata_std_error_handler()
4018 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) in ata_std_error_handler()
4021 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); in ata_std_error_handler()
4027 * ata_eh_handle_port_suspend - perform port suspend operation
4043 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4044 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_suspend()
4045 ap->pm_mesg.event & PM_EVENT_RESUME) { in ata_eh_handle_port_suspend()
4046 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4049 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4051 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_suspend()
4054 * We will reach this point for all of the PM events: in ata_eh_handle_port_suspend()
4061 if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) { in ata_eh_handle_port_suspend()
4062 /* Set all devices attached to the port in standby mode */ in ata_eh_handle_port_suspend()
4074 if (PMSG_IS_AUTO(ap->pm_mesg)) { in ata_eh_handle_port_suspend()
4075 ata_for_each_dev(dev, &ap->link, ENABLED) { in ata_eh_handle_port_suspend()
4084 if (ap->ops->port_suspend) in ata_eh_handle_port_suspend()
4085 rc = ap->ops->port_suspend(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4087 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_suspend()
4090 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_suspend()
4092 ap->pflags &= ~ATA_PFLAG_PM_PENDING; in ata_eh_handle_port_suspend()
4094 ap->pflags |= ATA_PFLAG_SUSPENDED; in ata_eh_handle_port_suspend()
4098 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_suspend()
4104 * ata_eh_handle_port_resume - perform port resume operation
4119 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4120 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || in ata_eh_handle_port_resume()
4121 !(ap->pm_mesg.event & PM_EVENT_RESUME)) { in ata_eh_handle_port_resume()
4122 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4125 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()
4127 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); in ata_eh_handle_port_resume()
4137 ata_for_each_dev(dev, link, ALL) in ata_eh_handle_port_resume()
4138 ata_ering_clear(&dev->ering); in ata_eh_handle_port_resume()
4140 ata_acpi_set_state(ap, ap->pm_mesg); in ata_eh_handle_port_resume()
4142 if (ap->ops->port_resume) in ata_eh_handle_port_resume()
4143 ap->ops->port_resume(ap); in ata_eh_handle_port_resume()
4149 spin_lock_irqsave(ap->lock, flags); in ata_eh_handle_port_resume()
4150 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); in ata_eh_handle_port_resume()
4151 ap->pflags |= ATA_PFLAG_RESUMING; in ata_eh_handle_port_resume()
4152 spin_unlock_irqrestore(ap->lock, flags); in ata_eh_handle_port_resume()