xref: /openbmc/linux/drivers/ata/libata-eh.c (revision d3597236)
1 /*
2  *  libata-eh.c - libata error handling
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9  *
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License as
13  *  published by the Free Software Foundation; either version 2, or
14  *  (at your option) any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  *  General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24  *  USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_dbg.h>
45 #include "../scsi/scsi_transport_api.h"
46 
47 #include <linux/libata.h>
48 
49 #include <trace/events/libata.h>
50 #include "libata.h"
51 
52 enum {
53 	/* speed down verdicts */
54 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
55 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
56 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
57 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
58 
59 	/* error flags */
60 	ATA_EFLAG_IS_IO			= (1 << 0),
61 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
62 	ATA_EFLAG_OLD_ER                = (1 << 31),
63 
64 	/* error categories */
65 	ATA_ECAT_NONE			= 0,
66 	ATA_ECAT_ATA_BUS		= 1,
67 	ATA_ECAT_TOUT_HSM		= 2,
68 	ATA_ECAT_UNK_DEV		= 3,
69 	ATA_ECAT_DUBIOUS_NONE		= 4,
70 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
71 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
72 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
73 	ATA_ECAT_NR			= 8,
74 
75 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
76 
77 	/* always put at least this amount of time between resets */
78 	ATA_EH_RESET_COOL_DOWN		=  5000,
79 
80 	/* Waiting in ->prereset can never be reliable.  It's
81 	 * sometimes nice to wait there but it can't be depended upon;
82 	 * otherwise, we wouldn't be resetting.  Just give it enough
83 	 * time for most drives to spin up.
84 	 */
85 	ATA_EH_PRERESET_TIMEOUT		= 10000,
86 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
87 
88 	ATA_EH_UA_TRIES			= 5,
89 
90 	/* probe speed down parameters, see ata_eh_schedule_probe() */
91 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
92 	ATA_EH_PROBE_TRIALS		= 2,
93 };
94 
95 /* The following table determines how we sequence resets.  Each entry
96  * represents timeout for that try.  The first try can be soft or
97  * hardreset.  All others are hardreset if available.  In most cases
98  * the first reset w/ 10sec timeout should succeed.  Following entries
99  * are mostly for error handling, hotplug and those outlier devices that
100  * take an exceptionally long time to recover from reset.
101  */
102 static const unsigned long ata_eh_reset_timeouts[] = {
103 	10000,	/* most drives spin up by 10sec */
104 	10000,	/* > 99% working drives spin up before 20sec */
105 	35000,	/* give > 30 secs of idleness for outlier devices */
106 	 5000,	/* and sweet one last chance */
107 	ULONG_MAX, /* > 1 min has elapsed, give up */
108 };
109 
110 static const unsigned long ata_eh_identify_timeouts[] = {
111 	 5000,	/* covers > 99% of successes and not too boring on failures */
112 	10000,  /* combined time till here is enough even for media access */
113 	30000,	/* for true idiots */
114 	ULONG_MAX,
115 };
116 
117 static const unsigned long ata_eh_flush_timeouts[] = {
118 	15000,	/* be generous with flush */
119 	15000,  /* ditto */
120 	30000,	/* and even more generous */
121 	ULONG_MAX,
122 };
123 
124 static const unsigned long ata_eh_other_timeouts[] = {
125 	 5000,	/* same rationale as identify timeout */
126 	10000,	/* ditto */
127 	/* but no merciful 30sec for other commands; it just isn't worth it */
128 	ULONG_MAX,
129 };
130 
131 struct ata_eh_cmd_timeout_ent {
132 	const u8		*commands;
133 	const unsigned long	*timeouts;
134 };
135 
136 /* The following table determines timeouts to use for EH internal
137  * commands.  Each table entry is a command class and matches the
138  * commands the entry applies to and the timeout table to use.
139  *
140  * On the retry after a command timed out, the next timeout value from
141  * the table is used.  If the table doesn't contain further entries,
142  * the last value is used.
143  *
144  * ehc->cmd_timeout_idx keeps track of which timeout to use per
145  * command class, so if SET_FEATURES times out on the first try, the
146  * next try will use the second timeout value only for that class.
147  */
148 #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
149 static const struct ata_eh_cmd_timeout_ent
150 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
151 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
152 	  .timeouts = ata_eh_identify_timeouts, },
153 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
154 	  .timeouts = ata_eh_other_timeouts, },
155 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
156 	  .timeouts = ata_eh_other_timeouts, },
157 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
158 	  .timeouts = ata_eh_other_timeouts, },
159 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
160 	  .timeouts = ata_eh_other_timeouts, },
161 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
162 	  .timeouts = ata_eh_flush_timeouts },
163 };
164 #undef CMDS
165 
166 static void __ata_port_freeze(struct ata_port *ap);
167 #ifdef CONFIG_PM
168 static void ata_eh_handle_port_suspend(struct ata_port *ap);
169 static void ata_eh_handle_port_resume(struct ata_port *ap);
170 #else /* CONFIG_PM */
171 static void ata_eh_handle_port_suspend(struct ata_port *ap)
172 { }
173 
174 static void ata_eh_handle_port_resume(struct ata_port *ap)
175 { }
176 #endif /* CONFIG_PM */
177 
178 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
179 				 va_list args)
180 {
181 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
182 				     ATA_EH_DESC_LEN - ehi->desc_len,
183 				     fmt, args);
184 }
185 
186 /**
187  *	__ata_ehi_push_desc - push error description without adding separator
188  *	@ehi: target EHI
189  *	@fmt: printf format string
190  *
191  *	Format string according to @fmt and append it to @ehi->desc.
192  *
193  *	LOCKING:
194  *	spin_lock_irqsave(host lock)
195  */
196 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
197 {
198 	va_list args;
199 
200 	va_start(args, fmt);
201 	__ata_ehi_pushv_desc(ehi, fmt, args);
202 	va_end(args);
203 }
204 
205 /**
206  *	ata_ehi_push_desc - push error description with separator
207  *	@ehi: target EHI
208  *	@fmt: printf format string
209  *
210  *	Format string according to @fmt and append it to @ehi->desc.
211  *	If @ehi->desc is not empty, ", " is added in-between.
212  *
213  *	LOCKING:
214  *	spin_lock_irqsave(host lock)
215  */
216 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
217 {
218 	va_list args;
219 
220 	if (ehi->desc_len)
221 		__ata_ehi_push_desc(ehi, ", ");
222 
223 	va_start(args, fmt);
224 	__ata_ehi_pushv_desc(ehi, fmt, args);
225 	va_end(args);
226 }
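
/*
 * Example (an illustrative sketch, not code used in this file): an
 * interrupt handler can build up a description with successive calls
 * while holding the host lock, e.g.
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "connection status changed");
 *
 * which yields "irq_stat 0x00400040, connection status changed", the
 * ", " separator being added by the second call (irq_stat here is a
 * driver-local status word).
 */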
227 
228 /**
229  *	ata_ehi_clear_desc - clean error description
230  *	@ehi: target EHI
231  *
232  *	Clear @ehi->desc.
233  *
234  *	LOCKING:
235  *	spin_lock_irqsave(host lock)
236  */
237 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
238 {
239 	ehi->desc[0] = '\0';
240 	ehi->desc_len = 0;
241 }
242 
243 /**
244  *	ata_port_desc - append port description
245  *	@ap: target ATA port
246  *	@fmt: printf format string
247  *
248  *	Format string according to @fmt and append it to port
249  *	description.  If port description is not empty, " " is added
250  *	in-between.  This function is to be used while initializing
251  *	ata_host.  The description is printed on host registration.
252  *
253  *	LOCKING:
254  *	None.
255  */
256 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
257 {
258 	va_list args;
259 
260 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
261 
262 	if (ap->link.eh_info.desc_len)
263 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
264 
265 	va_start(args, fmt);
266 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
267 	va_end(args);
268 }
269 
270 #ifdef CONFIG_PCI
271 
272 /**
273  *	ata_port_pbar_desc - append PCI BAR description
274  *	@ap: target ATA port
275  *	@bar: target PCI BAR
276  *	@offset: offset into PCI BAR
277  *	@name: name of the area
278  *
279  *	If @offset is negative, this function formats a string which
280  *	contains the name, address, size and type of the BAR and
281  *	appends it to the port description.  If @offset is zero or
282  *	positive, only the name and offset address are appended.
283  *
284  *	LOCKING:
285  *	None.
286  */
287 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
288 			const char *name)
289 {
290 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
291 	char *type = "";
292 	unsigned long long start, len;
293 
294 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
295 		type = "m";
296 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
297 		type = "i";
298 
299 	start = (unsigned long long)pci_resource_start(pdev, bar);
300 	len = (unsigned long long)pci_resource_len(pdev, bar);
301 
302 	if (offset < 0)
303 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
304 	else
305 		ata_port_desc(ap, "%s 0x%llx", name,
306 				start + (unsigned long long)offset);
307 }
308 
309 #endif /* CONFIG_PCI */
310 
311 static int ata_lookup_timeout_table(u8 cmd)
312 {
313 	int i;
314 
315 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
316 		const u8 *cur;
317 
318 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
319 			if (*cur == cmd)
320 				return i;
321 	}
322 
323 	return -1;
324 }
325 
326 /**
327  *	ata_internal_cmd_timeout - determine timeout for an internal command
328  *	@dev: target device
329  *	@cmd: internal command to be issued
330  *
331  *	Determine timeout for internal command @cmd for @dev.
332  *
333  *	LOCKING:
334  *	EH context.
335  *
336  *	RETURNS:
337  *	Determined timeout.
338  */
339 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
340 {
341 	struct ata_eh_context *ehc = &dev->link->eh_context;
342 	int ent = ata_lookup_timeout_table(cmd);
343 	int idx;
344 
345 	if (ent < 0)
346 		return ATA_EH_CMD_DFL_TIMEOUT;
347 
348 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
349 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
350 }
351 
352 /**
353  *	ata_internal_cmd_timed_out - notification for internal command timeout
354  *	@dev: target device
355  *	@cmd: internal command which timed out
356  *
357  *	Notify EH that internal command @cmd for @dev timed out.  This
358  *	function should be called only for commands whose timeouts are
359  *	determined using ata_internal_cmd_timeout().
360  *
361  *	LOCKING:
362  *	EH context.
363  */
364 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
365 {
366 	struct ata_eh_context *ehc = &dev->link->eh_context;
367 	int ent = ata_lookup_timeout_table(cmd);
368 	int idx;
369 
370 	if (ent < 0)
371 		return;
372 
373 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
374 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
375 		ehc->cmd_timeout_idx[dev->devno][ent]++;
376 }
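
/*
 * A minimal sketch of the intended calling pattern for the two helpers
 * above (compare ata_exec_internal() in libata-core.c, which does this
 * automatically): fetch the current timeout before issuing, and report
 * a timeout afterwards so that the next attempt for the same command
 * class escalates, e.g. 5s -> 10s -> 30s for IDENTIFY.
 *
 *	unsigned long timeout = ata_internal_cmd_timeout(dev, cmd);
 *
 *	err_mask = issue_cmd(dev, cmd, timeout);
 *	if (err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, cmd);
 *
 * where issue_cmd() is a stand-in for the actual issue path.
 */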
377 
378 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
379 			     unsigned int err_mask)
380 {
381 	struct ata_ering_entry *ent;
382 
383 	WARN_ON(!err_mask);
384 
385 	ering->cursor++;
386 	ering->cursor %= ATA_ERING_SIZE;
387 
388 	ent = &ering->ring[ering->cursor];
389 	ent->eflags = eflags;
390 	ent->err_mask = err_mask;
391 	ent->timestamp = get_jiffies_64();
392 }
393 
394 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
395 {
396 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
397 
398 	if (ent->err_mask)
399 		return ent;
400 	return NULL;
401 }
402 
403 int ata_ering_map(struct ata_ering *ering,
404 		  int (*map_fn)(struct ata_ering_entry *, void *),
405 		  void *arg)
406 {
407 	int idx, rc = 0;
408 	struct ata_ering_entry *ent;
409 
410 	idx = ering->cursor;
411 	do {
412 		ent = &ering->ring[idx];
413 		if (!ent->err_mask)
414 			break;
415 		rc = map_fn(ent, arg);
416 		if (rc)
417 			break;
418 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
419 	} while (idx != ering->cursor);
420 
421 	return rc;
422 }
423 
424 static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
425 {
426 	ent->eflags |= ATA_EFLAG_OLD_ER;
427 	return 0;
428 }
429 
430 static void ata_ering_clear(struct ata_ering *ering)
431 {
432 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
433 }
434 
435 static unsigned int ata_eh_dev_action(struct ata_device *dev)
436 {
437 	struct ata_eh_context *ehc = &dev->link->eh_context;
438 
439 	return ehc->i.action | ehc->i.dev_action[dev->devno];
440 }
441 
442 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
443 				struct ata_eh_info *ehi, unsigned int action)
444 {
445 	struct ata_device *tdev;
446 
447 	if (!dev) {
448 		ehi->action &= ~action;
449 		ata_for_each_dev(tdev, link, ALL)
450 			ehi->dev_action[tdev->devno] &= ~action;
451 	} else {
452 		/* doesn't make sense for port-wide EH actions */
453 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
454 
455 		/* break ehi->action into ehi->dev_action */
456 		if (ehi->action & action) {
457 			ata_for_each_dev(tdev, link, ALL)
458 				ehi->dev_action[tdev->devno] |=
459 					ehi->action & action;
460 			ehi->action &= ~action;
461 		}
462 
463 		/* turn off the specified per-dev action */
464 		ehi->dev_action[dev->devno] &= ~action;
465 	}
466 }
467 
468 /**
469  *	ata_eh_acquire - acquire EH ownership
470  *	@ap: ATA port to acquire EH ownership for
471  *
472  *	Acquire EH ownership for @ap.  This is the basic exclusion
473  *	mechanism for ports sharing a host.  Only one port hanging off
474  *	the same host can claim the ownership of EH.
475  *
476  *	LOCKING:
477  *	EH context.
478  */
479 void ata_eh_acquire(struct ata_port *ap)
480 {
481 	mutex_lock(&ap->host->eh_mutex);
482 	WARN_ON_ONCE(ap->host->eh_owner);
483 	ap->host->eh_owner = current;
484 }
485 
486 /**
487  *	ata_eh_release - release EH ownership
488  *	@ap: ATA port to release EH ownership for
489  *	Release EH ownership for @ap.  The caller must have acquired
490  *	EH ownership using ata_eh_acquire() previously.
491  *	have acquired EH ownership using ata_eh_acquire() previously.
492  *
493  *	LOCKING:
494  *	EH context.
495  */
496 void ata_eh_release(struct ata_port *ap)
497 {
498 	WARN_ON_ONCE(ap->host->eh_owner != current);
499 	ap->host->eh_owner = NULL;
500 	mutex_unlock(&ap->host->eh_mutex);
501 }
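
/*
 * A minimal usage sketch: EH for all ports hanging off the same host is
 * serialized by this pair, and long sleeps inside EH should temporarily
 * drop ownership so sibling ports can make progress (ata_msleep() does
 * exactly that internally).
 *
 *	ata_eh_acquire(ap);
 *	... recover the port ...
 *	ata_eh_release(ap);
 */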
502 
503 /**
504  *	ata_scsi_timed_out - SCSI layer time out callback
505  *	@cmd: timed out SCSI command
506  *
507  *	Handles SCSI layer timeout.  We race with normal completion of
508  *	the qc for @cmd.  If the qc is already gone, we lose and let
509  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
510  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
511  *	from finishing it by setting EH_SCHEDULED and return
512  *	EH_NOT_HANDLED.
513  *
514  *	TODO: kill this function once old EH is gone.
515  *
516  *	LOCKING:
517  *	Called from timer context
518  *
519  *	RETURNS:
520  *	EH_HANDLED or EH_NOT_HANDLED
521  */
522 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
523 {
524 	struct Scsi_Host *host = cmd->device->host;
525 	struct ata_port *ap = ata_shost_to_port(host);
526 	unsigned long flags;
527 	struct ata_queued_cmd *qc;
528 	enum blk_eh_timer_return ret;
529 
530 	DPRINTK("ENTER\n");
531 
532 	if (ap->ops->error_handler) {
533 		ret = BLK_EH_NOT_HANDLED;
534 		goto out;
535 	}
536 
537 	ret = BLK_EH_HANDLED;
538 	spin_lock_irqsave(ap->lock, flags);
539 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
540 	if (qc) {
541 		WARN_ON(qc->scsicmd != cmd);
542 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
543 		qc->err_mask |= AC_ERR_TIMEOUT;
544 		ret = BLK_EH_NOT_HANDLED;
545 	}
546 	spin_unlock_irqrestore(ap->lock, flags);
547 
548  out:
549 	DPRINTK("EXIT, ret=%d\n", ret);
550 	return ret;
551 }
552 
553 static void ata_eh_unload(struct ata_port *ap)
554 {
555 	struct ata_link *link;
556 	struct ata_device *dev;
557 	unsigned long flags;
558 
559 	/* Restore SControl IPM and SPD for the next driver and
560 	 * disable attached devices.
561 	 */
562 	ata_for_each_link(link, ap, PMP_FIRST) {
563 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
564 		ata_for_each_dev(dev, link, ALL)
565 			ata_dev_disable(dev);
566 	}
567 
568 	/* freeze and set UNLOADED */
569 	spin_lock_irqsave(ap->lock, flags);
570 
571 	ata_port_freeze(ap);			/* won't be thawed */
572 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
573 	ap->pflags |= ATA_PFLAG_UNLOADED;
574 
575 	spin_unlock_irqrestore(ap->lock, flags);
576 }
577 
578 /**
579  *	ata_scsi_error - SCSI layer error handler callback
580  *	@host: SCSI host on which error occurred
581  *
582  *	Handles SCSI-layer-thrown error events.
583  *
584  *	LOCKING:
585  *	Inherited from SCSI layer (none, can sleep)
586  *
587  *	RETURNS:
588  *	Zero.
589  */
590 void ata_scsi_error(struct Scsi_Host *host)
591 {
592 	struct ata_port *ap = ata_shost_to_port(host);
593 	unsigned long flags;
594 	LIST_HEAD(eh_work_q);
595 
596 	DPRINTK("ENTER\n");
597 
598 	spin_lock_irqsave(host->host_lock, flags);
599 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
600 	spin_unlock_irqrestore(host->host_lock, flags);
601 
602 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
603 
604 	/* If we raced normal completion and there is nothing to recover
605 	   (nr_timedout == 0), why exactly are we doing error recovery? */
606 	ata_scsi_port_error_handler(host, ap);
607 
608 	/* finish or retry handled scmd's and clean up */
609 	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
610 
611 	DPRINTK("EXIT\n");
612 }
613 
614 /**
615  * ata_scsi_cmd_error_handler - error callback for a list of commands
616  * @host:	scsi host containing the port
617  * @ap:		ATA port within the host
618  * @eh_work_q:	list of commands to process
619  *
620  * process the given list of commands and return those finished to the
621  * ap->eh_done_q.  This function is the first part of the libata error
622  * handler which processes a given list of failed commands.
623  */
624 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
625 				struct list_head *eh_work_q)
626 {
627 	int i;
628 	unsigned long flags;
629 
630 	/* make sure sff pio task is not running */
631 	ata_sff_flush_pio_task(ap);
632 
633 	/* synchronize with host lock and sort out timeouts */
634 
635 	/* For new EH, all qcs are finished in one of three ways -
636 	 * normal completion, error completion, and SCSI timeout.
637 	 * Both completions can race against SCSI timeout.  When normal
638 	 * completion wins, the qc never reaches EH.  When error
639 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
640 	 *
641 	 * When SCSI timeout wins, things are a bit more complex.
642 	 * Normal or error completion can occur after the timeout but
643 	 * before this point.  In such cases, both types of
644 	 * completions are honored.  A scmd is determined to have
645 	 * timed out iff its associated qc is active and not failed.
646 	 */
647 	if (ap->ops->error_handler) {
648 		struct scsi_cmnd *scmd, *tmp;
649 		int nr_timedout = 0;
650 
651 		spin_lock_irqsave(ap->lock, flags);
652 
653 		/* This must occur under the ap->lock as we don't want
654 		   a polled recovery to race the real interrupt handler.
655 
656 		   The lost_interrupt handler checks for any completed but
657 		   non-notified command and completes it much like an IRQ handler.
658 
659 		   We then fall into the error recovery code which will treat
660 		   this as if normal completion won the race. */
661 
662 		if (ap->ops->lost_interrupt)
663 			ap->ops->lost_interrupt(ap);
664 
665 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
666 			struct ata_queued_cmd *qc;
667 
668 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
669 				qc = __ata_qc_from_tag(ap, i);
670 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
671 				    qc->scsicmd == scmd)
672 					break;
673 			}
674 
675 			if (i < ATA_MAX_QUEUE) {
676 				/* the scmd has an associated qc */
677 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
678 					/* which hasn't failed yet, timeout */
679 					qc->err_mask |= AC_ERR_TIMEOUT;
680 					qc->flags |= ATA_QCFLAG_FAILED;
681 					nr_timedout++;
682 				}
683 			} else {
684 				/* Normal completion occurred after
685 				 * SCSI timeout but before this point.
686 				 * Successfully complete it.
687 				 */
688 				scmd->retries = scmd->allowed;
689 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
690 			}
691 		}
692 
693 		/* If we have timed out qcs, they belong to EH from
694 		 * this point but the state of the controller is
695 		 * unknown.  Freeze the port to make sure the IRQ
696 		 * handler doesn't diddle with those qcs.  This must
697 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
698 		 */
699 		if (nr_timedout)
700 			__ata_port_freeze(ap);
701 
702 		spin_unlock_irqrestore(ap->lock, flags);
703 
704 		/* initialize eh_tries */
705 		ap->eh_tries = ATA_EH_MAX_TRIES;
706 	} else
707 		spin_unlock_wait(ap->lock);
708 
709 }
710 EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
711 
712 /**
713  * ata_scsi_port_error_handler - recover the port after the commands
714  * @host:	SCSI host containing the port
715  * @ap:		the ATA port
716  *
717  * Handle the recovery of the port @ap after all the commands
718  * have been recovered.
719  */
720 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
721 {
722 	unsigned long flags;
723 
724 	/* invoke error handler */
725 	if (ap->ops->error_handler) {
726 		struct ata_link *link;
727 
728 		/* acquire EH ownership */
729 		ata_eh_acquire(ap);
730  repeat:
731 		/* kill fast drain timer */
732 		del_timer_sync(&ap->fastdrain_timer);
733 
734 		/* process port resume request */
735 		ata_eh_handle_port_resume(ap);
736 
737 		/* fetch & clear EH info */
738 		spin_lock_irqsave(ap->lock, flags);
739 
740 		ata_for_each_link(link, ap, HOST_FIRST) {
741 			struct ata_eh_context *ehc = &link->eh_context;
742 			struct ata_device *dev;
743 
744 			memset(&link->eh_context, 0, sizeof(link->eh_context));
745 			link->eh_context.i = link->eh_info;
746 			memset(&link->eh_info, 0, sizeof(link->eh_info));
747 
748 			ata_for_each_dev(dev, link, ENABLED) {
749 				int devno = dev->devno;
750 
751 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
752 				if (ata_ncq_enabled(dev))
753 					ehc->saved_ncq_enabled |= 1 << devno;
754 			}
755 		}
756 
757 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
758 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
759 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
760 
761 		spin_unlock_irqrestore(ap->lock, flags);
762 
763 		/* invoke EH, skip if unloading or suspended */
764 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
765 			ap->ops->error_handler(ap);
766 		else {
767 			/* if unloading, commence suicide */
768 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
769 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
770 				ata_eh_unload(ap);
771 			ata_eh_finish(ap);
772 		}
773 
774 		/* process port suspend request */
775 		ata_eh_handle_port_suspend(ap);
776 
777 		/* Exception might have happened after ->error_handler
778 		 * recovered the port but before this point.  Repeat
779 		 * EH in such case.
780 		 */
781 		spin_lock_irqsave(ap->lock, flags);
782 
783 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
784 			if (--ap->eh_tries) {
785 				spin_unlock_irqrestore(ap->lock, flags);
786 				goto repeat;
787 			}
788 			ata_port_err(ap,
789 				     "EH pending after %d tries, giving up\n",
790 				     ATA_EH_MAX_TRIES);
791 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
792 		}
793 
794 		/* this run is complete, make sure EH info is clear */
795 		ata_for_each_link(link, ap, HOST_FIRST)
796 			memset(&link->eh_info, 0, sizeof(link->eh_info));
797 
798 		/* end eh (clear host_eh_scheduled) while holding
799 		 * ap->lock such that if exception occurs after this
800 		 * point but before EH completion, SCSI midlayer will
801 		 * re-initiate EH.
802 		 */
803 		ap->ops->end_eh(ap);
804 
805 		spin_unlock_irqrestore(ap->lock, flags);
806 		ata_eh_release(ap);
807 	} else {
808 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
809 		ap->ops->eng_timeout(ap);
810 	}
811 
812 	scsi_eh_flush_done_q(&ap->eh_done_q);
813 
814 	/* clean up */
815 	spin_lock_irqsave(ap->lock, flags);
816 
817 	if (ap->pflags & ATA_PFLAG_LOADING)
818 		ap->pflags &= ~ATA_PFLAG_LOADING;
819 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
820 		schedule_delayed_work(&ap->hotplug_task, 0);
821 
822 	if (ap->pflags & ATA_PFLAG_RECOVERED)
823 		ata_port_info(ap, "EH complete\n");
824 
825 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
826 
827 	/* tell wait_eh that we're done */
828 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
829 	wake_up_all(&ap->eh_wait_q);
830 
831 	spin_unlock_irqrestore(ap->lock, flags);
832 }
833 EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
834 
835 /**
836  *	ata_port_wait_eh - Wait for the currently pending EH to complete
837  *	@ap: Port to wait EH for
838  *
839  *	Wait until the currently pending EH is complete.
840  *
841  *	LOCKING:
842  *	Kernel thread context (may sleep).
843  */
844 void ata_port_wait_eh(struct ata_port *ap)
845 {
846 	unsigned long flags;
847 	DEFINE_WAIT(wait);
848 
849  retry:
850 	spin_lock_irqsave(ap->lock, flags);
851 
852 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
853 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
854 		spin_unlock_irqrestore(ap->lock, flags);
855 		schedule();
856 		spin_lock_irqsave(ap->lock, flags);
857 	}
858 	finish_wait(&ap->eh_wait_q, &wait);
859 
860 	spin_unlock_irqrestore(ap->lock, flags);
861 
862 	/* make sure SCSI EH is complete */
863 	if (scsi_host_in_recovery(ap->scsi_host)) {
864 		ata_msleep(ap, 10);
865 		goto retry;
866 	}
867 }
868 EXPORT_SYMBOL_GPL(ata_port_wait_eh);
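
/*
 * Sketch of the common "kick EH and wait for it" sequence: schedule EH
 * while holding ap->lock, then sleep until both ATA_PFLAG_EH_PENDING
 * and ATA_PFLAG_EH_IN_PROGRESS are clear.
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 *	ata_port_wait_eh(ap);
 */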
869 
870 static int ata_eh_nr_in_flight(struct ata_port *ap)
871 {
872 	unsigned int tag;
873 	int nr = 0;
874 
875 	/* count only non-internal commands */
876 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
877 		if (ata_qc_from_tag(ap, tag))
878 			nr++;
879 
880 	return nr;
881 }
882 
883 void ata_eh_fastdrain_timerfn(unsigned long arg)
884 {
885 	struct ata_port *ap = (void *)arg;
886 	unsigned long flags;
887 	int cnt;
888 
889 	spin_lock_irqsave(ap->lock, flags);
890 
891 	cnt = ata_eh_nr_in_flight(ap);
892 
893 	/* are we done? */
894 	if (!cnt)
895 		goto out_unlock;
896 
897 	if (cnt == ap->fastdrain_cnt) {
898 		unsigned int tag;
899 
900 		/* No progress during the last interval, tag all
901 		 * in-flight qcs as timed out and freeze the port.
902 		 */
903 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
904 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
905 			if (qc)
906 				qc->err_mask |= AC_ERR_TIMEOUT;
907 		}
908 
909 		ata_port_freeze(ap);
910 	} else {
911 		/* some qcs have finished, give it another chance */
912 		ap->fastdrain_cnt = cnt;
913 		ap->fastdrain_timer.expires =
914 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
915 		add_timer(&ap->fastdrain_timer);
916 	}
917 
918  out_unlock:
919 	spin_unlock_irqrestore(ap->lock, flags);
920 }
921 
922 /**
923  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
924  *	@ap: target ATA port
925  *	@fastdrain: activate fast drain
926  *
927  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
928  *	is non-zero and EH wasn't pending before.  Fast drain ensures
929  *	that EH kicks in in a timely manner.
930  *
931  *	LOCKING:
932  *	spin_lock_irqsave(host lock)
933  */
934 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
935 {
936 	int cnt;
937 
938 	/* already scheduled? */
939 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
940 		return;
941 
942 	ap->pflags |= ATA_PFLAG_EH_PENDING;
943 
944 	if (!fastdrain)
945 		return;
946 
947 	/* do we have in-flight qcs? */
948 	cnt = ata_eh_nr_in_flight(ap);
949 	if (!cnt)
950 		return;
951 
952 	/* activate fast drain */
953 	ap->fastdrain_cnt = cnt;
954 	ap->fastdrain_timer.expires =
955 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
956 	add_timer(&ap->fastdrain_timer);
957 }
958 
959 /**
960  *	ata_qc_schedule_eh - schedule qc for error handling
961  *	@qc: command to schedule error handling for
962  *
963  *	Schedule error handling for @qc.  EH will kick in as soon as
964  *	other commands are drained.
965  *
966  *	LOCKING:
967  *	spin_lock_irqsave(host lock)
968  */
969 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
970 {
971 	struct ata_port *ap = qc->ap;
972 	struct request_queue *q = qc->scsicmd->device->request_queue;
973 	unsigned long flags;
974 
975 	WARN_ON(!ap->ops->error_handler);
976 
977 	qc->flags |= ATA_QCFLAG_FAILED;
978 	ata_eh_set_pending(ap, 1);
979 
980 	/* The following will fail if timeout has already expired.
981 	 * ata_scsi_error() takes care of such scmds on EH entry.
982 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
983 	 * this function completes.
984 	 */
985 	spin_lock_irqsave(q->queue_lock, flags);
986 	blk_abort_request(qc->scsicmd->request);
987 	spin_unlock_irqrestore(q->queue_lock, flags);
988 }
989 
990 /**
991  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
992  * @ap: ATA port to schedule EH for
993  *
994  *	LOCKING: inherited from ata_port_schedule_eh
995  *	spin_lock_irqsave(host lock)
996  */
997 void ata_std_sched_eh(struct ata_port *ap)
998 {
999 	WARN_ON(!ap->ops->error_handler);
1000 
1001 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
1002 		return;
1003 
1004 	ata_eh_set_pending(ap, 1);
1005 	scsi_schedule_eh(ap->scsi_host);
1006 
1007 	DPRINTK("port EH scheduled\n");
1008 }
1009 EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1010 
1011 /**
1012  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1013  * @ap: ATA port to end EH for
1014  *
1015  * In the libata object model there is a 1:1 mapping of ata_port to
1016  * shost, so host fields can be directly manipulated under ap->lock; in
1017  * the libsas case we need to hold a lock at the ha level to coordinate
1018  * these events.
1019  *
1020  *	LOCKING:
1021  *	spin_lock_irqsave(host lock)
1022  */
1023 void ata_std_end_eh(struct ata_port *ap)
1024 {
1025 	struct Scsi_Host *host = ap->scsi_host;
1026 
1027 	host->host_eh_scheduled = 0;
1028 }
1029 EXPORT_SYMBOL(ata_std_end_eh);
1030 
1031 
1032 /**
1033  *	ata_port_schedule_eh - schedule error handling without a qc
1034  *	@ap: ATA port to schedule EH for
1035  *
1036  *	Schedule error handling for @ap.  EH will kick in as soon as
1037  *	all commands are drained.
1038  *
1039  *	LOCKING:
1040  *	spin_lock_irqsave(host lock)
1041  */
1042 void ata_port_schedule_eh(struct ata_port *ap)
1043 {
1044 	/* see: ata_std_sched_eh, unless you know better */
1045 	ap->ops->sched_eh(ap);
1046 }
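
/*
 * For example (a sketch of a common LLDD pattern), a driver which
 * detects a hotplug event in its interrupt handler can record it and
 * schedule EH while holding the host lock:
 *
 *	ata_ehi_hotplugged(&ap->link.eh_info);
 *	ata_port_schedule_eh(ap);
 */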
1047 
1048 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1049 {
1050 	int tag, nr_aborted = 0;
1051 
1052 	WARN_ON(!ap->ops->error_handler);
1053 
1054 	/* we're gonna abort all commands, no need for fast drain */
1055 	ata_eh_set_pending(ap, 0);
1056 
1057 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1058 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1059 
1060 		if (qc && (!link || qc->dev->link == link)) {
1061 			qc->flags |= ATA_QCFLAG_FAILED;
1062 			ata_qc_complete(qc);
1063 			nr_aborted++;
1064 		}
1065 	}
1066 
1067 	if (!nr_aborted)
1068 		ata_port_schedule_eh(ap);
1069 
1070 	return nr_aborted;
1071 }
1072 
1073 /**
1074  *	ata_link_abort - abort all qc's on the link
1075  *	@link: ATA link to abort qc's for
1076  *
1077  *	Abort all active qc's on @link and schedule EH.
1078  *
1079  *	LOCKING:
1080  *	spin_lock_irqsave(host lock)
1081  *
1082  *	RETURNS:
1083  *	Number of aborted qc's.
1084  */
1085 int ata_link_abort(struct ata_link *link)
1086 {
1087 	return ata_do_link_abort(link->ap, link);
1088 }
1089 
1090 /**
1091  *	ata_port_abort - abort all qc's on the port
1092  *	@ap: ATA port to abort qc's for
1093  *
1094  *	Abort all active qc's of @ap and schedule EH.
1095  *
1096  *	LOCKING:
1097  *	spin_lock_irqsave(host lock)
1098  *
1099  *	RETURNS:
1100  *	Number of aborted qc's.
1101  */
1102 int ata_port_abort(struct ata_port *ap)
1103 {
1104 	return ata_do_link_abort(ap, NULL);
1105 }
1106 
1107 /**
1108  *	__ata_port_freeze - freeze port
1109  *	@ap: ATA port to freeze
1110  *
1111  *	This function is called when HSM violation or some other
1112  *	condition disrupts normal operation of the port.  Frozen port
1113  *	is not allowed to perform any operation until the port is
1114  *	thawed, which usually follows a successful reset.
1115  *
1116  *	ap->ops->freeze() callback can be used for freezing the port
1117  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1118  *	port cannot be frozen hardware-wise, the interrupt handler
1119  *	must ack and clear interrupts unconditionally while the port
1120  *	is frozen.
1121  *
1122  *	LOCKING:
1123  *	spin_lock_irqsave(host lock)
1124  */
1125 static void __ata_port_freeze(struct ata_port *ap)
1126 {
1127 	WARN_ON(!ap->ops->error_handler);
1128 
1129 	if (ap->ops->freeze)
1130 		ap->ops->freeze(ap);
1131 
1132 	ap->pflags |= ATA_PFLAG_FROZEN;
1133 
1134 	DPRINTK("ata%u port frozen\n", ap->print_id);
1135 }
1136 
1137 /**
1138  *	ata_port_freeze - abort & freeze port
1139  *	@ap: ATA port to freeze
1140  *
1141  *	Abort and freeze @ap.  The freeze operation must be called
1142  *	first, because some hardware requires special operations
1143  *	before the taskfile registers are accessible.
1144  *
1145  *	LOCKING:
1146  *	spin_lock_irqsave(host lock)
1147  *
1148  *	RETURNS:
1149  *	Number of aborted commands.
1150  */
1151 int ata_port_freeze(struct ata_port *ap)
1152 {
1153 	int nr_aborted;
1154 
1155 	WARN_ON(!ap->ops->error_handler);
1156 
1157 	__ata_port_freeze(ap);
1158 	nr_aborted = ata_port_abort(ap);
1159 
1160 	return nr_aborted;
1161 }
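
/*
 * Typical use from an interrupt handler once the controller state is no
 * longer trustworthy (a sketch; AHCI-style drivers follow this shape in
 * their error interrupt paths):
 *
 *	struct ata_eh_info *ehi = &ap->link.eh_info;
 *
 *	ehi->err_mask |= AC_ERR_HSM;
 *	ehi->action |= ATA_EH_RESET;
 *	ata_port_freeze(ap);
 */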
1162 
1163 /**
1164  *	sata_async_notification - SATA async notification handler
1165  *	@ap: ATA port where async notification is received
1166  *
1167  *	Handler to be called when async notification via SDB FIS is
1168  *	received.  This function schedules EH if necessary.
1169  *
1170  *	LOCKING:
1171  *	spin_lock_irqsave(host lock)
1172  *
1173  *	RETURNS:
1174  *	1 if EH is scheduled, 0 otherwise.
1175  */
1176 int sata_async_notification(struct ata_port *ap)
1177 {
1178 	u32 sntf;
1179 	int rc;
1180 
1181 	if (!(ap->flags & ATA_FLAG_AN))
1182 		return 0;
1183 
1184 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1185 	if (rc == 0)
1186 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1187 
1188 	if (!sata_pmp_attached(ap) || rc) {
1189 		/* PMP is not attached or SNTF is not available */
1190 		if (!sata_pmp_attached(ap)) {
1191 			/* PMP is not attached.  Check whether ATAPI
1192 			 * AN is configured.  If so, notify media
1193 			 * change.
1194 			 */
1195 			struct ata_device *dev = ap->link.device;
1196 
1197 			if ((dev->class == ATA_DEV_ATAPI) &&
1198 			    (dev->flags & ATA_DFLAG_AN))
1199 				ata_scsi_media_change_notify(dev);
1200 			return 0;
1201 		} else {
1202 			/* PMP is attached but SNTF is not available.
1203 			 * ATAPI async media change notification is
1204 			 * not used.  The PMP must be reporting PHY
1205 			 * status change, schedule EH.
1206 			 */
1207 			ata_port_schedule_eh(ap);
1208 			return 1;
1209 		}
1210 	} else {
1211 		/* PMP is attached and SNTF is available */
1212 		struct ata_link *link;
1213 
1214 		/* check and notify ATAPI AN */
1215 		ata_for_each_link(link, ap, EDGE) {
1216 			if (!(sntf & (1 << link->pmp)))
1217 				continue;
1218 
1219 			if ((link->device->class == ATA_DEV_ATAPI) &&
1220 			    (link->device->flags & ATA_DFLAG_AN))
1221 				ata_scsi_media_change_notify(link->device);
1222 		}
1223 
1224 		/* If PMP is reporting that PHY status of some
1225 		 * downstream ports has changed, schedule EH.
1226 		 */
1227 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1228 			ata_port_schedule_eh(ap);
1229 			return 1;
1230 		}
1231 
1232 		return 0;
1233 	}
1234 }
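
/*
 * LLDDs call this from their interrupt handlers when a Set Device Bits
 * FIS with the notification bit arrives; a sketch, where SDB_FIS_N_BIT
 * stands in for the controller-specific status flag:
 *
 *	if (irq_stat & SDB_FIS_N_BIT)
 *		sata_async_notification(ap);
 */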
1235 
1236 /**
1237  *	ata_eh_freeze_port - EH helper to freeze port
1238  *	@ap: ATA port to freeze
1239  *
1240  *	Freeze @ap.
1241  *
1242  *	LOCKING:
1243  *	None.
1244  */
1245 void ata_eh_freeze_port(struct ata_port *ap)
1246 {
1247 	unsigned long flags;
1248 
1249 	if (!ap->ops->error_handler)
1250 		return;
1251 
1252 	spin_lock_irqsave(ap->lock, flags);
1253 	__ata_port_freeze(ap);
1254 	spin_unlock_irqrestore(ap->lock, flags);
1255 }
1256 
1257 /**
1258  *	ata_eh_thaw_port - EH helper to thaw port
1259  *	@ap: ATA port to thaw
1260  *
1261  *	Thaw frozen port @ap.
1262  *
1263  *	LOCKING:
1264  *	None.
1265  */
1266 void ata_eh_thaw_port(struct ata_port *ap)
1267 {
1268 	unsigned long flags;
1269 
1270 	if (!ap->ops->error_handler)
1271 		return;
1272 
1273 	spin_lock_irqsave(ap->lock, flags);
1274 
1275 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1276 
1277 	if (ap->ops->thaw)
1278 		ap->ops->thaw(ap);
1279 
1280 	spin_unlock_irqrestore(ap->lock, flags);
1281 
1282 	DPRINTK("ata%u port thawed\n", ap->print_id);
1283 }
1284 
1285 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1286 {
1287 	/* nada */
1288 }
1289 
1290 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1291 {
1292 	struct ata_port *ap = qc->ap;
1293 	struct scsi_cmnd *scmd = qc->scsicmd;
1294 	unsigned long flags;
1295 
1296 	spin_lock_irqsave(ap->lock, flags);
1297 	qc->scsidone = ata_eh_scsidone;
1298 	__ata_qc_complete(qc);
1299 	WARN_ON(ata_tag_valid(qc->tag));
1300 	spin_unlock_irqrestore(ap->lock, flags);
1301 
1302 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1303 }
1304 
1305 /**
1306  *	ata_eh_qc_complete - Complete an active ATA command from EH
1307  *	@qc: Command to complete
1308  *
1309  *	Indicate to the mid and upper layers that an ATA command has
1310  *	completed.  To be used from EH.
1311  */
1312 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1313 {
1314 	struct scsi_cmnd *scmd = qc->scsicmd;
1315 	scmd->retries = scmd->allowed;
1316 	__ata_eh_qc_complete(qc);
1317 }
1318 
1319 /**
1320  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1321  *	@qc: Command to retry
1322  *
1323  *	Indicate to the mid and upper layers that an ATA command
1324  *	should be retried.  To be used from EH.
1325  *
1326  *	SCSI midlayer limits the number of retries to scmd->allowed.
1327  *	scmd->allowed is incremented for commands which get retried
1328  *	due to unrelated failures (qc->err_mask is zero).
1329  */
1330 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1331 {
1332 	struct scsi_cmnd *scmd = qc->scsicmd;
1333 	if (!qc->err_mask)
1334 		scmd->allowed++;
1335 	__ata_eh_qc_complete(qc);
1336 }
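
/*
 * At the end of recovery, EH disposes of each failed qc with one of the
 * two helpers above, roughly as follows (compare ata_eh_finish()):
 *
 *	if (qc->flags & ATA_QCFLAG_RETRY)
 *		ata_eh_qc_retry(qc);
 *	else
 *		ata_eh_qc_complete(qc);
 */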
1337 
1338 /**
1339  *	ata_dev_disable - disable ATA device
1340  *	@dev: ATA device to disable
1341  *
1342  *	Disable @dev.
1343  *
1344  *	Locking:
1345  *	EH context.
1346  */
1347 void ata_dev_disable(struct ata_device *dev)
1348 {
1349 	if (!ata_dev_enabled(dev))
1350 		return;
1351 
1352 	if (ata_msg_drv(dev->link->ap))
1353 		ata_dev_warn(dev, "disabled\n");
1354 	ata_acpi_on_disable(dev);
1355 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1356 	dev->class++;
1357 
1358 	/* From now till the next successful probe, ering is used to
1359 	 * track probe failures.  Clear accumulated device error info.
1360 	 */
1361 	ata_ering_clear(&dev->ering);
1362 }
1363 
1364 /**
1365  *	ata_eh_detach_dev - detach ATA device
1366  *	@dev: ATA device to detach
1367  *
1368  *	Detach @dev.
1369  *
1370  *	LOCKING:
1371  *	None.
1372  */
1373 void ata_eh_detach_dev(struct ata_device *dev)
1374 {
1375 	struct ata_link *link = dev->link;
1376 	struct ata_port *ap = link->ap;
1377 	struct ata_eh_context *ehc = &link->eh_context;
1378 	unsigned long flags;
1379 
1380 	ata_dev_disable(dev);
1381 
1382 	spin_lock_irqsave(ap->lock, flags);
1383 
1384 	dev->flags &= ~ATA_DFLAG_DETACH;
1385 
1386 	if (ata_scsi_offline_dev(dev)) {
1387 		dev->flags |= ATA_DFLAG_DETACHED;
1388 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1389 	}
1390 
1391 	/* clear per-dev EH info */
1392 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1393 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1394 	ehc->saved_xfer_mode[dev->devno] = 0;
1395 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1396 
1397 	spin_unlock_irqrestore(ap->lock, flags);
1398 }
1399 
1400 /**
1401  *	ata_eh_about_to_do - about to perform eh_action
1402  *	@link: target ATA link
1403  *	@dev: target ATA dev for per-dev action (can be NULL)
1404  *	@action: action about to be performed
1405  *
1406  *	Called just before performing EH actions to clear related bits
1407  *	in @link->eh_info such that eh actions are not unnecessarily
1408  *	repeated.
1409  *
1410  *	LOCKING:
1411  *	None.
1412  */
1413 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1414 			unsigned int action)
1415 {
1416 	struct ata_port *ap = link->ap;
1417 	struct ata_eh_info *ehi = &link->eh_info;
1418 	struct ata_eh_context *ehc = &link->eh_context;
1419 	unsigned long flags;
1420 
1421 	spin_lock_irqsave(ap->lock, flags);
1422 
1423 	ata_eh_clear_action(link, dev, ehi, action);
1424 
1425 	/* About to take EH action, set RECOVERED.  Ignore actions on
1426 	 * slave links as master will do them again.
1427 	 */
1428 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1429 		ap->pflags |= ATA_PFLAG_RECOVERED;
1430 
1431 	spin_unlock_irqrestore(ap->lock, flags);
1432 }
1433 
1434 /**
1435  *	ata_eh_done - EH action complete
1436  *	@link: target ATA link
1437  *	@dev: target ATA dev for per-dev action (can be NULL)
1438  *	@action: action just completed
1439  *
1440  *	Called right after performing EH actions to clear related bits
1441  *	in @link->eh_context.
1442  *
1443  *	LOCKING:
1444  *	None.
1445  */
1446 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1447 		 unsigned int action)
1448 {
1449 	struct ata_eh_context *ehc = &link->eh_context;
1450 
1451 	ata_eh_clear_action(link, dev, &ehc->i, action);
1452 }
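
/*
 * Recovery paths bracket an EH action with the two helpers above, e.g.
 * for a reset (a sketch, compare ata_eh_reset()):
 *
 *	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
 *	... perform the reset ...
 *	ata_eh_done(link, NULL, ATA_EH_RESET);
 */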
1453 
1454 /**
1455  *	ata_err_string - convert err_mask to descriptive string
1456  *	@err_mask: error mask to convert to string
1457  *
1458  *	Convert @err_mask to descriptive string.  Errors are
1459  *	prioritized according to severity and only the most severe
1460  *	error is reported.
1461  *
1462  *	LOCKING:
1463  *	None.
1464  *
1465  *	RETURNS:
1466  *	Descriptive string for @err_mask
1467  */
1468 static const char *ata_err_string(unsigned int err_mask)
1469 {
1470 	if (err_mask & AC_ERR_HOST_BUS)
1471 		return "host bus error";
1472 	if (err_mask & AC_ERR_ATA_BUS)
1473 		return "ATA bus error";
1474 	if (err_mask & AC_ERR_TIMEOUT)
1475 		return "timeout";
1476 	if (err_mask & AC_ERR_HSM)
1477 		return "HSM violation";
1478 	if (err_mask & AC_ERR_SYSTEM)
1479 		return "internal error";
1480 	if (err_mask & AC_ERR_MEDIA)
1481 		return "media error";
1482 	if (err_mask & AC_ERR_INVALID)
1483 		return "invalid argument";
1484 	if (err_mask & AC_ERR_DEV)
1485 		return "device error";
1486 	return "unknown error";
1487 }
1488 
1489 /**
1490  *	ata_read_log_page - read a specific log page
1491  *	@dev: target device
1492  *	@log: log to read
1493  *	@page: page to read
1494  *	@buf: buffer to store read page
1495  *	@sectors: number of sectors to read
1496  *
1497  *	Read log page using READ_LOG_EXT command.
1498  *
1499  *	LOCKING:
1500  *	Kernel thread context (may sleep).
1501  *
1502  *	RETURNS:
1503  *	0 on success, AC_ERR_* mask otherwise.
1504  */
1505 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1506 			       u8 page, void *buf, unsigned int sectors)
1507 {
1508 	struct ata_taskfile tf;
1509 	unsigned int err_mask;
1510 	bool dma = false;
1511 
1512 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1513 
1514 retry:
1515 	ata_tf_init(dev, &tf);
1516 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
1517 	    !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
1518 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
1519 		tf.protocol = ATA_PROT_DMA;
1520 		dma = true;
1521 	} else {
1522 		tf.command = ATA_CMD_READ_LOG_EXT;
1523 		tf.protocol = ATA_PROT_PIO;
1524 		dma = false;
1525 	}
1526 	tf.lbal = log;
1527 	tf.lbam = page;
1528 	tf.nsect = sectors;
1529 	tf.hob_nsect = sectors >> 8;
1530 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1531 
1532 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1533 				     buf, sectors * ATA_SECT_SIZE, 0);
1534 
1535 	if (err_mask && dma) {
1536 		dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
1537 		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
1538 		goto retry;
1539 	}
1540 
1541 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1542 	return err_mask;
1543 }
1544 
1545 /**
1546  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1547  *	@dev: Device to read log page 10h from
1548  *	@tag: Resulting tag of the failed command
1549  *	@tf: Resulting taskfile registers of the failed command
1550  *
1551  *	Read log page 10h to obtain NCQ error details and clear error
1552  *	condition.
1553  *
1554  *	LOCKING:
1555  *	Kernel thread context (may sleep).
1556  *
1557  *	RETURNS:
1558  *	0 on success, -errno otherwise.
1559  */
1560 static int ata_eh_read_log_10h(struct ata_device *dev,
1561 			       int *tag, struct ata_taskfile *tf)
1562 {
1563 	u8 *buf = dev->link->ap->sector_buf;
1564 	unsigned int err_mask;
1565 	u8 csum;
1566 	int i;
1567 
1568 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1569 	if (err_mask)
1570 		return -EIO;
1571 
1572 	csum = 0;
1573 	for (i = 0; i < ATA_SECT_SIZE; i++)
1574 		csum += buf[i];
1575 	if (csum)
1576 		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1577 			     csum);
1578 
1579 	if (buf[0] & 0x80)
1580 		return -ENOENT;
1581 
1582 	*tag = buf[0] & 0x1f;
1583 
1584 	tf->command = buf[2];
1585 	tf->feature = buf[3];
1586 	tf->lbal = buf[4];
1587 	tf->lbam = buf[5];
1588 	tf->lbah = buf[6];
1589 	tf->device = buf[7];
1590 	tf->hob_lbal = buf[8];
1591 	tf->hob_lbam = buf[9];
1592 	tf->hob_lbah = buf[10];
1593 	tf->nsect = buf[12];
1594 	tf->hob_nsect = buf[13];
1595 	if (ata_id_has_ncq_autosense(dev->id))
1596 		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1597 
1598 	return 0;
1599 }
1600 
1601 /**
1602  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1603  *	@dev: target ATAPI device
1604  *	@r_sense_key: out parameter for sense_key
1605  *
1606  *	Perform ATAPI TEST_UNIT_READY.
1607  *
1608  *	LOCKING:
1609  *	EH context (may sleep).
1610  *
1611  *	RETURNS:
1612  *	0 on success, AC_ERR_* mask on failure.
1613  */
1614 unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1615 {
1616 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1617 	struct ata_taskfile tf;
1618 	unsigned int err_mask;
1619 
1620 	ata_tf_init(dev, &tf);
1621 
1622 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1623 	tf.command = ATA_CMD_PACKET;
1624 	tf.protocol = ATAPI_PROT_NODATA;
1625 
1626 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1627 	if (err_mask == AC_ERR_DEV)
1628 		*r_sense_key = tf.feature >> 4;
1629 	return err_mask;
1630 }
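
/*
 * Sketch: clearing UNIT ATTENTION after an ATAPI device is revalidated
 * retries TUR up to ATA_EH_UA_TRIES times (compare atapi_eh_clear_ua()):
 *
 *	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
 *		u8 sense_key = 0;
 *
 *		err_mask = atapi_eh_tur(dev, &sense_key);
 *		if (err_mask != AC_ERR_DEV || sense_key != UNIT_ATTENTION)
 *			break;
 *		... issue REQUEST SENSE to clear the UA ...
 *	}
 */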
1631 
1632 /**
1633  *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1634  *	@qc: qc to perform REQUEST_SENSE_DATA_EXT for
1635  *	@cmd: SCSI command for which the sense data should be set
1636  *	      (its sense buffer is SCSI_SENSE_BUFFERSIZE bytes long)
1637  *
1638  *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1639  *	SENSE.  This function is EH helper.
1640  *
1641  *	LOCKING:
1642  *	Kernel thread context (may sleep).
1643  *
1644  *	RETURNS:
1645  *	0 on success, AC_ERR_* mask on failure.  Sense data, when
1646  *	obtained, is stored directly in @cmd.
1647  */
1648 static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
1649 				struct scsi_cmnd *cmd)
1650 {
1651 	struct ata_device *dev = qc->dev;
1652 	struct ata_taskfile tf;
1653 	unsigned int err_mask;
1654 
1655 	if (!cmd)
1656 		return 0;
1657 
1658 	DPRINTK("ATA request sense\n");
1659 	ata_dev_warn(dev, "request sense\n");
1660 	if (!ata_id_sense_reporting_enabled(dev->id)) {
1661 		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
1662 		return 0;
1663 	}
1664 	ata_tf_init(dev, &tf);
1665 
1666 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1667 	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1668 	tf.command = ATA_CMD_REQ_SENSE_DATA;
1669 	tf.protocol = ATA_PROT_NODATA;
1670 
1671 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1672 	/*
1673 	 * ACS-4 states:
1674 	 * The device may set the SENSE DATA AVAILABLE bit to one in the
1675 	 * STATUS field and clear the ERROR bit to zero in the STATUS field
1676 	 * to indicate that the command returned completion without an error
1677 	 * and the sense data described in table 306 is available.
1678 	 *
1679 	 * IOW the 'ATA_SENSE' bit might not be set even though valid
1680 	 * sense data is available.
1681 	 * So check for both.
1682 	 */
1683 	if ((tf.command & ATA_SENSE) ||
1684 		tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
1685 		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
1686 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
1687 		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
1688 			     tf.lbah, tf.lbam, tf.lbal);
1689 	} else {
1690 		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1691 			     tf.command, err_mask);
1692 	}
1693 	return err_mask;
1694 }
1695 
1696 /**
1697  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1698  *	@dev: device to perform REQUEST_SENSE to
1699  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1700  *	@dfl_sense_key: default sense key to use
1701  *
1702  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1703  *	SENSE.  This function is EH helper.
1704  *
1705  *	LOCKING:
1706  *	Kernel thread context (may sleep).
1707  *
1708  *	RETURNS:
1709  *	0 on success, AC_ERR_* mask on failure
1710  */
1711 unsigned int atapi_eh_request_sense(struct ata_device *dev,
1712 					   u8 *sense_buf, u8 dfl_sense_key)
1713 {
1714 	u8 cdb[ATAPI_CDB_LEN] =
1715 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1716 	struct ata_port *ap = dev->link->ap;
1717 	struct ata_taskfile tf;
1718 
1719 	DPRINTK("ATAPI request sense\n");
1720 
1721 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1722 
1723 	/* initialize sense_buf with the error register,
1724 	 * for the case where it is -not- overwritten
1725 	 */
1726 	sense_buf[0] = 0x70;
1727 	sense_buf[2] = dfl_sense_key;
1728 
1729 	/* some devices time out if garbage left in tf */
1730 	ata_tf_init(dev, &tf);
1731 
1732 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1733 	tf.command = ATA_CMD_PACKET;
1734 
1735 	/* is it pointless to prefer PIO for "safety reasons"? */
1736 	if (ap->flags & ATA_FLAG_PIO_DMA) {
1737 		tf.protocol = ATAPI_PROT_DMA;
1738 		tf.feature |= ATAPI_PKT_DMA;
1739 	} else {
1740 		tf.protocol = ATAPI_PROT_PIO;
1741 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1742 		tf.lbah = 0;
1743 	}
1744 
1745 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1746 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1747 }
1748 
1749 /**
1750  *	ata_eh_analyze_serror - analyze SError for a failed port
1751  *	@link: ATA link to analyze SError for
1752  *
1753  *	Analyze SError if available and further determine cause of
1754  *	failure.
1755  *
1756  *	LOCKING:
1757  *	None.
1758  */
1759 static void ata_eh_analyze_serror(struct ata_link *link)
1760 {
1761 	struct ata_eh_context *ehc = &link->eh_context;
1762 	u32 serror = ehc->i.serror;
1763 	unsigned int err_mask = 0, action = 0;
1764 	u32 hotplug_mask;
1765 
1766 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1767 		err_mask |= AC_ERR_ATA_BUS;
1768 		action |= ATA_EH_RESET;
1769 	}
1770 	if (serror & SERR_PROTOCOL) {
1771 		err_mask |= AC_ERR_HSM;
1772 		action |= ATA_EH_RESET;
1773 	}
1774 	if (serror & SERR_INTERNAL) {
1775 		err_mask |= AC_ERR_SYSTEM;
1776 		action |= ATA_EH_RESET;
1777 	}
1778 
1779 	/* Determine whether a hotplug event has occurred.  Both
1780 	 * SError.N/X are considered hotplug events for enabled or
1781 	 * host links.  For disabled PMP links, only the N bit is
1782 	 * considered, as the X bit is left at 1 for link plugging.
1783 	 */
1784 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
1785 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
1786 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1787 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1788 	else
1789 		hotplug_mask = SERR_PHYRDY_CHG;
1790 
1791 	if (serror & hotplug_mask)
1792 		ata_ehi_hotplugged(&ehc->i);
1793 
1794 	ehc->i.err_mask |= err_mask;
1795 	ehc->i.action |= action;
1796 }
1797 
1798 /**
1799  *	ata_eh_analyze_ncq_error - analyze NCQ error
1800  *	@link: ATA link to analyze NCQ error for
1801  *
1802  *	Read log page 10h, determine the offending qc and acquire
1803  *	error status TF.  For NCQ device errors, all an LLDD has to do
1804  *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
1805  *	care of the rest.
1806  *
1807  *	LOCKING:
1808  *	Kernel thread context (may sleep).
1809  */
1810 void ata_eh_analyze_ncq_error(struct ata_link *link)
1811 {
1812 	struct ata_port *ap = link->ap;
1813 	struct ata_eh_context *ehc = &link->eh_context;
1814 	struct ata_device *dev = link->device;
1815 	struct ata_queued_cmd *qc;
1816 	struct ata_taskfile tf;
1817 	int tag, rc;
1818 
1819 	/* if frozen, we can't do much */
1820 	if (ap->pflags & ATA_PFLAG_FROZEN)
1821 		return;
1822 
1823 	/* is it NCQ device error? */
1824 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1825 		return;
1826 
1827 	/* has LLDD analyzed already? */
1828 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1829 		qc = __ata_qc_from_tag(ap, tag);
1830 
1831 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1832 			continue;
1833 
1834 		if (qc->err_mask)
1835 			return;
1836 	}
1837 
1838 	/* okay, this error is ours */
1839 	memset(&tf, 0, sizeof(tf));
1840 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1841 	if (rc) {
1842 		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1843 			     rc);
1844 		return;
1845 	}
1846 
1847 	if (!(link->sactive & (1 << tag))) {
1848 		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1849 			     tag);
1850 		return;
1851 	}
1852 
1853 	/* we've got the perpetrator, condemn it */
1854 	qc = __ata_qc_from_tag(ap, tag);
1855 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1856 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1857 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1858 	if (qc->result_tf.auxiliary) {
1859 		char sense_key, asc, ascq;
1860 
1861 		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1862 		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1863 		ascq = qc->result_tf.auxiliary & 0xff;
1864 		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
1865 			    sense_key, asc, ascq);
1866 		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
1867 		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
1868 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
1869 	}
1870 
1871 	ehc->i.err_mask &= ~AC_ERR_DEV;
1872 }
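
/*
 * From an LLDD's point of view the above means that, on an NCQ device
 * error, the interrupt handler only needs something like (a sketch):
 *
 *	ehi->err_mask |= AC_ERR_DEV;
 *	ata_port_abort(ap);
 *
 * and the EH autopsy then calls this function to pinpoint the failed
 * tag and taskfile from log page 10h.
 */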
1873 
1874 /**
1875  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1876  *	@qc: qc to analyze
1877  *	@tf: Taskfile registers to analyze
1878  *
1879  *	Analyze taskfile of @qc and further determine cause of
1880  *	failure.  This function also requests ATAPI sense data if
1881  *	available.
1882  *
1883  *	LOCKING:
1884  *	Kernel thread context (may sleep).
1885  *
1886  *	RETURNS:
1887  *	Determined recovery action
1888  */
1889 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1890 				      const struct ata_taskfile *tf)
1891 {
1892 	unsigned int tmp, action = 0;
1893 	u8 stat = tf->command, err = tf->feature;
1894 
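	/* a sane completion status has DRDY set with BSY and DRQ
	 * clear; anything else indicates an HSM violation
	 */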
1895 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1896 		qc->err_mask |= AC_ERR_HSM;
1897 		return ATA_EH_RESET;
1898 	}
1899 
1900 	/*
1901 	 * Sense data reporting does not work if the
1902 	 * device fault bit is set.
1903 	 */
1904 	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
1905 	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
1906 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1907 			tmp = ata_eh_request_sense(qc, qc->scsicmd);
1908 			if (tmp)
1909 				qc->err_mask |= tmp;
1910 			else
1911 				ata_scsi_set_sense_information(qc->scsicmd, tf);
1912 		} else {
1913 			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
1914 		}
1915 	}
1916 
1917 	/* Set by NCQ autosense or request sense above */
1918 	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
1919 		return 0;
1920 
1921 	if (stat & (ATA_ERR | ATA_DF))
1922 		qc->err_mask |= AC_ERR_DEV;
1923 	else
1924 		return 0;
1925 
1926 	switch (qc->dev->class) {
1927 	case ATA_DEV_ATA:
1928 	case ATA_DEV_ZAC:
1929 		if (err & ATA_ICRC)
1930 			qc->err_mask |= AC_ERR_ATA_BUS;
1931 		if (err & (ATA_UNC | ATA_AMNF))
1932 			qc->err_mask |= AC_ERR_MEDIA;
1933 		if (err & ATA_IDNF)
1934 			qc->err_mask |= AC_ERR_INVALID;
1935 		break;
1936 
1937 	case ATA_DEV_ATAPI:
1938 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1939 			tmp = atapi_eh_request_sense(qc->dev,
1940 						qc->scsicmd->sense_buffer,
1941 						qc->result_tf.feature >> 4);
1942 			if (!tmp) {
1943 				/* ATA_QCFLAG_SENSE_VALID is used to
1944 				 * tell atapi_qc_complete() that sense
1945 				 * data is already valid.
1946 				 *
1947 				 * TODO: interpret sense data and set
1948 				 * appropriate err_mask.
1949 				 */
1950 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1951 			} else
1952 				qc->err_mask |= tmp;
1953 		}
1954 	}
1955 
1956 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1957 		action |= ATA_EH_RESET;
1958 
1959 	return action;
1960 }
1961 
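/* Map an error entry to an ATA_ECAT_* category for the speed down
 * logic below.  *xfer_ok latches to 1 on the first entry whose data
 * transfer was not dubious; until then, matching errors fall into the
 * corresponding ATA_ECAT_DUBIOUS_* categories.
 */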
1962 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1963 				   int *xfer_ok)
1964 {
1965 	int base = 0;
1966 
1967 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1968 		*xfer_ok = 1;
1969 
1970 	if (!*xfer_ok)
1971 		base = ATA_ECAT_DUBIOUS_NONE;
1972 
1973 	if (err_mask & AC_ERR_ATA_BUS)
1974 		return base + ATA_ECAT_ATA_BUS;
1975 
1976 	if (err_mask & AC_ERR_TIMEOUT)
1977 		return base + ATA_ECAT_TOUT_HSM;
1978 
1979 	if (eflags & ATA_EFLAG_IS_IO) {
1980 		if (err_mask & AC_ERR_HSM)
1981 			return base + ATA_ECAT_TOUT_HSM;
1982 		if ((err_mask &
1983 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1984 			return base + ATA_ECAT_UNK_DEV;
1985 	}
1986 
1987 	return 0;
1988 }
1989 
1990 struct speed_down_verdict_arg {
1991 	u64 since;
1992 	int xfer_ok;
1993 	int nr_errors[ATA_ECAT_NR];
1994 };
1995 
1996 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1997 {
1998 	struct speed_down_verdict_arg *arg = void_arg;
1999 	int cat;
2000 
2001 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
2002 		return -1;
2003 
2004 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
2005 				      &arg->xfer_ok);
2006 	arg->nr_errors[cat]++;
2007 
2008 	return 0;
2009 }
2010 
2011 /**
2012  *	ata_eh_speed_down_verdict - Determine speed down verdict
2013  *	@dev: Device of interest
2014  *
2015  *	This function examines the error ring of @dev and determines
2016  *	whether NCQ needs to be turned off, the transfer speed should
2017  *	be stepped down, or a fallback to PIO is necessary.
2018  *
2019  *	ECAT_ATA_BUS	: ATA_BUS error for any command
2020  *
2021  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
2022  *			  IO commands
2023  *
2024  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
2025  *
2026  *	ECAT_DUBIOUS_*	: Identical to the above three but occurring
2027  *			  before the data transfer has been verified.
2028  *
2029  *	Verdicts are
2030  *
2031  *	NCQ_OFF		: Turn off NCQ.
2032  *
2033  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
2034  *			  to PIO.
2035  *
2036  *	FALLBACK_TO_PIO	: Fall back to PIO.
2037  *
2038  *	Even if multiple verdicts are returned, only one action is
2039  *	taken per error.  An action triggered by non-DUBIOUS errors
2040  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
2041  *	This is to expedite speed down decisions right after the
2042  *	device is initially configured.
2043  *
2044  *	The following are the speed down rules.  #1 and #2 deal with
2045  *	DUBIOUS errors.
2046  *
2047  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
2048  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
2049  *
2050  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
2051  *	   occurred during last 5 mins, NCQ_OFF.
2052  *
2053  *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
2054  *	   occurred during last 5 mins, FALLBACK_TO_PIO
2055  *
2056  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
2057  *	   during last 10 mins, NCQ_OFF.
2058  *
2059  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
2060  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
2061  *
2062  *	LOCKING:
2063  *	Inherited from caller.
2064  *
2065  *	RETURNS:
2066  *	OR of ATA_EH_SPDN_* flags.
2067  */
2068 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
2069 {
2070 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
2071 	u64 j64 = get_jiffies_64();
2072 	struct speed_down_verdict_arg arg;
2073 	unsigned int verdict = 0;
2074 
2075 	/* scan past 5 mins of error history */
2076 	memset(&arg, 0, sizeof(arg));
2077 	arg.since = j64 - min(j64, j5mins);
2078 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2079 
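	/* rule #1 */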
2080 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
2081 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
2082 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
2083 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
2084 
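	/* rule #2 */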
2085 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
2086 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
2087 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
2088 
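	/* rule #3 */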
2089 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2090 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2091 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2092 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
2093 
2094 	/* scan past 10 mins of error history */
2095 	memset(&arg, 0, sizeof(arg));
2096 	arg.since = j64 - min(j64, j10mins);
2097 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2098 
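	/* rule #4 */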
2099 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2100 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
2101 		verdict |= ATA_EH_SPDN_NCQ_OFF;
2102 
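	/* rule #5 */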
2103 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2104 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
2105 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2106 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
2107 
2108 	return verdict;
2109 }
2110 
2111 /**
2112  *	ata_eh_speed_down - record error and speed down if necessary
2113  *	@dev: Failed device
2114  *	@eflags: mask of ATA_EFLAG_* flags
2115  *	@err_mask: err_mask of the error
2116  *
2117  *	Record error and examine error history to determine whether
2118  *	adjusting transmission speed is necessary.  It also sets
2119  *	transmission limits appropriately if such adjustment is
2120  *	necessary.
2121  *
2122  *	LOCKING:
2123  *	Kernel thread context (may sleep).
2124  *
2125  *	RETURNS:
2126  *	Determined recovery action.
2127  */
2128 static unsigned int ata_eh_speed_down(struct ata_device *dev,
2129 				unsigned int eflags, unsigned int err_mask)
2130 {
2131 	struct ata_link *link = ata_dev_phys_link(dev);
2132 	int xfer_ok = 0;
2133 	unsigned int verdict;
2134 	unsigned int action = 0;
2135 
2136 	/* don't bother if Cat-0 error */
2137 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2138 		return 0;
2139 
2140 	/* record error and determine whether speed down is necessary */
2141 	ata_ering_record(&dev->ering, eflags, err_mask);
2142 	verdict = ata_eh_speed_down_verdict(dev);
2143 
2144 	/* turn off NCQ? */
2145 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2146 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2147 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2148 		dev->flags |= ATA_DFLAG_NCQ_OFF;
2149 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2150 		goto done;
2151 	}
2152 
2153 	/* speed down? */
2154 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2155 		/* speed down SATA link speed if possible */
2156 		if (sata_down_spd_limit(link, 0) == 0) {
2157 			action |= ATA_EH_RESET;
2158 			goto done;
2159 		}
2160 
2161 		/* lower transfer mode */
2162 		if (dev->spdn_cnt < 2) {
2163 			static const int dma_dnxfer_sel[] =
2164 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
2165 			static const int pio_dnxfer_sel[] =
2166 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2167 			int sel;
2168 
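			/* the first speed down step selects the next
			 * lower mode; the second clamps to the 40-wire
			 * UDMA limit or forces PIO0
			 */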
2169 			if (dev->xfer_shift != ATA_SHIFT_PIO)
2170 				sel = dma_dnxfer_sel[dev->spdn_cnt];
2171 			else
2172 				sel = pio_dnxfer_sel[dev->spdn_cnt];
2173 
2174 			dev->spdn_cnt++;
2175 
2176 			if (ata_down_xfermask_limit(dev, sel) == 0) {
2177 				action |= ATA_EH_RESET;
2178 				goto done;
2179 			}
2180 		}
2181 	}
2182 
2183 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
2184 	 * SATA ATA devices.  Consider it only for PATA and SATA ATAPI.
2185 	 */
2186 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2187 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2188 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
2189 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2190 			dev->spdn_cnt = 0;
2191 			action |= ATA_EH_RESET;
2192 			goto done;
2193 		}
2194 	}
2195 
2196 	return 0;
2197  done:
2198 	/* device has been slowed down, blow error history */
2199 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2200 		ata_ering_clear(&dev->ering);
2201 	return action;
2202 }
2203 
2204 /**
2205  *	ata_eh_worth_retry - analyze error and decide whether to retry
2206  *	@qc: qc to possibly retry
2207  *
2208  *	Look at the cause of the error and decide if a retry
2209  *	might be useful or not.  We don't want to retry media errors
2210  *	because the drive itself has probably already taken 10-30 seconds
2211  *	doing its own internal retries before reporting the failure.
2212  */
2213 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2214 {
2215 	if (qc->err_mask & AC_ERR_MEDIA)
2216 		return 0;	/* don't retry media errors */
2217 	if (qc->flags & ATA_QCFLAG_IO)
2218 		return 1;	/* otherwise retry anything from fs stack */
2219 	if (qc->err_mask & AC_ERR_INVALID)
2220 		return 0;	/* don't retry these */
2221 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
2222 }
2223 
2224 /**
2225  *	ata_eh_link_autopsy - analyze error and determine recovery action
2226  *	@link: host link to perform autopsy on
2227  *
2228  *	Analyze why @link failed and determine which recovery actions
2229  *	are needed.  This function also sets more detailed AC_ERR_*
2230  *	values and fills sense data for ATAPI CHECK CONDITION.
2231  *
2232  *	LOCKING:
2233  *	Kernel thread context (may sleep).
2234  */
2235 static void ata_eh_link_autopsy(struct ata_link *link)
2236 {
2237 	struct ata_port *ap = link->ap;
2238 	struct ata_eh_context *ehc = &link->eh_context;
2239 	struct ata_device *dev;
2240 	unsigned int all_err_mask = 0, eflags = 0;
2241 	int tag;
2242 	u32 serror;
2243 	int rc;
2244 
2245 	DPRINTK("ENTER\n");
2246 
2247 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2248 		return;
2249 
2250 	/* obtain and analyze SError */
2251 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2252 	if (rc == 0) {
2253 		ehc->i.serror |= serror;
2254 		ata_eh_analyze_serror(link);
2255 	} else if (rc != -EOPNOTSUPP) {
2256 		/* SError read failed, force reset and probing */
2257 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2258 		ehc->i.action |= ATA_EH_RESET;
2259 		ehc->i.err_mask |= AC_ERR_OTHER;
2260 	}
2261 
2262 	/* analyze NCQ failure */
2263 	ata_eh_analyze_ncq_error(link);
2264 
2265 	/* any real error trumps AC_ERR_OTHER */
2266 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2267 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2268 
2269 	all_err_mask |= ehc->i.err_mask;
2270 
2271 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2272 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2273 
2274 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2275 		    ata_dev_phys_link(qc->dev) != link)
2276 			continue;
2277 
2278 		/* inherit upper level err_mask */
2279 		qc->err_mask |= ehc->i.err_mask;
2280 
2281 		/* analyze TF */
2282 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2283 
2284 		/* DEV errors are probably spurious in case of ATA_BUS error */
2285 		if (qc->err_mask & AC_ERR_ATA_BUS)
2286 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2287 					  AC_ERR_INVALID);
2288 
2289 		/* any real error trumps unknown error */
2290 		if (qc->err_mask & ~AC_ERR_OTHER)
2291 			qc->err_mask &= ~AC_ERR_OTHER;
2292 
2293 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2294 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2295 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2296 
2297 		/* determine whether the command is worth retrying */
2298 		if (ata_eh_worth_retry(qc))
2299 			qc->flags |= ATA_QCFLAG_RETRY;
2300 
2301 		/* accumulate error info */
2302 		ehc->i.dev = qc->dev;
2303 		all_err_mask |= qc->err_mask;
2304 		if (qc->flags & ATA_QCFLAG_IO)
2305 			eflags |= ATA_EFLAG_IS_IO;
2306 		trace_ata_eh_link_autopsy_qc(qc);
2307 	}
2308 
2309 	/* enforce default EH actions */
2310 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2311 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2312 		ehc->i.action |= ATA_EH_RESET;
2313 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2314 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2315 		ehc->i.action |= ATA_EH_REVALIDATE;
2316 
2317 	/* If we have offending qcs and the associated failed device,
2318 	 * perform per-dev EH action only on the offending device.
2319 	 */
2320 	if (ehc->i.dev) {
2321 		ehc->i.dev_action[ehc->i.dev->devno] |=
2322 			ehc->i.action & ATA_EH_PERDEV_MASK;
2323 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2324 	}
2325 
2326 	/* propagate timeout to host link */
2327 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2328 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2329 
2330 	/* record error and consider speeding down */
2331 	dev = ehc->i.dev;
2332 	if (!dev && ata_link_max_devices(link) == 1 &&
2333 	    ata_dev_enabled(link->device))
2334 		dev = link->device;
2335 
2336 	if (dev) {
2337 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2338 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2339 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2340 	}
2341 	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2342 	DPRINTK("EXIT\n");
2343 }
2344 
2345 /**
2346  *	ata_eh_autopsy - analyze error and determine recovery action
2347  *	@ap: host port to perform autopsy on
2348  *
2349  *	Analyze all links of @ap and determine why they failed and
2350  *	which recovery actions are needed.
2351  *
2352  *	LOCKING:
2353  *	Kernel thread context (may sleep).
2354  */
2355 void ata_eh_autopsy(struct ata_port *ap)
2356 {
2357 	struct ata_link *link;
2358 
2359 	ata_for_each_link(link, ap, EDGE)
2360 		ata_eh_link_autopsy(link);
2361 
2362 	/* Handle the frigging slave link.  Autopsy is done similarly
2363 	 * but actions and flags are transferred over to the master
2364 	 * link and handled from there.
2365 	 */
2366 	if (ap->slave_link) {
2367 		struct ata_eh_context *mehc = &ap->link.eh_context;
2368 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2369 
2370 		/* transfer control flags from master to slave */
2371 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2372 
2373 		/* perform autopsy on the slave link */
2374 		ata_eh_link_autopsy(ap->slave_link);
2375 
2376 		/* transfer actions from slave to master and clear slave */
2377 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2378 		mehc->i.action		|= sehc->i.action;
2379 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2380 		mehc->i.flags		|= sehc->i.flags;
2381 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2382 	}
2383 
2384 	/* Autopsy of fanout ports can affect host link autopsy.
2385 	 * Perform host link autopsy last.
2386 	 */
2387 	if (sata_pmp_attached(ap))
2388 		ata_eh_link_autopsy(&ap->link);
2389 }
2390 
2391 /**
2392  *	ata_get_cmd_descript - get description for ATA command
2393  *	@command: ATA command code to get description for
2394  *
2395  *	Return a textual description of the given command, or NULL if the
2396  *	command is not known.
2397  *
2398  *	LOCKING:
2399  *	None
2400  */
2401 const char *ata_get_cmd_descript(u8 command)
2402 {
2403 #ifdef CONFIG_ATA_VERBOSE_ERROR
2404 	static const struct
2405 	{
2406 		u8 command;
2407 		const char *text;
2408 	} cmd_descr[] = {
2409 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2410 		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
2411 		{ ATA_CMD_STANDBY,		"STANDBY" },
2412 		{ ATA_CMD_IDLE,			"IDLE" },
2413 		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
2414 		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
2415 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
2416 		{ ATA_CMD_NOP,			"NOP" },
2417 		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
2418 		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
2419 		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
2420 		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
2421 		{ ATA_CMD_SERVICE,		"SERVICE" },
2422 		{ ATA_CMD_READ,			"READ DMA" },
2423 		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
2424 		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
2425 		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
2426 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2427 		{ ATA_CMD_WRITE,		"WRITE DMA" },
2428 		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
2429 		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
2430 		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
2431 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2432 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2433 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2434 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2435 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2436 		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
2437 		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
2438 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2439 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2440 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2441 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2442 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2443 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2444 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2445 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2446 		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
2447 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2448 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2449 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2450 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2451 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2452 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2453 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2454 		{ ATA_CMD_SLEEP,		"SLEEP" },
2455 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2456 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2457 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2458 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2459 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2460 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2461 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2462 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2463 		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
2464 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
2465 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2466 		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
2467 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2468 		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
2469 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2470 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
2471 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2472 		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
2473 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2474 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2475 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2476 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2477 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2478 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2479 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2480 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2481 		{ ATA_CMD_SMART,		"SMART" },
2482 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2483 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2484 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
2485 		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
2486 		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
2487 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2488 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2489 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2490 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2491 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
2492 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
2493 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2494 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2495 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2496 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2497 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2498 		{ 0,				NULL } /* terminate list */
2499 	};
2500 
2501 	unsigned int i;
2502 	for (i = 0; cmd_descr[i].text; i++)
2503 		if (cmd_descr[i].command == command)
2504 			return cmd_descr[i].text;
2505 #endif
2506 
2507 	return NULL;
2508 }
2509 EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
2510 
2511 /**
2512  *	ata_eh_link_report - report error handling to user
2513  *	@link: ATA link EH is going on
2514  *
2515  *	Report EH to user.
2516  *
2517  *	LOCKING:
2518  *	None.
2519  */
2520 static void ata_eh_link_report(struct ata_link *link)
2521 {
2522 	struct ata_port *ap = link->ap;
2523 	struct ata_eh_context *ehc = &link->eh_context;
2524 	const char *frozen, *desc;
2525 	char tries_buf[6] = "";
2526 	int tag, nr_failed = 0;
2527 
2528 	if (ehc->i.flags & ATA_EHI_QUIET)
2529 		return;
2530 
2531 	desc = NULL;
2532 	if (ehc->i.desc[0] != '\0')
2533 		desc = ehc->i.desc;
2534 
2535 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2536 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2537 
2538 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2539 		    ata_dev_phys_link(qc->dev) != link ||
2540 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2541 		     qc->err_mask == AC_ERR_DEV))
2542 			continue;
2543 		if ((qc->flags & ATA_QCFLAG_SENSE_VALID) && !qc->err_mask)
2544 			continue;
2545 
2546 		nr_failed++;
2547 	}
2548 
2549 	if (!nr_failed && !ehc->i.err_mask)
2550 		return;
2551 
2552 	frozen = "";
2553 	if (ap->pflags & ATA_PFLAG_FROZEN)
2554 		frozen = " frozen";
2555 
2556 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2557 		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2558 			 ap->eh_tries);
2559 
2560 	if (ehc->i.dev) {
2561 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2562 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2563 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2564 			    ehc->i.action, frozen, tries_buf);
2565 		if (desc)
2566 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2567 	} else {
2568 		ata_link_err(link, "exception Emask 0x%x "
2569 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2570 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2571 			     ehc->i.action, frozen, tries_buf);
2572 		if (desc)
2573 			ata_link_err(link, "%s\n", desc);
2574 	}
2575 
2576 #ifdef CONFIG_ATA_VERBOSE_ERROR
2577 	if (ehc->i.serror)
2578 		ata_link_err(link,
2579 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2580 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2581 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2582 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2583 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2584 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2585 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2586 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2587 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2588 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2589 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2590 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2591 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2592 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2593 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2594 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2595 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2596 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2597 #endif
2598 
2599 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2600 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2601 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2602 		char data_buf[20] = "";
2603 		char cdb_buf[70] = "";
2604 
2605 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2606 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2607 			continue;
2608 
2609 		if (qc->dma_dir != DMA_NONE) {
2610 			static const char *dma_str[] = {
2611 				[DMA_BIDIRECTIONAL]	= "bidi",
2612 				[DMA_TO_DEVICE]		= "out",
2613 				[DMA_FROM_DEVICE]	= "in",
2614 			};
2615 			static const char *prot_str[] = {
2616 				[ATA_PROT_PIO]		= "pio",
2617 				[ATA_PROT_DMA]		= "dma",
2618 				[ATA_PROT_NCQ]		= "ncq",
2619 				[ATAPI_PROT_PIO]	= "pio",
2620 				[ATAPI_PROT_DMA]	= "dma",
2621 			};
2622 
2623 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2624 				 prot_str[qc->tf.protocol], qc->nbytes,
2625 				 dma_str[qc->dma_dir]);
2626 		}
2627 
2628 		if (ata_is_atapi(qc->tf.protocol)) {
2629 			const u8 *cdb = qc->cdb;
2630 			size_t cdb_len = qc->dev->cdb_len;
2631 
2632 			if (qc->scsicmd) {
2633 				cdb = qc->scsicmd->cmnd;
2634 				cdb_len = qc->scsicmd->cmd_len;
2635 			}
2636 			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
2637 					      cdb, cdb_len);
2638 		} else {
2639 			const char *descr = ata_get_cmd_descript(cmd->command);
2640 			if (descr)
2641 				ata_dev_err(qc->dev, "failed command: %s\n",
2642 					    descr);
2643 		}
2644 
2645 		ata_dev_err(qc->dev,
2646 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2647 			"tag %d%s\n         %s"
2648 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2649 			"Emask 0x%x (%s)%s\n",
2650 			cmd->command, cmd->feature, cmd->nsect,
2651 			cmd->lbal, cmd->lbam, cmd->lbah,
2652 			cmd->hob_feature, cmd->hob_nsect,
2653 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2654 			cmd->device, qc->tag, data_buf, cdb_buf,
2655 			res->command, res->feature, res->nsect,
2656 			res->lbal, res->lbam, res->lbah,
2657 			res->hob_feature, res->hob_nsect,
2658 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2659 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2660 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2661 
2662 #ifdef CONFIG_ATA_VERBOSE_ERROR
2663 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2664 				    ATA_SENSE | ATA_ERR)) {
2665 			if (res->command & ATA_BUSY)
2666 				ata_dev_err(qc->dev, "status: { Busy }\n");
2667 			else
2668 				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2669 				  res->command & ATA_DRDY ? "DRDY " : "",
2670 				  res->command & ATA_DF ? "DF " : "",
2671 				  res->command & ATA_DRQ ? "DRQ " : "",
2672 				  res->command & ATA_SENSE ? "SENSE " : "",
2673 				  res->command & ATA_ERR ? "ERR " : "");
2674 		}
2675 
2676 		if (cmd->command != ATA_CMD_PACKET &&
2677 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2678 				     ATA_IDNF | ATA_ABORTED)))
2679 			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2680 			  res->feature & ATA_ICRC ? "ICRC " : "",
2681 			  res->feature & ATA_UNC ? "UNC " : "",
2682 			  res->feature & ATA_AMNF ? "AMNF " : "",
2683 			  res->feature & ATA_IDNF ? "IDNF " : "",
2684 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2685 #endif
2686 	}
2687 }
2688 
2689 /**
2690  *	ata_eh_report - report error handling to user
2691  *	@ap: ATA port to report EH about
2692  *
2693  *	Report EH to user.
2694  *
2695  *	LOCKING:
2696  *	None.
2697  */
2698 void ata_eh_report(struct ata_port *ap)
2699 {
2700 	struct ata_link *link;
2701 
2702 	ata_for_each_link(link, ap, HOST_FIRST)
2703 		ata_eh_link_report(link);
2704 }
2705 
2706 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2707 			unsigned int *classes, unsigned long deadline,
2708 			bool clear_classes)
2709 {
2710 	struct ata_device *dev;
2711 
2712 	if (clear_classes)
2713 		ata_for_each_dev(dev, link, ALL)
2714 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2715 
2716 	return reset(link, classes, deadline);
2717 }
2718 
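/* A follow-up softreset after hardreset is needed when the LLD asked
 * for one (-EAGAIN, classification not done) or when the host link may
 * have a port multiplier attached, which a softreset must classify.
 */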
2719 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2720 {
2721 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2722 		return 0;
2723 	if (rc == -EAGAIN)
2724 		return 1;
2725 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2726 		return 1;
2727 	return 0;
2728 }
2729 
2730 int ata_eh_reset(struct ata_link *link, int classify,
2731 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2732 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2733 {
2734 	struct ata_port *ap = link->ap;
2735 	struct ata_link *slave = ap->slave_link;
2736 	struct ata_eh_context *ehc = &link->eh_context;
2737 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2738 	unsigned int *classes = ehc->classes;
2739 	unsigned int lflags = link->flags;
2740 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2741 	int max_tries = 0, try = 0;
2742 	struct ata_link *failed_link;
2743 	struct ata_device *dev;
2744 	unsigned long deadline, now;
2745 	ata_reset_fn_t reset;
2746 	unsigned long flags;
2747 	u32 sstatus;
2748 	int nr_unknown, rc;
2749 
2750 	/*
2751 	 * Prepare to reset
2752 	 */
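	/* the reset timeout table is terminated with ULONG_MAX */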
2753 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2754 		max_tries++;
2755 	if (link->flags & ATA_LFLAG_RST_ONCE)
2756 		max_tries = 1;
2757 	if (link->flags & ATA_LFLAG_NO_HRST)
2758 		hardreset = NULL;
2759 	if (link->flags & ATA_LFLAG_NO_SRST)
2760 		softreset = NULL;
2761 
2762 	/* make sure each reset attempt is at least COOL_DOWN apart */
2763 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2764 		now = jiffies;
2765 		WARN_ON(time_after(ehc->last_reset, now));
2766 		deadline = ata_deadline(ehc->last_reset,
2767 					ATA_EH_RESET_COOL_DOWN);
2768 		if (time_before(now, deadline))
2769 			schedule_timeout_uninterruptible(deadline - now);
2770 	}
2771 
2772 	spin_lock_irqsave(ap->lock, flags);
2773 	ap->pflags |= ATA_PFLAG_RESETTING;
2774 	spin_unlock_irqrestore(ap->lock, flags);
2775 
2776 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2777 
2778 	ata_for_each_dev(dev, link, ALL) {
2779 		/* If we issue an SRST then an ATA drive (not ATAPI)
2780 		 * may change configuration and be in PIO0 timing. If
2781 		 * we do a hard reset (or are coming from power on)
2782 		 * this is true for ATA or ATAPI. Until we've set a
2783 		 * suitable controller mode we should not touch the
2784 		 * bus as we may be talking too fast.
2785 		 */
2786 		dev->pio_mode = XFER_PIO_0;
2787 		dev->dma_mode = 0xff;
2788 
2789 		/* If the controller has a pio mode setup function
2790 		 * then use it to set the chipset to rights. Don't
2791 		 * touch the DMA setup as that will be dealt with when
2792 		 * configuring devices.
2793 		 */
2794 		if (ap->ops->set_piomode)
2795 			ap->ops->set_piomode(ap, dev);
2796 	}
2797 
2798 	/* prefer hardreset */
2799 	reset = NULL;
2800 	ehc->i.action &= ~ATA_EH_RESET;
2801 	if (hardreset) {
2802 		reset = hardreset;
2803 		ehc->i.action |= ATA_EH_HARDRESET;
2804 	} else if (softreset) {
2805 		reset = softreset;
2806 		ehc->i.action |= ATA_EH_SOFTRESET;
2807 	}
2808 
2809 	if (prereset) {
2810 		unsigned long deadline = ata_deadline(jiffies,
2811 						      ATA_EH_PRERESET_TIMEOUT);
2812 
2813 		if (slave) {
2814 			sehc->i.action &= ~ATA_EH_RESET;
2815 			sehc->i.action |= ehc->i.action;
2816 		}
2817 
2818 		rc = prereset(link, deadline);
2819 
2820 		/* If present, do prereset on slave link too.  Reset
2821 		 * is skipped iff both master and slave links report
2822 		 * -ENOENT or clear ATA_EH_RESET.
2823 		 */
2824 		if (slave && (rc == 0 || rc == -ENOENT)) {
2825 			int tmp;
2826 
2827 			tmp = prereset(slave, deadline);
2828 			if (tmp != -ENOENT)
2829 				rc = tmp;
2830 
2831 			ehc->i.action |= sehc->i.action;
2832 		}
2833 
2834 		if (rc) {
2835 			if (rc == -ENOENT) {
2836 				ata_link_dbg(link, "port disabled--ignoring\n");
2837 				ehc->i.action &= ~ATA_EH_RESET;
2838 
2839 				ata_for_each_dev(dev, link, ALL)
2840 					classes[dev->devno] = ATA_DEV_NONE;
2841 
2842 				rc = 0;
2843 			} else
2844 				ata_link_err(link,
2845 					     "prereset failed (errno=%d)\n",
2846 					     rc);
2847 			goto out;
2848 		}
2849 
2850 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2851 		 * bang classes, thaw and return.
2852 		 */
2853 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2854 			ata_for_each_dev(dev, link, ALL)
2855 				classes[dev->devno] = ATA_DEV_NONE;
2856 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2857 			    ata_is_host_link(link))
2858 				ata_eh_thaw_port(ap);
2859 			rc = 0;
2860 			goto out;
2861 		}
2862 	}
2863 
2864  retry:
2865 	/*
2866 	 * Perform reset
2867 	 */
2868 	if (ata_is_host_link(link))
2869 		ata_eh_freeze_port(ap);
2870 
2871 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2872 
2873 	if (reset) {
2874 		if (verbose)
2875 			ata_link_info(link, "%s resetting link\n",
2876 				      reset == softreset ? "soft" : "hard");
2877 
2878 		/* mark that this EH session started with reset */
2879 		ehc->last_reset = jiffies;
2880 		if (reset == hardreset)
2881 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2882 		else
2883 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2884 
2885 		rc = ata_do_reset(link, reset, classes, deadline, true);
2886 		if (rc && rc != -EAGAIN) {
2887 			failed_link = link;
2888 			goto fail;
2889 		}
2890 
2891 		/* hardreset slave link if it exists */
2892 		if (slave && reset == hardreset) {
2893 			int tmp;
2894 
2895 			if (verbose)
2896 				ata_link_info(slave, "hard resetting link\n");
2897 
2898 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2899 			tmp = ata_do_reset(slave, reset, classes, deadline,
2900 					   false);
2901 			switch (tmp) {
2902 			case -EAGAIN:
2903 				rc = -EAGAIN;
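				/* fall through */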
2904 			case 0:
2905 				break;
2906 			default:
2907 				failed_link = slave;
2908 				rc = tmp;
2909 				goto fail;
2910 			}
2911 		}
2912 
2913 		/* perform follow-up SRST if necessary */
2914 		if (reset == hardreset &&
2915 		    ata_eh_followup_srst_needed(link, rc)) {
2916 			reset = softreset;
2917 
2918 			if (!reset) {
2919 				ata_link_err(link,
2920 	     "follow-up softreset required but no softreset available\n");
2921 				failed_link = link;
2922 				rc = -EINVAL;
2923 				goto fail;
2924 			}
2925 
2926 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2927 			rc = ata_do_reset(link, reset, classes, deadline, true);
2928 			if (rc) {
2929 				failed_link = link;
2930 				goto fail;
2931 			}
2932 		}
2933 	} else {
2934 		if (verbose)
2935 			ata_link_info(link,
2936 	"no reset method available, skipping reset\n");
2937 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2938 			lflags |= ATA_LFLAG_ASSUME_ATA;
2939 	}
2940 
2941 	/*
2942 	 * Post-reset processing
2943 	 */
2944 	ata_for_each_dev(dev, link, ALL) {
2945 		/* After the reset, the device state is PIO 0 and the
2946 		 * controller state is undefined.  Reset also wakes up
2947 		 * drives from sleeping mode.
2948 		 */
2949 		dev->pio_mode = XFER_PIO_0;
2950 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2951 
2952 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2953 			continue;
2954 
2955 		/* apply class override */
2956 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2957 			classes[dev->devno] = ATA_DEV_ATA;
2958 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2959 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2960 	}
2961 
2962 	/* record current link speed */
2963 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2964 		link->sata_spd = (sstatus >> 4) & 0xf;
2965 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2966 		slave->sata_spd = (sstatus >> 4) & 0xf;
2967 
2968 	/* thaw the port */
2969 	if (ata_is_host_link(link))
2970 		ata_eh_thaw_port(ap);
2971 
2972 	/* postreset() should clear hardware SError.  Although SError
2973 	 * is cleared during link resume, clearing SError here is
2974 	 * necessary as some PHYs raise hotplug events after SRST.
2975 	 * This introduces a race condition where hotplug occurs between
2976 	 * reset and here.  This race is handled by cross checking
2977 	 * link onlineness and the classification result later.
2978 	 */
2979 	if (postreset) {
2980 		postreset(link, classes);
2981 		if (slave)
2982 			postreset(slave, classes);
2983 	}
2984 
2985 	/*
2986 	 * Some controllers can't be frozen very well and may set spurious
2987 	 * error conditions during reset.  Clear accumulated error
2988 	 * information and re-thaw the port if frozen.  As reset is the
2989 	 * final recovery action and we cross check link onlineness against
2990 	 * device classification later, no hotplug event is lost by this.
2991 	 */
2992 	spin_lock_irqsave(link->ap->lock, flags);
2993 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2994 	if (slave)
2995 		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2996 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2997 	spin_unlock_irqrestore(link->ap->lock, flags);
2998 
2999 	if (ap->pflags & ATA_PFLAG_FROZEN)
3000 		ata_eh_thaw_port(ap);
3001 
3002 	/*
3003 	 * Make sure onlineness and classification result correspond.
3004 	 * Hotplug could have happened during reset and some
3005 	 * controllers fail to wait while a drive is spinning up after
3006 	 * being hotplugged, causing misdetection.  By cross checking
3007 	 * link on/offlineness and classification result, those
3008 	 * conditions can be reliably detected and retried.
3009 	 */
3010 	nr_unknown = 0;
3011 	ata_for_each_dev(dev, link, ALL) {
3012 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
3013 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
3014 				ata_dev_dbg(dev, "link online but device misclassified\n");
3015 				classes[dev->devno] = ATA_DEV_NONE;
3016 				nr_unknown++;
3017 			}
3018 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3019 			if (ata_class_enabled(classes[dev->devno]))
3020 				ata_dev_dbg(dev,
3021 					    "link offline, clearing class %d to NONE\n",
3022 					    classes[dev->devno]);
3023 			classes[dev->devno] = ATA_DEV_NONE;
3024 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
3025 			ata_dev_dbg(dev,
3026 				    "link status unknown, clearing UNKNOWN to NONE\n");
3027 			classes[dev->devno] = ATA_DEV_NONE;
3028 		}
3029 	}
3030 
3031 	if (classify && nr_unknown) {
3032 		if (try < max_tries) {
3033 			ata_link_warn(link,
3034 				      "link online but %d devices misclassified, retrying\n",
3035 				      nr_unknown);
3036 			failed_link = link;
3037 			rc = -EAGAIN;
3038 			goto fail;
3039 		}
3040 		ata_link_warn(link,
3041 			      "link online but %d devices misclassified, "
3042 			      "device detection might fail\n", nr_unknown);
3043 	}
3044 
3045 	/* reset successful, schedule revalidation */
3046 	ata_eh_done(link, NULL, ATA_EH_RESET);
3047 	if (slave)
3048 		ata_eh_done(slave, NULL, ATA_EH_RESET);
3049 	ehc->last_reset = jiffies;		/* update to completion time */
3050 	ehc->i.action |= ATA_EH_REVALIDATE;
3051 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
3052 
3053 	rc = 0;
3054  out:
3055 	/* clear hotplug flag */
3056 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3057 	if (slave)
3058 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3059 
3060 	spin_lock_irqsave(ap->lock, flags);
3061 	ap->pflags &= ~ATA_PFLAG_RESETTING;
3062 	spin_unlock_irqrestore(ap->lock, flags);
3063 
3064 	return rc;
3065 
3066  fail:
3067 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
3068 	if (!ata_is_host_link(link) &&
3069 	    sata_scr_read(link, SCR_STATUS, &sstatus))
3070 		rc = -ERESTART;
3071 
3072 	if (try >= max_tries) {
3073 		/*
3074 		 * Thaw host port even if reset failed, so that the port
3075 		 * can be retried on the next phy event.  This risks
3076 		 * repeated EH runs but seems to be a better tradeoff than
3077 		 * shutting down a port after a botched hotplug attempt.
3078 		 */
3079 		if (ata_is_host_link(link))
3080 			ata_eh_thaw_port(ap);
3081 		goto out;
3082 	}
3083 
3084 	now = jiffies;
3085 	if (time_before(now, deadline)) {
3086 		unsigned long delta = deadline - now;
3087 
3088 		ata_link_warn(failed_link,
3089 			"reset failed (errno=%d), retrying in %u secs\n",
3090 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
3091 
3092 		ata_eh_release(ap);
3093 		while (delta)
3094 			delta = schedule_timeout_uninterruptible(delta);
3095 		ata_eh_acquire(ap);
3096 	}
3097 
3098 	/*
3099 	 * While disks spin up behind a PMP, some controllers fail to send SRST.
3100 	 * They need to be reset - as well as the PMP - before retrying.
3101 	 */
3102 	if (rc == -ERESTART) {
3103 		if (ata_is_host_link(link))
3104 			ata_eh_thaw_port(ap);
3105 		goto out;
3106 	}
3107 
3108 	if (try == max_tries - 1) {
3109 		sata_down_spd_limit(link, 0);
3110 		if (slave)
3111 			sata_down_spd_limit(slave, 0);
3112 	} else if (rc == -EPIPE)
3113 		sata_down_spd_limit(failed_link, 0);
3114 
3115 	if (hardreset)
3116 		reset = hardreset;
3117 	goto retry;
3118 }
3119 
3120 static inline void ata_eh_pull_park_action(struct ata_port *ap)
3121 {
3122 	struct ata_link *link;
3123 	struct ata_device *dev;
3124 	unsigned long flags;
3125 
3126 	/*
3127 	 * This function can be thought of as an extended version of
3128 	 * ata_eh_about_to_do() specially crafted to accommodate the
3129 	 * requirements of ATA_EH_PARK handling. Since the EH thread
3130 	 * does not leave the do {} while () loop in ata_eh_recover as
3131 	 * long as the timeout for a park request to *one* device on
3132 	 * the port has not expired, and since we still want to pick
3133 	 * up park requests to other devices on the same port or
3134 	 * timeout updates for the same device, we have to pull
3135 	 * ATA_EH_PARK actions from eh_info into eh_context.i
3136 	 * ourselves at the beginning of each pass over the loop.
3137 	 *
3138 	 * Additionally, all write accesses to &ap->park_req_pending
3139 	 * through reinit_completion() (see below) or complete_all()
3140 	 * (see ata_scsi_park_store()) are protected by the host lock.
3141 	 * As a result we have that park_req_pending.done is zero on
3142 	 * exit from this function, i.e. when ATA_EH_PARK actions for
3143 	 * *all* devices on port ap have been pulled into the
3144 	 * respective eh_context structs. If, and only if,
3145 	 * park_req_pending.done is non-zero by the time we reach
3146 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
3147 	 * has been scheduled for at least one of the devices on port
3148 	 * ap and we have to cycle over the do {} while () loop in
3149 	 * ata_eh_recover() again.
3150 	 */
3151 
3152 	spin_lock_irqsave(ap->lock, flags);
3153 	reinit_completion(&ap->park_req_pending);
3154 	ata_for_each_link(link, ap, EDGE) {
3155 		ata_for_each_dev(dev, link, ALL) {
3156 			struct ata_eh_info *ehi = &link->eh_info;
3157 
3158 			link->eh_context.i.dev_action[dev->devno] |=
3159 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
3160 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3161 		}
3162 	}
3163 	spin_unlock_irqrestore(ap->lock, flags);
3164 }
3165 
3166 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3167 {
3168 	struct ata_eh_context *ehc = &dev->link->eh_context;
3169 	struct ata_taskfile tf;
3170 	unsigned int err_mask;
3171 
3172 	ata_tf_init(dev, &tf);
3173 	if (park) {
3174 		ehc->unloaded_mask |= 1 << dev->devno;
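		/* IDLE IMMEDIATE with UNLOAD FEATURE (ATA8-ACS):
		 * FEATURES 0x44 plus the 0x55/0x4e/0x4c LBA signature;
		 * the device acks by returning 0xc4 in LBA low, which
		 * is checked below
		 */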
3175 		tf.command = ATA_CMD_IDLEIMMEDIATE;
3176 		tf.feature = 0x44;
3177 		tf.lbal = 0x4c;
3178 		tf.lbam = 0x4e;
3179 		tf.lbah = 0x55;
3180 	} else {
3181 		ehc->unloaded_mask &= ~(1 << dev->devno);
3182 		tf.command = ATA_CMD_CHK_POWER;
3183 	}
3184 
3185 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3186 	tf.protocol |= ATA_PROT_NODATA;
3187 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3188 	if (park && (err_mask || tf.lbal != 0xc4)) {
3189 		ata_dev_err(dev, "head unload failed!\n");
3190 		ehc->unloaded_mask &= ~(1 << dev->devno);
3191 	}
3192 }
3193 
3194 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3195 					struct ata_device **r_failed_dev)
3196 {
3197 	struct ata_port *ap = link->ap;
3198 	struct ata_eh_context *ehc = &link->eh_context;
3199 	struct ata_device *dev;
3200 	unsigned int new_mask = 0;
3201 	unsigned long flags;
3202 	int rc = 0;
3203 
3204 	DPRINTK("ENTER\n");
3205 
3206 	/* For PATA drive side cable detection to work, IDENTIFY must
3207 	 * be done backwards such that PDIAG- is released by the slave
3208 	 * device before the master device is identified.
3209 	 */
3210 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3211 		unsigned int action = ata_eh_dev_action(dev);
3212 		unsigned int readid_flags = 0;
3213 
3214 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3215 			readid_flags |= ATA_READID_POSTRESET;
3216 
3217 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3218 			WARN_ON(dev->class == ATA_DEV_PMP);
3219 
3220 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3221 				rc = -EIO;
3222 				goto err;
3223 			}
3224 
3225 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3226 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3227 						readid_flags);
3228 			if (rc)
3229 				goto err;
3230 
3231 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3232 
3233 			/* Configuration may have changed, reconfigure
3234 			 * transfer mode.
3235 			 */
3236 			ehc->i.flags |= ATA_EHI_SETMODE;
3237 
3238 			/* schedule the scsi_rescan_device() here */
3239 			schedule_work(&(ap->scsi_rescan_task));
3240 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3241 			   ehc->tries[dev->devno] &&
3242 			   ata_class_enabled(ehc->classes[dev->devno])) {
3243 			/* Temporarily set dev->class, it will be
3244 			 * permanently set once all configurations are
3245 			 * complete.  This is necessary because new
3246 			 * device configuration is done in two
3247 			 * separate loops.
3248 			 */
3249 			dev->class = ehc->classes[dev->devno];
3250 
3251 			if (dev->class == ATA_DEV_PMP)
3252 				rc = sata_pmp_attach(dev);
3253 			else
3254 				rc = ata_dev_read_id(dev, &dev->class,
3255 						     readid_flags, dev->id);
3256 
3257 			/* read_id might have changed class, store and reset */
3258 			ehc->classes[dev->devno] = dev->class;
3259 			dev->class = ATA_DEV_UNKNOWN;
3260 
3261 			switch (rc) {
3262 			case 0:
3263 				/* clear error info accumulated during probe */
3264 				ata_ering_clear(&dev->ering);
3265 				new_mask |= 1 << dev->devno;
3266 				break;
3267 			case -ENOENT:
3268 				/* IDENTIFY was issued to non-existent
3269 				 * device.  No need to reset.  Just
3270 				 * thaw and ignore the device.
3271 				 */
3272 				ata_eh_thaw_port(ap);
3273 				break;
3274 			default:
3275 				goto err;
3276 			}
3277 		}
3278 	}
3279 
3280 	/* PDIAG- should have been released, ask cable type if post-reset */
3281 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3282 		if (ap->ops->cable_detect)
3283 			ap->cbl = ap->ops->cable_detect(ap);
3284 		ata_force_cbl(ap);
3285 	}
3286 
3287 	/* Configure new devices forward such that the user doesn't see
3288 	 * device detection messages backwards.
3289 	 */
3290 	ata_for_each_dev(dev, link, ALL) {
3291 		if (!(new_mask & (1 << dev->devno)))
3292 			continue;
3293 
3294 		dev->class = ehc->classes[dev->devno];
3295 
3296 		if (dev->class == ATA_DEV_PMP)
3297 			continue;
3298 
3299 		ehc->i.flags |= ATA_EHI_PRINTINFO;
3300 		rc = ata_dev_configure(dev);
3301 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3302 		if (rc) {
3303 			dev->class = ATA_DEV_UNKNOWN;
3304 			goto err;
3305 		}
3306 
3307 		spin_lock_irqsave(ap->lock, flags);
3308 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3309 		spin_unlock_irqrestore(ap->lock, flags);
3310 
3311 		/* new device discovered, configure xfermode */
3312 		ehc->i.flags |= ATA_EHI_SETMODE;
3313 	}
3314 
3315 	return 0;
3316 
3317  err:
3318 	*r_failed_dev = dev;
3319 	DPRINTK("EXIT rc=%d\n", rc);
3320 	return rc;
3321 }
3322 
3323 /**
3324  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3325  *	@link: link on which timings will be programmed
3326  *	@r_failed_dev: out parameter for failed device
3327  *
3328  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3329  *	ata_set_mode() fails, pointer to the failing device is
3330  *	returned in @r_failed_dev.
3331  *
3332  *	LOCKING:
3333  *	PCI/etc. bus probe sem.
3334  *
3335  *	RETURNS:
3336  *	0 on success, negative errno otherwise
3337  */
3338 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3339 {
3340 	struct ata_port *ap = link->ap;
3341 	struct ata_device *dev;
3342 	int rc;
3343 
3344 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3345 	ata_for_each_dev(dev, link, ENABLED) {
3346 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3347 			struct ata_ering_entry *ent;
3348 
3349 			ent = ata_ering_top(&dev->ering);
3350 			if (ent)
3351 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3352 		}
3353 	}
3354 
3355 	/* has private set_mode? */
3356 	if (ap->ops->set_mode)
3357 		rc = ap->ops->set_mode(link, r_failed_dev);
3358 	else
3359 		rc = ata_do_set_mode(link, r_failed_dev);
3360 
3361 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3362 	ata_for_each_dev(dev, link, ENABLED) {
3363 		struct ata_eh_context *ehc = &link->eh_context;
3364 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3365 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3366 
3367 		if (dev->xfer_mode != saved_xfer_mode ||
3368 		    ata_ncq_enabled(dev) != saved_ncq)
3369 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3370 	}
3371 
3372 	return rc;
3373 }
3374 
3375 /**
3376  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3377  *	@dev: ATAPI device to clear UA for
3378  *
3379  *	Resets and other operations can make an ATAPI device raise
3380  *	UNIT ATTENTION which causes the next operation to fail.  This
3381  *	function clears UA.
3382  *
3383  *	LOCKING:
3384  *	EH context (may sleep).
3385  *
3386  *	RETURNS:
3387  *	0 on success, -errno on failure.
3388  */
3389 static int atapi_eh_clear_ua(struct ata_device *dev)
3390 {
3391 	int i;
3392 
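	/* TEST UNIT READY reports a pending UNIT ATTENTION through the
	 * sense key; issuing REQUEST SENSE consumes one pending UA, so
	 * retry up to ATA_EH_UA_TRIES times
	 */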
3393 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3394 		u8 *sense_buffer = dev->link->ap->sector_buf;
3395 		u8 sense_key = 0;
3396 		unsigned int err_mask;
3397 
3398 		err_mask = atapi_eh_tur(dev, &sense_key);
3399 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3400 			ata_dev_warn(dev,
3401 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3402 				     err_mask);
3403 			return -EIO;
3404 		}
3405 
3406 		if (!err_mask || sense_key != UNIT_ATTENTION)
3407 			return 0;
3408 
3409 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3410 		if (err_mask) {
3411 			ata_dev_warn(dev, "failed to clear "
3412 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3413 			return -EIO;
3414 		}
3415 	}
3416 
3417 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3418 		     ATA_EH_UA_TRIES);
3419 
3420 	return 0;
3421 }
3422 
3423 /**
3424  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3425  *	@dev: ATA device which may need FLUSH retry
3426  *
3427  *	If @dev failed FLUSH, it needs to be reported to the upper layer
3428  *	immediately as it means that @dev failed to remap and already
3429  *	lost at least a sector and further FLUSH retries won't make
3430  *	any difference to the lost sector.  However, if FLUSH failed
3431  *	for other reasons, for example transmission error, FLUSH needs
3432  *	to be retried.
3433  *
3434  *	This function determines whether FLUSH failure retry is
3435  *	necessary and performs it if so.
3436  *
3437  *	RETURNS:
3438  *	0 if EH can continue, -errno if EH needs to be repeated.
3439  */
3440 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3441 {
3442 	struct ata_link *link = dev->link;
3443 	struct ata_port *ap = link->ap;
3444 	struct ata_queued_cmd *qc;
3445 	struct ata_taskfile tf;
3446 	unsigned int err_mask;
3447 	int rc = 0;
3448 
3449 	/* did flush fail for this device? */
3450 	if (!ata_tag_valid(link->active_tag))
3451 		return 0;
3452 
3453 	qc = __ata_qc_from_tag(ap, link->active_tag);
3454 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3455 			       qc->tf.command != ATA_CMD_FLUSH))
3456 		return 0;
3457 
3458 	/* if the device failed it, it should be reported to upper layers */
3459 	if (qc->err_mask & AC_ERR_DEV)
3460 		return 0;
3461 
3462 	/* flush failed for some other reason, give it another shot */
3463 	ata_tf_init(dev, &tf);
3464 
3465 	tf.command = qc->tf.command;
3466 	tf.flags |= ATA_TFLAG_DEVICE;
3467 	tf.protocol = ATA_PROT_NODATA;
3468 
3469 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3470 		       tf.command, qc->err_mask);
3471 
3472 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3473 	if (!err_mask) {
3474 		/*
3475 		 * FLUSH is complete but there's no way to
3476 		 * successfully complete a failed command from EH.
3477 		 * Making sure retry is allowed at least once and
3478 		 * retrying it should do the trick - whatever was in
3479 		 * the cache is already on the platter and this won't
3480 		 * cause an infinite loop.
3481 		 */
3482 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3483 	} else {
3484 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3485 			       err_mask);
3486 		rc = -EIO;
3487 
3488 		/* if device failed it, report it to upper layers */
3489 		if (err_mask & AC_ERR_DEV) {
3490 			qc->err_mask |= AC_ERR_DEV;
3491 			qc->result_tf = tf;
3492 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3493 				rc = 0;
3494 		}
3495 	}
3496 	return rc;
3497 }
3498 
3499 /**
3500  *	ata_eh_set_lpm - configure SATA interface power management
3501  *	@link: link to configure power management
3502  *	@policy: the link power management policy
3503  *	@r_failed_dev: out parameter for failed device
3504  *
3505  *	Enable SATA Interface power management.  This will enable
3506  *	Device Interface Power Management (DIPM) for the min_power
3507  *	policy, and then call driver-specific callbacks for
3508  *	enabling Host Initiated Power Management.
3509  *
3510  *	LOCKING:
3511  *	EH context.
3512  *
3513  *	RETURNS:
3514  *	0 on success, -errno on failure.
3515  */
3516 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3517 			  struct ata_device **r_failed_dev)
3518 {
3519 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3520 	struct ata_eh_context *ehc = &link->eh_context;
3521 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3522 	enum ata_lpm_policy old_policy = link->lpm_policy;
3523 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3524 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3525 	unsigned int err_mask;
3526 	int rc;
3527 
3528 	/* if the link or host doesn't do LPM, noop */
3529 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3530 		return 0;
3531 
3532 	/*
3533 	 * DIPM is enabled only for MIN_POWER as some devices
3534 	 * misbehave when the host NACKs the transition to SLUMBER.  Order
3535 	 * device and link configurations such that the host always
3536 	 * allows DIPM requests.
3537 	 */
3538 	ata_for_each_dev(dev, link, ENABLED) {
3539 		bool hipm = ata_id_has_hipm(dev->id);
3540 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3541 
3542 		/* find the first enabled device and the first LPM-capable device */
3543 		if (!link_dev)
3544 			link_dev = dev;
3545 
3546 		if (!lpm_dev && (hipm || dipm))
3547 			lpm_dev = dev;
3548 
3549 		hints &= ~ATA_LPM_EMPTY;
3550 		if (!hipm)
3551 			hints &= ~ATA_LPM_HIPM;
3552 
3553 		/* disable DIPM before changing link config */
3554 		if (policy != ATA_LPM_MIN_POWER && dipm) {
3555 			err_mask = ata_dev_set_feature(dev,
3556 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
3557 			if (err_mask && err_mask != AC_ERR_DEV) {
3558 				ata_dev_warn(dev,
3559 					     "failed to disable DIPM, Emask 0x%x\n",
3560 					     err_mask);
3561 				rc = -EIO;
3562 				goto fail;
3563 			}
3564 		}
3565 	}
3566 
3567 	if (ap) {
3568 		rc = ap->ops->set_lpm(link, policy, hints);
3569 		if (!rc && ap->slave_link)
3570 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3571 	} else
3572 		rc = sata_pmp_set_lpm(link, policy, hints);
3573 
3574 	/*
3575 	 * Attribute link config failure to the first (LPM) enabled
3576 	 * device on the link.
3577 	 */
3578 	if (rc) {
3579 		if (rc == -EOPNOTSUPP) {
3580 			link->flags |= ATA_LFLAG_NO_LPM;
3581 			return 0;
3582 		}
3583 		dev = lpm_dev ? lpm_dev : link_dev;
3584 		goto fail;
3585 	}
3586 
3587 	/*
3588 	 * Low level driver acked the transition.  Issue DIPM command
3589 	 * with the new policy set.
3590 	 */
3591 	link->lpm_policy = policy;
3592 	if (ap && ap->slave_link)
3593 		ap->slave_link->lpm_policy = policy;
3594 
3595 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
3596 	ata_for_each_dev(dev, link, ENABLED) {
3597 		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3598 		    ata_id_has_dipm(dev->id)) {
3599 			err_mask = ata_dev_set_feature(dev,
3600 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
3601 			if (err_mask && err_mask != AC_ERR_DEV) {
3602 				ata_dev_warn(dev,
3603 					"failed to enable DIPM, Emask 0x%x\n",
3604 					err_mask);
3605 				rc = -EIO;
3606 				goto fail;
3607 			}
3608 		}
3609 	}
3610 
3611 	link->last_lpm_change = jiffies;
3612 	link->flags |= ATA_LFLAG_CHANGED;
3613 
3614 	return 0;
3615 
3616 fail:
3617 	/* restore the old policy */
3618 	link->lpm_policy = old_policy;
3619 	if (ap && ap->slave_link)
3620 		ap->slave_link->lpm_policy = old_policy;
3621 
3622 	/* if there's no device or only one more chance is left, disable LPM */
3623 	if (!dev || ehc->tries[dev->devno] <= 2) {
3624 		ata_link_warn(link, "disabling LPM on the link\n");
3625 		link->flags |= ATA_LFLAG_NO_LPM;
3626 	}
3627 	if (r_failed_dev)
3628 		*r_failed_dev = dev;
3629 	return rc;
3630 }
3631 
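/**
 *	ata_link_nr_enabled - count enabled devices on a link
 *	@link: link to examine
 *
 *	RETURNS:
 *	Number of devices on @link in the ENABLED state.
 */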
3632 int ata_link_nr_enabled(struct ata_link *link)
3633 {
3634 	struct ata_device *dev;
3635 	int cnt = 0;
3636 
3637 	ata_for_each_dev(dev, link, ENABLED)
3638 		cnt++;
3639 	return cnt;
3640 }
3641 
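/**
 *	ata_link_nr_vacant - count vacant device slots on a link
 *	@link: link to examine
 *
 *	RETURNS:
 *	Number of devices on @link whose class is still ATA_DEV_UNKNOWN.
 */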
3642 static int ata_link_nr_vacant(struct ata_link *link)
3643 {
3644 	struct ata_device *dev;
3645 	int cnt = 0;
3646 
3647 	ata_for_each_dev(dev, link, ALL)
3648 		if (dev->class == ATA_DEV_UNKNOWN)
3649 			cnt++;
3650 	return cnt;
3651 }
3652 
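/**
 *	ata_eh_skip_recovery - determine whether recovery can be skipped
 *	@link: link to check
 *
 *	RETURNS:
 *	1 if recovery on @link can be skipped, 0 otherwise.
 */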
3653 static int ata_eh_skip_recovery(struct ata_link *link)
3654 {
3655 	struct ata_port *ap = link->ap;
3656 	struct ata_eh_context *ehc = &link->eh_context;
3657 	struct ata_device *dev;
3658 
3659 	/* skip disabled links */
3660 	if (link->flags & ATA_LFLAG_DISABLED)
3661 		return 1;
3662 
3663 	/* skip if explicitly requested */
3664 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3665 		return 1;
3666 
3667 	/* thaw frozen port and recover failed devices */
3668 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3669 		return 0;
3670 
3671 	/* reset at least once if reset is requested */
3672 	if ((ehc->i.action & ATA_EH_RESET) &&
3673 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3674 		return 0;
3675 
3676 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3677 	ata_for_each_dev(dev, link, ALL) {
3678 		if (dev->class == ATA_DEV_UNKNOWN &&
3679 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3680 			return 0;
3681 	}
3682 
3683 	return 1;
3684 }
3685 
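/**
 *	ata_count_probe_trials_cb - ering map callback to count probe trials
 *	@ent: error ring entry to examine
 *	@void_arg: pointer to the trial counter
 *
 *	Count probe trials recorded within the last
 *	ATA_EH_PROBE_TRIAL_INTERVAL.  Returning -1 stops the iteration
 *	at the first entry which is too old or marked ATA_EFLAG_OLD_ER.
 *
 *	RETURNS:
 *	0 to continue iteration, -1 to stop it.
 */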
3686 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3687 {
3688 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3689 	u64 now = get_jiffies_64();
3690 	int *trials = void_arg;
3691 
3692 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3693 	    (ent->timestamp < now - min(now, interval)))
3694 		return -1;
3695 
3696 	(*trials)++;
3697 	return 0;
3698 }
3699 
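/**
 *	ata_eh_schedule_probe - schedule probing for a device
 *	@dev: device to probe
 *
 *	Detach @dev, reinitialize it and schedule a reset so that the
 *	device gets probed again, waking the link up from LPM first if
 *	necessary.
 *
 *	RETURNS:
 *	1 if probing was scheduled, 0 if probing was not needed.
 */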
3700 static int ata_eh_schedule_probe(struct ata_device *dev)
3701 {
3702 	struct ata_eh_context *ehc = &dev->link->eh_context;
3703 	struct ata_link *link = ata_dev_phys_link(dev);
3704 	int trials = 0;
3705 
3706 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3707 	    (ehc->did_probe_mask & (1 << dev->devno)))
3708 		return 0;
3709 
3710 	ata_eh_detach_dev(dev);
3711 	ata_dev_init(dev);
3712 	ehc->did_probe_mask |= (1 << dev->devno);
3713 	ehc->i.action |= ATA_EH_RESET;
3714 	ehc->saved_xfer_mode[dev->devno] = 0;
3715 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3716 
3717 	/* the link may be in a deep sleep, wake it up */
3718 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3719 		if (ata_is_host_link(link))
3720 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3721 					       ATA_LPM_EMPTY);
3722 		else
3723 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3724 					 ATA_LPM_EMPTY);
3725 	}
3726 
3727 	/* Record and count probe trials on the ering.  The specific
3728 	 * error mask used is irrelevant.  Because a successful device
3729 	 * detection clears the ering, this count accumulates only if
3730 	 * there are consecutive failed probes.
3731 	 *
3732 	 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3733 	 * ATA_EH_PROBE_TRIAL_INTERVAL, the link speed is forced
3734 	 * down to 1.5Gbps.
3735 	 *
3736 	 * This is to work around cases where failed link speed
3737 	 * negotiation results in device misdetection leading to
3738 	 * infinite DEVXCHG or PHRDY CHG events.
3739 	 */
3740 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3741 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3742 
3743 	if (trials > ATA_EH_PROBE_TRIALS)
3744 		sata_down_spd_limit(link, 1);
3745 
3746 	return 1;
3747 }
3748 
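/**
 *	ata_eh_handle_dev_fail - handle recovery failure of a device
 *	@dev: device which failed to recover
 *	@err: error code from the failed recovery action
 *
 *	RETURNS:
 *	1 if @dev has used up all its chances and has been disabled,
 *	0 if recovery should be retried after another reset.
 */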
3749 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3750 {
3751 	struct ata_eh_context *ehc = &dev->link->eh_context;
3752 
3753 	/* -EAGAIN from EH routine indicates retry without prejudice.
3754 	 * The requester is responsible for ensuring forward progress.
3755 	 */
3756 	if (err != -EAGAIN)
3757 		ehc->tries[dev->devno]--;
3758 
3759 	switch (err) {
3760 	case -ENODEV:
3761 		/* device missing or wrong IDENTIFY data, schedule probing */
3762 		ehc->i.probe_mask |= (1 << dev->devno);
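		/* fall through */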
3763 	case -EINVAL:
3764 		/* give it just one more chance */
3765 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
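		/* fall through */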
3766 	case -EIO:
3767 		if (ehc->tries[dev->devno] == 1) {
3768 			/* This is the last chance, better to slow
3769 			 * down than lose it.
3770 			 */
3771 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3772 			if (dev->pio_mode > XFER_PIO_0)
3773 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3774 		}
3775 	}
3776 
3777 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3778 		/* disable device if it has used up all its chances */
3779 		ata_dev_disable(dev);
3780 
3781 		/* detach if offline */
3782 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3783 			ata_eh_detach_dev(dev);
3784 
3785 		/* schedule probe if necessary */
3786 		if (ata_eh_schedule_probe(dev)) {
3787 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3788 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3789 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3790 		}
3791 
3792 		return 1;
3793 	} else {
3794 		ehc->i.action |= ATA_EH_RESET;
3795 		return 0;
3796 	}
3797 }
3798 
3799 /**
3800  *	ata_eh_recover - recover host port after error
3801  *	@ap: host port to recover
3802  *	@prereset: prereset method (can be NULL)
3803  *	@softreset: softreset method (can be NULL)
3804  *	@hardreset: hardreset method (can be NULL)
3805  *	@postreset: postreset method (can be NULL)
3806  *	@r_failed_link: out parameter for failed link
3807  *
3808  *	This is the alpha and omega, eum and yang, heart and soul of
3809  *	libata exception handling.  On entry, actions required to
3810  *	recover each link and hotplug requests are recorded in the
3811  *	link's eh_context.  This function executes all the operations
3812  *	with appropriate retries and fallbacks to resurrect failed
3813  *	devices, detach goners and greet newcomers.
3814  *
3815  *	LOCKING:
3816  *	Kernel thread context (may sleep).
3817  *
3818  *	RETURNS:
3819  *	0 on success, -errno on failure.
3820  */
3821 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3822 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3823 		   ata_postreset_fn_t postreset,
3824 		   struct ata_link **r_failed_link)
3825 {
3826 	struct ata_link *link;
3827 	struct ata_device *dev;
3828 	int rc, nr_fails;
3829 	unsigned long flags, deadline;
3830 
3831 	DPRINTK("ENTER\n");
3832 
3833 	/* prep for recovery */
3834 	ata_for_each_link(link, ap, EDGE) {
3835 		struct ata_eh_context *ehc = &link->eh_context;
3836 
3837 		/* re-enable link? */
3838 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3839 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3840 			spin_lock_irqsave(ap->lock, flags);
3841 			link->flags &= ~ATA_LFLAG_DISABLED;
3842 			spin_unlock_irqrestore(ap->lock, flags);
3843 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3844 		}
3845 
3846 		ata_for_each_dev(dev, link, ALL) {
3847 			if (link->flags & ATA_LFLAG_NO_RETRY)
3848 				ehc->tries[dev->devno] = 1;
3849 			else
3850 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3851 
3852 			/* collect port action mask recorded in dev actions */
3853 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3854 					 ~ATA_EH_PERDEV_MASK;
3855 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3856 
3857 			/* process hotplug request */
3858 			if (dev->flags & ATA_DFLAG_DETACH)
3859 				ata_eh_detach_dev(dev);
3860 
3861 			/* schedule probe if necessary */
3862 			if (!ata_dev_enabled(dev))
3863 				ata_eh_schedule_probe(dev);
3864 		}
3865 	}
3866 
3867  retry:
3868 	rc = 0;
3869 
3870 	/* if UNLOADING, finish immediately */
3871 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3872 		goto out;
3873 
3874 	/* prep for EH */
3875 	ata_for_each_link(link, ap, EDGE) {
3876 		struct ata_eh_context *ehc = &link->eh_context;
3877 
3878 		/* skip EH if possible. */
3879 		if (ata_eh_skip_recovery(link))
3880 			ehc->i.action = 0;
3881 
3882 		ata_for_each_dev(dev, link, ALL)
3883 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3884 	}
3885 
3886 	/* reset */
3887 	ata_for_each_link(link, ap, EDGE) {
3888 		struct ata_eh_context *ehc = &link->eh_context;
3889 
3890 		if (!(ehc->i.action & ATA_EH_RESET))
3891 			continue;
3892 
3893 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3894 				  prereset, softreset, hardreset, postreset);
3895 		if (rc) {
3896 			ata_link_err(link, "reset failed, giving up\n");
3897 			goto out;
3898 		}
3899 	}
3900 
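	/*
	 * Handle PARK requests: issue head unload commands for devices
	 * with ATA_EH_PARK pending, then wait until every unpark
	 * deadline has passed or a new park request arrives
	 * (ap->park_req_pending is completed).
	 */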
3901 	do {
3902 		unsigned long now;
3903 
3904 		/*
3905 		 * clears ATA_EH_PARK in eh_info and resets
3906 		 * ap->park_req_pending
3907 		 */
3908 		ata_eh_pull_park_action(ap);
3909 
3910 		deadline = jiffies;
3911 		ata_for_each_link(link, ap, EDGE) {
3912 			ata_for_each_dev(dev, link, ALL) {
3913 				struct ata_eh_context *ehc = &link->eh_context;
3914 				unsigned long tmp;
3915 
3916 				if (dev->class != ATA_DEV_ATA &&
3917 				    dev->class != ATA_DEV_ZAC)
3918 					continue;
3919 				if (!(ehc->i.dev_action[dev->devno] &
3920 				      ATA_EH_PARK))
3921 					continue;
3922 				tmp = dev->unpark_deadline;
3923 				if (time_before(deadline, tmp))
3924 					deadline = tmp;
3925 				else if (time_before_eq(tmp, jiffies))
3926 					continue;
3927 				if (ehc->unloaded_mask & (1 << dev->devno))
3928 					continue;
3929 
3930 				ata_eh_park_issue_cmd(dev, 1);
3931 			}
3932 		}
3933 
3934 		now = jiffies;
3935 		if (time_before_eq(deadline, now))
3936 			break;
3937 
3938 		ata_eh_release(ap);
3939 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3940 						       deadline - now);
3941 		ata_eh_acquire(ap);
3942 	} while (deadline);
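	/* deadlines have passed, reload heads and complete PARK actions */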
3943 	ata_for_each_link(link, ap, EDGE) {
3944 		ata_for_each_dev(dev, link, ALL) {
3945 			if (!(link->eh_context.unloaded_mask &
3946 			      (1 << dev->devno)))
3947 				continue;
3948 
3949 			ata_eh_park_issue_cmd(dev, 0);
3950 			ata_eh_done(link, dev, ATA_EH_PARK);
3951 		}
3952 	}
3953 
3954 	/* the rest: revalidate/attach, transfer mode, UA, flush and LPM */
3955 	nr_fails = 0;
3956 	ata_for_each_link(link, ap, PMP_FIRST) {
3957 		struct ata_eh_context *ehc = &link->eh_context;
3958 
3959 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
3960 			goto config_lpm;
3961 
3962 		/* revalidate existing devices and attach new ones */
3963 		rc = ata_eh_revalidate_and_attach(link, &dev);
3964 		if (rc)
3965 			goto rest_fail;
3966 
3967 		/* if a PMP got attached, return; PMP EH will take care of it */
3968 		if (link->device->class == ATA_DEV_PMP) {
3969 			ehc->i.action = 0;
3970 			return 0;
3971 		}
3972 
3973 		/* configure transfer mode if necessary */
3974 		if (ehc->i.flags & ATA_EHI_SETMODE) {
3975 			rc = ata_set_mode(link, &dev);
3976 			if (rc)
3977 				goto rest_fail;
3978 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3979 		}
3980 
3981 		/* If a reset has been issued, clear UA to avoid
3982 		 * disrupting the current users of the device.
3983 		 */
3984 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3985 			ata_for_each_dev(dev, link, ALL) {
3986 				if (dev->class != ATA_DEV_ATAPI)
3987 					continue;
3988 				rc = atapi_eh_clear_ua(dev);
3989 				if (rc)
3990 					goto rest_fail;
3991 				if (zpodd_dev_enabled(dev))
3992 					zpodd_post_poweron(dev);
3993 			}
3994 		}
3995 
3996 		/* retry flush if necessary */
3997 		ata_for_each_dev(dev, link, ALL) {
3998 			if (dev->class != ATA_DEV_ATA &&
3999 			    dev->class != ATA_DEV_ZAC)
4000 				continue;
4001 			rc = ata_eh_maybe_retry_flush(dev);
4002 			if (rc)
4003 				goto rest_fail;
4004 		}
4005 
4006 	config_lpm:
4007 		/* configure link power saving */
4008 		if (link->lpm_policy != ap->target_lpm_policy) {
4009 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
4010 			if (rc)
4011 				goto rest_fail;
4012 		}
4013 
4014 		/* this link is okay now */
4015 		ehc->i.flags = 0;
4016 		continue;
4017 
4018 	rest_fail:
4019 		nr_fails++;
4020 		if (dev)
4021 			ata_eh_handle_dev_fail(dev, rc);
4022 
4023 		if (ap->pflags & ATA_PFLAG_FROZEN) {
4024 			/* PMP reset requires working host port.
4025 			 * Can't retry if it's frozen.
4026 			 */
4027 			if (sata_pmp_attached(ap))
4028 				goto out;
4029 			break;
4030 		}
4031 	}
4032 
4033 	if (nr_fails)
4034 		goto retry;
4035 
4036  out:
4037 	if (rc && r_failed_link)
4038 		*r_failed_link = link;
4039 
4040 	DPRINTK("EXIT, rc=%d\n", rc);
4041 	return rc;
4042 }
4043 
4044 /**
4045  *	ata_eh_finish - finish up EH
4046  *	@ap: host port to finish EH for
4047  *
4048  *	Recovery is complete.  Clean up EH states and retry or finish
4049  *	failed qcs.
4050  *
4051  *	LOCKING:
4052  *	None.
4053  */
4054 void ata_eh_finish(struct ata_port *ap)
4055 {
4056 	int tag;
4057 
4058 	/* retry or finish qcs */
4059 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
4060 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
4061 
4062 		if (!(qc->flags & ATA_QCFLAG_FAILED))
4063 			continue;
4064 
4065 		if (qc->err_mask) {
4066 			/* FIXME: Once EH migration is complete,
4067 			 * generate sense data in this function,
4068 			 * considering both err_mask and tf.
4069 			 */
4070 			if (qc->flags & ATA_QCFLAG_RETRY)
4071 				ata_eh_qc_retry(qc);
4072 			else
4073 				ata_eh_qc_complete(qc);
4074 		} else {
4075 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
4076 				ata_eh_qc_complete(qc);
4077 			} else {
4078 				/* feed zero TF to sense generation */
4079 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
4080 				ata_eh_qc_retry(qc);
4081 			}
4082 		}
4083 	}
4084 
4085 	/* make sure nr_active_links is zero after EH */
4086 	WARN_ON(ap->nr_active_links);
4087 	ap->nr_active_links = 0;
4088 }
4089 
4090 /**
4091  *	ata_do_eh - do standard error handling
4092  *	@ap: host port to handle error for
4093  *	@prereset: prereset method (can be NULL)
4094  *	@softreset: softreset method (can be NULL)
4095  *	@hardreset: hardreset method (can be NULL)
4096  *	@postreset: postreset method (can be NULL)
4097  *
4098  *	Perform the standard error handling sequence on @ap:
4099  *	autopsy, report, recover and finish.
4100  *
4101  *	LOCKING:
4102  *	Kernel thread context (may sleep).
4103  */
4104 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4105 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4106 	       ata_postreset_fn_t postreset)
4107 {
4108 	struct ata_device *dev;
4109 	int rc;
4110 
4111 	ata_eh_autopsy(ap);
4112 	ata_eh_report(ap);
4113 
4114 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4115 			    NULL);
4116 	if (rc) {
4117 		ata_for_each_dev(dev, &ap->link, ALL)
4118 			ata_dev_disable(dev);
4119 	}
4120 
4121 	ata_eh_finish(ap);
4122 }
4123 
4124 /**
4125  *	ata_std_error_handler - standard error handler
4126  *	@ap: host port to handle error for
4127  *
4128  *	Standard error handler: runs ata_do_eh() with the port's reset methods.
4129  *
4130  *	LOCKING:
4131  *	Kernel thread context (may sleep).
4132  */
4133 void ata_std_error_handler(struct ata_port *ap)
4134 {
4135 	struct ata_port_operations *ops = ap->ops;
4136 	ata_reset_fn_t hardreset = ops->hardreset;
4137 
4138 	/* ignore built-in hardreset if SCR access is not available */
4139 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4140 		hardreset = NULL;
4141 
4142 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4143 }
4144 
4145 #ifdef CONFIG_PM
4146 /**
4147  *	ata_eh_handle_port_suspend - perform port suspend operation
4148  *	@ap: port to suspend
4149  *
4150  *	Suspend @ap.
4151  *
4152  *	LOCKING:
4153  *	Kernel thread context (may sleep).
4154  */
4155 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4156 {
4157 	unsigned long flags;
4158 	int rc = 0;
4159 	struct ata_device *dev;
4160 
4161 	/* are we suspending? */
4162 	spin_lock_irqsave(ap->lock, flags);
4163 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4164 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4165 		spin_unlock_irqrestore(ap->lock, flags);
4166 		return;
4167 	}
4168 	spin_unlock_irqrestore(ap->lock, flags);
4169 
4170 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4171 
4172 	/*
4173 	 * If we have a ZPODD attached, check its zero
4174 	 * power ready status before the port is frozen.
4175 	 * Only needed for runtime suspend.
4176 	 */
4177 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
4178 		ata_for_each_dev(dev, &ap->link, ENABLED) {
4179 			if (zpodd_dev_enabled(dev))
4180 				zpodd_on_suspend(dev);
4181 		}
4182 	}
4183 
4184 	/* tell ACPI we're suspending */
4185 	rc = ata_acpi_on_suspend(ap);
4186 	if (rc)
4187 		goto out;
4188 
4189 	/* suspend */
4190 	ata_eh_freeze_port(ap);
4191 
4192 	if (ap->ops->port_suspend)
4193 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4194 
4195 	ata_acpi_set_state(ap, ap->pm_mesg);
4196  out:
4197 	/* update the flags */
4198 	spin_lock_irqsave(ap->lock, flags);
4199 
4200 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4201 	if (rc == 0)
4202 		ap->pflags |= ATA_PFLAG_SUSPENDED;
4203 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4204 		ata_port_schedule_eh(ap);
4205 
4206 	spin_unlock_irqrestore(ap->lock, flags);
4207 
4209 }
4210 
4211 /**
4212  *	ata_eh_handle_port_resume - perform port resume operation
4213  *	@ap: port to resume
4214  *
4215  *	Resume @ap.
4216  *
4217  *	LOCKING:
4218  *	Kernel thread context (may sleep).
4219  */
4220 static void ata_eh_handle_port_resume(struct ata_port *ap)
4221 {
4222 	struct ata_link *link;
4223 	struct ata_device *dev;
4224 	unsigned long flags;
4225 	int rc = 0;
4226 
4227 	/* are we resuming? */
4228 	spin_lock_irqsave(ap->lock, flags);
4229 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4230 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4231 		spin_unlock_irqrestore(ap->lock, flags);
4232 		return;
4233 	}
4234 	spin_unlock_irqrestore(ap->lock, flags);
4235 
4236 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4237 
4238 	/*
4239 	 * Error timestamps are in jiffies, which doesn't advance
4240 	 * while suspended, and PHY events during resume aren't too
4241 	 * uncommon.  Combined, the two can lead to unnecessary speed
4242 	 * downs if the machine is suspended and resumed repeatedly.
4243 	 * Clear the error history.
4244 	 */
4245 	ata_for_each_link(link, ap, HOST_FIRST)
4246 		ata_for_each_dev(dev, link, ALL)
4247 			ata_ering_clear(&dev->ering);
4248 
4249 	ata_acpi_set_state(ap, ap->pm_mesg);
4250 
4251 	if (ap->ops->port_resume)
4252 		rc = ap->ops->port_resume(ap);
4253 
4254 	/* tell ACPI that we're resuming */
4255 	ata_acpi_on_resume(ap);
4256 
4257 	/* update the flags */
4258 	spin_lock_irqsave(ap->lock, flags);
4259 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4260 	spin_unlock_irqrestore(ap->lock, flags);
4261 }
4262 #endif /* CONFIG_PM */
4263