xref: /openbmc/linux/drivers/ata/libata-eh.c (revision 56d06fa2)
1 /*
2  *  libata-eh.c - libata error handling
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9  *
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License as
13  *  published by the Free Software Foundation; either version 2, or
14  *  (at your option) any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  *  General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24  *  USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_dbg.h>
45 #include "../scsi/scsi_transport_api.h"
46 
47 #include <linux/libata.h>
48 
49 #include <trace/events/libata.h>
50 #include "libata.h"
51 
52 enum {
53 	/* speed down verdicts */
54 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
55 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
56 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
57 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
58 
59 	/* error flags */
60 	ATA_EFLAG_IS_IO			= (1 << 0),
61 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
62 	ATA_EFLAG_OLD_ER                = (1 << 31),
63 
64 	/* error categories */
65 	ATA_ECAT_NONE			= 0,
66 	ATA_ECAT_ATA_BUS		= 1,
67 	ATA_ECAT_TOUT_HSM		= 2,
68 	ATA_ECAT_UNK_DEV		= 3,
69 	ATA_ECAT_DUBIOUS_NONE		= 4,
70 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
71 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
72 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
73 	ATA_ECAT_NR			= 8,
74 
75 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
76 
77 	/* always put at least this amount of time between resets */
78 	ATA_EH_RESET_COOL_DOWN		=  5000,
79 
80 	/* Waiting in ->prereset can never be reliable.  It's
81 	 * sometimes nice to wait there but it can't be depended upon;
82 	 * otherwise, we wouldn't be resetting.  Just give it enough
83 	 * time for most drives to spin up.
84 	 */
85 	ATA_EH_PRERESET_TIMEOUT		= 10000,
86 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
87 
88 	ATA_EH_UA_TRIES			= 5,
89 
90 	/* probe speed down parameters, see ata_eh_schedule_probe() */
91 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
92 	ATA_EH_PROBE_TRIALS		= 2,
93 };
94 
95 /* The following table determines how we sequence resets.  Each entry
96  * represents timeout for that try.  The first try can be soft or
97  * hardreset.  All others are hardreset if available.  In most cases
98  * the first reset w/ 10sec timeout should succeed.  Following entries
99  * are mostly for error handling, hotplug and those outlier devices that
100  * take an exceptionally long time to recover from reset.
101  */
102 static const unsigned long ata_eh_reset_timeouts[] = {
103 	10000,	/* most drives spin up by 10sec */
104 	10000,	/* > 99% working drives spin up before 20sec */
105 	35000,	/* give > 30 secs of idleness for outlier devices */
106 	 5000,	/* and sweet one last chance */
107 	ULONG_MAX, /* > 1 min has elapsed, give up */
108 };
109 
110 static const unsigned long ata_eh_identify_timeouts[] = {
111 	 5000,	/* covers > 99% of successes and not too boring on failures */
112 	10000,  /* combined time till here is enough even for media access */
113 	30000,	/* for true idiots */
114 	ULONG_MAX,
115 };
116 
117 static const unsigned long ata_eh_flush_timeouts[] = {
118 	15000,	/* be generous with flush */
119 	15000,  /* ditto */
120 	30000,	/* and even more generous */
121 	ULONG_MAX,
122 };
123 
124 static const unsigned long ata_eh_other_timeouts[] = {
125 	 5000,	/* same rationale as identify timeout */
126 	10000,	/* ditto */
127 	/* but no merciful 30sec for other commands, it just isn't worth it */
128 	ULONG_MAX,
129 };
130 
131 struct ata_eh_cmd_timeout_ent {
132 	const u8		*commands;
133 	const unsigned long	*timeouts;
134 };
135 
136 /* The following table determines timeouts to use for EH internal
137  * commands.  Each table entry is a command class and matches the
138  * commands the entry applies to and the timeout table to use.
139  *
140  * On the retry after a command timed out, the next timeout value from
141  * the table is used.  If the table doesn't contain further entries,
142  * the last value is used.
143  *
144  * ehc->cmd_timeout_idx keeps track of which timeout to use per
145  * command class, so if SET_FEATURES times out on the first try, the
146  * next try will use the second timeout value only for that class.
147  */
148 #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
149 static const struct ata_eh_cmd_timeout_ent
150 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
151 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
152 	  .timeouts = ata_eh_identify_timeouts, },
153 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
154 	  .timeouts = ata_eh_other_timeouts, },
155 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
156 	  .timeouts = ata_eh_other_timeouts, },
157 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
158 	  .timeouts = ata_eh_other_timeouts, },
159 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
160 	  .timeouts = ata_eh_other_timeouts, },
161 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
162 	  .timeouts = ata_eh_flush_timeouts },
163 };
164 #undef CMDS
165 
166 static void __ata_port_freeze(struct ata_port *ap);
167 #ifdef CONFIG_PM
168 static void ata_eh_handle_port_suspend(struct ata_port *ap);
169 static void ata_eh_handle_port_resume(struct ata_port *ap);
170 #else /* CONFIG_PM */
171 static void ata_eh_handle_port_suspend(struct ata_port *ap)
172 { }
173 
174 static void ata_eh_handle_port_resume(struct ata_port *ap)
175 { }
176 #endif /* CONFIG_PM */
177 
178 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
179 				 va_list args)
180 {
181 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
182 				     ATA_EH_DESC_LEN - ehi->desc_len,
183 				     fmt, args);
184 }
185 
186 /**
187  *	__ata_ehi_push_desc - push error description without adding separator
188  *	@ehi: target EHI
189  *	@fmt: printf format string
190  *
191  *	Format string according to @fmt and append it to @ehi->desc.
192  *
193  *	LOCKING:
194  *	spin_lock_irqsave(host lock)
195  */
196 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
197 {
198 	va_list args;
199 
200 	va_start(args, fmt);
201 	__ata_ehi_pushv_desc(ehi, fmt, args);
202 	va_end(args);
203 }
204 
205 /**
206  *	ata_ehi_push_desc - push error description with separator
207  *	@ehi: target EHI
208  *	@fmt: printf format string
209  *
210  *	Format string according to @fmt and append it to @ehi->desc.
211  *	If @ehi->desc is not empty, ", " is added in-between.
212  *
213  *	LOCKING:
214  *	spin_lock_irqsave(host lock)
215  */
216 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
217 {
218 	va_list args;
219 
220 	if (ehi->desc_len)
221 		__ata_ehi_push_desc(ehi, ", ");
222 
223 	va_start(args, fmt);
224 	__ata_ehi_pushv_desc(ehi, fmt, args);
225 	va_end(args);
226 }
227 
228 /**
229  *	ata_ehi_clear_desc - clean error description
230  *	@ehi: target EHI
231  *
232  *	Clear @ehi->desc.
233  *
234  *	LOCKING:
235  *	spin_lock_irqsave(host lock)
236  */
237 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
238 {
239 	ehi->desc[0] = '\0';
240 	ehi->desc_len = 0;
241 }
242 
243 /**
244  *	ata_port_desc - append port description
245  *	@ap: target ATA port
246  *	@fmt: printf format string
247  *
248  *	Format string according to @fmt and append it to port
249  *	description.  If port description is not empty, " " is added
250  *	in-between.  This function is to be used while initializing
251  *	ata_host.  The description is printed on host registration.
252  *
253  *	LOCKING:
254  *	None.
255  */
256 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
257 {
258 	va_list args;
259 
260 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
261 
262 	if (ap->link.eh_info.desc_len)
263 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
264 
265 	va_start(args, fmt);
266 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
267 	va_end(args);
268 }
269 
270 #ifdef CONFIG_PCI
271 
272 /**
273  *	ata_port_pbar_desc - append PCI BAR description
274  *	@ap: target ATA port
275  *	@bar: target PCI BAR
276  *	@offset: offset into PCI BAR
277  *	@name: name of the area
278  *
279  *	If @offset is negative, this function formats a string which
280  *	contains the name, address, size and type of the BAR and
281  *	appends it to the port description.  If @offset is zero or
282  *	positive, only name and offsetted address is appended.
283  *
284  *	LOCKING:
285  *	None.
286  */
287 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
288 			const char *name)
289 {
290 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
291 	char *type = "";
292 	unsigned long long start, len;
293 
294 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
295 		type = "m";
296 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
297 		type = "i";
298 
299 	start = (unsigned long long)pci_resource_start(pdev, bar);
300 	len = (unsigned long long)pci_resource_len(pdev, bar);
301 
302 	if (offset < 0)
303 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
304 	else
305 		ata_port_desc(ap, "%s 0x%llx", name,
306 				start + (unsigned long long)offset);
307 }
308 
309 #endif /* CONFIG_PCI */
310 
311 static int ata_lookup_timeout_table(u8 cmd)
312 {
313 	int i;
314 
315 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
316 		const u8 *cur;
317 
318 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
319 			if (*cur == cmd)
320 				return i;
321 	}
322 
323 	return -1;
324 }
325 
326 /**
327  *	ata_internal_cmd_timeout - determine timeout for an internal command
328  *	@dev: target device
329  *	@cmd: internal command to be issued
330  *
331  *	Determine timeout for internal command @cmd for @dev.
332  *
333  *	LOCKING:
334  *	EH context.
335  *
336  *	RETURNS:
337  *	Determined timeout.
338  */
339 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
340 {
341 	struct ata_eh_context *ehc = &dev->link->eh_context;
342 	int ent = ata_lookup_timeout_table(cmd);
343 	int idx;
344 
345 	if (ent < 0)
346 		return ATA_EH_CMD_DFL_TIMEOUT;
347 
348 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
349 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
350 }
351 
352 /**
353  *	ata_internal_cmd_timed_out - notification for internal command timeout
354  *	@dev: target device
355  *	@cmd: internal command which timed out
356  *
357  *	Notify EH that internal command @cmd for @dev timed out.  This
358  *	function should be called only for commands whose timeouts are
359  *	determined using ata_internal_cmd_timeout().
360  *
361  *	LOCKING:
362  *	EH context.
363  */
364 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
365 {
366 	struct ata_eh_context *ehc = &dev->link->eh_context;
367 	int ent = ata_lookup_timeout_table(cmd);
368 	int idx;
369 
370 	if (ent < 0)
371 		return;
372 
373 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
374 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
375 		ehc->cmd_timeout_idx[dev->devno][ent]++;
376 }
377 
378 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
379 			     unsigned int err_mask)
380 {
381 	struct ata_ering_entry *ent;
382 
383 	WARN_ON(!err_mask);
384 
385 	ering->cursor++;
386 	ering->cursor %= ATA_ERING_SIZE;
387 
388 	ent = &ering->ring[ering->cursor];
389 	ent->eflags = eflags;
390 	ent->err_mask = err_mask;
391 	ent->timestamp = get_jiffies_64();
392 }
393 
394 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
395 {
396 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
397 
398 	if (ent->err_mask)
399 		return ent;
400 	return NULL;
401 }
402 
403 int ata_ering_map(struct ata_ering *ering,
404 		  int (*map_fn)(struct ata_ering_entry *, void *),
405 		  void *arg)
406 {
407 	int idx, rc = 0;
408 	struct ata_ering_entry *ent;
409 
410 	idx = ering->cursor;
411 	do {
412 		ent = &ering->ring[idx];
413 		if (!ent->err_mask)
414 			break;
415 		rc = map_fn(ent, arg);
416 		if (rc)
417 			break;
418 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
419 	} while (idx != ering->cursor);
420 
421 	return rc;
422 }
423 
424 static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
425 {
426 	ent->eflags |= ATA_EFLAG_OLD_ER;
427 	return 0;
428 }
429 
430 static void ata_ering_clear(struct ata_ering *ering)
431 {
432 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
433 }
434 
435 static unsigned int ata_eh_dev_action(struct ata_device *dev)
436 {
437 	struct ata_eh_context *ehc = &dev->link->eh_context;
438 
439 	return ehc->i.action | ehc->i.dev_action[dev->devno];
440 }
441 
442 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
443 				struct ata_eh_info *ehi, unsigned int action)
444 {
445 	struct ata_device *tdev;
446 
447 	if (!dev) {
448 		ehi->action &= ~action;
449 		ata_for_each_dev(tdev, link, ALL)
450 			ehi->dev_action[tdev->devno] &= ~action;
451 	} else {
452 		/* doesn't make sense for port-wide EH actions */
453 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
454 
455 		/* break ehi->action into ehi->dev_action */
456 		if (ehi->action & action) {
457 			ata_for_each_dev(tdev, link, ALL)
458 				ehi->dev_action[tdev->devno] |=
459 					ehi->action & action;
460 			ehi->action &= ~action;
461 		}
462 
463 		/* turn off the specified per-dev action */
464 		ehi->dev_action[dev->devno] &= ~action;
465 	}
466 }
467 
468 /**
469  *	ata_eh_acquire - acquire EH ownership
470  *	@ap: ATA port to acquire EH ownership for
471  *
472  *	Acquire EH ownership for @ap.  This is the basic exclusion
473  *	mechanism for ports sharing a host.  Only one port hanging off
474  *	the same host can claim the ownership of EH.
475  *
476  *	LOCKING:
477  *	EH context.
478  */
479 void ata_eh_acquire(struct ata_port *ap)
480 {
481 	mutex_lock(&ap->host->eh_mutex);
482 	WARN_ON_ONCE(ap->host->eh_owner);
483 	ap->host->eh_owner = current;
484 }
485 
486 /**
487  *	ata_eh_release - release EH ownership
488  *	@ap: ATA port to release EH ownership for
489  *
490  *	Release EH ownership for @ap if the caller.  The caller must
491  *	have acquired EH ownership using ata_eh_acquire() previously.
492  *
493  *	LOCKING:
494  *	EH context.
495  */
496 void ata_eh_release(struct ata_port *ap)
497 {
498 	WARN_ON_ONCE(ap->host->eh_owner != current);
499 	ap->host->eh_owner = NULL;
500 	mutex_unlock(&ap->host->eh_mutex);
501 }
502 
503 /**
504  *	ata_scsi_timed_out - SCSI layer time out callback
505  *	@cmd: timed out SCSI command
506  *
507  *	Handles SCSI layer timeout.  We race with normal completion of
508  *	the qc for @cmd.  If the qc is already gone, we lose and let
509  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
510  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
511  *	from finishing it by setting EH_SCHEDULED and return
512  *	EH_NOT_HANDLED.
513  *
514  *	TODO: kill this function once old EH is gone.
515  *
516  *	LOCKING:
517  *	Called from timer context
518  *
519  *	RETURNS:
520  *	EH_HANDLED or EH_NOT_HANDLED
521  */
522 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
523 {
524 	struct Scsi_Host *host = cmd->device->host;
525 	struct ata_port *ap = ata_shost_to_port(host);
526 	unsigned long flags;
527 	struct ata_queued_cmd *qc;
528 	enum blk_eh_timer_return ret;
529 
530 	DPRINTK("ENTER\n");
531 
532 	if (ap->ops->error_handler) {
533 		ret = BLK_EH_NOT_HANDLED;
534 		goto out;
535 	}
536 
537 	ret = BLK_EH_HANDLED;
538 	spin_lock_irqsave(ap->lock, flags);
539 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
540 	if (qc) {
541 		WARN_ON(qc->scsicmd != cmd);
542 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
543 		qc->err_mask |= AC_ERR_TIMEOUT;
544 		ret = BLK_EH_NOT_HANDLED;
545 	}
546 	spin_unlock_irqrestore(ap->lock, flags);
547 
548  out:
549 	DPRINTK("EXIT, ret=%d\n", ret);
550 	return ret;
551 }
552 
553 static void ata_eh_unload(struct ata_port *ap)
554 {
555 	struct ata_link *link;
556 	struct ata_device *dev;
557 	unsigned long flags;
558 
559 	/* Restore SControl IPM and SPD for the next driver and
560 	 * disable attached devices.
561 	 */
562 	ata_for_each_link(link, ap, PMP_FIRST) {
563 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
564 		ata_for_each_dev(dev, link, ALL)
565 			ata_dev_disable(dev);
566 	}
567 
568 	/* freeze and set UNLOADED */
569 	spin_lock_irqsave(ap->lock, flags);
570 
571 	ata_port_freeze(ap);			/* won't be thawed */
572 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
573 	ap->pflags |= ATA_PFLAG_UNLOADED;
574 
575 	spin_unlock_irqrestore(ap->lock, flags);
576 }
577 
578 /**
579  *	ata_scsi_error - SCSI layer error handler callback
580  *	@host: SCSI host on which error occurred
581  *
582  *	Handles SCSI-layer-thrown error events.
583  *
584  *	LOCKING:
585  *	Inherited from SCSI layer (none, can sleep)
586  *
587  *	RETURNS:
588  *	Zero.
589  */
590 void ata_scsi_error(struct Scsi_Host *host)
591 {
592 	struct ata_port *ap = ata_shost_to_port(host);
593 	unsigned long flags;
594 	LIST_HEAD(eh_work_q);
595 
596 	DPRINTK("ENTER\n");
597 
598 	spin_lock_irqsave(host->host_lock, flags);
599 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
600 	spin_unlock_irqrestore(host->host_lock, flags);
601 
602 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
603 
604 	/* If we timed raced normal completion and there is nothing to
605 	   recover nr_timedout == 0 why exactly are we doing error recovery ? */
606 	ata_scsi_port_error_handler(host, ap);
607 
608 	/* finish or retry handled scmd's and clean up */
609 	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
610 
611 	DPRINTK("EXIT\n");
612 }
613 
614 /**
615  * ata_scsi_cmd_error_handler - error callback for a list of commands
616  * @host:	scsi host containing the port
617  * @ap:		ATA port within the host
618  * @eh_work_q:	list of commands to process
619  *
620  * process the given list of commands and return those finished to the
621  * ap->eh_done_q.  This function is the first part of the libata error
622  * handler which processes a given list of failed commands.
623  */
624 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
625 				struct list_head *eh_work_q)
626 {
627 	int i;
628 	unsigned long flags;
629 
630 	/* make sure sff pio task is not running */
631 	ata_sff_flush_pio_task(ap);
632 
633 	/* synchronize with host lock and sort out timeouts */
634 
635 	/* For new EH, all qcs are finished in one of three ways -
636 	 * normal completion, error completion, and SCSI timeout.
637 	 * Both completions can race against SCSI timeout.  When normal
638 	 * completion wins, the qc never reaches EH.  When error
639 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
640 	 *
641 	 * When SCSI timeout wins, things are a bit more complex.
642 	 * Normal or error completion can occur after the timeout but
643 	 * before this point.  In such cases, both types of
644 	 * completions are honored.  A scmd is determined to have
645 	 * timed out iff its associated qc is active and not failed.
646 	 */
647 	if (ap->ops->error_handler) {
648 		struct scsi_cmnd *scmd, *tmp;
649 		int nr_timedout = 0;
650 
651 		spin_lock_irqsave(ap->lock, flags);
652 
653 		/* This must occur under the ap->lock as we don't want
654 		   a polled recovery to race the real interrupt handler
655 
656 		   The lost_interrupt handler checks for any completed but
657 		   non-notified command and completes much like an IRQ handler.
658 
659 		   We then fall into the error recovery code which will treat
660 		   this as if normal completion won the race */
661 
662 		if (ap->ops->lost_interrupt)
663 			ap->ops->lost_interrupt(ap);
664 
665 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
666 			struct ata_queued_cmd *qc;
667 
668 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
669 				qc = __ata_qc_from_tag(ap, i);
670 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
671 				    qc->scsicmd == scmd)
672 					break;
673 			}
674 
675 			if (i < ATA_MAX_QUEUE) {
676 				/* the scmd has an associated qc */
677 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
678 					/* which hasn't failed yet, timeout */
679 					qc->err_mask |= AC_ERR_TIMEOUT;
680 					qc->flags |= ATA_QCFLAG_FAILED;
681 					nr_timedout++;
682 				}
683 			} else {
684 				/* Normal completion occurred after
685 				 * SCSI timeout but before this point.
686 				 * Successfully complete it.
687 				 */
688 				scmd->retries = scmd->allowed;
689 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
690 			}
691 		}
692 
693 		/* If we have timed out qcs.  They belong to EH from
694 		 * this point but the state of the controller is
695 		 * unknown.  Freeze the port to make sure the IRQ
696 		 * handler doesn't diddle with those qcs.  This must
697 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
698 		 */
699 		if (nr_timedout)
700 			__ata_port_freeze(ap);
701 
702 		spin_unlock_irqrestore(ap->lock, flags);
703 
704 		/* initialize eh_tries */
705 		ap->eh_tries = ATA_EH_MAX_TRIES;
706 	} else
707 		spin_unlock_wait(ap->lock);
708 
709 }
710 EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
711 
712 /**
713  * ata_scsi_port_error_handler - recover the port after the commands
714  * @host:	SCSI host containing the port
715  * @ap:		the ATA port
716  *
717  * Handle the recovery of the port @ap after all the commands
718  * have been recovered.
719  */
720 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
721 {
722 	unsigned long flags;
723 
724 	/* invoke error handler */
725 	if (ap->ops->error_handler) {
726 		struct ata_link *link;
727 
728 		/* acquire EH ownership */
729 		ata_eh_acquire(ap);
730  repeat:
731 		/* kill fast drain timer */
732 		del_timer_sync(&ap->fastdrain_timer);
733 
734 		/* process port resume request */
735 		ata_eh_handle_port_resume(ap);
736 
737 		/* fetch & clear EH info */
738 		spin_lock_irqsave(ap->lock, flags);
739 
740 		ata_for_each_link(link, ap, HOST_FIRST) {
741 			struct ata_eh_context *ehc = &link->eh_context;
742 			struct ata_device *dev;
743 
744 			memset(&link->eh_context, 0, sizeof(link->eh_context));
745 			link->eh_context.i = link->eh_info;
746 			memset(&link->eh_info, 0, sizeof(link->eh_info));
747 
748 			ata_for_each_dev(dev, link, ENABLED) {
749 				int devno = dev->devno;
750 
751 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
752 				if (ata_ncq_enabled(dev))
753 					ehc->saved_ncq_enabled |= 1 << devno;
754 			}
755 		}
756 
757 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
758 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
759 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
760 
761 		spin_unlock_irqrestore(ap->lock, flags);
762 
763 		/* invoke EH, skip if unloading or suspended */
764 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
765 			ap->ops->error_handler(ap);
766 		else {
767 			/* if unloading, commence suicide */
768 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
769 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
770 				ata_eh_unload(ap);
771 			ata_eh_finish(ap);
772 		}
773 
774 		/* process port suspend request */
775 		ata_eh_handle_port_suspend(ap);
776 
777 		/* Exception might have happened after ->error_handler
778 		 * recovered the port but before this point.  Repeat
779 		 * EH in such case.
780 		 */
781 		spin_lock_irqsave(ap->lock, flags);
782 
783 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
784 			if (--ap->eh_tries) {
785 				spin_unlock_irqrestore(ap->lock, flags);
786 				goto repeat;
787 			}
788 			ata_port_err(ap,
789 				     "EH pending after %d tries, giving up\n",
790 				     ATA_EH_MAX_TRIES);
791 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
792 		}
793 
794 		/* this run is complete, make sure EH info is clear */
795 		ata_for_each_link(link, ap, HOST_FIRST)
796 			memset(&link->eh_info, 0, sizeof(link->eh_info));
797 
798 		/* end eh (clear host_eh_scheduled) while holding
799 		 * ap->lock such that if exception occurs after this
800 		 * point but before EH completion, SCSI midlayer will
801 		 * re-initiate EH.
802 		 */
803 		ap->ops->end_eh(ap);
804 
805 		spin_unlock_irqrestore(ap->lock, flags);
806 		ata_eh_release(ap);
807 	} else {
808 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
809 		ap->ops->eng_timeout(ap);
810 	}
811 
812 	scsi_eh_flush_done_q(&ap->eh_done_q);
813 
814 	/* clean up */
815 	spin_lock_irqsave(ap->lock, flags);
816 
817 	if (ap->pflags & ATA_PFLAG_LOADING)
818 		ap->pflags &= ~ATA_PFLAG_LOADING;
819 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
820 		schedule_delayed_work(&ap->hotplug_task, 0);
821 
822 	if (ap->pflags & ATA_PFLAG_RECOVERED)
823 		ata_port_info(ap, "EH complete\n");
824 
825 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
826 
827 	/* tell wait_eh that we're done */
828 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
829 	wake_up_all(&ap->eh_wait_q);
830 
831 	spin_unlock_irqrestore(ap->lock, flags);
832 }
833 EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
834 
835 /**
836  *	ata_port_wait_eh - Wait for the currently pending EH to complete
837  *	@ap: Port to wait EH for
838  *
839  *	Wait until the currently pending EH is complete.
840  *
841  *	LOCKING:
842  *	Kernel thread context (may sleep).
843  */
844 void ata_port_wait_eh(struct ata_port *ap)
845 {
846 	unsigned long flags;
847 	DEFINE_WAIT(wait);
848 
849  retry:
850 	spin_lock_irqsave(ap->lock, flags);
851 
852 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
853 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
854 		spin_unlock_irqrestore(ap->lock, flags);
855 		schedule();
856 		spin_lock_irqsave(ap->lock, flags);
857 	}
858 	finish_wait(&ap->eh_wait_q, &wait);
859 
860 	spin_unlock_irqrestore(ap->lock, flags);
861 
862 	/* make sure SCSI EH is complete */
863 	if (scsi_host_in_recovery(ap->scsi_host)) {
864 		ata_msleep(ap, 10);
865 		goto retry;
866 	}
867 }
868 EXPORT_SYMBOL_GPL(ata_port_wait_eh);
869 
870 static int ata_eh_nr_in_flight(struct ata_port *ap)
871 {
872 	unsigned int tag;
873 	int nr = 0;
874 
875 	/* count only non-internal commands */
876 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
877 		if (ata_qc_from_tag(ap, tag))
878 			nr++;
879 
880 	return nr;
881 }
882 
883 void ata_eh_fastdrain_timerfn(unsigned long arg)
884 {
885 	struct ata_port *ap = (void *)arg;
886 	unsigned long flags;
887 	int cnt;
888 
889 	spin_lock_irqsave(ap->lock, flags);
890 
891 	cnt = ata_eh_nr_in_flight(ap);
892 
893 	/* are we done? */
894 	if (!cnt)
895 		goto out_unlock;
896 
897 	if (cnt == ap->fastdrain_cnt) {
898 		unsigned int tag;
899 
900 		/* No progress during the last interval, tag all
901 		 * in-flight qcs as timed out and freeze the port.
902 		 */
903 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
904 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
905 			if (qc)
906 				qc->err_mask |= AC_ERR_TIMEOUT;
907 		}
908 
909 		ata_port_freeze(ap);
910 	} else {
911 		/* some qcs have finished, give it another chance */
912 		ap->fastdrain_cnt = cnt;
913 		ap->fastdrain_timer.expires =
914 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
915 		add_timer(&ap->fastdrain_timer);
916 	}
917 
918  out_unlock:
919 	spin_unlock_irqrestore(ap->lock, flags);
920 }
921 
922 /**
923  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
924  *	@ap: target ATA port
925  *	@fastdrain: activate fast drain
926  *
927  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
928  *	is non-zero and EH wasn't pending before.  Fast drain ensures
929  *	that EH kicks in in timely manner.
930  *
931  *	LOCKING:
932  *	spin_lock_irqsave(host lock)
933  */
934 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
935 {
936 	int cnt;
937 
938 	/* already scheduled? */
939 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
940 		return;
941 
942 	ap->pflags |= ATA_PFLAG_EH_PENDING;
943 
944 	if (!fastdrain)
945 		return;
946 
947 	/* do we have in-flight qcs? */
948 	cnt = ata_eh_nr_in_flight(ap);
949 	if (!cnt)
950 		return;
951 
952 	/* activate fast drain */
953 	ap->fastdrain_cnt = cnt;
954 	ap->fastdrain_timer.expires =
955 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
956 	add_timer(&ap->fastdrain_timer);
957 }
958 
959 /**
960  *	ata_qc_schedule_eh - schedule qc for error handling
961  *	@qc: command to schedule error handling for
962  *
963  *	Schedule error handling for @qc.  EH will kick in as soon as
964  *	other commands are drained.
965  *
966  *	LOCKING:
967  *	spin_lock_irqsave(host lock)
968  */
969 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
970 {
971 	struct ata_port *ap = qc->ap;
972 	struct request_queue *q = qc->scsicmd->device->request_queue;
973 	unsigned long flags;
974 
975 	WARN_ON(!ap->ops->error_handler);
976 
977 	qc->flags |= ATA_QCFLAG_FAILED;
978 	ata_eh_set_pending(ap, 1);
979 
980 	/* The following will fail if timeout has already expired.
981 	 * ata_scsi_error() takes care of such scmds on EH entry.
982 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
983 	 * this function completes.
984 	 */
985 	spin_lock_irqsave(q->queue_lock, flags);
986 	blk_abort_request(qc->scsicmd->request);
987 	spin_unlock_irqrestore(q->queue_lock, flags);
988 }
989 
990 /**
991  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
992  * @ap: ATA port to schedule EH for
993  *
994  *	LOCKING: inherited from ata_port_schedule_eh
995  *	spin_lock_irqsave(host lock)
996  */
997 void ata_std_sched_eh(struct ata_port *ap)
998 {
999 	WARN_ON(!ap->ops->error_handler);
1000 
1001 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
1002 		return;
1003 
1004 	ata_eh_set_pending(ap, 1);
1005 	scsi_schedule_eh(ap->scsi_host);
1006 
1007 	DPRINTK("port EH scheduled\n");
1008 }
1009 EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1010 
1011 /**
1012  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1013  * @ap: ATA port to end EH for
1014  *
1015  * In the libata object model there is a 1:1 mapping of ata_port to
1016  * shost, so host fields can be directly manipulated under ap->lock, in
1017  * the libsas case we need to hold a lock at the ha->level to coordinate
1018  * these events.
1019  *
1020  *	LOCKING:
1021  *	spin_lock_irqsave(host lock)
1022  */
1023 void ata_std_end_eh(struct ata_port *ap)
1024 {
1025 	struct Scsi_Host *host = ap->scsi_host;
1026 
1027 	host->host_eh_scheduled = 0;
1028 }
1029 EXPORT_SYMBOL(ata_std_end_eh);
1030 
1031 
1032 /**
1033  *	ata_port_schedule_eh - schedule error handling without a qc
1034  *	@ap: ATA port to schedule EH for
1035  *
1036  *	Schedule error handling for @ap.  EH will kick in as soon as
1037  *	all commands are drained.
1038  *
1039  *	LOCKING:
1040  *	spin_lock_irqsave(host lock)
1041  */
1042 void ata_port_schedule_eh(struct ata_port *ap)
1043 {
1044 	/* see: ata_std_sched_eh, unless you know better */
1045 	ap->ops->sched_eh(ap);
1046 }
1047 
1048 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1049 {
1050 	int tag, nr_aborted = 0;
1051 
1052 	WARN_ON(!ap->ops->error_handler);
1053 
1054 	/* we're gonna abort all commands, no need for fast drain */
1055 	ata_eh_set_pending(ap, 0);
1056 
1057 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1058 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1059 
1060 		if (qc && (!link || qc->dev->link == link)) {
1061 			qc->flags |= ATA_QCFLAG_FAILED;
1062 			ata_qc_complete(qc);
1063 			nr_aborted++;
1064 		}
1065 	}
1066 
1067 	if (!nr_aborted)
1068 		ata_port_schedule_eh(ap);
1069 
1070 	return nr_aborted;
1071 }
1072 
1073 /**
1074  *	ata_link_abort - abort all qc's on the link
1075  *	@link: ATA link to abort qc's for
1076  *
1077  *	Abort all active qc's active on @link and schedule EH.
1078  *
1079  *	LOCKING:
1080  *	spin_lock_irqsave(host lock)
1081  *
1082  *	RETURNS:
1083  *	Number of aborted qc's.
1084  */
1085 int ata_link_abort(struct ata_link *link)
1086 {
1087 	return ata_do_link_abort(link->ap, link);
1088 }
1089 
1090 /**
1091  *	ata_port_abort - abort all qc's on the port
1092  *	@ap: ATA port to abort qc's for
1093  *
1094  *	Abort all active qc's of @ap and schedule EH.
1095  *
1096  *	LOCKING:
1097  *	spin_lock_irqsave(host_set lock)
1098  *
1099  *	RETURNS:
1100  *	Number of aborted qc's.
1101  */
1102 int ata_port_abort(struct ata_port *ap)
1103 {
1104 	return ata_do_link_abort(ap, NULL);
1105 }
1106 
1107 /**
1108  *	__ata_port_freeze - freeze port
1109  *	@ap: ATA port to freeze
1110  *
1111  *	This function is called when HSM violation or some other
1112  *	condition disrupts normal operation of the port.  Frozen port
1113  *	is not allowed to perform any operation until the port is
1114  *	thawed, which usually follows a successful reset.
1115  *
1116  *	ap->ops->freeze() callback can be used for freezing the port
1117  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1118  *	port cannot be frozen hardware-wise, the interrupt handler
1119  *	must ack and clear interrupts unconditionally while the port
1120  *	is frozen.
1121  *
1122  *	LOCKING:
1123  *	spin_lock_irqsave(host lock)
1124  */
1125 static void __ata_port_freeze(struct ata_port *ap)
1126 {
1127 	WARN_ON(!ap->ops->error_handler);
1128 
1129 	if (ap->ops->freeze)
1130 		ap->ops->freeze(ap);
1131 
1132 	ap->pflags |= ATA_PFLAG_FROZEN;
1133 
1134 	DPRINTK("ata%u port frozen\n", ap->print_id);
1135 }
1136 
1137 /**
1138  *	ata_port_freeze - abort & freeze port
1139  *	@ap: ATA port to freeze
1140  *
1141  *	Abort and freeze @ap.  The freeze operation must be called
1142  *	first, because some hardware requires special operations
1143  *	before the taskfile registers are accessible.
1144  *
1145  *	LOCKING:
1146  *	spin_lock_irqsave(host lock)
1147  *
1148  *	RETURNS:
1149  *	Number of aborted commands.
1150  */
1151 int ata_port_freeze(struct ata_port *ap)
1152 {
1153 	int nr_aborted;
1154 
1155 	WARN_ON(!ap->ops->error_handler);
1156 
1157 	__ata_port_freeze(ap);
1158 	nr_aborted = ata_port_abort(ap);
1159 
1160 	return nr_aborted;
1161 }
1162 
1163 /**
1164  *	sata_async_notification - SATA async notification handler
1165  *	@ap: ATA port where async notification is received
1166  *
1167  *	Handler to be called when async notification via SDB FIS is
1168  *	received.  This function schedules EH if necessary.
1169  *
1170  *	LOCKING:
1171  *	spin_lock_irqsave(host lock)
1172  *
1173  *	RETURNS:
1174  *	1 if EH is scheduled, 0 otherwise.
1175  */
1176 int sata_async_notification(struct ata_port *ap)
1177 {
1178 	u32 sntf;
1179 	int rc;
1180 
1181 	if (!(ap->flags & ATA_FLAG_AN))
1182 		return 0;
1183 
1184 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1185 	if (rc == 0)
1186 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1187 
1188 	if (!sata_pmp_attached(ap) || rc) {
1189 		/* PMP is not attached or SNTF is not available */
1190 		if (!sata_pmp_attached(ap)) {
1191 			/* PMP is not attached.  Check whether ATAPI
1192 			 * AN is configured.  If so, notify media
1193 			 * change.
1194 			 */
1195 			struct ata_device *dev = ap->link.device;
1196 
1197 			if ((dev->class == ATA_DEV_ATAPI) &&
1198 			    (dev->flags & ATA_DFLAG_AN))
1199 				ata_scsi_media_change_notify(dev);
1200 			return 0;
1201 		} else {
1202 			/* PMP is attached but SNTF is not available.
1203 			 * ATAPI async media change notification is
1204 			 * not used.  The PMP must be reporting PHY
1205 			 * status change, schedule EH.
1206 			 */
1207 			ata_port_schedule_eh(ap);
1208 			return 1;
1209 		}
1210 	} else {
1211 		/* PMP is attached and SNTF is available */
1212 		struct ata_link *link;
1213 
1214 		/* check and notify ATAPI AN */
1215 		ata_for_each_link(link, ap, EDGE) {
1216 			if (!(sntf & (1 << link->pmp)))
1217 				continue;
1218 
1219 			if ((link->device->class == ATA_DEV_ATAPI) &&
1220 			    (link->device->flags & ATA_DFLAG_AN))
1221 				ata_scsi_media_change_notify(link->device);
1222 		}
1223 
1224 		/* If PMP is reporting that PHY status of some
1225 		 * downstream ports has changed, schedule EH.
1226 		 */
1227 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1228 			ata_port_schedule_eh(ap);
1229 			return 1;
1230 		}
1231 
1232 		return 0;
1233 	}
1234 }
1235 
1236 /**
1237  *	ata_eh_freeze_port - EH helper to freeze port
1238  *	@ap: ATA port to freeze
1239  *
1240  *	Freeze @ap.
1241  *
1242  *	LOCKING:
1243  *	None.
1244  */
1245 void ata_eh_freeze_port(struct ata_port *ap)
1246 {
1247 	unsigned long flags;
1248 
1249 	if (!ap->ops->error_handler)
1250 		return;
1251 
1252 	spin_lock_irqsave(ap->lock, flags);
1253 	__ata_port_freeze(ap);
1254 	spin_unlock_irqrestore(ap->lock, flags);
1255 }
1256 
1257 /**
1258  *	ata_port_thaw_port - EH helper to thaw port
1259  *	@ap: ATA port to thaw
1260  *
1261  *	Thaw frozen port @ap.
1262  *
1263  *	LOCKING:
1264  *	None.
1265  */
1266 void ata_eh_thaw_port(struct ata_port *ap)
1267 {
1268 	unsigned long flags;
1269 
1270 	if (!ap->ops->error_handler)
1271 		return;
1272 
1273 	spin_lock_irqsave(ap->lock, flags);
1274 
1275 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1276 
1277 	if (ap->ops->thaw)
1278 		ap->ops->thaw(ap);
1279 
1280 	spin_unlock_irqrestore(ap->lock, flags);
1281 
1282 	DPRINTK("ata%u port thawed\n", ap->print_id);
1283 }
1284 
1285 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1286 {
1287 	/* nada */
1288 }
1289 
1290 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1291 {
1292 	struct ata_port *ap = qc->ap;
1293 	struct scsi_cmnd *scmd = qc->scsicmd;
1294 	unsigned long flags;
1295 
1296 	spin_lock_irqsave(ap->lock, flags);
1297 	qc->scsidone = ata_eh_scsidone;
1298 	__ata_qc_complete(qc);
1299 	WARN_ON(ata_tag_valid(qc->tag));
1300 	spin_unlock_irqrestore(ap->lock, flags);
1301 
1302 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1303 }
1304 
1305 /**
1306  *	ata_eh_qc_complete - Complete an active ATA command from EH
1307  *	@qc: Command to complete
1308  *
1309  *	Indicate to the mid and upper layers that an ATA command has
1310  *	completed.  To be used from EH.
1311  */
1312 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1313 {
1314 	struct scsi_cmnd *scmd = qc->scsicmd;
1315 	scmd->retries = scmd->allowed;
1316 	__ata_eh_qc_complete(qc);
1317 }
1318 
1319 /**
1320  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1321  *	@qc: Command to retry
1322  *
1323  *	Indicate to the mid and upper layers that an ATA command
1324  *	should be retried.  To be used from EH.
1325  *
1326  *	SCSI midlayer limits the number of retries to scmd->allowed.
1327  *	scmd->allowed is incremented for commands which get retried
1328  *	due to unrelated failures (qc->err_mask is zero).
1329  */
1330 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1331 {
1332 	struct scsi_cmnd *scmd = qc->scsicmd;
1333 	if (!qc->err_mask)
1334 		scmd->allowed++;
1335 	__ata_eh_qc_complete(qc);
1336 }
1337 
1338 /**
1339  *	ata_dev_disable - disable ATA device
1340  *	@dev: ATA device to disable
1341  *
1342  *	Disable @dev.
1343  *
1344  *	Locking:
1345  *	EH context.
1346  */
1347 void ata_dev_disable(struct ata_device *dev)
1348 {
1349 	if (!ata_dev_enabled(dev))
1350 		return;
1351 
1352 	if (ata_msg_drv(dev->link->ap))
1353 		ata_dev_warn(dev, "disabled\n");
1354 	ata_acpi_on_disable(dev);
1355 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1356 	dev->class++;
1357 
1358 	/* From now till the next successful probe, ering is used to
1359 	 * track probe failures.  Clear accumulated device error info.
1360 	 */
1361 	ata_ering_clear(&dev->ering);
1362 }
1363 
1364 /**
1365  *	ata_eh_detach_dev - detach ATA device
1366  *	@dev: ATA device to detach
1367  *
1368  *	Detach @dev.
1369  *
1370  *	LOCKING:
1371  *	None.
1372  */
1373 void ata_eh_detach_dev(struct ata_device *dev)
1374 {
1375 	struct ata_link *link = dev->link;
1376 	struct ata_port *ap = link->ap;
1377 	struct ata_eh_context *ehc = &link->eh_context;
1378 	unsigned long flags;
1379 
1380 	ata_dev_disable(dev);
1381 
1382 	spin_lock_irqsave(ap->lock, flags);
1383 
1384 	dev->flags &= ~ATA_DFLAG_DETACH;
1385 
1386 	if (ata_scsi_offline_dev(dev)) {
1387 		dev->flags |= ATA_DFLAG_DETACHED;
1388 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1389 	}
1390 
1391 	/* clear per-dev EH info */
1392 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1393 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1394 	ehc->saved_xfer_mode[dev->devno] = 0;
1395 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1396 
1397 	spin_unlock_irqrestore(ap->lock, flags);
1398 }
1399 
1400 /**
1401  *	ata_eh_about_to_do - about to perform eh_action
1402  *	@link: target ATA link
1403  *	@dev: target ATA dev for per-dev action (can be NULL)
1404  *	@action: action about to be performed
1405  *
1406  *	Called just before performing EH actions to clear related bits
1407  *	in @link->eh_info such that eh actions are not unnecessarily
1408  *	repeated.
1409  *
1410  *	LOCKING:
1411  *	None.
1412  */
1413 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1414 			unsigned int action)
1415 {
1416 	struct ata_port *ap = link->ap;
1417 	struct ata_eh_info *ehi = &link->eh_info;
1418 	struct ata_eh_context *ehc = &link->eh_context;
1419 	unsigned long flags;
1420 
1421 	spin_lock_irqsave(ap->lock, flags);
1422 
1423 	ata_eh_clear_action(link, dev, ehi, action);
1424 
1425 	/* About to take EH action, set RECOVERED.  Ignore actions on
1426 	 * slave links as master will do them again.
1427 	 */
1428 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1429 		ap->pflags |= ATA_PFLAG_RECOVERED;
1430 
1431 	spin_unlock_irqrestore(ap->lock, flags);
1432 }
1433 
1434 /**
1435  *	ata_eh_done - EH action complete
1436 *	@ap: target ATA port
1437  *	@dev: target ATA dev for per-dev action (can be NULL)
1438  *	@action: action just completed
1439  *
1440  *	Called right after performing EH actions to clear related bits
1441  *	in @link->eh_context.
1442  *
1443  *	LOCKING:
1444  *	None.
1445  */
1446 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1447 		 unsigned int action)
1448 {
1449 	struct ata_eh_context *ehc = &link->eh_context;
1450 
1451 	ata_eh_clear_action(link, dev, &ehc->i, action);
1452 }
1453 
1454 /**
1455  *	ata_err_string - convert err_mask to descriptive string
1456  *	@err_mask: error mask to convert to string
1457  *
1458  *	Convert @err_mask to descriptive string.  Errors are
1459  *	prioritized according to severity and only the most severe
1460  *	error is reported.
1461  *
1462  *	LOCKING:
1463  *	None.
1464  *
1465  *	RETURNS:
1466  *	Descriptive string for @err_mask
1467  */
1468 static const char *ata_err_string(unsigned int err_mask)
1469 {
1470 	if (err_mask & AC_ERR_HOST_BUS)
1471 		return "host bus error";
1472 	if (err_mask & AC_ERR_ATA_BUS)
1473 		return "ATA bus error";
1474 	if (err_mask & AC_ERR_TIMEOUT)
1475 		return "timeout";
1476 	if (err_mask & AC_ERR_HSM)
1477 		return "HSM violation";
1478 	if (err_mask & AC_ERR_SYSTEM)
1479 		return "internal error";
1480 	if (err_mask & AC_ERR_MEDIA)
1481 		return "media error";
1482 	if (err_mask & AC_ERR_INVALID)
1483 		return "invalid argument";
1484 	if (err_mask & AC_ERR_DEV)
1485 		return "device error";
1486 	return "unknown error";
1487 }
1488 
1489 /**
1490  *	ata_read_log_page - read a specific log page
1491  *	@dev: target device
1492  *	@log: log to read
1493  *	@page: page to read
1494  *	@buf: buffer to store read page
1495  *	@sectors: number of sectors to read
1496  *
1497  *	Read log page using READ_LOG_EXT command.
1498  *
1499  *	LOCKING:
1500  *	Kernel thread context (may sleep).
1501  *
1502  *	RETURNS:
1503  *	0 on success, AC_ERR_* mask otherwise.
1504  */
1505 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1506 			       u8 page, void *buf, unsigned int sectors)
1507 {
1508 	unsigned long ap_flags = dev->link->ap->flags;
1509 	struct ata_taskfile tf;
1510 	unsigned int err_mask;
1511 	bool dma = false;
1512 
1513 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1514 
1515 	/*
1516 	 * Return error without actually issuing the command on controllers
1517 	 * which e.g. lockup on a read log page.
1518 	 */
1519 	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
1520 		return AC_ERR_DEV;
1521 
1522 retry:
1523 	ata_tf_init(dev, &tf);
1524 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
1525 	    !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
1526 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
1527 		tf.protocol = ATA_PROT_DMA;
1528 		dma = true;
1529 	} else {
1530 		tf.command = ATA_CMD_READ_LOG_EXT;
1531 		tf.protocol = ATA_PROT_PIO;
1532 		dma = false;
1533 	}
1534 	tf.lbal = log;
1535 	tf.lbam = page;
1536 	tf.nsect = sectors;
1537 	tf.hob_nsect = sectors >> 8;
1538 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1539 
1540 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1541 				     buf, sectors * ATA_SECT_SIZE, 0);
1542 
1543 	if (err_mask && dma) {
1544 		dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
1545 		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
1546 		goto retry;
1547 	}
1548 
1549 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1550 	return err_mask;
1551 }
1552 
1553 /**
1554  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1555  *	@dev: Device to read log page 10h from
1556  *	@tag: Resulting tag of the failed command
1557  *	@tf: Resulting taskfile registers of the failed command
1558  *
1559  *	Read log page 10h to obtain NCQ error details and clear error
1560  *	condition.
1561  *
1562  *	LOCKING:
1563  *	Kernel thread context (may sleep).
1564  *
1565  *	RETURNS:
1566  *	0 on success, -errno otherwise.
1567  */
1568 static int ata_eh_read_log_10h(struct ata_device *dev,
1569 			       int *tag, struct ata_taskfile *tf)
1570 {
1571 	u8 *buf = dev->link->ap->sector_buf;
1572 	unsigned int err_mask;
1573 	u8 csum;
1574 	int i;
1575 
1576 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1577 	if (err_mask)
1578 		return -EIO;
1579 
1580 	csum = 0;
1581 	for (i = 0; i < ATA_SECT_SIZE; i++)
1582 		csum += buf[i];
1583 	if (csum)
1584 		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1585 			     csum);
1586 
1587 	if (buf[0] & 0x80)
1588 		return -ENOENT;
1589 
1590 	*tag = buf[0] & 0x1f;
1591 
1592 	tf->command = buf[2];
1593 	tf->feature = buf[3];
1594 	tf->lbal = buf[4];
1595 	tf->lbam = buf[5];
1596 	tf->lbah = buf[6];
1597 	tf->device = buf[7];
1598 	tf->hob_lbal = buf[8];
1599 	tf->hob_lbam = buf[9];
1600 	tf->hob_lbah = buf[10];
1601 	tf->nsect = buf[12];
1602 	tf->hob_nsect = buf[13];
1603 
1604 	return 0;
1605 }
1606 
1607 /**
1608  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1609  *	@dev: target ATAPI device
1610  *	@r_sense_key: out parameter for sense_key
1611  *
1612  *	Perform ATAPI TEST_UNIT_READY.
1613  *
1614  *	LOCKING:
1615  *	EH context (may sleep).
1616  *
1617  *	RETURNS:
1618  *	0 on success, AC_ERR_* mask on failure.
1619  */
1620 unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1621 {
1622 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1623 	struct ata_taskfile tf;
1624 	unsigned int err_mask;
1625 
1626 	ata_tf_init(dev, &tf);
1627 
1628 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1629 	tf.command = ATA_CMD_PACKET;
1630 	tf.protocol = ATAPI_PROT_NODATA;
1631 
1632 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1633 	if (err_mask == AC_ERR_DEV)
1634 		*r_sense_key = tf.feature >> 4;
1635 	return err_mask;
1636 }
1637 
1638 /**
1639  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1640  *	@dev: device to perform REQUEST_SENSE to
1641  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1642  *	@dfl_sense_key: default sense key to use
1643  *
1644  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1645  *	SENSE.  This function is EH helper.
1646  *
1647  *	LOCKING:
1648  *	Kernel thread context (may sleep).
1649  *
1650  *	RETURNS:
1651  *	0 on success, AC_ERR_* mask on failure
1652  */
1653 unsigned int atapi_eh_request_sense(struct ata_device *dev,
1654 					   u8 *sense_buf, u8 dfl_sense_key)
1655 {
1656 	u8 cdb[ATAPI_CDB_LEN] =
1657 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1658 	struct ata_port *ap = dev->link->ap;
1659 	struct ata_taskfile tf;
1660 
1661 	DPRINTK("ATAPI request sense\n");
1662 
1663 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1664 
1665 	/* initialize sense_buf with the error register,
1666 	 * for the case where they are -not- overwritten
1667 	 */
1668 	sense_buf[0] = 0x70;
1669 	sense_buf[2] = dfl_sense_key;
1670 
1671 	/* some devices time out if garbage left in tf */
1672 	ata_tf_init(dev, &tf);
1673 
1674 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1675 	tf.command = ATA_CMD_PACKET;
1676 
1677 	/* is it pointless to prefer PIO for "safety reasons"? */
1678 	if (ap->flags & ATA_FLAG_PIO_DMA) {
1679 		tf.protocol = ATAPI_PROT_DMA;
1680 		tf.feature |= ATAPI_PKT_DMA;
1681 	} else {
1682 		tf.protocol = ATAPI_PROT_PIO;
1683 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1684 		tf.lbah = 0;
1685 	}
1686 
1687 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1688 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1689 }
1690 
1691 /**
1692  *	ata_eh_analyze_serror - analyze SError for a failed port
1693  *	@link: ATA link to analyze SError for
1694  *
1695  *	Analyze SError if available and further determine cause of
1696  *	failure.
1697  *
1698  *	LOCKING:
1699  *	None.
1700  */
1701 static void ata_eh_analyze_serror(struct ata_link *link)
1702 {
1703 	struct ata_eh_context *ehc = &link->eh_context;
1704 	u32 serror = ehc->i.serror;
1705 	unsigned int err_mask = 0, action = 0;
1706 	u32 hotplug_mask;
1707 
1708 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1709 		err_mask |= AC_ERR_ATA_BUS;
1710 		action |= ATA_EH_RESET;
1711 	}
1712 	if (serror & SERR_PROTOCOL) {
1713 		err_mask |= AC_ERR_HSM;
1714 		action |= ATA_EH_RESET;
1715 	}
1716 	if (serror & SERR_INTERNAL) {
1717 		err_mask |= AC_ERR_SYSTEM;
1718 		action |= ATA_EH_RESET;
1719 	}
1720 
1721 	/* Determine whether a hotplug event has occurred.  Both
1722 	 * SError.N/X are considered hotplug events for enabled or
1723 	 * host links.  For disabled PMP links, only N bit is
1724 	 * considered as X bit is left at 1 for link plugging.
1725 	 */
1726 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
1727 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
1728 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1729 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1730 	else
1731 		hotplug_mask = SERR_PHYRDY_CHG;
1732 
1733 	if (serror & hotplug_mask)
1734 		ata_ehi_hotplugged(&ehc->i);
1735 
1736 	ehc->i.err_mask |= err_mask;
1737 	ehc->i.action |= action;
1738 }
1739 
1740 /**
1741  *	ata_eh_analyze_ncq_error - analyze NCQ error
1742  *	@link: ATA link to analyze NCQ error for
1743  *
1744  *	Read log page 10h, determine the offending qc and acquire
1745  *	error status TF.  For NCQ device errors, all LLDDs have to do
1746  *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
1747  *	care of the rest.
1748  *
1749  *	LOCKING:
1750  *	Kernel thread context (may sleep).
1751  */
1752 void ata_eh_analyze_ncq_error(struct ata_link *link)
1753 {
1754 	struct ata_port *ap = link->ap;
1755 	struct ata_eh_context *ehc = &link->eh_context;
1756 	struct ata_device *dev = link->device;
1757 	struct ata_queued_cmd *qc;
1758 	struct ata_taskfile tf;
1759 	int tag, rc;
1760 
1761 	/* if frozen, we can't do much */
1762 	if (ap->pflags & ATA_PFLAG_FROZEN)
1763 		return;
1764 
1765 	/* is it NCQ device error? */
1766 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1767 		return;
1768 
1769 	/* has LLDD analyzed already? */
1770 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1771 		qc = __ata_qc_from_tag(ap, tag);
1772 
1773 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1774 			continue;
1775 
1776 		if (qc->err_mask)
1777 			return;
1778 	}
1779 
1780 	/* okay, this error is ours */
1781 	memset(&tf, 0, sizeof(tf));
1782 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1783 	if (rc) {
1784 		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1785 			     rc);
1786 		return;
1787 	}
1788 
1789 	if (!(link->sactive & (1 << tag))) {
1790 		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1791 			     tag);
1792 		return;
1793 	}
1794 
1795 	/* we've got the perpetrator, condemn it */
1796 	qc = __ata_qc_from_tag(ap, tag);
1797 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1798 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1799 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1800 	ehc->i.err_mask &= ~AC_ERR_DEV;
1801 }
1802 
1803 /**
1804  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1805  *	@qc: qc to analyze
1806  *	@tf: Taskfile registers to analyze
1807  *
1808  *	Analyze taskfile of @qc and further determine cause of
1809  *	failure.  This function also requests ATAPI sense data if
1810  *	available.
1811  *
1812  *	LOCKING:
1813  *	Kernel thread context (may sleep).
1814  *
1815  *	RETURNS:
1816  *	Determined recovery action
1817  */
1818 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1819 				      const struct ata_taskfile *tf)
1820 {
1821 	unsigned int tmp, action = 0;
1822 	u8 stat = tf->command, err = tf->feature;
1823 
1824 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1825 		qc->err_mask |= AC_ERR_HSM;
1826 		return ATA_EH_RESET;
1827 	}
1828 
1829 	if (stat & (ATA_ERR | ATA_DF))
1830 		qc->err_mask |= AC_ERR_DEV;
1831 	else
1832 		return 0;
1833 
1834 	switch (qc->dev->class) {
1835 	case ATA_DEV_ATA:
1836 	case ATA_DEV_ZAC:
1837 		if (err & ATA_ICRC)
1838 			qc->err_mask |= AC_ERR_ATA_BUS;
1839 		if (err & (ATA_UNC | ATA_AMNF))
1840 			qc->err_mask |= AC_ERR_MEDIA;
1841 		if (err & ATA_IDNF)
1842 			qc->err_mask |= AC_ERR_INVALID;
1843 		break;
1844 
1845 	case ATA_DEV_ATAPI:
1846 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1847 			tmp = atapi_eh_request_sense(qc->dev,
1848 						qc->scsicmd->sense_buffer,
1849 						qc->result_tf.feature >> 4);
1850 			if (!tmp) {
1851 				/* ATA_QCFLAG_SENSE_VALID is used to
1852 				 * tell atapi_qc_complete() that sense
1853 				 * data is already valid.
1854 				 *
1855 				 * TODO: interpret sense data and set
1856 				 * appropriate err_mask.
1857 				 */
1858 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1859 			} else
1860 				qc->err_mask |= tmp;
1861 		}
1862 	}
1863 
1864 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1865 		action |= ATA_EH_RESET;
1866 
1867 	return action;
1868 }
1869 
1870 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1871 				   int *xfer_ok)
1872 {
1873 	int base = 0;
1874 
1875 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1876 		*xfer_ok = 1;
1877 
1878 	if (!*xfer_ok)
1879 		base = ATA_ECAT_DUBIOUS_NONE;
1880 
1881 	if (err_mask & AC_ERR_ATA_BUS)
1882 		return base + ATA_ECAT_ATA_BUS;
1883 
1884 	if (err_mask & AC_ERR_TIMEOUT)
1885 		return base + ATA_ECAT_TOUT_HSM;
1886 
1887 	if (eflags & ATA_EFLAG_IS_IO) {
1888 		if (err_mask & AC_ERR_HSM)
1889 			return base + ATA_ECAT_TOUT_HSM;
1890 		if ((err_mask &
1891 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1892 			return base + ATA_ECAT_UNK_DEV;
1893 	}
1894 
1895 	return 0;
1896 }
1897 
1898 struct speed_down_verdict_arg {
1899 	u64 since;
1900 	int xfer_ok;
1901 	int nr_errors[ATA_ECAT_NR];
1902 };
1903 
1904 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1905 {
1906 	struct speed_down_verdict_arg *arg = void_arg;
1907 	int cat;
1908 
1909 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1910 		return -1;
1911 
1912 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1913 				      &arg->xfer_ok);
1914 	arg->nr_errors[cat]++;
1915 
1916 	return 0;
1917 }
1918 
1919 /**
1920  *	ata_eh_speed_down_verdict - Determine speed down verdict
1921  *	@dev: Device of interest
1922  *
1923  *	This function examines error ring of @dev and determines
1924  *	whether NCQ needs to be turned off, transfer speed should be
1925  *	stepped down, or falling back to PIO is necessary.
1926  *
1927  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1928  *
1929  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
1930  *			  IO commands
1931  *
1932  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1933  *
1934  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
1935  *			  data transfer hasn't been verified.
1936  *
1937  *	Verdicts are
1938  *
1939  *	NCQ_OFF		: Turn off NCQ.
1940  *
1941  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
1942  *			  to PIO.
1943  *
1944  *	FALLBACK_TO_PIO	: Fall back to PIO.
1945  *
1946  *	Even if multiple verdicts are returned, only one action is
1947  *	taken per error.  An action triggered by non-DUBIOUS errors
1948  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
1949  *	This is to expedite speed down decisions right after device is
1950  *	initially configured.
1951  *
1952  *	The followings are speed down rules.  #1 and #2 deal with
1953  *	DUBIOUS errors.
1954  *
1955  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1956  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1957  *
1958  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1959  *	   occurred during last 5 mins, NCQ_OFF.
1960  *
1961  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1962  *	   occurred during last 5 mins, FALLBACK_TO_PIO
1963  *
1964  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1965  *	   during last 10 mins, NCQ_OFF.
1966  *
1967  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1968  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1969  *
1970  *	LOCKING:
1971  *	Inherited from caller.
1972  *
1973  *	RETURNS:
1974  *	OR of ATA_EH_SPDN_* flags.
1975  */
1976 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1977 {
1978 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1979 	u64 j64 = get_jiffies_64();
1980 	struct speed_down_verdict_arg arg;
1981 	unsigned int verdict = 0;
1982 
1983 	/* scan past 5 mins of error history */
1984 	memset(&arg, 0, sizeof(arg));
1985 	arg.since = j64 - min(j64, j5mins);
1986 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1987 
1988 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1989 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1990 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
1991 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1992 
1993 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1994 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1995 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1996 
1997 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1998 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1999 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2000 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
2001 
2002 	/* scan past 10 mins of error history */
2003 	memset(&arg, 0, sizeof(arg));
2004 	arg.since = j64 - min(j64, j10mins);
2005 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2006 
2007 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2008 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
2009 		verdict |= ATA_EH_SPDN_NCQ_OFF;
2010 
2011 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2012 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
2013 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2014 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
2015 
2016 	return verdict;
2017 }
2018 
2019 /**
2020  *	ata_eh_speed_down - record error and speed down if necessary
2021  *	@dev: Failed device
2022  *	@eflags: mask of ATA_EFLAG_* flags
2023  *	@err_mask: err_mask of the error
2024  *
2025  *	Record error and examine error history to determine whether
2026  *	adjusting transmission speed is necessary.  It also sets
2027  *	transmission limits appropriately if such adjustment is
2028  *	necessary.
2029  *
2030  *	LOCKING:
2031  *	Kernel thread context (may sleep).
2032  *
2033  *	RETURNS:
2034  *	Determined recovery action.
2035  */
2036 static unsigned int ata_eh_speed_down(struct ata_device *dev,
2037 				unsigned int eflags, unsigned int err_mask)
2038 {
2039 	struct ata_link *link = ata_dev_phys_link(dev);
2040 	int xfer_ok = 0;
2041 	unsigned int verdict;
2042 	unsigned int action = 0;
2043 
2044 	/* don't bother if Cat-0 error */
2045 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2046 		return 0;
2047 
2048 	/* record error and determine whether speed down is necessary */
2049 	ata_ering_record(&dev->ering, eflags, err_mask);
2050 	verdict = ata_eh_speed_down_verdict(dev);
2051 
2052 	/* turn off NCQ? */
2053 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2054 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2055 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2056 		dev->flags |= ATA_DFLAG_NCQ_OFF;
2057 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2058 		goto done;
2059 	}
2060 
2061 	/* speed down? */
2062 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2063 		/* speed down SATA link speed if possible */
2064 		if (sata_down_spd_limit(link, 0) == 0) {
2065 			action |= ATA_EH_RESET;
2066 			goto done;
2067 		}
2068 
2069 		/* lower transfer mode */
2070 		if (dev->spdn_cnt < 2) {
2071 			static const int dma_dnxfer_sel[] =
2072 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
2073 			static const int pio_dnxfer_sel[] =
2074 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2075 			int sel;
2076 
2077 			if (dev->xfer_shift != ATA_SHIFT_PIO)
2078 				sel = dma_dnxfer_sel[dev->spdn_cnt];
2079 			else
2080 				sel = pio_dnxfer_sel[dev->spdn_cnt];
2081 
2082 			dev->spdn_cnt++;
2083 
2084 			if (ata_down_xfermask_limit(dev, sel) == 0) {
2085 				action |= ATA_EH_RESET;
2086 				goto done;
2087 			}
2088 		}
2089 	}
2090 
2091 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
2092 	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
2093 	 */
2094 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2095 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2096 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
2097 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2098 			dev->spdn_cnt = 0;
2099 			action |= ATA_EH_RESET;
2100 			goto done;
2101 		}
2102 	}
2103 
2104 	return 0;
2105  done:
2106 	/* device has been slowed down, blow error history */
2107 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2108 		ata_ering_clear(&dev->ering);
2109 	return action;
2110 }
2111 
2112 /**
2113  *	ata_eh_worth_retry - analyze error and decide whether to retry
2114  *	@qc: qc to possibly retry
2115  *
2116  *	Look at the cause of the error and decide if a retry
2117  * 	might be useful or not.  We don't want to retry media errors
2118  *	because the drive itself has probably already taken 10-30 seconds
2119  *	doing its own internal retries before reporting the failure.
2120  */
2121 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2122 {
2123 	if (qc->err_mask & AC_ERR_MEDIA)
2124 		return 0;	/* don't retry media errors */
2125 	if (qc->flags & ATA_QCFLAG_IO)
2126 		return 1;	/* otherwise retry anything from fs stack */
2127 	if (qc->err_mask & AC_ERR_INVALID)
2128 		return 0;	/* don't retry these */
2129 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
2130 }
2131 
2132 /**
2133  *	ata_eh_link_autopsy - analyze error and determine recovery action
2134  *	@link: host link to perform autopsy on
2135  *
2136  *	Analyze why @link failed and determine which recovery actions
2137  *	are needed.  This function also sets more detailed AC_ERR_*
2138  *	values and fills sense data for ATAPI CHECK SENSE.
2139  *
2140  *	LOCKING:
2141  *	Kernel thread context (may sleep).
2142  */
2143 static void ata_eh_link_autopsy(struct ata_link *link)
2144 {
2145 	struct ata_port *ap = link->ap;
2146 	struct ata_eh_context *ehc = &link->eh_context;
2147 	struct ata_device *dev;
2148 	unsigned int all_err_mask = 0, eflags = 0;
2149 	int tag;
2150 	u32 serror;
2151 	int rc;
2152 
2153 	DPRINTK("ENTER\n");
2154 
2155 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2156 		return;
2157 
2158 	/* obtain and analyze SError */
2159 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2160 	if (rc == 0) {
2161 		ehc->i.serror |= serror;
2162 		ata_eh_analyze_serror(link);
2163 	} else if (rc != -EOPNOTSUPP) {
2164 		/* SError read failed, force reset and probing */
2165 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2166 		ehc->i.action |= ATA_EH_RESET;
2167 		ehc->i.err_mask |= AC_ERR_OTHER;
2168 	}
2169 
2170 	/* analyze NCQ failure */
2171 	ata_eh_analyze_ncq_error(link);
2172 
2173 	/* any real error trumps AC_ERR_OTHER */
2174 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2175 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2176 
2177 	all_err_mask |= ehc->i.err_mask;
2178 
2179 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2180 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2181 
2182 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2183 		    ata_dev_phys_link(qc->dev) != link)
2184 			continue;
2185 
2186 		/* inherit upper level err_mask */
2187 		qc->err_mask |= ehc->i.err_mask;
2188 
2189 		/* analyze TF */
2190 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2191 
2192 		/* DEV errors are probably spurious in case of ATA_BUS error */
2193 		if (qc->err_mask & AC_ERR_ATA_BUS)
2194 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2195 					  AC_ERR_INVALID);
2196 
2197 		/* any real error trumps unknown error */
2198 		if (qc->err_mask & ~AC_ERR_OTHER)
2199 			qc->err_mask &= ~AC_ERR_OTHER;
2200 
2201 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2202 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2203 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2204 
2205 		/* determine whether the command is worth retrying */
2206 		if (ata_eh_worth_retry(qc))
2207 			qc->flags |= ATA_QCFLAG_RETRY;
2208 
2209 		/* accumulate error info */
2210 		ehc->i.dev = qc->dev;
2211 		all_err_mask |= qc->err_mask;
2212 		if (qc->flags & ATA_QCFLAG_IO)
2213 			eflags |= ATA_EFLAG_IS_IO;
2214 		trace_ata_eh_link_autopsy_qc(qc);
2215 	}
2216 
2217 	/* enforce default EH actions */
2218 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2219 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2220 		ehc->i.action |= ATA_EH_RESET;
2221 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2222 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2223 		ehc->i.action |= ATA_EH_REVALIDATE;
2224 
2225 	/* If we have offending qcs and the associated failed device,
2226 	 * perform per-dev EH action only on the offending device.
2227 	 */
2228 	if (ehc->i.dev) {
2229 		ehc->i.dev_action[ehc->i.dev->devno] |=
2230 			ehc->i.action & ATA_EH_PERDEV_MASK;
2231 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2232 	}
2233 
2234 	/* propagate timeout to host link */
2235 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2236 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2237 
2238 	/* record error and consider speeding down */
2239 	dev = ehc->i.dev;
2240 	if (!dev && ((ata_link_max_devices(link) == 1 &&
2241 		      ata_dev_enabled(link->device))))
2242 	    dev = link->device;
2243 
2244 	if (dev) {
2245 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2246 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2247 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2248 	}
2249 	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2250 	DPRINTK("EXIT\n");
2251 }
2252 
2253 /**
2254  *	ata_eh_autopsy - analyze error and determine recovery action
2255  *	@ap: host port to perform autopsy on
2256  *
2257  *	Analyze all links of @ap and determine why they failed and
2258  *	which recovery actions are needed.
2259  *
2260  *	LOCKING:
2261  *	Kernel thread context (may sleep).
2262  */
2263 void ata_eh_autopsy(struct ata_port *ap)
2264 {
2265 	struct ata_link *link;
2266 
2267 	ata_for_each_link(link, ap, EDGE)
2268 		ata_eh_link_autopsy(link);
2269 
2270 	/* Handle the frigging slave link.  Autopsy is done similarly
2271 	 * but actions and flags are transferred over to the master
2272 	 * link and handled from there.
2273 	 */
2274 	if (ap->slave_link) {
2275 		struct ata_eh_context *mehc = &ap->link.eh_context;
2276 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2277 
2278 		/* transfer control flags from master to slave */
2279 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2280 
2281 		/* perform autopsy on the slave link */
2282 		ata_eh_link_autopsy(ap->slave_link);
2283 
2284 		/* transfer actions from slave to master and clear slave */
2285 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2286 		mehc->i.action		|= sehc->i.action;
2287 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2288 		mehc->i.flags		|= sehc->i.flags;
2289 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2290 	}
2291 
2292 	/* Autopsy of fanout ports can affect host link autopsy.
2293 	 * Perform host link autopsy last.
2294 	 */
2295 	if (sata_pmp_attached(ap))
2296 		ata_eh_link_autopsy(&ap->link);
2297 }
2298 
2299 /**
2300  *	ata_get_cmd_descript - get description for ATA command
2301  *	@command: ATA command code to get description for
2302  *
2303  *	Return a textual description of the given command, or NULL if the
2304  *	command is not known.
2305  *
2306  *	LOCKING:
2307  *	None
2308  */
2309 const char *ata_get_cmd_descript(u8 command)
2310 {
2311 #ifdef CONFIG_ATA_VERBOSE_ERROR
2312 	static const struct
2313 	{
2314 		u8 command;
2315 		const char *text;
2316 	} cmd_descr[] = {
2317 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2318 		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
2319 		{ ATA_CMD_STANDBY,		"STANDBY" },
2320 		{ ATA_CMD_IDLE,			"IDLE" },
2321 		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
2322 		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
2323 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
2324 		{ ATA_CMD_NOP,			"NOP" },
2325 		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
2326 		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
2327 		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
2328 		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
2329 		{ ATA_CMD_SERVICE,		"SERVICE" },
2330 		{ ATA_CMD_READ,			"READ DMA" },
2331 		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
2332 		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
2333 		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
2334 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2335 		{ ATA_CMD_WRITE,		"WRITE DMA" },
2336 		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
2337 		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
2338 		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
2339 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2340 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2341 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2342 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2343 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2344 		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
2345 		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
2346 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2347 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2348 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2349 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2350 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2351 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2352 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2353 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2354 		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
2355 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2356 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2357 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2358 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2359 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2360 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2361 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2362 		{ ATA_CMD_SLEEP,		"SLEEP" },
2363 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2364 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2365 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2366 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2367 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2368 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2369 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2370 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2371 		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
2372 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
2373 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2374 		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
2375 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2376 		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
2377 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2378 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
2379 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2380 		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
2381 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2382 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2383 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2384 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2385 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2386 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2387 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2388 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2389 		{ ATA_CMD_SMART,		"SMART" },
2390 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2391 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2392 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
2393 		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
2394 		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
2395 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2396 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2397 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2398 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2399 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
2400 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
2401 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2402 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2403 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2404 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2405 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2406 		{ 0,				NULL } /* terminate list */
2407 	};
2408 
2409 	unsigned int i;
2410 	for (i = 0; cmd_descr[i].text; i++)
2411 		if (cmd_descr[i].command == command)
2412 			return cmd_descr[i].text;
2413 #endif
2414 
2415 	return NULL;
2416 }
2417 EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
2418 
2419 /**
2420  *	ata_eh_link_report - report error handling to user
2421  *	@link: ATA link EH is going on
2422  *
2423  *	Report EH to user.
2424  *
2425  *	LOCKING:
2426  *	None.
2427  */
2428 static void ata_eh_link_report(struct ata_link *link)
2429 {
2430 	struct ata_port *ap = link->ap;
2431 	struct ata_eh_context *ehc = &link->eh_context;
2432 	const char *frozen, *desc;
2433 	char tries_buf[6] = "";
2434 	int tag, nr_failed = 0;
2435 
2436 	if (ehc->i.flags & ATA_EHI_QUIET)
2437 		return;
2438 
2439 	desc = NULL;
2440 	if (ehc->i.desc[0] != '\0')
2441 		desc = ehc->i.desc;
2442 
2443 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2444 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2445 
2446 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2447 		    ata_dev_phys_link(qc->dev) != link ||
2448 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2449 		     qc->err_mask == AC_ERR_DEV))
2450 			continue;
2451 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2452 			continue;
2453 
2454 		nr_failed++;
2455 	}
2456 
2457 	if (!nr_failed && !ehc->i.err_mask)
2458 		return;
2459 
2460 	frozen = "";
2461 	if (ap->pflags & ATA_PFLAG_FROZEN)
2462 		frozen = " frozen";
2463 
2464 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2465 		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2466 			 ap->eh_tries);
2467 
2468 	if (ehc->i.dev) {
2469 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2470 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2471 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2472 			    ehc->i.action, frozen, tries_buf);
2473 		if (desc)
2474 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2475 	} else {
2476 		ata_link_err(link, "exception Emask 0x%x "
2477 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2478 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2479 			     ehc->i.action, frozen, tries_buf);
2480 		if (desc)
2481 			ata_link_err(link, "%s\n", desc);
2482 	}
2483 
2484 #ifdef CONFIG_ATA_VERBOSE_ERROR
2485 	if (ehc->i.serror)
2486 		ata_link_err(link,
2487 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2488 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2489 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2490 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2491 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2492 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2493 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2494 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2495 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2496 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2497 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2498 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2499 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2500 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2501 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2502 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2503 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2504 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2505 #endif
2506 
2507 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2508 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2509 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2510 		char data_buf[20] = "";
2511 		char cdb_buf[70] = "";
2512 
2513 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2514 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2515 			continue;
2516 
2517 		if (qc->dma_dir != DMA_NONE) {
2518 			static const char *dma_str[] = {
2519 				[DMA_BIDIRECTIONAL]	= "bidi",
2520 				[DMA_TO_DEVICE]		= "out",
2521 				[DMA_FROM_DEVICE]	= "in",
2522 			};
2523 			static const char *prot_str[] = {
2524 				[ATA_PROT_PIO]		= "pio",
2525 				[ATA_PROT_DMA]		= "dma",
2526 				[ATA_PROT_NCQ]		= "ncq",
2527 				[ATAPI_PROT_PIO]	= "pio",
2528 				[ATAPI_PROT_DMA]	= "dma",
2529 			};
2530 
2531 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2532 				 prot_str[qc->tf.protocol], qc->nbytes,
2533 				 dma_str[qc->dma_dir]);
2534 		}
2535 
2536 		if (ata_is_atapi(qc->tf.protocol)) {
2537 			const u8 *cdb = qc->cdb;
2538 			size_t cdb_len = qc->dev->cdb_len;
2539 
2540 			if (qc->scsicmd) {
2541 				cdb = qc->scsicmd->cmnd;
2542 				cdb_len = qc->scsicmd->cmd_len;
2543 			}
2544 			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
2545 					      cdb, cdb_len);
2546 		} else {
2547 			const char *descr = ata_get_cmd_descript(cmd->command);
2548 			if (descr)
2549 				ata_dev_err(qc->dev, "failed command: %s\n",
2550 					    descr);
2551 		}
2552 
2553 		ata_dev_err(qc->dev,
2554 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2555 			"tag %d%s\n         %s"
2556 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2557 			"Emask 0x%x (%s)%s\n",
2558 			cmd->command, cmd->feature, cmd->nsect,
2559 			cmd->lbal, cmd->lbam, cmd->lbah,
2560 			cmd->hob_feature, cmd->hob_nsect,
2561 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2562 			cmd->device, qc->tag, data_buf, cdb_buf,
2563 			res->command, res->feature, res->nsect,
2564 			res->lbal, res->lbam, res->lbah,
2565 			res->hob_feature, res->hob_nsect,
2566 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2567 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2568 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2569 
2570 #ifdef CONFIG_ATA_VERBOSE_ERROR
2571 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2572 				    ATA_ERR)) {
2573 			if (res->command & ATA_BUSY)
2574 				ata_dev_err(qc->dev, "status: { Busy }\n");
2575 			else
2576 				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
2577 				  res->command & ATA_DRDY ? "DRDY " : "",
2578 				  res->command & ATA_DF ? "DF " : "",
2579 				  res->command & ATA_DRQ ? "DRQ " : "",
2580 				  res->command & ATA_ERR ? "ERR " : "");
2581 		}
2582 
2583 		if (cmd->command != ATA_CMD_PACKET &&
2584 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2585 				     ATA_IDNF | ATA_ABORTED)))
2586 			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2587 			  res->feature & ATA_ICRC ? "ICRC " : "",
2588 			  res->feature & ATA_UNC ? "UNC " : "",
2589 			  res->feature & ATA_AMNF ? "AMNF " : "",
2590 			  res->feature & ATA_IDNF ? "IDNF " : "",
2591 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2592 #endif
2593 	}
2594 }
2595 
2596 /**
2597  *	ata_eh_report - report error handling to user
2598  *	@ap: ATA port to report EH about
2599  *
2600  *	Report EH to user.
2601  *
2602  *	LOCKING:
2603  *	None.
2604  */
2605 void ata_eh_report(struct ata_port *ap)
2606 {
2607 	struct ata_link *link;
2608 
2609 	ata_for_each_link(link, ap, HOST_FIRST)
2610 		ata_eh_link_report(link);
2611 }
2612 
2613 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2614 			unsigned int *classes, unsigned long deadline,
2615 			bool clear_classes)
2616 {
2617 	struct ata_device *dev;
2618 
2619 	if (clear_classes)
2620 		ata_for_each_dev(dev, link, ALL)
2621 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2622 
2623 	return reset(link, classes, deadline);
2624 }
2625 
2626 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
2627 {
2628 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2629 		return 0;
2630 	if (rc == -EAGAIN)
2631 		return 1;
2632 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2633 		return 1;
2634 	return 0;
2635 }
2636 
2637 int ata_eh_reset(struct ata_link *link, int classify,
2638 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2639 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2640 {
2641 	struct ata_port *ap = link->ap;
2642 	struct ata_link *slave = ap->slave_link;
2643 	struct ata_eh_context *ehc = &link->eh_context;
2644 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2645 	unsigned int *classes = ehc->classes;
2646 	unsigned int lflags = link->flags;
2647 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2648 	int max_tries = 0, try = 0;
2649 	struct ata_link *failed_link;
2650 	struct ata_device *dev;
2651 	unsigned long deadline, now;
2652 	ata_reset_fn_t reset;
2653 	unsigned long flags;
2654 	u32 sstatus;
2655 	int nr_unknown, rc;
2656 
2657 	/*
2658 	 * Prepare to reset
2659 	 */
2660 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2661 		max_tries++;
2662 	if (link->flags & ATA_LFLAG_RST_ONCE)
2663 		max_tries = 1;
2664 	if (link->flags & ATA_LFLAG_NO_HRST)
2665 		hardreset = NULL;
2666 	if (link->flags & ATA_LFLAG_NO_SRST)
2667 		softreset = NULL;
2668 
2669 	/* make sure each reset attempt is at least COOL_DOWN apart */
2670 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2671 		now = jiffies;
2672 		WARN_ON(time_after(ehc->last_reset, now));
2673 		deadline = ata_deadline(ehc->last_reset,
2674 					ATA_EH_RESET_COOL_DOWN);
2675 		if (time_before(now, deadline))
2676 			schedule_timeout_uninterruptible(deadline - now);
2677 	}
2678 
2679 	spin_lock_irqsave(ap->lock, flags);
2680 	ap->pflags |= ATA_PFLAG_RESETTING;
2681 	spin_unlock_irqrestore(ap->lock, flags);
2682 
2683 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2684 
2685 	ata_for_each_dev(dev, link, ALL) {
2686 		/* If we issue an SRST then an ATA drive (not ATAPI)
2687 		 * may change configuration and be in PIO0 timing. If
2688 		 * we do a hard reset (or are coming from power on)
2689 		 * this is true for ATA or ATAPI. Until we've set a
2690 		 * suitable controller mode we should not touch the
2691 		 * bus as we may be talking too fast.
2692 		 */
2693 		dev->pio_mode = XFER_PIO_0;
2694 		dev->dma_mode = 0xff;
2695 
2696 		/* If the controller has a pio mode setup function
2697 		 * then use it to set the chipset to rights. Don't
2698 		 * touch the DMA setup as that will be dealt with when
2699 		 * configuring devices.
2700 		 */
2701 		if (ap->ops->set_piomode)
2702 			ap->ops->set_piomode(ap, dev);
2703 	}
2704 
2705 	/* prefer hardreset */
2706 	reset = NULL;
2707 	ehc->i.action &= ~ATA_EH_RESET;
2708 	if (hardreset) {
2709 		reset = hardreset;
2710 		ehc->i.action |= ATA_EH_HARDRESET;
2711 	} else if (softreset) {
2712 		reset = softreset;
2713 		ehc->i.action |= ATA_EH_SOFTRESET;
2714 	}
2715 
2716 	if (prereset) {
2717 		unsigned long deadline = ata_deadline(jiffies,
2718 						      ATA_EH_PRERESET_TIMEOUT);
2719 
2720 		if (slave) {
2721 			sehc->i.action &= ~ATA_EH_RESET;
2722 			sehc->i.action |= ehc->i.action;
2723 		}
2724 
2725 		rc = prereset(link, deadline);
2726 
2727 		/* If present, do prereset on slave link too.  Reset
2728 		 * is skipped iff both master and slave links report
2729 		 * -ENOENT or clear ATA_EH_RESET.
2730 		 */
2731 		if (slave && (rc == 0 || rc == -ENOENT)) {
2732 			int tmp;
2733 
2734 			tmp = prereset(slave, deadline);
2735 			if (tmp != -ENOENT)
2736 				rc = tmp;
2737 
2738 			ehc->i.action |= sehc->i.action;
2739 		}
2740 
2741 		if (rc) {
2742 			if (rc == -ENOENT) {
2743 				ata_link_dbg(link, "port disabled--ignoring\n");
2744 				ehc->i.action &= ~ATA_EH_RESET;
2745 
2746 				ata_for_each_dev(dev, link, ALL)
2747 					classes[dev->devno] = ATA_DEV_NONE;
2748 
2749 				rc = 0;
2750 			} else
2751 				ata_link_err(link,
2752 					     "prereset failed (errno=%d)\n",
2753 					     rc);
2754 			goto out;
2755 		}
2756 
2757 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2758 		 * bang classes, thaw and return.
2759 		 */
2760 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2761 			ata_for_each_dev(dev, link, ALL)
2762 				classes[dev->devno] = ATA_DEV_NONE;
2763 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2764 			    ata_is_host_link(link))
2765 				ata_eh_thaw_port(ap);
2766 			rc = 0;
2767 			goto out;
2768 		}
2769 	}
2770 
2771  retry:
2772 	/*
2773 	 * Perform reset
2774 	 */
2775 	if (ata_is_host_link(link))
2776 		ata_eh_freeze_port(ap);
2777 
2778 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2779 
2780 	if (reset) {
2781 		if (verbose)
2782 			ata_link_info(link, "%s resetting link\n",
2783 				      reset == softreset ? "soft" : "hard");
2784 
2785 		/* mark that this EH session started with reset */
2786 		ehc->last_reset = jiffies;
2787 		if (reset == hardreset)
2788 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2789 		else
2790 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2791 
2792 		rc = ata_do_reset(link, reset, classes, deadline, true);
2793 		if (rc && rc != -EAGAIN) {
2794 			failed_link = link;
2795 			goto fail;
2796 		}
2797 
2798 		/* hardreset slave link if existent */
2799 		if (slave && reset == hardreset) {
2800 			int tmp;
2801 
2802 			if (verbose)
2803 				ata_link_info(slave, "hard resetting link\n");
2804 
2805 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2806 			tmp = ata_do_reset(slave, reset, classes, deadline,
2807 					   false);
2808 			switch (tmp) {
2809 			case -EAGAIN:
2810 				rc = -EAGAIN;
2811 			case 0:
2812 				break;
2813 			default:
2814 				failed_link = slave;
2815 				rc = tmp;
2816 				goto fail;
2817 			}
2818 		}
2819 
2820 		/* perform follow-up SRST if necessary */
2821 		if (reset == hardreset &&
2822 		    ata_eh_followup_srst_needed(link, rc)) {
2823 			reset = softreset;
2824 
2825 			if (!reset) {
2826 				ata_link_err(link,
2827 	     "follow-up softreset required but no softreset available\n");
2828 				failed_link = link;
2829 				rc = -EINVAL;
2830 				goto fail;
2831 			}
2832 
2833 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2834 			rc = ata_do_reset(link, reset, classes, deadline, true);
2835 			if (rc) {
2836 				failed_link = link;
2837 				goto fail;
2838 			}
2839 		}
2840 	} else {
2841 		if (verbose)
2842 			ata_link_info(link,
2843 	"no reset method available, skipping reset\n");
2844 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2845 			lflags |= ATA_LFLAG_ASSUME_ATA;
2846 	}
2847 
2848 	/*
2849 	 * Post-reset processing
2850 	 */
2851 	ata_for_each_dev(dev, link, ALL) {
2852 		/* After the reset, the device state is PIO 0 and the
2853 		 * controller state is undefined.  Reset also wakes up
2854 		 * drives from sleeping mode.
2855 		 */
2856 		dev->pio_mode = XFER_PIO_0;
2857 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2858 
2859 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2860 			continue;
2861 
2862 		/* apply class override */
2863 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2864 			classes[dev->devno] = ATA_DEV_ATA;
2865 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2866 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2867 	}
2868 
2869 	/* record current link speed */
2870 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2871 		link->sata_spd = (sstatus >> 4) & 0xf;
2872 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2873 		slave->sata_spd = (sstatus >> 4) & 0xf;
2874 
2875 	/* thaw the port */
2876 	if (ata_is_host_link(link))
2877 		ata_eh_thaw_port(ap);
2878 
2879 	/* postreset() should clear hardware SError.  Although SError
2880 	 * is cleared during link resume, clearing SError here is
2881 	 * necessary as some PHYs raise hotplug events after SRST.
2882 	 * This introduces race condition where hotplug occurs between
2883 	 * reset and here.  This race is mediated by cross checking
2884 	 * link onlineness and classification result later.
2885 	 */
2886 	if (postreset) {
2887 		postreset(link, classes);
2888 		if (slave)
2889 			postreset(slave, classes);
2890 	}
2891 
2892 	/*
2893 	 * Some controllers can't be frozen very well and may set spurious
2894 	 * error conditions during reset.  Clear accumulated error
2895 	 * information and re-thaw the port if frozen.  As reset is the
2896 	 * final recovery action and we cross check link onlineness against
2897 	 * device classification later, no hotplug event is lost by this.
2898 	 */
2899 	spin_lock_irqsave(link->ap->lock, flags);
2900 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2901 	if (slave)
2902 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
2903 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2904 	spin_unlock_irqrestore(link->ap->lock, flags);
2905 
2906 	if (ap->pflags & ATA_PFLAG_FROZEN)
2907 		ata_eh_thaw_port(ap);
2908 
2909 	/*
2910 	 * Make sure onlineness and classification result correspond.
2911 	 * Hotplug could have happened during reset and some
2912 	 * controllers fail to wait while a drive is spinning up after
2913 	 * being hotplugged causing misdetection.  By cross checking
2914 	 * link on/offlineness and classification result, those
2915 	 * conditions can be reliably detected and retried.
2916 	 */
2917 	nr_unknown = 0;
2918 	ata_for_each_dev(dev, link, ALL) {
2919 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2920 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2921 				ata_dev_dbg(dev, "link online but device misclassified\n");
2922 				classes[dev->devno] = ATA_DEV_NONE;
2923 				nr_unknown++;
2924 			}
2925 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2926 			if (ata_class_enabled(classes[dev->devno]))
2927 				ata_dev_dbg(dev,
2928 					    "link offline, clearing class %d to NONE\n",
2929 					    classes[dev->devno]);
2930 			classes[dev->devno] = ATA_DEV_NONE;
2931 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2932 			ata_dev_dbg(dev,
2933 				    "link status unknown, clearing UNKNOWN to NONE\n");
2934 			classes[dev->devno] = ATA_DEV_NONE;
2935 		}
2936 	}
2937 
2938 	if (classify && nr_unknown) {
2939 		if (try < max_tries) {
2940 			ata_link_warn(link,
2941 				      "link online but %d devices misclassified, retrying\n",
2942 				      nr_unknown);
2943 			failed_link = link;
2944 			rc = -EAGAIN;
2945 			goto fail;
2946 		}
2947 		ata_link_warn(link,
2948 			      "link online but %d devices misclassified, "
2949 			      "device detection might fail\n", nr_unknown);
2950 	}
2951 
2952 	/* reset successful, schedule revalidation */
2953 	ata_eh_done(link, NULL, ATA_EH_RESET);
2954 	if (slave)
2955 		ata_eh_done(slave, NULL, ATA_EH_RESET);
2956 	ehc->last_reset = jiffies;		/* update to completion time */
2957 	ehc->i.action |= ATA_EH_REVALIDATE;
2958 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
2959 
2960 	rc = 0;
2961  out:
2962 	/* clear hotplug flag */
2963 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2964 	if (slave)
2965 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2966 
2967 	spin_lock_irqsave(ap->lock, flags);
2968 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2969 	spin_unlock_irqrestore(ap->lock, flags);
2970 
2971 	return rc;
2972 
2973  fail:
2974 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2975 	if (!ata_is_host_link(link) &&
2976 	    sata_scr_read(link, SCR_STATUS, &sstatus))
2977 		rc = -ERESTART;
2978 
2979 	if (try >= max_tries) {
2980 		/*
2981 		 * Thaw host port even if reset failed, so that the port
2982 		 * can be retried on the next phy event.  This risks
2983 		 * repeated EH runs but seems to be a better tradeoff than
2984 		 * shutting down a port after a botched hotplug attempt.
2985 		 */
2986 		if (ata_is_host_link(link))
2987 			ata_eh_thaw_port(ap);
2988 		goto out;
2989 	}
2990 
2991 	now = jiffies;
2992 	if (time_before(now, deadline)) {
2993 		unsigned long delta = deadline - now;
2994 
2995 		ata_link_warn(failed_link,
2996 			"reset failed (errno=%d), retrying in %u secs\n",
2997 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2998 
2999 		ata_eh_release(ap);
3000 		while (delta)
3001 			delta = schedule_timeout_uninterruptible(delta);
3002 		ata_eh_acquire(ap);
3003 	}
3004 
3005 	/*
3006 	 * While disks spinup behind PMP, some controllers fail sending SRST.
3007 	 * They need to be reset - as well as the PMP - before retrying.
3008 	 */
3009 	if (rc == -ERESTART) {
3010 		if (ata_is_host_link(link))
3011 			ata_eh_thaw_port(ap);
3012 		goto out;
3013 	}
3014 
3015 	if (try == max_tries - 1) {
3016 		sata_down_spd_limit(link, 0);
3017 		if (slave)
3018 			sata_down_spd_limit(slave, 0);
3019 	} else if (rc == -EPIPE)
3020 		sata_down_spd_limit(failed_link, 0);
3021 
3022 	if (hardreset)
3023 		reset = hardreset;
3024 	goto retry;
3025 }
3026 
3027 static inline void ata_eh_pull_park_action(struct ata_port *ap)
3028 {
3029 	struct ata_link *link;
3030 	struct ata_device *dev;
3031 	unsigned long flags;
3032 
3033 	/*
3034 	 * This function can be thought of as an extended version of
3035 	 * ata_eh_about_to_do() specially crafted to accommodate the
3036 	 * requirements of ATA_EH_PARK handling. Since the EH thread
3037 	 * does not leave the do {} while () loop in ata_eh_recover as
3038 	 * long as the timeout for a park request to *one* device on
3039 	 * the port has not expired, and since we still want to pick
3040 	 * up park requests to other devices on the same port or
3041 	 * timeout updates for the same device, we have to pull
3042 	 * ATA_EH_PARK actions from eh_info into eh_context.i
3043 	 * ourselves at the beginning of each pass over the loop.
3044 	 *
3045 	 * Additionally, all write accesses to &ap->park_req_pending
3046 	 * through reinit_completion() (see below) or complete_all()
3047 	 * (see ata_scsi_park_store()) are protected by the host lock.
3048 	 * As a result we have that park_req_pending.done is zero on
3049 	 * exit from this function, i.e. when ATA_EH_PARK actions for
3050 	 * *all* devices on port ap have been pulled into the
3051 	 * respective eh_context structs. If, and only if,
3052 	 * park_req_pending.done is non-zero by the time we reach
3053 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
3054 	 * has been scheduled for at least one of the devices on port
3055 	 * ap and we have to cycle over the do {} while () loop in
3056 	 * ata_eh_recover() again.
3057 	 */
3058 
3059 	spin_lock_irqsave(ap->lock, flags);
3060 	reinit_completion(&ap->park_req_pending);
3061 	ata_for_each_link(link, ap, EDGE) {
3062 		ata_for_each_dev(dev, link, ALL) {
3063 			struct ata_eh_info *ehi = &link->eh_info;
3064 
3065 			link->eh_context.i.dev_action[dev->devno] |=
3066 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
3067 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3068 		}
3069 	}
3070 	spin_unlock_irqrestore(ap->lock, flags);
3071 }
3072 
3073 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3074 {
3075 	struct ata_eh_context *ehc = &dev->link->eh_context;
3076 	struct ata_taskfile tf;
3077 	unsigned int err_mask;
3078 
3079 	ata_tf_init(dev, &tf);
3080 	if (park) {
3081 		ehc->unloaded_mask |= 1 << dev->devno;
3082 		tf.command = ATA_CMD_IDLEIMMEDIATE;
3083 		tf.feature = 0x44;
3084 		tf.lbal = 0x4c;
3085 		tf.lbam = 0x4e;
3086 		tf.lbah = 0x55;
3087 	} else {
3088 		ehc->unloaded_mask &= ~(1 << dev->devno);
3089 		tf.command = ATA_CMD_CHK_POWER;
3090 	}
3091 
3092 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3093 	tf.protocol |= ATA_PROT_NODATA;
3094 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3095 	if (park && (err_mask || tf.lbal != 0xc4)) {
3096 		ata_dev_err(dev, "head unload failed!\n");
3097 		ehc->unloaded_mask &= ~(1 << dev->devno);
3098 	}
3099 }
3100 
3101 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3102 					struct ata_device **r_failed_dev)
3103 {
3104 	struct ata_port *ap = link->ap;
3105 	struct ata_eh_context *ehc = &link->eh_context;
3106 	struct ata_device *dev;
3107 	unsigned int new_mask = 0;
3108 	unsigned long flags;
3109 	int rc = 0;
3110 
3111 	DPRINTK("ENTER\n");
3112 
3113 	/* For PATA drive side cable detection to work, IDENTIFY must
3114 	 * be done backwards such that PDIAG- is released by the slave
3115 	 * device before the master device is identified.
3116 	 */
3117 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3118 		unsigned int action = ata_eh_dev_action(dev);
3119 		unsigned int readid_flags = 0;
3120 
3121 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3122 			readid_flags |= ATA_READID_POSTRESET;
3123 
3124 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3125 			WARN_ON(dev->class == ATA_DEV_PMP);
3126 
3127 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3128 				rc = -EIO;
3129 				goto err;
3130 			}
3131 
3132 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3133 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3134 						readid_flags);
3135 			if (rc)
3136 				goto err;
3137 
3138 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3139 
3140 			/* Configuration may have changed, reconfigure
3141 			 * transfer mode.
3142 			 */
3143 			ehc->i.flags |= ATA_EHI_SETMODE;
3144 
3145 			/* schedule the scsi_rescan_device() here */
3146 			schedule_work(&(ap->scsi_rescan_task));
3147 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3148 			   ehc->tries[dev->devno] &&
3149 			   ata_class_enabled(ehc->classes[dev->devno])) {
3150 			/* Temporarily set dev->class, it will be
3151 			 * permanently set once all configurations are
3152 			 * complete.  This is necessary because new
3153 			 * device configuration is done in two
3154 			 * separate loops.
3155 			 */
3156 			dev->class = ehc->classes[dev->devno];
3157 
3158 			if (dev->class == ATA_DEV_PMP)
3159 				rc = sata_pmp_attach(dev);
3160 			else
3161 				rc = ata_dev_read_id(dev, &dev->class,
3162 						     readid_flags, dev->id);
3163 
3164 			/* read_id might have changed class, store and reset */
3165 			ehc->classes[dev->devno] = dev->class;
3166 			dev->class = ATA_DEV_UNKNOWN;
3167 
3168 			switch (rc) {
3169 			case 0:
3170 				/* clear error info accumulated during probe */
3171 				ata_ering_clear(&dev->ering);
3172 				new_mask |= 1 << dev->devno;
3173 				break;
3174 			case -ENOENT:
3175 				/* IDENTIFY was issued to non-existent
3176 				 * device.  No need to reset.  Just
3177 				 * thaw and ignore the device.
3178 				 */
3179 				ata_eh_thaw_port(ap);
3180 				break;
3181 			default:
3182 				goto err;
3183 			}
3184 		}
3185 	}
3186 
3187 	/* PDIAG- should have been released, ask cable type if post-reset */
3188 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3189 		if (ap->ops->cable_detect)
3190 			ap->cbl = ap->ops->cable_detect(ap);
3191 		ata_force_cbl(ap);
3192 	}
3193 
3194 	/* Configure new devices forward such that user doesn't see
3195 	 * device detection messages backwards.
3196 	 */
3197 	ata_for_each_dev(dev, link, ALL) {
3198 		if (!(new_mask & (1 << dev->devno)))
3199 			continue;
3200 
3201 		dev->class = ehc->classes[dev->devno];
3202 
3203 		if (dev->class == ATA_DEV_PMP)
3204 			continue;
3205 
3206 		ehc->i.flags |= ATA_EHI_PRINTINFO;
3207 		rc = ata_dev_configure(dev);
3208 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3209 		if (rc) {
3210 			dev->class = ATA_DEV_UNKNOWN;
3211 			goto err;
3212 		}
3213 
3214 		spin_lock_irqsave(ap->lock, flags);
3215 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3216 		spin_unlock_irqrestore(ap->lock, flags);
3217 
3218 		/* new device discovered, configure xfermode */
3219 		ehc->i.flags |= ATA_EHI_SETMODE;
3220 	}
3221 
3222 	return 0;
3223 
3224  err:
3225 	*r_failed_dev = dev;
3226 	DPRINTK("EXIT rc=%d\n", rc);
3227 	return rc;
3228 }
3229 
3230 /**
3231  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3232  *	@link: link on which timings will be programmed
3233  *	@r_failed_dev: out parameter for failed device
3234  *
3235  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3236  *	ata_set_mode() fails, pointer to the failing device is
3237  *	returned in @r_failed_dev.
3238  *
3239  *	LOCKING:
3240  *	PCI/etc. bus probe sem.
3241  *
3242  *	RETURNS:
3243  *	0 on success, negative errno otherwise
3244  */
3245 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3246 {
3247 	struct ata_port *ap = link->ap;
3248 	struct ata_device *dev;
3249 	int rc;
3250 
3251 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3252 	ata_for_each_dev(dev, link, ENABLED) {
3253 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3254 			struct ata_ering_entry *ent;
3255 
3256 			ent = ata_ering_top(&dev->ering);
3257 			if (ent)
3258 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3259 		}
3260 	}
3261 
3262 	/* has private set_mode? */
3263 	if (ap->ops->set_mode)
3264 		rc = ap->ops->set_mode(link, r_failed_dev);
3265 	else
3266 		rc = ata_do_set_mode(link, r_failed_dev);
3267 
3268 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3269 	ata_for_each_dev(dev, link, ENABLED) {
3270 		struct ata_eh_context *ehc = &link->eh_context;
3271 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3272 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3273 
3274 		if (dev->xfer_mode != saved_xfer_mode ||
3275 		    ata_ncq_enabled(dev) != saved_ncq)
3276 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3277 	}
3278 
3279 	return rc;
3280 }
3281 
3282 /**
3283  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3284  *	@dev: ATAPI device to clear UA for
3285  *
3286  *	Resets and other operations can make an ATAPI device raise
3287  *	UNIT ATTENTION which causes the next operation to fail.  This
3288  *	function clears UA.
3289  *
3290  *	LOCKING:
3291  *	EH context (may sleep).
3292  *
3293  *	RETURNS:
3294  *	0 on success, -errno on failure.
3295  */
3296 static int atapi_eh_clear_ua(struct ata_device *dev)
3297 {
3298 	int i;
3299 
3300 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3301 		u8 *sense_buffer = dev->link->ap->sector_buf;
3302 		u8 sense_key = 0;
3303 		unsigned int err_mask;
3304 
3305 		err_mask = atapi_eh_tur(dev, &sense_key);
3306 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3307 			ata_dev_warn(dev,
3308 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3309 				     err_mask);
3310 			return -EIO;
3311 		}
3312 
3313 		if (!err_mask || sense_key != UNIT_ATTENTION)
3314 			return 0;
3315 
3316 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3317 		if (err_mask) {
3318 			ata_dev_warn(dev, "failed to clear "
3319 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3320 			return -EIO;
3321 		}
3322 	}
3323 
3324 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3325 		     ATA_EH_UA_TRIES);
3326 
3327 	return 0;
3328 }
3329 
3330 /**
3331  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3332  *	@dev: ATA device which may need FLUSH retry
3333  *
3334  *	If @dev failed FLUSH, it needs to be reported upper layer
3335  *	immediately as it means that @dev failed to remap and already
3336  *	lost at least a sector and further FLUSH retrials won't make
3337  *	any difference to the lost sector.  However, if FLUSH failed
3338  *	for other reasons, for example transmission error, FLUSH needs
3339  *	to be retried.
3340  *
3341  *	This function determines whether FLUSH failure retry is
3342  *	necessary and performs it if so.
3343  *
3344  *	RETURNS:
3345  *	0 if EH can continue, -errno if EH needs to be repeated.
3346  */
3347 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3348 {
3349 	struct ata_link *link = dev->link;
3350 	struct ata_port *ap = link->ap;
3351 	struct ata_queued_cmd *qc;
3352 	struct ata_taskfile tf;
3353 	unsigned int err_mask;
3354 	int rc = 0;
3355 
3356 	/* did flush fail for this device? */
3357 	if (!ata_tag_valid(link->active_tag))
3358 		return 0;
3359 
3360 	qc = __ata_qc_from_tag(ap, link->active_tag);
3361 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3362 			       qc->tf.command != ATA_CMD_FLUSH))
3363 		return 0;
3364 
3365 	/* if the device failed it, it should be reported to upper layers */
3366 	if (qc->err_mask & AC_ERR_DEV)
3367 		return 0;
3368 
3369 	/* flush failed for some other reason, give it another shot */
3370 	ata_tf_init(dev, &tf);
3371 
3372 	tf.command = qc->tf.command;
3373 	tf.flags |= ATA_TFLAG_DEVICE;
3374 	tf.protocol = ATA_PROT_NODATA;
3375 
3376 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3377 		       tf.command, qc->err_mask);
3378 
3379 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3380 	if (!err_mask) {
3381 		/*
3382 		 * FLUSH is complete but there's no way to
3383 		 * successfully complete a failed command from EH.
3384 		 * Making sure retry is allowed at least once and
3385 		 * retrying it should do the trick - whatever was in
3386 		 * the cache is already on the platter and this won't
3387 		 * cause infinite loop.
3388 		 */
3389 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3390 	} else {
3391 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3392 			       err_mask);
3393 		rc = -EIO;
3394 
3395 		/* if device failed it, report it to upper layers */
3396 		if (err_mask & AC_ERR_DEV) {
3397 			qc->err_mask |= AC_ERR_DEV;
3398 			qc->result_tf = tf;
3399 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3400 				rc = 0;
3401 		}
3402 	}
3403 	return rc;
3404 }
3405 
3406 /**
3407  *	ata_eh_set_lpm - configure SATA interface power management
3408  *	@link: link to configure power management
3409  *	@policy: the link power management policy
3410  *	@r_failed_dev: out parameter for failed device
3411  *
3412  *	Enable SATA Interface power management.  This will enable
3413  *	Device Interface Power Management (DIPM) for min_power
3414  * 	policy, and then call driver specific callbacks for
3415  *	enabling Host Initiated Power management.
3416  *
3417  *	LOCKING:
3418  *	EH context.
3419  *
3420  *	RETURNS:
3421  *	0 on success, -errno on failure.
3422  */
3423 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3424 			  struct ata_device **r_failed_dev)
3425 {
3426 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3427 	struct ata_eh_context *ehc = &link->eh_context;
3428 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3429 	enum ata_lpm_policy old_policy = link->lpm_policy;
3430 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3431 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3432 	unsigned int err_mask;
3433 	int rc;
3434 
3435 	/* if the link or host doesn't do LPM, noop */
3436 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3437 		return 0;
3438 
3439 	/*
3440 	 * DIPM is enabled only for MIN_POWER as some devices
3441 	 * misbehave when the host NACKs transition to SLUMBER.  Order
3442 	 * device and link configurations such that the host always
3443 	 * allows DIPM requests.
3444 	 */
3445 	ata_for_each_dev(dev, link, ENABLED) {
3446 		bool hipm = ata_id_has_hipm(dev->id);
3447 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3448 
3449 		/* find the first enabled and LPM enabled devices */
3450 		if (!link_dev)
3451 			link_dev = dev;
3452 
3453 		if (!lpm_dev && (hipm || dipm))
3454 			lpm_dev = dev;
3455 
3456 		hints &= ~ATA_LPM_EMPTY;
3457 		if (!hipm)
3458 			hints &= ~ATA_LPM_HIPM;
3459 
3460 		/* disable DIPM before changing link config */
3461 		if (policy != ATA_LPM_MIN_POWER && dipm) {
3462 			err_mask = ata_dev_set_feature(dev,
3463 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
3464 			if (err_mask && err_mask != AC_ERR_DEV) {
3465 				ata_dev_warn(dev,
3466 					     "failed to disable DIPM, Emask 0x%x\n",
3467 					     err_mask);
3468 				rc = -EIO;
3469 				goto fail;
3470 			}
3471 		}
3472 	}
3473 
3474 	if (ap) {
3475 		rc = ap->ops->set_lpm(link, policy, hints);
3476 		if (!rc && ap->slave_link)
3477 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3478 	} else
3479 		rc = sata_pmp_set_lpm(link, policy, hints);
3480 
3481 	/*
3482 	 * Attribute link config failure to the first (LPM) enabled
3483 	 * device on the link.
3484 	 */
3485 	if (rc) {
3486 		if (rc == -EOPNOTSUPP) {
3487 			link->flags |= ATA_LFLAG_NO_LPM;
3488 			return 0;
3489 		}
3490 		dev = lpm_dev ? lpm_dev : link_dev;
3491 		goto fail;
3492 	}
3493 
3494 	/*
3495 	 * Low level driver acked the transition.  Issue DIPM command
3496 	 * with the new policy set.
3497 	 */
3498 	link->lpm_policy = policy;
3499 	if (ap && ap->slave_link)
3500 		ap->slave_link->lpm_policy = policy;
3501 
3502 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
3503 	ata_for_each_dev(dev, link, ENABLED) {
3504 		if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3505 		    ata_id_has_dipm(dev->id)) {
3506 			err_mask = ata_dev_set_feature(dev,
3507 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
3508 			if (err_mask && err_mask != AC_ERR_DEV) {
3509 				ata_dev_warn(dev,
3510 					"failed to enable DIPM, Emask 0x%x\n",
3511 					err_mask);
3512 				rc = -EIO;
3513 				goto fail;
3514 			}
3515 		}
3516 	}
3517 
3518 	link->last_lpm_change = jiffies;
3519 	link->flags |= ATA_LFLAG_CHANGED;
3520 
3521 	return 0;
3522 
3523 fail:
3524 	/* restore the old policy */
3525 	link->lpm_policy = old_policy;
3526 	if (ap && ap->slave_link)
3527 		ap->slave_link->lpm_policy = old_policy;
3528 
3529 	/* if no device or only one more chance is left, disable LPM */
3530 	if (!dev || ehc->tries[dev->devno] <= 2) {
3531 		ata_link_warn(link, "disabling LPM on the link\n");
3532 		link->flags |= ATA_LFLAG_NO_LPM;
3533 	}
3534 	if (r_failed_dev)
3535 		*r_failed_dev = dev;
3536 	return rc;
3537 }
3538 
3539 int ata_link_nr_enabled(struct ata_link *link)
3540 {
3541 	struct ata_device *dev;
3542 	int cnt = 0;
3543 
3544 	ata_for_each_dev(dev, link, ENABLED)
3545 		cnt++;
3546 	return cnt;
3547 }
3548 
3549 static int ata_link_nr_vacant(struct ata_link *link)
3550 {
3551 	struct ata_device *dev;
3552 	int cnt = 0;
3553 
3554 	ata_for_each_dev(dev, link, ALL)
3555 		if (dev->class == ATA_DEV_UNKNOWN)
3556 			cnt++;
3557 	return cnt;
3558 }
3559 
3560 static int ata_eh_skip_recovery(struct ata_link *link)
3561 {
3562 	struct ata_port *ap = link->ap;
3563 	struct ata_eh_context *ehc = &link->eh_context;
3564 	struct ata_device *dev;
3565 
3566 	/* skip disabled links */
3567 	if (link->flags & ATA_LFLAG_DISABLED)
3568 		return 1;
3569 
3570 	/* skip if explicitly requested */
3571 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3572 		return 1;
3573 
3574 	/* thaw frozen port and recover failed devices */
3575 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3576 		return 0;
3577 
3578 	/* reset at least once if reset is requested */
3579 	if ((ehc->i.action & ATA_EH_RESET) &&
3580 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3581 		return 0;
3582 
3583 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3584 	ata_for_each_dev(dev, link, ALL) {
3585 		if (dev->class == ATA_DEV_UNKNOWN &&
3586 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3587 			return 0;
3588 	}
3589 
3590 	return 1;
3591 }
3592 
3593 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3594 {
3595 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3596 	u64 now = get_jiffies_64();
3597 	int *trials = void_arg;
3598 
3599 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3600 	    (ent->timestamp < now - min(now, interval)))
3601 		return -1;
3602 
3603 	(*trials)++;
3604 	return 0;
3605 }
3606 
3607 static int ata_eh_schedule_probe(struct ata_device *dev)
3608 {
3609 	struct ata_eh_context *ehc = &dev->link->eh_context;
3610 	struct ata_link *link = ata_dev_phys_link(dev);
3611 	int trials = 0;
3612 
3613 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3614 	    (ehc->did_probe_mask & (1 << dev->devno)))
3615 		return 0;
3616 
3617 	ata_eh_detach_dev(dev);
3618 	ata_dev_init(dev);
3619 	ehc->did_probe_mask |= (1 << dev->devno);
3620 	ehc->i.action |= ATA_EH_RESET;
3621 	ehc->saved_xfer_mode[dev->devno] = 0;
3622 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3623 
3624 	/* the link maybe in a deep sleep, wake it up */
3625 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3626 		if (ata_is_host_link(link))
3627 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3628 					       ATA_LPM_EMPTY);
3629 		else
3630 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3631 					 ATA_LPM_EMPTY);
3632 	}
3633 
3634 	/* Record and count probe trials on the ering.  The specific
3635 	 * error mask used is irrelevant.  Because a successful device
3636 	 * detection clears the ering, this count accumulates only if
3637 	 * there are consecutive failed probes.
3638 	 *
3639 	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3640 	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3641 	 * forced to 1.5Gbps.
3642 	 *
3643 	 * This is to work around cases where failed link speed
3644 	 * negotiation results in device misdetection leading to
3645 	 * infinite DEVXCHG or PHRDY CHG events.
3646 	 */
3647 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3648 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3649 
3650 	if (trials > ATA_EH_PROBE_TRIALS)
3651 		sata_down_spd_limit(link, 1);
3652 
3653 	return 1;
3654 }
3655 
3656 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3657 {
3658 	struct ata_eh_context *ehc = &dev->link->eh_context;
3659 
3660 	/* -EAGAIN from EH routine indicates retry without prejudice.
3661 	 * The requester is responsible for ensuring forward progress.
3662 	 */
3663 	if (err != -EAGAIN)
3664 		ehc->tries[dev->devno]--;
3665 
3666 	switch (err) {
3667 	case -ENODEV:
3668 		/* device missing or wrong IDENTIFY data, schedule probing */
3669 		ehc->i.probe_mask |= (1 << dev->devno);
3670 	case -EINVAL:
3671 		/* give it just one more chance */
3672 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3673 	case -EIO:
3674 		if (ehc->tries[dev->devno] == 1) {
3675 			/* This is the last chance, better to slow
3676 			 * down than lose it.
3677 			 */
3678 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3679 			if (dev->pio_mode > XFER_PIO_0)
3680 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3681 		}
3682 	}
3683 
3684 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3685 		/* disable device if it has used up all its chances */
3686 		ata_dev_disable(dev);
3687 
3688 		/* detach if offline */
3689 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3690 			ata_eh_detach_dev(dev);
3691 
3692 		/* schedule probe if necessary */
3693 		if (ata_eh_schedule_probe(dev)) {
3694 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3695 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3696 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3697 		}
3698 
3699 		return 1;
3700 	} else {
3701 		ehc->i.action |= ATA_EH_RESET;
3702 		return 0;
3703 	}
3704 }
3705 
3706 /**
3707  *	ata_eh_recover - recover host port after error
3708  *	@ap: host port to recover
3709  *	@prereset: prereset method (can be NULL)
3710  *	@softreset: softreset method (can be NULL)
3711  *	@hardreset: hardreset method (can be NULL)
3712  *	@postreset: postreset method (can be NULL)
3713  *	@r_failed_link: out parameter for failed link
3714  *
3715  *	This is the alpha and omega, eum and yang, heart and soul of
3716  *	libata exception handling.  On entry, actions required to
3717  *	recover each link and hotplug requests are recorded in the
3718  *	link's eh_context.  This function executes all the operations
3719  *	with appropriate retrials and fallbacks to resurrect failed
3720  *	devices, detach goners and greet newcomers.
3721  *
3722  *	LOCKING:
3723  *	Kernel thread context (may sleep).
3724  *
3725  *	RETURNS:
3726  *	0 on success, -errno on failure.
3727  */
3728 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3729 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3730 		   ata_postreset_fn_t postreset,
3731 		   struct ata_link **r_failed_link)
3732 {
3733 	struct ata_link *link;
3734 	struct ata_device *dev;
3735 	int rc, nr_fails;
3736 	unsigned long flags, deadline;
3737 
3738 	DPRINTK("ENTER\n");
3739 
3740 	/* prep for recovery */
3741 	ata_for_each_link(link, ap, EDGE) {
3742 		struct ata_eh_context *ehc = &link->eh_context;
3743 
3744 		/* re-enable link? */
3745 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3746 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3747 			spin_lock_irqsave(ap->lock, flags);
3748 			link->flags &= ~ATA_LFLAG_DISABLED;
3749 			spin_unlock_irqrestore(ap->lock, flags);
3750 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3751 		}
3752 
3753 		ata_for_each_dev(dev, link, ALL) {
3754 			if (link->flags & ATA_LFLAG_NO_RETRY)
3755 				ehc->tries[dev->devno] = 1;
3756 			else
3757 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3758 
3759 			/* collect port action mask recorded in dev actions */
3760 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3761 					 ~ATA_EH_PERDEV_MASK;
3762 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3763 
3764 			/* process hotplug request */
3765 			if (dev->flags & ATA_DFLAG_DETACH)
3766 				ata_eh_detach_dev(dev);
3767 
3768 			/* schedule probe if necessary */
3769 			if (!ata_dev_enabled(dev))
3770 				ata_eh_schedule_probe(dev);
3771 		}
3772 	}
3773 
3774  retry:
3775 	rc = 0;
3776 
3777 	/* if UNLOADING, finish immediately */
3778 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3779 		goto out;
3780 
3781 	/* prep for EH */
3782 	ata_for_each_link(link, ap, EDGE) {
3783 		struct ata_eh_context *ehc = &link->eh_context;
3784 
3785 		/* skip EH if possible. */
3786 		if (ata_eh_skip_recovery(link))
3787 			ehc->i.action = 0;
3788 
3789 		ata_for_each_dev(dev, link, ALL)
3790 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3791 	}
3792 
3793 	/* reset */
3794 	ata_for_each_link(link, ap, EDGE) {
3795 		struct ata_eh_context *ehc = &link->eh_context;
3796 
3797 		if (!(ehc->i.action & ATA_EH_RESET))
3798 			continue;
3799 
3800 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3801 				  prereset, softreset, hardreset, postreset);
3802 		if (rc) {
3803 			ata_link_err(link, "reset failed, giving up\n");
3804 			goto out;
3805 		}
3806 	}
3807 
3808 	do {
3809 		unsigned long now;
3810 
3811 		/*
3812 		 * clears ATA_EH_PARK in eh_info and resets
3813 		 * ap->park_req_pending
3814 		 */
3815 		ata_eh_pull_park_action(ap);
3816 
3817 		deadline = jiffies;
3818 		ata_for_each_link(link, ap, EDGE) {
3819 			ata_for_each_dev(dev, link, ALL) {
3820 				struct ata_eh_context *ehc = &link->eh_context;
3821 				unsigned long tmp;
3822 
3823 				if (dev->class != ATA_DEV_ATA &&
3824 				    dev->class != ATA_DEV_ZAC)
3825 					continue;
3826 				if (!(ehc->i.dev_action[dev->devno] &
3827 				      ATA_EH_PARK))
3828 					continue;
3829 				tmp = dev->unpark_deadline;
3830 				if (time_before(deadline, tmp))
3831 					deadline = tmp;
3832 				else if (time_before_eq(tmp, jiffies))
3833 					continue;
3834 				if (ehc->unloaded_mask & (1 << dev->devno))
3835 					continue;
3836 
3837 				ata_eh_park_issue_cmd(dev, 1);
3838 			}
3839 		}
3840 
3841 		now = jiffies;
3842 		if (time_before_eq(deadline, now))
3843 			break;
3844 
3845 		ata_eh_release(ap);
3846 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3847 						       deadline - now);
3848 		ata_eh_acquire(ap);
3849 	} while (deadline);
3850 	ata_for_each_link(link, ap, EDGE) {
3851 		ata_for_each_dev(dev, link, ALL) {
3852 			if (!(link->eh_context.unloaded_mask &
3853 			      (1 << dev->devno)))
3854 				continue;
3855 
3856 			ata_eh_park_issue_cmd(dev, 0);
3857 			ata_eh_done(link, dev, ATA_EH_PARK);
3858 		}
3859 	}
3860 
3861 	/* the rest */
3862 	nr_fails = 0;
3863 	ata_for_each_link(link, ap, PMP_FIRST) {
3864 		struct ata_eh_context *ehc = &link->eh_context;
3865 
3866 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
3867 			goto config_lpm;
3868 
3869 		/* revalidate existing devices and attach new ones */
3870 		rc = ata_eh_revalidate_and_attach(link, &dev);
3871 		if (rc)
3872 			goto rest_fail;
3873 
3874 		/* if PMP got attached, return, pmp EH will take care of it */
3875 		if (link->device->class == ATA_DEV_PMP) {
3876 			ehc->i.action = 0;
3877 			return 0;
3878 		}
3879 
3880 		/* configure transfer mode if necessary */
3881 		if (ehc->i.flags & ATA_EHI_SETMODE) {
3882 			rc = ata_set_mode(link, &dev);
3883 			if (rc)
3884 				goto rest_fail;
3885 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3886 		}
3887 
3888 		/* If reset has been issued, clear UA to avoid
3889 		 * disrupting the current users of the device.
3890 		 */
3891 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3892 			ata_for_each_dev(dev, link, ALL) {
3893 				if (dev->class != ATA_DEV_ATAPI)
3894 					continue;
3895 				rc = atapi_eh_clear_ua(dev);
3896 				if (rc)
3897 					goto rest_fail;
3898 				if (zpodd_dev_enabled(dev))
3899 					zpodd_post_poweron(dev);
3900 			}
3901 		}
3902 
3903 		/* retry flush if necessary */
3904 		ata_for_each_dev(dev, link, ALL) {
3905 			if (dev->class != ATA_DEV_ATA &&
3906 			    dev->class != ATA_DEV_ZAC)
3907 				continue;
3908 			rc = ata_eh_maybe_retry_flush(dev);
3909 			if (rc)
3910 				goto rest_fail;
3911 		}
3912 
3913 	config_lpm:
3914 		/* configure link power saving */
3915 		if (link->lpm_policy != ap->target_lpm_policy) {
3916 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3917 			if (rc)
3918 				goto rest_fail;
3919 		}
3920 
3921 		/* this link is okay now */
3922 		ehc->i.flags = 0;
3923 		continue;
3924 
3925 	rest_fail:
3926 		nr_fails++;
3927 		if (dev)
3928 			ata_eh_handle_dev_fail(dev, rc);
3929 
3930 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3931 			/* PMP reset requires working host port.
3932 			 * Can't retry if it's frozen.
3933 			 */
3934 			if (sata_pmp_attached(ap))
3935 				goto out;
3936 			break;
3937 		}
3938 	}
3939 
3940 	if (nr_fails)
3941 		goto retry;
3942 
3943  out:
3944 	if (rc && r_failed_link)
3945 		*r_failed_link = link;
3946 
3947 	DPRINTK("EXIT, rc=%d\n", rc);
3948 	return rc;
3949 }
3950 
3951 /**
3952  *	ata_eh_finish - finish up EH
3953  *	@ap: host port to finish EH for
3954  *
3955  *	Recovery is complete.  Clean up EH states and retry or finish
3956  *	failed qcs.
3957  *
3958  *	LOCKING:
3959  *	None.
3960  */
3961 void ata_eh_finish(struct ata_port *ap)
3962 {
3963 	int tag;
3964 
3965 	/* retry or finish qcs */
3966 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3967 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3968 
3969 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3970 			continue;
3971 
3972 		if (qc->err_mask) {
3973 			/* FIXME: Once EH migration is complete,
3974 			 * generate sense data in this function,
3975 			 * considering both err_mask and tf.
3976 			 */
3977 			if (qc->flags & ATA_QCFLAG_RETRY)
3978 				ata_eh_qc_retry(qc);
3979 			else
3980 				ata_eh_qc_complete(qc);
3981 		} else {
3982 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3983 				ata_eh_qc_complete(qc);
3984 			} else {
3985 				/* feed zero TF to sense generation */
3986 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3987 				ata_eh_qc_retry(qc);
3988 			}
3989 		}
3990 	}
3991 
3992 	/* make sure nr_active_links is zero after EH */
3993 	WARN_ON(ap->nr_active_links);
3994 	ap->nr_active_links = 0;
3995 }
3996 
3997 /**
3998  *	ata_do_eh - do standard error handling
3999  *	@ap: host port to handle error for
4000  *
4001  *	@prereset: prereset method (can be NULL)
4002  *	@softreset: softreset method (can be NULL)
4003  *	@hardreset: hardreset method (can be NULL)
4004  *	@postreset: postreset method (can be NULL)
4005  *
4006  *	Perform standard error handling sequence.
4007  *
4008  *	LOCKING:
4009  *	Kernel thread context (may sleep).
4010  */
4011 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4012 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4013 	       ata_postreset_fn_t postreset)
4014 {
4015 	struct ata_device *dev;
4016 	int rc;
4017 
4018 	ata_eh_autopsy(ap);
4019 	ata_eh_report(ap);
4020 
4021 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4022 			    NULL);
4023 	if (rc) {
4024 		ata_for_each_dev(dev, &ap->link, ALL)
4025 			ata_dev_disable(dev);
4026 	}
4027 
4028 	ata_eh_finish(ap);
4029 }
4030 
4031 /**
4032  *	ata_std_error_handler - standard error handler
4033  *	@ap: host port to handle error for
4034  *
4035  *	Standard error handler
4036  *
4037  *	LOCKING:
4038  *	Kernel thread context (may sleep).
4039  */
4040 void ata_std_error_handler(struct ata_port *ap)
4041 {
4042 	struct ata_port_operations *ops = ap->ops;
4043 	ata_reset_fn_t hardreset = ops->hardreset;
4044 
4045 	/* ignore built-in hardreset if SCR access is not available */
4046 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4047 		hardreset = NULL;
4048 
4049 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4050 }
4051 
4052 #ifdef CONFIG_PM
4053 /**
4054  *	ata_eh_handle_port_suspend - perform port suspend operation
4055  *	@ap: port to suspend
4056  *
4057  *	Suspend @ap.
4058  *
4059  *	LOCKING:
4060  *	Kernel thread context (may sleep).
4061  */
4062 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4063 {
4064 	unsigned long flags;
4065 	int rc = 0;
4066 	struct ata_device *dev;
4067 
4068 	/* are we suspending? */
4069 	spin_lock_irqsave(ap->lock, flags);
4070 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4071 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4072 		spin_unlock_irqrestore(ap->lock, flags);
4073 		return;
4074 	}
4075 	spin_unlock_irqrestore(ap->lock, flags);
4076 
4077 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4078 
4079 	/*
4080 	 * If we have a ZPODD attached, check its zero
4081 	 * power ready status before the port is frozen.
4082 	 * Only needed for runtime suspend.
4083 	 */
4084 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
4085 		ata_for_each_dev(dev, &ap->link, ENABLED) {
4086 			if (zpodd_dev_enabled(dev))
4087 				zpodd_on_suspend(dev);
4088 		}
4089 	}
4090 
4091 	/* tell ACPI we're suspending */
4092 	rc = ata_acpi_on_suspend(ap);
4093 	if (rc)
4094 		goto out;
4095 
4096 	/* suspend */
4097 	ata_eh_freeze_port(ap);
4098 
4099 	if (ap->ops->port_suspend)
4100 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4101 
4102 	ata_acpi_set_state(ap, ap->pm_mesg);
4103  out:
4104 	/* update the flags */
4105 	spin_lock_irqsave(ap->lock, flags);
4106 
4107 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4108 	if (rc == 0)
4109 		ap->pflags |= ATA_PFLAG_SUSPENDED;
4110 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4111 		ata_port_schedule_eh(ap);
4112 
4113 	spin_unlock_irqrestore(ap->lock, flags);
4114 
4115 	return;
4116 }
4117 
4118 /**
4119  *	ata_eh_handle_port_resume - perform port resume operation
4120  *	@ap: port to resume
4121  *
4122  *	Resume @ap.
4123  *
4124  *	LOCKING:
4125  *	Kernel thread context (may sleep).
4126  */
4127 static void ata_eh_handle_port_resume(struct ata_port *ap)
4128 {
4129 	struct ata_link *link;
4130 	struct ata_device *dev;
4131 	unsigned long flags;
4132 	int rc = 0;
4133 
4134 	/* are we resuming? */
4135 	spin_lock_irqsave(ap->lock, flags);
4136 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4137 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4138 		spin_unlock_irqrestore(ap->lock, flags);
4139 		return;
4140 	}
4141 	spin_unlock_irqrestore(ap->lock, flags);
4142 
4143 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4144 
4145 	/*
4146 	 * Error timestamps are in jiffies which doesn't run while
4147 	 * suspended and PHY events during resume isn't too uncommon.
4148 	 * When the two are combined, it can lead to unnecessary speed
4149 	 * downs if the machine is suspended and resumed repeatedly.
4150 	 * Clear error history.
4151 	 */
4152 	ata_for_each_link(link, ap, HOST_FIRST)
4153 		ata_for_each_dev(dev, link, ALL)
4154 			ata_ering_clear(&dev->ering);
4155 
4156 	ata_acpi_set_state(ap, ap->pm_mesg);
4157 
4158 	if (ap->ops->port_resume)
4159 		rc = ap->ops->port_resume(ap);
4160 
4161 	/* tell ACPI that we're resuming */
4162 	ata_acpi_on_resume(ap);
4163 
4164 	/* update the flags */
4165 	spin_lock_irqsave(ap->lock, flags);
4166 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4167 	spin_unlock_irqrestore(ap->lock, flags);
4168 }
4169 #endif /* CONFIG_PM */
4170