xref: /openbmc/linux/drivers/ata/libata-eh.c (revision d2ba09c1)
1 /*
2  *  libata-eh.c - libata error handling
3  *
4  *  Maintained by:  Tejun Heo <tj@kernel.org>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9  *
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License as
13  *  published by the Free Software Foundation; either version 2, or
14  *  (at your option) any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  *  General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24  *  USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/driver-api/libata.rst
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_eh.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_dbg.h>
45 #include "../scsi/scsi_transport_api.h"
46 
47 #include <linux/libata.h>
48 
49 #include <trace/events/libata.h>
50 #include "libata.h"
51 
52 enum {
53 	/* speed down verdicts */
54 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
55 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
56 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
57 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
58 
59 	/* error flags */
60 	ATA_EFLAG_IS_IO			= (1 << 0),
61 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
62 	ATA_EFLAG_OLD_ER                = (1 << 31),
63 
64 	/* error categories */
65 	ATA_ECAT_NONE			= 0,
66 	ATA_ECAT_ATA_BUS		= 1,
67 	ATA_ECAT_TOUT_HSM		= 2,
68 	ATA_ECAT_UNK_DEV		= 3,
69 	ATA_ECAT_DUBIOUS_NONE		= 4,
70 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
71 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
72 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
73 	ATA_ECAT_NR			= 8,
74 
75 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
76 
77 	/* always put at least this amount of time between resets */
78 	ATA_EH_RESET_COOL_DOWN		=  5000,
79 
80 	/* Waiting in ->prereset can never be reliable.  It's
81 	 * sometimes nice to wait there but it can't be depended upon;
82 	 * otherwise, we wouldn't be resetting.  Just give it enough
83 	 * time for most drives to spin up.
84 	 */
85 	ATA_EH_PRERESET_TIMEOUT		= 10000,
86 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
87 
88 	ATA_EH_UA_TRIES			= 5,
89 
90 	/* probe speed down parameters, see ata_eh_schedule_probe() */
91 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
92 	ATA_EH_PROBE_TRIALS		= 2,
93 };
94 
95 /* The following table determines how we sequence resets.  Each entry
96  * represents the timeout for that try.  The first try can be soft or
97  * hardreset.  All others are hardreset if available.  In most cases
98  * the first reset w/ 10sec timeout should succeed.  Following entries
99  * are mostly for error handling, hotplug and those outlier devices that
100  * take an exceptionally long time to recover from reset.
101  */
102 static const unsigned long ata_eh_reset_timeouts[] = {
103 	10000,	/* most drives spin up by 10sec */
104 	10000,	/* > 99% working drives spin up before 20sec */
105 	35000,	/* give > 30 secs of idleness for outlier devices */
106 	 5000,	/* and sweet one last chance */
107 	ULONG_MAX, /* > 1 min has elapsed, give up */
108 };
109 
110 static const unsigned long ata_eh_identify_timeouts[] = {
111 	 5000,	/* covers > 99% of successes and not too boring on failures */
112 	10000,  /* combined time till here is enough even for media access */
113 	30000,	/* for true idiots */
114 	ULONG_MAX,
115 };
116 
117 static const unsigned long ata_eh_flush_timeouts[] = {
118 	15000,	/* be generous with flush */
119 	15000,  /* ditto */
120 	30000,	/* and even more generous */
121 	ULONG_MAX,
122 };
123 
124 static const unsigned long ata_eh_other_timeouts[] = {
125 	 5000,	/* same rationale as identify timeout */
126 	10000,	/* ditto */
127 	/* but no merciful 30sec for other commands, it just isn't worth it */
128 	ULONG_MAX,
129 };
130 
131 struct ata_eh_cmd_timeout_ent {
132 	const u8		*commands;
133 	const unsigned long	*timeouts;
134 };
135 
136 /* The following table determines timeouts to use for EH internal
137  * commands.  Each table entry defines a command class, pairing the
138  * commands it applies to with the timeout table to use.
139  *
140  * On the retry after a command timed out, the next timeout value from
141  * the table is used.  If the table doesn't contain further entries,
142  * the last value is used.
143  *
144  * ehc->cmd_timeout_idx keeps track of which timeout to use per
145  * command class, so if SET_FEATURES times out on the first try, the
146  * next try will use the second timeout value only for that class.
147  */
148 #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
149 static const struct ata_eh_cmd_timeout_ent
150 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
151 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
152 	  .timeouts = ata_eh_identify_timeouts, },
153 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
154 	  .timeouts = ata_eh_other_timeouts, },
155 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
156 	  .timeouts = ata_eh_other_timeouts, },
157 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
158 	  .timeouts = ata_eh_other_timeouts, },
159 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
160 	  .timeouts = ata_eh_other_timeouts, },
161 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
162 	  .timeouts = ata_eh_flush_timeouts },
163 };
164 #undef CMDS
165 
166 static void __ata_port_freeze(struct ata_port *ap);
167 #ifdef CONFIG_PM
168 static void ata_eh_handle_port_suspend(struct ata_port *ap);
169 static void ata_eh_handle_port_resume(struct ata_port *ap);
170 #else /* CONFIG_PM */
171 static void ata_eh_handle_port_suspend(struct ata_port *ap)
172 { }
173 
174 static void ata_eh_handle_port_resume(struct ata_port *ap)
175 { }
176 #endif /* CONFIG_PM */
177 
178 static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
179 				 const char *fmt, va_list args)
180 {
181 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
182 				     ATA_EH_DESC_LEN - ehi->desc_len,
183 				     fmt, args);
184 }
185 
186 /**
187  *	__ata_ehi_push_desc - push error description without adding separator
188  *	@ehi: target EHI
189  *	@fmt: printf format string
190  *
191  *	Format string according to @fmt and append it to @ehi->desc.
192  *
193  *	LOCKING:
194  *	spin_lock_irqsave(host lock)
195  */
196 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
197 {
198 	va_list args;
199 
200 	va_start(args, fmt);
201 	__ata_ehi_pushv_desc(ehi, fmt, args);
202 	va_end(args);
203 }
204 
205 /**
206  *	ata_ehi_push_desc - push error description with separator
207  *	@ehi: target EHI
208  *	@fmt: printf format string
209  *
210  *	Format string according to @fmt and append it to @ehi->desc.
211  *	If @ehi->desc is not empty, ", " is added in-between.
212  *
213  *	LOCKING:
214  *	spin_lock_irqsave(host lock)
215  */
216 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
217 {
218 	va_list args;
219 
220 	if (ehi->desc_len)
221 		__ata_ehi_push_desc(ehi, ", ");
222 
223 	va_start(args, fmt);
224 	__ata_ehi_pushv_desc(ehi, fmt, args);
225 	va_end(args);
226 }
227 
228 /**
229  *	ata_ehi_clear_desc - clean error description
230  *	@ehi: target EHI
231  *
232  *	Clear @ehi->desc.
233  *
234  *	LOCKING:
235  *	spin_lock_irqsave(host lock)
236  */
237 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
238 {
239 	ehi->desc[0] = '\0';
240 	ehi->desc_len = 0;
241 }
242 
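/*
 * Usage sketch for the helpers above (illustrative; mirrors how LLDDs
 * such as ahci build their messages): starting from a cleared desc,
 * the calls below leave something like "irq_stat 0x00000040,
 * connection status changed" in @ehi->desc -- ata_ehi_push_desc()
 * inserts the ", " separator only because the first push made desc
 * non-empty.
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "connection status changed");
 */
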
243 /**
244  *	ata_port_desc - append port description
245  *	@ap: target ATA port
246  *	@fmt: printf format string
247  *
248  *	Format string according to @fmt and append it to port
249  *	description.  If port description is not empty, " " is added
250  *	in-between.  This function is to be used while initializing
251  *	ata_host.  The description is printed on host registration.
252  *
253  *	LOCKING:
254  *	None.
255  */
256 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
257 {
258 	va_list args;
259 
260 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
261 
262 	if (ap->link.eh_info.desc_len)
263 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
264 
265 	va_start(args, fmt);
266 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
267 	va_end(args);
268 }
269 
270 #ifdef CONFIG_PCI
271 
272 /**
273  *	ata_port_pbar_desc - append PCI BAR description
274  *	@ap: target ATA port
275  *	@bar: target PCI BAR
276  *	@offset: offset into PCI BAR
277  *	@name: name of the area
278  *
279  *	If @offset is negative, this function formats a string which
280  *	contains the name, address, size and type of the BAR and
281  *	appends it to the port description.  If @offset is zero or
282  *	positive, only the name and the offset address are appended.
283  *
284  *	LOCKING:
285  *	None.
286  */
287 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
288 			const char *name)
289 {
290 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
291 	char *type = "";
292 	unsigned long long start, len;
293 
294 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
295 		type = "m";
296 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
297 		type = "i";
298 
299 	start = (unsigned long long)pci_resource_start(pdev, bar);
300 	len = (unsigned long long)pci_resource_len(pdev, bar);
301 
302 	if (offset < 0)
303 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
304 	else
305 		ata_port_desc(ap, "%s 0x%llx", name,
306 				start + (unsigned long long)offset);
307 }
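
/*
 * Example (illustrative): ahci describes its single memory BAR with
 *
 *	ata_port_pbar_desc(ap, 5, -1, "abar");
 *	ata_port_pbar_desc(ap, 5, port_offset, "port");
 *
 * where port_offset stands for the port's offset into the BAR; on
 * host registration this prints something like
 * "abar m2048@0xfe9fc000 port 0xfe9fc100".
 */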
308 
309 #endif /* CONFIG_PCI */
310 
311 static int ata_lookup_timeout_table(u8 cmd)
312 {
313 	int i;
314 
315 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
316 		const u8 *cur;
317 
318 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
319 			if (*cur == cmd)
320 				return i;
321 	}
322 
323 	return -1;
324 }
325 
326 /**
327  *	ata_internal_cmd_timeout - determine timeout for an internal command
328  *	@dev: target device
329  *	@cmd: internal command to be issued
330  *
331  *	Determine timeout for internal command @cmd for @dev.
332  *
333  *	LOCKING:
334  *	EH context.
335  *
336  *	RETURNS:
337  *	Determined timeout.
338  */
339 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
340 {
341 	struct ata_eh_context *ehc = &dev->link->eh_context;
342 	int ent = ata_lookup_timeout_table(cmd);
343 	int idx;
344 
345 	if (ent < 0)
346 		return ATA_EH_CMD_DFL_TIMEOUT;
347 
348 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
349 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
350 }
351 
352 /**
353  *	ata_internal_cmd_timed_out - notification for internal command timeout
354  *	@dev: target device
355  *	@cmd: internal command which timed out
356  *
357  *	Notify EH that internal command @cmd for @dev timed out.  This
358  *	function should be called only for commands whose timeouts are
359  *	determined using ata_internal_cmd_timeout().
360  *
361  *	LOCKING:
362  *	EH context.
363  */
364 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
365 {
366 	struct ata_eh_context *ehc = &dev->link->eh_context;
367 	int ent = ata_lookup_timeout_table(cmd);
368 	int idx;
369 
370 	if (ent < 0)
371 		return;
372 
373 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
374 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
375 		ehc->cmd_timeout_idx[dev->devno][ent]++;
376 }
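
/*
 * How the two helpers above combine (a condensed sketch of what
 * ata_exec_internal() with a timeout of 0 plus a retrying caller such
 * as ata_dev_read_id() amount to; "tries" is a hypothetical retry
 * budget and dev/tf/err_mask are as in those callers):
 *
 *	do {
 *		unsigned long timeout = ata_internal_cmd_timeout(dev, tf.command);
 *
 *		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE,
 *					     NULL, 0, timeout);
 *		if (err_mask & AC_ERR_TIMEOUT)
 *			ata_internal_cmd_timed_out(dev, tf.command);
 *	} while ((err_mask & AC_ERR_TIMEOUT) && --tries);
 *
 * For IDENTIFY this walks 5s -> 10s -> 30s and then stays at 30s:
 * ata_internal_cmd_timed_out() stops advancing the class index once
 * the next table entry is the ULONG_MAX sentinel.
 */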
377 
378 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
379 			     unsigned int err_mask)
380 {
381 	struct ata_ering_entry *ent;
382 
383 	WARN_ON(!err_mask);
384 
385 	ering->cursor++;
386 	ering->cursor %= ATA_ERING_SIZE;
387 
388 	ent = &ering->ring[ering->cursor];
389 	ent->eflags = eflags;
390 	ent->err_mask = err_mask;
391 	ent->timestamp = get_jiffies_64();
392 }
393 
394 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
395 {
396 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
397 
398 	if (ent->err_mask)
399 		return ent;
400 	return NULL;
401 }
402 
403 int ata_ering_map(struct ata_ering *ering,
404 		  int (*map_fn)(struct ata_ering_entry *, void *),
405 		  void *arg)
406 {
407 	int idx, rc = 0;
408 	struct ata_ering_entry *ent;
409 
410 	idx = ering->cursor;
411 	do {
412 		ent = &ering->ring[idx];
413 		if (!ent->err_mask)
414 			break;
415 		rc = map_fn(ent, arg);
416 		if (rc)
417 			break;
418 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
419 	} while (idx != ering->cursor);
420 
421 	return rc;
422 }
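
/*
 * Example ata_ering_map() callback (illustrative): count entries that
 * haven't been marked stale.  The walk starts at the most recent
 * entry, and a non-zero return stops it early -- speed_down_verdict_cb()
 * below uses that to cut off entries older than its time window.
 *
 *	static int count_new_er_cb(struct ata_ering_entry *ent, void *arg)
 *	{
 *		if (!(ent->eflags & ATA_EFLAG_OLD_ER))
 *			(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	ata_ering_map(&dev->ering, count_new_er_cb, &nr);
 */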
423 
424 static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
425 {
426 	ent->eflags |= ATA_EFLAG_OLD_ER;
427 	return 0;
428 }
429 
430 static void ata_ering_clear(struct ata_ering *ering)
431 {
432 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
433 }
434 
435 static unsigned int ata_eh_dev_action(struct ata_device *dev)
436 {
437 	struct ata_eh_context *ehc = &dev->link->eh_context;
438 
439 	return ehc->i.action | ehc->i.dev_action[dev->devno];
440 }
441 
442 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
443 				struct ata_eh_info *ehi, unsigned int action)
444 {
445 	struct ata_device *tdev;
446 
447 	if (!dev) {
448 		ehi->action &= ~action;
449 		ata_for_each_dev(tdev, link, ALL)
450 			ehi->dev_action[tdev->devno] &= ~action;
451 	} else {
452 		/* doesn't make sense for port-wide EH actions */
453 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
454 
455 		/* break ehi->action into ehi->dev_action */
456 		if (ehi->action & action) {
457 			ata_for_each_dev(tdev, link, ALL)
458 				ehi->dev_action[tdev->devno] |=
459 					ehi->action & action;
460 			ehi->action &= ~action;
461 		}
462 
463 		/* turn off the specified per-dev action */
464 		ehi->dev_action[dev->devno] &= ~action;
465 	}
466 }
467 
468 /**
469  *	ata_eh_acquire - acquire EH ownership
470  *	@ap: ATA port to acquire EH ownership for
471  *
472  *	Acquire EH ownership for @ap.  This is the basic exclusion
473  *	mechanism for ports sharing a host.  Only one port hanging off
474  *	the same host can claim the ownership of EH.
475  *
476  *	LOCKING:
477  *	EH context.
478  */
479 void ata_eh_acquire(struct ata_port *ap)
480 {
481 	mutex_lock(&ap->host->eh_mutex);
482 	WARN_ON_ONCE(ap->host->eh_owner);
483 	ap->host->eh_owner = current;
484 }
485 
486 /**
487  *	ata_eh_release - release EH ownership
488  *	@ap: ATA port to release EH ownership for
489  *
490  *	Release EH ownership for @ap.  The caller must
491  *	have acquired EH ownership using ata_eh_acquire() previously.
492  *
493  *	LOCKING:
494  *	EH context.
495  */
496 void ata_eh_release(struct ata_port *ap)
497 {
498 	WARN_ON_ONCE(ap->host->eh_owner != current);
499 	ap->host->eh_owner = NULL;
500 	mutex_unlock(&ap->host->eh_mutex);
501 }
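
/*
 * Ownership bracket (sketch): everything that runs EH for a port on a
 * shared host sits between these two calls, and long sleeps drop the
 * ownership so sibling ports can make progress -- ata_msleep() does
 * exactly this release/re-acquire dance around its sleep.
 *
 *	ata_eh_acquire(ap);
 *	... recover the port, issue EH-internal commands ...
 *	ata_eh_release(ap);
 */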
502 
503 /**
504  *	ata_scsi_timed_out - SCSI layer time out callback
505  *	@cmd: timed out SCSI command
506  *
507  *	Handles SCSI layer timeout.  We race with normal completion of
508  *	the qc for @cmd.  If the qc is already gone, we lose and let
509  *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
510  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
511  *	from finishing it by setting EH_SCHEDULED and return
512  *	EH_NOT_HANDLED.
513  *
514  *	TODO: kill this function once old EH is gone.
515  *
516  *	LOCKING:
517  *	Called from timer context
518  *
519  *	RETURNS:
520  *	EH_HANDLED or EH_NOT_HANDLED
521  */
522 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
523 {
524 	struct Scsi_Host *host = cmd->device->host;
525 	struct ata_port *ap = ata_shost_to_port(host);
526 	unsigned long flags;
527 	struct ata_queued_cmd *qc;
528 	enum blk_eh_timer_return ret;
529 
530 	DPRINTK("ENTER\n");
531 
532 	if (ap->ops->error_handler) {
533 		ret = BLK_EH_NOT_HANDLED;
534 		goto out;
535 	}
536 
537 	ret = BLK_EH_HANDLED;
538 	spin_lock_irqsave(ap->lock, flags);
539 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
540 	if (qc) {
541 		WARN_ON(qc->scsicmd != cmd);
542 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
543 		qc->err_mask |= AC_ERR_TIMEOUT;
544 		ret = BLK_EH_NOT_HANDLED;
545 	}
546 	spin_unlock_irqrestore(ap->lock, flags);
547 
548  out:
549 	DPRINTK("EXIT, ret=%d\n", ret);
550 	return ret;
551 }
552 EXPORT_SYMBOL(ata_scsi_timed_out);
553 
554 static void ata_eh_unload(struct ata_port *ap)
555 {
556 	struct ata_link *link;
557 	struct ata_device *dev;
558 	unsigned long flags;
559 
560 	/* Restore SControl IPM and SPD for the next driver and
561 	 * disable attached devices.
562 	 */
563 	ata_for_each_link(link, ap, PMP_FIRST) {
564 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
565 		ata_for_each_dev(dev, link, ALL)
566 			ata_dev_disable(dev);
567 	}
568 
569 	/* freeze and set UNLOADED */
570 	spin_lock_irqsave(ap->lock, flags);
571 
572 	ata_port_freeze(ap);			/* won't be thawed */
573 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
574 	ap->pflags |= ATA_PFLAG_UNLOADED;
575 
576 	spin_unlock_irqrestore(ap->lock, flags);
577 }
578 
579 /**
580  *	ata_scsi_error - SCSI layer error handler callback
581  *	@host: SCSI host on which error occurred
582  *
583  *	Handles SCSI-layer-thrown error events.
584  *
585  *	LOCKING:
586  *	Inherited from SCSI layer (none, can sleep)
587  *
588  *	RETURNS:
589  *	Zero.
590  */
591 void ata_scsi_error(struct Scsi_Host *host)
592 {
593 	struct ata_port *ap = ata_shost_to_port(host);
594 	unsigned long flags;
595 	LIST_HEAD(eh_work_q);
596 
597 	DPRINTK("ENTER\n");
598 
599 	spin_lock_irqsave(host->host_lock, flags);
600 	list_splice_init(&host->eh_cmd_q, &eh_work_q);
601 	spin_unlock_irqrestore(host->host_lock, flags);
602 
603 	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
604 
605 	/* If the timeout raced normal completion and there is nothing to
606 	   recover (nr_timedout == 0), why exactly are we doing error recovery? */
607 	ata_scsi_port_error_handler(host, ap);
608 
609 	/* finish or retry handled scmd's and clean up */
610 	WARN_ON(!list_empty(&eh_work_q));
611 
612 	DPRINTK("EXIT\n");
613 }
614 
615 /**
616  * ata_scsi_cmd_error_handler - error callback for a list of commands
617  * @host:	scsi host containing the port
618  * @ap:		ATA port within the host
619  * @eh_work_q:	list of commands to process
620  *
621  * process the given list of commands and return those finished to the
622  * ap->eh_done_q.  This function is the first part of the libata error
623  * handler which processes a given list of failed commands.
624  */
625 void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
626 				struct list_head *eh_work_q)
627 {
628 	int i;
629 	unsigned long flags;
630 
631 	/* make sure sff pio task is not running */
632 	ata_sff_flush_pio_task(ap);
633 
634 	/* synchronize with host lock and sort out timeouts */
635 
636 	/* For new EH, all qcs are finished in one of three ways -
637 	 * normal completion, error completion, and SCSI timeout.
638 	 * Both completions can race against SCSI timeout.  When normal
639 	 * completion wins, the qc never reaches EH.  When error
640 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
641 	 *
642 	 * When SCSI timeout wins, things are a bit more complex.
643 	 * Normal or error completion can occur after the timeout but
644 	 * before this point.  In such cases, both types of
645 	 * completions are honored.  A scmd is determined to have
646 	 * timed out iff its associated qc is active and not failed.
647 	 */
648 	spin_lock_irqsave(ap->lock, flags);
649 	if (ap->ops->error_handler) {
650 		struct scsi_cmnd *scmd, *tmp;
651 		int nr_timedout = 0;
652 
653 		/* This must occur under the ap->lock as we don't want
654 		   a polled recovery to race the real interrupt handler
655 
656 		   The lost_interrupt handler checks for any completed but
657 		   non-notified command and completes much like an IRQ handler.
658 
659 		   We then fall into the error recovery code which will treat
660 		   this as if normal completion won the race */
661 
662 		if (ap->ops->lost_interrupt)
663 			ap->ops->lost_interrupt(ap);
664 
665 		list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
666 			struct ata_queued_cmd *qc;
667 
668 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
669 				qc = __ata_qc_from_tag(ap, i);
670 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
671 				    qc->scsicmd == scmd)
672 					break;
673 			}
674 
675 			if (i < ATA_MAX_QUEUE) {
676 				/* the scmd has an associated qc */
677 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
678 					/* which hasn't failed yet, timeout */
679 					qc->err_mask |= AC_ERR_TIMEOUT;
680 					qc->flags |= ATA_QCFLAG_FAILED;
681 					nr_timedout++;
682 				}
683 			} else {
684 				/* Normal completion occurred after
685 				 * SCSI timeout but before this point.
686 				 * Successfully complete it.
687 				 */
688 				scmd->retries = scmd->allowed;
689 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
690 			}
691 		}
692 
693 	/* If we have timed out qcs, they belong to EH from
694 		 * this point but the state of the controller is
695 		 * unknown.  Freeze the port to make sure the IRQ
696 		 * handler doesn't diddle with those qcs.  This must
697 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
698 		 */
699 		if (nr_timedout)
700 			__ata_port_freeze(ap);
701 
702 
703 		/* initialize eh_tries */
704 		ap->eh_tries = ATA_EH_MAX_TRIES;
705 	}
706 	spin_unlock_irqrestore(ap->lock, flags);
707 
708 }
709 EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
710 
711 /**
712  * ata_scsi_port_error_handler - recover the port after the commands
713  * @host:	SCSI host containing the port
714  * @ap:		the ATA port
715  *
716  * Handle the recovery of the port @ap after all the commands
717  * have been recovered.
718  */
719 void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
720 {
721 	unsigned long flags;
722 
723 	/* invoke error handler */
724 	if (ap->ops->error_handler) {
725 		struct ata_link *link;
726 
727 		/* acquire EH ownership */
728 		ata_eh_acquire(ap);
729  repeat:
730 		/* kill fast drain timer */
731 		del_timer_sync(&ap->fastdrain_timer);
732 
733 		/* process port resume request */
734 		ata_eh_handle_port_resume(ap);
735 
736 		/* fetch & clear EH info */
737 		spin_lock_irqsave(ap->lock, flags);
738 
739 		ata_for_each_link(link, ap, HOST_FIRST) {
740 			struct ata_eh_context *ehc = &link->eh_context;
741 			struct ata_device *dev;
742 
743 			memset(&link->eh_context, 0, sizeof(link->eh_context));
744 			link->eh_context.i = link->eh_info;
745 			memset(&link->eh_info, 0, sizeof(link->eh_info));
746 
747 			ata_for_each_dev(dev, link, ENABLED) {
748 				int devno = dev->devno;
749 
750 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
751 				if (ata_ncq_enabled(dev))
752 					ehc->saved_ncq_enabled |= 1 << devno;
753 			}
754 		}
755 
756 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
757 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
758 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
759 
760 		spin_unlock_irqrestore(ap->lock, flags);
761 
762 		/* invoke EH, skip if unloading or suspended */
763 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
764 			ap->ops->error_handler(ap);
765 		else {
766 			/* if unloading, commence suicide */
767 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
768 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
769 				ata_eh_unload(ap);
770 			ata_eh_finish(ap);
771 		}
772 
773 		/* process port suspend request */
774 		ata_eh_handle_port_suspend(ap);
775 
776 		/* Exception might have happened after ->error_handler
777 		 * recovered the port but before this point.  Repeat
778 		 * EH in such case.
779 		 */
780 		spin_lock_irqsave(ap->lock, flags);
781 
782 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
783 			if (--ap->eh_tries) {
784 				spin_unlock_irqrestore(ap->lock, flags);
785 				goto repeat;
786 			}
787 			ata_port_err(ap,
788 				     "EH pending after %d tries, giving up\n",
789 				     ATA_EH_MAX_TRIES);
790 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
791 		}
792 
793 		/* this run is complete, make sure EH info is clear */
794 		ata_for_each_link(link, ap, HOST_FIRST)
795 			memset(&link->eh_info, 0, sizeof(link->eh_info));
796 
797 		/* end eh (clear host_eh_scheduled) while holding
798 		 * ap->lock such that if exception occurs after this
799 		 * point but before EH completion, SCSI midlayer will
800 		 * re-initiate EH.
801 		 */
802 		ap->ops->end_eh(ap);
803 
804 		spin_unlock_irqrestore(ap->lock, flags);
805 		ata_eh_release(ap);
806 	} else {
807 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
808 		ap->ops->eng_timeout(ap);
809 	}
810 
811 	scsi_eh_flush_done_q(&ap->eh_done_q);
812 
813 	/* clean up */
814 	spin_lock_irqsave(ap->lock, flags);
815 
816 	if (ap->pflags & ATA_PFLAG_LOADING)
817 		ap->pflags &= ~ATA_PFLAG_LOADING;
818 	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
819 		!(ap->flags & ATA_FLAG_SAS_HOST))
820 		schedule_delayed_work(&ap->hotplug_task, 0);
821 
822 	if (ap->pflags & ATA_PFLAG_RECOVERED)
823 		ata_port_info(ap, "EH complete\n");
824 
825 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
826 
827 	/* tell wait_eh that we're done */
828 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
829 	wake_up_all(&ap->eh_wait_q);
830 
831 	spin_unlock_irqrestore(ap->lock, flags);
832 }
833 EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
834 
835 /**
836  *	ata_port_wait_eh - Wait for the currently pending EH to complete
837  *	@ap: Port to wait EH for
838  *
839  *	Wait until the currently pending EH is complete.
840  *
841  *	LOCKING:
842  *	Kernel thread context (may sleep).
843  */
844 void ata_port_wait_eh(struct ata_port *ap)
845 {
846 	unsigned long flags;
847 	DEFINE_WAIT(wait);
848 
849  retry:
850 	spin_lock_irqsave(ap->lock, flags);
851 
852 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
853 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
854 		spin_unlock_irqrestore(ap->lock, flags);
855 		schedule();
856 		spin_lock_irqsave(ap->lock, flags);
857 	}
858 	finish_wait(&ap->eh_wait_q, &wait);
859 
860 	spin_unlock_irqrestore(ap->lock, flags);
861 
862 	/* make sure SCSI EH is complete */
863 	if (scsi_host_in_recovery(ap->scsi_host)) {
864 		ata_msleep(ap, 10);
865 		goto retry;
866 	}
867 }
868 EXPORT_SYMBOL_GPL(ata_port_wait_eh);
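
/*
 * Usage sketch: synchronously flushing EH from a sleeping context, as
 * done on e.g. the detach path -- schedule under ap->lock, then wait
 * with no locks held:
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ata_port_schedule_eh(ap);
 *	spin_unlock_irqrestore(ap->lock, flags);
 *
 *	ata_port_wait_eh(ap);
 */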
869 
870 static int ata_eh_nr_in_flight(struct ata_port *ap)
871 {
872 	unsigned int tag;
873 	int nr = 0;
874 
875 	/* count only non-internal commands */
876 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
877 		if (ata_qc_from_tag(ap, tag))
878 			nr++;
879 
880 	return nr;
881 }
882 
883 void ata_eh_fastdrain_timerfn(struct timer_list *t)
884 {
885 	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
886 	unsigned long flags;
887 	int cnt;
888 
889 	spin_lock_irqsave(ap->lock, flags);
890 
891 	cnt = ata_eh_nr_in_flight(ap);
892 
893 	/* are we done? */
894 	if (!cnt)
895 		goto out_unlock;
896 
897 	if (cnt == ap->fastdrain_cnt) {
898 		unsigned int tag;
899 
900 		/* No progress during the last interval, tag all
901 		 * in-flight qcs as timed out and freeze the port.
902 		 */
903 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
904 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
905 			if (qc)
906 				qc->err_mask |= AC_ERR_TIMEOUT;
907 		}
908 
909 		ata_port_freeze(ap);
910 	} else {
911 		/* some qcs have finished, give it another chance */
912 		ap->fastdrain_cnt = cnt;
913 		ap->fastdrain_timer.expires =
914 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
915 		add_timer(&ap->fastdrain_timer);
916 	}
917 
918  out_unlock:
919 	spin_unlock_irqrestore(ap->lock, flags);
920 }
921 
922 /**
923  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
924  *	@ap: target ATA port
925  *	@fastdrain: activate fast drain
926  *
927  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
928  *	is non-zero and EH wasn't pending before.  Fast drain ensures
929  *	that EH kicks in in timely manner.
930  *
931  *	LOCKING:
932  *	spin_lock_irqsave(host lock)
933  */
934 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
935 {
936 	int cnt;
937 
938 	/* already scheduled? */
939 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
940 		return;
941 
942 	ap->pflags |= ATA_PFLAG_EH_PENDING;
943 
944 	if (!fastdrain)
945 		return;
946 
947 	/* do we have in-flight qcs? */
948 	cnt = ata_eh_nr_in_flight(ap);
949 	if (!cnt)
950 		return;
951 
952 	/* activate fast drain */
953 	ap->fastdrain_cnt = cnt;
954 	ap->fastdrain_timer.expires =
955 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
956 	add_timer(&ap->fastdrain_timer);
957 }
958 
959 /**
960  *	ata_qc_schedule_eh - schedule qc for error handling
961  *	@qc: command to schedule error handling for
962  *
963  *	Schedule error handling for @qc.  EH will kick in as soon as
964  *	other commands are drained.
965  *
966  *	LOCKING:
967  *	spin_lock_irqsave(host lock)
968  */
969 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
970 {
971 	struct ata_port *ap = qc->ap;
972 	struct request_queue *q = qc->scsicmd->device->request_queue;
973 	unsigned long flags;
974 
975 	WARN_ON(!ap->ops->error_handler);
976 
977 	qc->flags |= ATA_QCFLAG_FAILED;
978 	ata_eh_set_pending(ap, 1);
979 
980 	/* The following will fail if timeout has already expired.
981 	 * ata_scsi_error() takes care of such scmds on EH entry.
982 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
983 	 * this function completes.
984 	 */
985 	spin_lock_irqsave(q->queue_lock, flags);
986 	blk_abort_request(qc->scsicmd->request);
987 	spin_unlock_irqrestore(q->queue_lock, flags);
988 }
989 
990 /**
991  * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
992  * @ap: ATA port to schedule EH for
993  *
994  *	LOCKING: inherited from ata_port_schedule_eh
995  *	spin_lock_irqsave(host lock)
996  */
997 void ata_std_sched_eh(struct ata_port *ap)
998 {
999 	WARN_ON(!ap->ops->error_handler);
1000 
1001 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
1002 		return;
1003 
1004 	ata_eh_set_pending(ap, 1);
1005 	scsi_schedule_eh(ap->scsi_host);
1006 
1007 	DPRINTK("port EH scheduled\n");
1008 }
1009 EXPORT_SYMBOL_GPL(ata_std_sched_eh);
1010 
1011 /**
1012  * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
1013  * @ap: ATA port to end EH for
1014  *
1015  * In the libata object model there is a 1:1 mapping of ata_port to
1016  * shost, so host fields can be directly manipulated under ap->lock.  In
1017  * the libsas case we need to hold a lock at the ha level to coordinate
1018  * these events.
1019  *
1020  *	LOCKING:
1021  *	spin_lock_irqsave(host lock)
1022  */
1023 void ata_std_end_eh(struct ata_port *ap)
1024 {
1025 	struct Scsi_Host *host = ap->scsi_host;
1026 
1027 	host->host_eh_scheduled = 0;
1028 }
1029 EXPORT_SYMBOL(ata_std_end_eh);
1030 
1031 
1032 /**
1033  *	ata_port_schedule_eh - schedule error handling without a qc
1034  *	@ap: ATA port to schedule EH for
1035  *
1036  *	Schedule error handling for @ap.  EH will kick in as soon as
1037  *	all commands are drained.
1038  *
1039  *	LOCKING:
1040  *	spin_lock_irqsave(host lock)
1041  */
1042 void ata_port_schedule_eh(struct ata_port *ap)
1043 {
1044 	/* see: ata_std_sched_eh, unless you know better */
1045 	ap->ops->sched_eh(ap);
1046 }
1047 
1048 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1049 {
1050 	int tag, nr_aborted = 0;
1051 
1052 	WARN_ON(!ap->ops->error_handler);
1053 
1054 	/* we're gonna abort all commands, no need for fast drain */
1055 	ata_eh_set_pending(ap, 0);
1056 
1057 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1058 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1059 
1060 		if (qc && (!link || qc->dev->link == link)) {
1061 			qc->flags |= ATA_QCFLAG_FAILED;
1062 			ata_qc_complete(qc);
1063 			nr_aborted++;
1064 		}
1065 	}
1066 
1067 	if (!nr_aborted)
1068 		ata_port_schedule_eh(ap);
1069 
1070 	return nr_aborted;
1071 }
1072 
1073 /**
1074  *	ata_link_abort - abort all qc's on the link
1075  *	@link: ATA link to abort qc's for
1076  *
1077  *	Abort all qc's active on @link and schedule EH.
1078  *
1079  *	LOCKING:
1080  *	spin_lock_irqsave(host lock)
1081  *
1082  *	RETURNS:
1083  *	Number of aborted qc's.
1084  */
1085 int ata_link_abort(struct ata_link *link)
1086 {
1087 	return ata_do_link_abort(link->ap, link);
1088 }
1089 
1090 /**
1091  *	ata_port_abort - abort all qc's on the port
1092  *	@ap: ATA port to abort qc's for
1093  *
1094  *	Abort all active qc's of @ap and schedule EH.
1095  *
1096  *	LOCKING:
1097  *	spin_lock_irqsave(host lock)
1098  *
1099  *	RETURNS:
1100  *	Number of aborted qc's.
1101  */
1102 int ata_port_abort(struct ata_port *ap)
1103 {
1104 	return ata_do_link_abort(ap, NULL);
1105 }
1106 
1107 /**
1108  *	__ata_port_freeze - freeze port
1109  *	@ap: ATA port to freeze
1110  *
1111  *	This function is called when HSM violation or some other
1112  *	condition disrupts normal operation of the port.  A frozen port
1113  *	is not allowed to perform any operation until the port is
1114  *	thawed, which usually follows a successful reset.
1115  *
1116  *	ap->ops->freeze() callback can be used for freezing the port
1117  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1118  *	port cannot be frozen hardware-wise, the interrupt handler
1119  *	must ack and clear interrupts unconditionally while the port
1120  *	is frozen.
1121  *
1122  *	LOCKING:
1123  *	spin_lock_irqsave(host lock)
1124  */
1125 static void __ata_port_freeze(struct ata_port *ap)
1126 {
1127 	WARN_ON(!ap->ops->error_handler);
1128 
1129 	if (ap->ops->freeze)
1130 		ap->ops->freeze(ap);
1131 
1132 	ap->pflags |= ATA_PFLAG_FROZEN;
1133 
1134 	DPRINTK("ata%u port frozen\n", ap->print_id);
1135 }
1136 
1137 /**
1138  *	ata_port_freeze - abort & freeze port
1139  *	@ap: ATA port to freeze
1140  *
1141  *	Abort and freeze @ap.  The freeze operation must be called
1142  *	first, because some hardware requires special operations
1143  *	before the taskfile registers are accessible.
1144  *
1145  *	LOCKING:
1146  *	spin_lock_irqsave(host lock)
1147  *
1148  *	RETURNS:
1149  *	Number of aborted commands.
1150  */
1151 int ata_port_freeze(struct ata_port *ap)
1152 {
1153 	int nr_aborted;
1154 
1155 	WARN_ON(!ap->ops->error_handler);
1156 
1157 	__ata_port_freeze(ap);
1158 	nr_aborted = ata_port_abort(ap);
1159 
1160 	return nr_aborted;
1161 }
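
/*
 * Typical LLDD use (illustrative; IRQ_FATAL is a made-up stand-in for
 * a driver's fatal-error status bit): an interrupt handler that hits
 * an unrecoverable error freezes the port under ap->lock instead of
 * completing the qc, handing everything in flight to EH:
 *
 *	if (unlikely(irq_stat & IRQ_FATAL)) {
 *		ata_port_freeze(ap);
 *		return;
 *	}
 */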
1162 
1163 /**
1164  *	sata_async_notification - SATA async notification handler
1165  *	@ap: ATA port where async notification is received
1166  *
1167  *	Handler to be called when async notification via SDB FIS is
1168  *	received.  This function schedules EH if necessary.
1169  *
1170  *	LOCKING:
1171  *	spin_lock_irqsave(host lock)
1172  *
1173  *	RETURNS:
1174  *	1 if EH is scheduled, 0 otherwise.
1175  */
1176 int sata_async_notification(struct ata_port *ap)
1177 {
1178 	u32 sntf;
1179 	int rc;
1180 
1181 	if (!(ap->flags & ATA_FLAG_AN))
1182 		return 0;
1183 
1184 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1185 	if (rc == 0)
1186 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1187 
1188 	if (!sata_pmp_attached(ap) || rc) {
1189 		/* PMP is not attached or SNTF is not available */
1190 		if (!sata_pmp_attached(ap)) {
1191 			/* PMP is not attached.  Check whether ATAPI
1192 			 * AN is configured.  If so, notify media
1193 			 * change.
1194 			 */
1195 			struct ata_device *dev = ap->link.device;
1196 
1197 			if ((dev->class == ATA_DEV_ATAPI) &&
1198 			    (dev->flags & ATA_DFLAG_AN))
1199 				ata_scsi_media_change_notify(dev);
1200 			return 0;
1201 		} else {
1202 			/* PMP is attached but SNTF is not available.
1203 			 * ATAPI async media change notification is
1204 			 * not used.  The PMP must be reporting PHY
1205 			 * status change, schedule EH.
1206 			 */
1207 			ata_port_schedule_eh(ap);
1208 			return 1;
1209 		}
1210 	} else {
1211 		/* PMP is attached and SNTF is available */
1212 		struct ata_link *link;
1213 
1214 		/* check and notify ATAPI AN */
1215 		ata_for_each_link(link, ap, EDGE) {
1216 			if (!(sntf & (1 << link->pmp)))
1217 				continue;
1218 
1219 			if ((link->device->class == ATA_DEV_ATAPI) &&
1220 			    (link->device->flags & ATA_DFLAG_AN))
1221 				ata_scsi_media_change_notify(link->device);
1222 		}
1223 
1224 		/* If PMP is reporting that PHY status of some
1225 		 * downstream ports has changed, schedule EH.
1226 		 */
1227 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1228 			ata_port_schedule_eh(ap);
1229 			return 1;
1230 		}
1231 
1232 		return 0;
1233 	}
1234 }
1235 
1236 /**
1237  *	ata_eh_freeze_port - EH helper to freeze port
1238  *	@ap: ATA port to freeze
1239  *
1240  *	Freeze @ap.
1241  *
1242  *	LOCKING:
1243  *	None.
1244  */
1245 void ata_eh_freeze_port(struct ata_port *ap)
1246 {
1247 	unsigned long flags;
1248 
1249 	if (!ap->ops->error_handler)
1250 		return;
1251 
1252 	spin_lock_irqsave(ap->lock, flags);
1253 	__ata_port_freeze(ap);
1254 	spin_unlock_irqrestore(ap->lock, flags);
1255 }
1256 
1257 /**
1258  *	ata_eh_thaw_port - EH helper to thaw port
1259  *	@ap: ATA port to thaw
1260  *
1261  *	Thaw frozen port @ap.
1262  *
1263  *	LOCKING:
1264  *	None.
1265  */
1266 void ata_eh_thaw_port(struct ata_port *ap)
1267 {
1268 	unsigned long flags;
1269 
1270 	if (!ap->ops->error_handler)
1271 		return;
1272 
1273 	spin_lock_irqsave(ap->lock, flags);
1274 
1275 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1276 
1277 	if (ap->ops->thaw)
1278 		ap->ops->thaw(ap);
1279 
1280 	spin_unlock_irqrestore(ap->lock, flags);
1281 
1282 	DPRINTK("ata%u port thawed\n", ap->print_id);
1283 }
1284 
1285 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1286 {
1287 	/* nada */
1288 }
1289 
1290 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1291 {
1292 	struct ata_port *ap = qc->ap;
1293 	struct scsi_cmnd *scmd = qc->scsicmd;
1294 	unsigned long flags;
1295 
1296 	spin_lock_irqsave(ap->lock, flags);
1297 	qc->scsidone = ata_eh_scsidone;
1298 	__ata_qc_complete(qc);
1299 	WARN_ON(ata_tag_valid(qc->tag));
1300 	spin_unlock_irqrestore(ap->lock, flags);
1301 
1302 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1303 }
1304 
1305 /**
1306  *	ata_eh_qc_complete - Complete an active ATA command from EH
1307  *	@qc: Command to complete
1308  *
1309  *	Indicate to the mid and upper layers that an ATA command has
1310  *	completed.  To be used from EH.
1311  */
1312 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1313 {
1314 	struct scsi_cmnd *scmd = qc->scsicmd;
1315 	scmd->retries = scmd->allowed;
1316 	__ata_eh_qc_complete(qc);
1317 }
1318 
1319 /**
1320  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1321  *	@qc: Command to retry
1322  *
1323  *	Indicate to the mid and upper layers that an ATA command
1324  *	should be retried.  To be used from EH.
1325  *
1326  *	SCSI midlayer limits the number of retries to scmd->allowed.
1327  *	scmd->allowed is incremented for commands which get retried
1328  *	due to unrelated failures (qc->err_mask is zero).
1329  */
1330 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1331 {
1332 	struct scsi_cmnd *scmd = qc->scsicmd;
1333 	if (!qc->err_mask)
1334 		scmd->allowed++;
1335 	__ata_eh_qc_complete(qc);
1336 }
1337 
1338 /**
1339  *	ata_dev_disable - disable ATA device
1340  *	@dev: ATA device to disable
1341  *
1342  *	Disable @dev.
1343  *
1344  *	Locking:
1345  *	EH context.
1346  */
1347 void ata_dev_disable(struct ata_device *dev)
1348 {
1349 	if (!ata_dev_enabled(dev))
1350 		return;
1351 
1352 	if (ata_msg_drv(dev->link->ap))
1353 		ata_dev_warn(dev, "disabled\n");
1354 	ata_acpi_on_disable(dev);
1355 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1356 	dev->class++;
1357 
1358 	/* From now till the next successful probe, ering is used to
1359 	 * track probe failures.  Clear accumulated device error info.
1360 	 */
1361 	ata_ering_clear(&dev->ering);
1362 }
1363 
1364 /**
1365  *	ata_eh_detach_dev - detach ATA device
1366  *	@dev: ATA device to detach
1367  *
1368  *	Detach @dev.
1369  *
1370  *	LOCKING:
1371  *	None.
1372  */
1373 void ata_eh_detach_dev(struct ata_device *dev)
1374 {
1375 	struct ata_link *link = dev->link;
1376 	struct ata_port *ap = link->ap;
1377 	struct ata_eh_context *ehc = &link->eh_context;
1378 	unsigned long flags;
1379 
1380 	ata_dev_disable(dev);
1381 
1382 	spin_lock_irqsave(ap->lock, flags);
1383 
1384 	dev->flags &= ~ATA_DFLAG_DETACH;
1385 
1386 	if (ata_scsi_offline_dev(dev)) {
1387 		dev->flags |= ATA_DFLAG_DETACHED;
1388 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1389 	}
1390 
1391 	/* clear per-dev EH info */
1392 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1393 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1394 	ehc->saved_xfer_mode[dev->devno] = 0;
1395 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1396 
1397 	spin_unlock_irqrestore(ap->lock, flags);
1398 }
1399 
1400 /**
1401  *	ata_eh_about_to_do - about to perform eh_action
1402  *	@link: target ATA link
1403  *	@dev: target ATA dev for per-dev action (can be NULL)
1404  *	@action: action about to be performed
1405  *
1406  *	Called just before performing EH actions to clear related bits
1407  *	in @link->eh_info such that eh actions are not unnecessarily
1408  *	repeated.
1409  *
1410  *	LOCKING:
1411  *	None.
1412  */
1413 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1414 			unsigned int action)
1415 {
1416 	struct ata_port *ap = link->ap;
1417 	struct ata_eh_info *ehi = &link->eh_info;
1418 	struct ata_eh_context *ehc = &link->eh_context;
1419 	unsigned long flags;
1420 
1421 	spin_lock_irqsave(ap->lock, flags);
1422 
1423 	ata_eh_clear_action(link, dev, ehi, action);
1424 
1425 	/* About to take EH action, set RECOVERED.  Ignore actions on
1426 	 * slave links as master will do them again.
1427 	 */
1428 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1429 		ap->pflags |= ATA_PFLAG_RECOVERED;
1430 
1431 	spin_unlock_irqrestore(ap->lock, flags);
1432 }
1433 
1434 /**
1435  *	ata_eh_done - EH action complete
1436  *	@link: ATA link for which EH actions are complete
1437  *	@dev: target ATA dev for per-dev action (can be NULL)
1438  *	@action: action just completed
1439  *
1440  *	Called right after performing EH actions to clear related bits
1441  *	in @link->eh_context.
1442  *
1443  *	LOCKING:
1444  *	None.
1445  */
1446 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1447 		 unsigned int action)
1448 {
1449 	struct ata_eh_context *ehc = &link->eh_context;
1450 
1451 	ata_eh_clear_action(link, dev, &ehc->i, action);
1452 }
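
/*
 * Bracket pattern (sketch, mirroring how the recovery code later in
 * this file uses the pair): an action is announced before it is
 * attempted and cleared only on success, so a repeated EH pass retries
 * failed work without redoing finished work:
 *
 *	ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
 *	rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], readid_flags);
 *	if (rc == 0)
 *		ata_eh_done(link, dev, ATA_EH_REVALIDATE);
 */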
1453 
1454 /**
1455  *	ata_err_string - convert err_mask to descriptive string
1456  *	@err_mask: error mask to convert to string
1457  *
1458  *	Convert @err_mask to descriptive string.  Errors are
1459  *	prioritized according to severity and only the most severe
1460  *	error is reported.
1461  *
1462  *	LOCKING:
1463  *	None.
1464  *
1465  *	RETURNS:
1466  *	Descriptive string for @err_mask
1467  */
1468 static const char *ata_err_string(unsigned int err_mask)
1469 {
1470 	if (err_mask & AC_ERR_HOST_BUS)
1471 		return "host bus error";
1472 	if (err_mask & AC_ERR_ATA_BUS)
1473 		return "ATA bus error";
1474 	if (err_mask & AC_ERR_TIMEOUT)
1475 		return "timeout";
1476 	if (err_mask & AC_ERR_HSM)
1477 		return "HSM violation";
1478 	if (err_mask & AC_ERR_SYSTEM)
1479 		return "internal error";
1480 	if (err_mask & AC_ERR_MEDIA)
1481 		return "media error";
1482 	if (err_mask & AC_ERR_INVALID)
1483 		return "invalid argument";
1484 	if (err_mask & AC_ERR_DEV)
1485 		return "device error";
1486 	return "unknown error";
1487 }
1488 
1489 /**
1490  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1491  *	@dev: Device to read log page 10h from
1492  *	@tag: Resulting tag of the failed command
1493  *	@tf: Resulting taskfile registers of the failed command
1494  *
1495  *	Read log page 10h to obtain NCQ error details and clear error
1496  *	condition.
1497  *
1498  *	LOCKING:
1499  *	Kernel thread context (may sleep).
1500  *
1501  *	RETURNS:
1502  *	0 on success, -errno otherwise.
1503  */
1504 static int ata_eh_read_log_10h(struct ata_device *dev,
1505 			       int *tag, struct ata_taskfile *tf)
1506 {
1507 	u8 *buf = dev->link->ap->sector_buf;
1508 	unsigned int err_mask;
1509 	u8 csum;
1510 	int i;
1511 
1512 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
1513 	if (err_mask)
1514 		return -EIO;
1515 
1516 	csum = 0;
1517 	for (i = 0; i < ATA_SECT_SIZE; i++)
1518 		csum += buf[i];
1519 	if (csum)
1520 		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
1521 			     csum);
1522 
1523 	if (buf[0] & 0x80)
1524 		return -ENOENT;
1525 
1526 	*tag = buf[0] & 0x1f;
1527 
1528 	tf->command = buf[2];
1529 	tf->feature = buf[3];
1530 	tf->lbal = buf[4];
1531 	tf->lbam = buf[5];
1532 	tf->lbah = buf[6];
1533 	tf->device = buf[7];
1534 	tf->hob_lbal = buf[8];
1535 	tf->hob_lbam = buf[9];
1536 	tf->hob_lbah = buf[10];
1537 	tf->nsect = buf[12];
1538 	tf->hob_nsect = buf[13];
1539 	if (ata_id_has_ncq_autosense(dev->id))
1540 		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1541 
1542 	return 0;
1543 }
1544 
1545 /**
1546  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1547  *	@dev: target ATAPI device
1548  *	@r_sense_key: out parameter for sense_key
1549  *
1550  *	Perform ATAPI TEST_UNIT_READY.
1551  *
1552  *	LOCKING:
1553  *	EH context (may sleep).
1554  *
1555  *	RETURNS:
1556  *	0 on success, AC_ERR_* mask on failure.
1557  */
1558 unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1559 {
1560 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1561 	struct ata_taskfile tf;
1562 	unsigned int err_mask;
1563 
1564 	ata_tf_init(dev, &tf);
1565 
1566 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1567 	tf.command = ATA_CMD_PACKET;
1568 	tf.protocol = ATAPI_PROT_NODATA;
1569 
1570 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1571 	if (err_mask == AC_ERR_DEV)
1572 		*r_sense_key = tf.feature >> 4;
1573 	return err_mask;
1574 }
1575 
1576 /**
1577  *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1578  *	@qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
1579  *	@cmd: scsi command for which the sense code should be set
1580  *
1581  *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1582  *	SENSE.  This function is an EH helper.
1583  *
1584  *	LOCKING:
1585  *	Kernel thread context (may sleep).
1586  */
1587 static void ata_eh_request_sense(struct ata_queued_cmd *qc,
1588 				 struct scsi_cmnd *cmd)
1589 {
1590 	struct ata_device *dev = qc->dev;
1591 	struct ata_taskfile tf;
1592 	unsigned int err_mask;
1593 
1594 	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
1595 		ata_dev_warn(dev, "sense data available but port frozen\n");
1596 		return;
1597 	}
1598 
1599 	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
1600 		return;
1601 
1602 	if (!ata_id_sense_reporting_enabled(dev->id)) {
1603 		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
1604 		return;
1605 	}
1606 
1607 	DPRINTK("ATA request sense\n");
1608 
1609 	ata_tf_init(dev, &tf);
1610 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1611 	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1612 	tf.command = ATA_CMD_REQ_SENSE_DATA;
1613 	tf.protocol = ATA_PROT_NODATA;
1614 
1615 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1616 	/* Ignore err_mask; ATA_ERR might be set */
1617 	if (tf.command & ATA_SENSE) {
1618 		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
1619 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
1620 	} else {
1621 		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1622 			     tf.command, err_mask);
1623 	}
1624 }
1625 
1626 /**
1627  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1628  *	@dev: device to perform REQUEST_SENSE to
1629  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1630  *	@dfl_sense_key: default sense key to use
1631  *
1632  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1633  *	SENSE.  This function is an EH helper.
1634  *
1635  *	LOCKING:
1636  *	Kernel thread context (may sleep).
1637  *
1638  *	RETURNS:
1639  *	0 on success, AC_ERR_* mask on failure
1640  */
1641 unsigned int atapi_eh_request_sense(struct ata_device *dev,
1642 					   u8 *sense_buf, u8 dfl_sense_key)
1643 {
1644 	u8 cdb[ATAPI_CDB_LEN] =
1645 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1646 	struct ata_port *ap = dev->link->ap;
1647 	struct ata_taskfile tf;
1648 
1649 	DPRINTK("ATAPI request sense\n");
1650 
1651 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1652 
1653 	/* initialize sense_buf with the error register,
1654 	 * for the case where they are -not- overwritten
1655 	 */
1656 	sense_buf[0] = 0x70;
1657 	sense_buf[2] = dfl_sense_key;
1658 
1659 	/* some devices time out if garbage left in tf */
1660 	ata_tf_init(dev, &tf);
1661 
1662 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1663 	tf.command = ATA_CMD_PACKET;
1664 
1665 	/* is it pointless to prefer PIO for "safety reasons"? */
1666 	if (ap->flags & ATA_FLAG_PIO_DMA) {
1667 		tf.protocol = ATAPI_PROT_DMA;
1668 		tf.feature |= ATAPI_PKT_DMA;
1669 	} else {
1670 		tf.protocol = ATAPI_PROT_PIO;
1671 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1672 		tf.lbah = 0;
1673 	}
1674 
1675 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1676 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1677 }
1678 
1679 /**
1680  *	ata_eh_analyze_serror - analyze SError for a failed port
1681  *	@link: ATA link to analyze SError for
1682  *
1683  *	Analyze SError if available and further determine cause of
1684  *	failure.
1685  *
1686  *	LOCKING:
1687  *	None.
1688  */
1689 static void ata_eh_analyze_serror(struct ata_link *link)
1690 {
1691 	struct ata_eh_context *ehc = &link->eh_context;
1692 	u32 serror = ehc->i.serror;
1693 	unsigned int err_mask = 0, action = 0;
1694 	u32 hotplug_mask;
1695 
1696 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1697 		err_mask |= AC_ERR_ATA_BUS;
1698 		action |= ATA_EH_RESET;
1699 	}
1700 	if (serror & SERR_PROTOCOL) {
1701 		err_mask |= AC_ERR_HSM;
1702 		action |= ATA_EH_RESET;
1703 	}
1704 	if (serror & SERR_INTERNAL) {
1705 		err_mask |= AC_ERR_SYSTEM;
1706 		action |= ATA_EH_RESET;
1707 	}
1708 
1709 	/* Determine whether a hotplug event has occurred.  Both
1710 	 * SError.N/X are considered hotplug events for enabled or
1711 	 * host links.  For disabled PMP links, only the N bit is
1712 	 * considered, as the X bit is left at 1 for link plugging.
1713 	 */
1714 	if (link->lpm_policy > ATA_LPM_MAX_POWER)
1715 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
1716 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1717 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1718 	else
1719 		hotplug_mask = SERR_PHYRDY_CHG;
1720 
1721 	if (serror & hotplug_mask)
1722 		ata_ehi_hotplugged(&ehc->i);
1723 
1724 	ehc->i.err_mask |= err_mask;
1725 	ehc->i.action |= action;
1726 }
1727 
1728 /**
1729  *	ata_eh_analyze_ncq_error - analyze NCQ error
1730  *	@link: ATA link to analyze NCQ error for
1731  *
1732  *	Read log page 10h, determine the offending qc and acquire
1733  *	error status TF.  For NCQ device errors, all an LLDD has to do
1734  *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
1735  *	care of the rest.
1736  *
1737  *	LOCKING:
1738  *	Kernel thread context (may sleep).
1739  */
1740 void ata_eh_analyze_ncq_error(struct ata_link *link)
1741 {
1742 	struct ata_port *ap = link->ap;
1743 	struct ata_eh_context *ehc = &link->eh_context;
1744 	struct ata_device *dev = link->device;
1745 	struct ata_queued_cmd *qc;
1746 	struct ata_taskfile tf;
1747 	int tag, rc;
1748 
1749 	/* if frozen, we can't do much */
1750 	if (ap->pflags & ATA_PFLAG_FROZEN)
1751 		return;
1752 
1753 	/* is it NCQ device error? */
1754 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1755 		return;
1756 
1757 	/* has LLDD analyzed already? */
1758 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1759 		qc = __ata_qc_from_tag(ap, tag);
1760 
1761 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1762 			continue;
1763 
1764 		if (qc->err_mask)
1765 			return;
1766 	}
1767 
1768 	/* okay, this error is ours */
1769 	memset(&tf, 0, sizeof(tf));
1770 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1771 	if (rc) {
1772 		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
1773 			     rc);
1774 		return;
1775 	}
1776 
1777 	if (!(link->sactive & (1 << tag))) {
1778 		ata_link_err(link, "log page 10h reported inactive tag %d\n",
1779 			     tag);
1780 		return;
1781 	}
1782 
1783 	/* we've got the perpetrator, condemn it */
1784 	qc = __ata_qc_from_tag(ap, tag);
1785 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1786 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1787 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1788 	if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
1789 		char sense_key, asc, ascq;
1790 
1791 		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1792 		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1793 		ascq = qc->result_tf.auxiliary & 0xff;
1794 		ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
1795 		ata_scsi_set_sense_information(dev, qc->scsicmd,
1796 					       &qc->result_tf);
1797 		qc->flags |= ATA_QCFLAG_SENSE_VALID;
1798 	}
1799 
1800 	ehc->i.err_mask &= ~AC_ERR_DEV;
1801 }
1802 
1803 /**
1804  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1805  *	@qc: qc to analyze
1806  *	@tf: Taskfile registers to analyze
1807  *
1808  *	Analyze taskfile of @qc and further determine cause of
1809  *	failure.  This function also requests ATAPI sense data if
1810  *	available.
1811  *
1812  *	LOCKING:
1813  *	Kernel thread context (may sleep).
1814  *
1815  *	RETURNS:
1816  *	Determined recovery action
1817  */
1818 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1819 				      const struct ata_taskfile *tf)
1820 {
1821 	unsigned int tmp, action = 0;
1822 	u8 stat = tf->command, err = tf->feature;
1823 
1824 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1825 		qc->err_mask |= AC_ERR_HSM;
1826 		return ATA_EH_RESET;
1827 	}
1828 
1829 	if (stat & (ATA_ERR | ATA_DF)) {
1830 		qc->err_mask |= AC_ERR_DEV;
1831 		/*
1832 		 * Sense data reporting does not work if the
1833 		 * device fault bit is set.
1834 		 */
1835 		if (stat & ATA_DF)
1836 			stat &= ~ATA_SENSE;
1837 	} else {
1838 		return 0;
1839 	}
1840 
1841 	switch (qc->dev->class) {
1842 	case ATA_DEV_ATA:
1843 	case ATA_DEV_ZAC:
1844 		if (stat & ATA_SENSE)
1845 			ata_eh_request_sense(qc, qc->scsicmd);
1846 		if (err & ATA_ICRC)
1847 			qc->err_mask |= AC_ERR_ATA_BUS;
1848 		if (err & (ATA_UNC | ATA_AMNF))
1849 			qc->err_mask |= AC_ERR_MEDIA;
1850 		if (err & ATA_IDNF)
1851 			qc->err_mask |= AC_ERR_INVALID;
1852 		break;
1853 
1854 	case ATA_DEV_ATAPI:
1855 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1856 			tmp = atapi_eh_request_sense(qc->dev,
1857 						qc->scsicmd->sense_buffer,
1858 						qc->result_tf.feature >> 4);
1859 			if (!tmp)
1860 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1861 			else
1862 				qc->err_mask |= tmp;
1863 		}
1864 	}
1865 
1866 	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1867 		int ret = scsi_check_sense(qc->scsicmd);
1868 		/*
1869 		 * SUCCESS here means that the sense code could
1870 		 * evaluated and should be passed to the upper layers
1871 		 * for correct evaluation.
1872 		 * FAILED means the sense code could not interpreted
1873 		 * and the device would need to be reset.
1874 		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
1875 		 * command would need to be retried.
1876 		 */
1877 		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
1878 			qc->flags |= ATA_QCFLAG_RETRY;
1879 			qc->err_mask |= AC_ERR_OTHER;
1880 		} else if (ret != SUCCESS) {
1881 			qc->err_mask |= AC_ERR_HSM;
1882 		}
1883 	}
1884 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1885 		action |= ATA_EH_RESET;
1886 
1887 	return action;
1888 }
1889 
1890 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1891 				   int *xfer_ok)
1892 {
1893 	int base = 0;
1894 
1895 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1896 		*xfer_ok = 1;
1897 
1898 	if (!*xfer_ok)
1899 		base = ATA_ECAT_DUBIOUS_NONE;
1900 
1901 	if (err_mask & AC_ERR_ATA_BUS)
1902 		return base + ATA_ECAT_ATA_BUS;
1903 
1904 	if (err_mask & AC_ERR_TIMEOUT)
1905 		return base + ATA_ECAT_TOUT_HSM;
1906 
1907 	if (eflags & ATA_EFLAG_IS_IO) {
1908 		if (err_mask & AC_ERR_HSM)
1909 			return base + ATA_ECAT_TOUT_HSM;
1910 		if ((err_mask &
1911 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1912 			return base + ATA_ECAT_UNK_DEV;
1913 	}
1914 
1915 	return 0;
1916 }
1917 
1918 struct speed_down_verdict_arg {
1919 	u64 since;
1920 	int xfer_ok;
1921 	int nr_errors[ATA_ECAT_NR];
1922 };
1923 
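/*
 * ata_ering_map() callback: tally per-category error counts for
 * entries newer than arg->since, returning -1 to stop the walk at the
 * first stale or expired entry.
 */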
1924 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1925 {
1926 	struct speed_down_verdict_arg *arg = void_arg;
1927 	int cat;
1928 
1929 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1930 		return -1;
1931 
1932 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1933 				      &arg->xfer_ok);
1934 	arg->nr_errors[cat]++;
1935 
1936 	return 0;
1937 }
1938 
1939 /**
1940  *	ata_eh_speed_down_verdict - Determine speed down verdict
1941  *	@dev: Device of interest
1942  *
1943  *	This function examines the error ring of @dev and determines
1944  *	whether NCQ needs to be turned off, the transfer speed should
1945  *	be stepped down, or a fallback to PIO is necessary.
1946  *
1947  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1948  *
1949  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
1950  *			  IO commands
1951  *
1952  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1953  *
1954  *	ECAT_DUBIOUS_*	: Identical to the above three but occurring
1955  *			  while the data transfer hasn't been verified.
1956  *
1957  *	Verdicts are
1958  *
1959  *	NCQ_OFF		: Turn off NCQ.
1960  *
1961  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
1962  *			  to PIO.
1963  *
1964  *	FALLBACK_TO_PIO	: Fall back to PIO.
1965  *
1966  *	Even if multiple verdicts are returned, only one action is
1967  *	taken per error.  An action triggered by non-DUBIOUS errors
1968  *	clears the ering, while one triggered by DUBIOUS_* errors
1969  *	doesn't.  This is to expedite speed down decisions right
1970  *	after the device is initially configured.
1971  *
1972  *	The following are speed down rules.  #1 and #2 deal with
1973  *	DUBIOUS errors.
1974  *
1975  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1976  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1977  *
1978  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1979  *	   occurred during last 5 mins, NCQ_OFF.
1980  *
1981  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1982  *	   occurred during last 5 mins, FALLBACK_TO_PIO
1983  *
1984  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1985  *	   during last 10 mins, NCQ_OFF.
1986  *
1987  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1988  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
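 *
 *	For example, two DUBIOUS_TOUT_HSM errors within the last five
 *	minutes match both #1 and #2 above, so the returned verdict
 *	would be SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF | KEEP_ERRORS.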
1989  *
1990  *	LOCKING:
1991  *	Inherited from caller.
1992  *
1993  *	RETURNS:
1994  *	OR of ATA_EH_SPDN_* flags.
1995  */
1996 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1997 {
1998 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1999 	u64 j64 = get_jiffies_64();
2000 	struct speed_down_verdict_arg arg;
2001 	unsigned int verdict = 0;
2002 
2003 	/* scan past 5 mins of error history */
2004 	memset(&arg, 0, sizeof(arg));
2005 	arg.since = j64 - min(j64, j5mins);
2006 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2007 
2008 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
2009 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
2010 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
2011 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
2012 
2013 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
2014 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
2015 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
2016 
2017 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2018 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2019 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2020 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
2021 
2022 	/* scan past 10 mins of error history */
2023 	memset(&arg, 0, sizeof(arg));
2024 	arg.since = j64 - min(j64, j10mins);
2025 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
2026 
2027 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
2028 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
2029 		verdict |= ATA_EH_SPDN_NCQ_OFF;
2030 
2031 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
2032 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
2033 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
2034 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
2035 
2036 	return verdict;
2037 }
2038 
2039 /**
2040  *	ata_eh_speed_down - record error and speed down if necessary
2041  *	@dev: Failed device
2042  *	@eflags: mask of ATA_EFLAG_* flags
2043  *	@err_mask: err_mask of the error
2044  *
2045  *	Record the error and examine the error history to determine
2046  *	whether adjusting the transmission speed is necessary.  It
2047  *	also sets transmission limits appropriately if such an
2048  *	adjustment is necessary.
2049  *
2050  *	LOCKING:
2051  *	Kernel thread context (may sleep).
2052  *
2053  *	RETURNS:
2054  *	Determined recovery action.
2055  */
2056 static unsigned int ata_eh_speed_down(struct ata_device *dev,
2057 				unsigned int eflags, unsigned int err_mask)
2058 {
2059 	struct ata_link *link = ata_dev_phys_link(dev);
2060 	int xfer_ok = 0;
2061 	unsigned int verdict;
2062 	unsigned int action = 0;
2063 
2064 	/* don't bother if Cat-0 error */
2065 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
2066 		return 0;
2067 
2068 	/* record error and determine whether speed down is necessary */
2069 	ata_ering_record(&dev->ering, eflags, err_mask);
2070 	verdict = ata_eh_speed_down_verdict(dev);
2071 
2072 	/* turn off NCQ? */
2073 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
2074 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
2075 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
2076 		dev->flags |= ATA_DFLAG_NCQ_OFF;
2077 		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
2078 		goto done;
2079 	}
2080 
2081 	/* speed down? */
2082 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
2083 		/* speed down SATA link speed if possible */
2084 		if (sata_down_spd_limit(link, 0) == 0) {
2085 			action |= ATA_EH_RESET;
2086 			goto done;
2087 		}
2088 
2089 		/* lower transfer mode */
2090 		if (dev->spdn_cnt < 2) {
2091 			static const int dma_dnxfer_sel[] =
2092 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
2093 			static const int pio_dnxfer_sel[] =
2094 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
2095 			int sel;
2096 
2097 			if (dev->xfer_shift != ATA_SHIFT_PIO)
2098 				sel = dma_dnxfer_sel[dev->spdn_cnt];
2099 			else
2100 				sel = pio_dnxfer_sel[dev->spdn_cnt];
2101 
2102 			dev->spdn_cnt++;
2103 
2104 			if (ata_down_xfermask_limit(dev, sel) == 0) {
2105 				action |= ATA_EH_RESET;
2106 				goto done;
2107 			}
2108 		}
2109 	}
2110 
2111 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
2112 	 * SATA ATA devices.  Consider it only for PATA and SATA ATAPI.
2113 	 */
2114 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
2115 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
2116 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
2117 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
2118 			dev->spdn_cnt = 0;
2119 			action |= ATA_EH_RESET;
2120 			goto done;
2121 		}
2122 	}
2123 
2124 	return 0;
2125  done:
2126 	/* device has been slowed down, blow error history */
2127 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
2128 		ata_ering_clear(&dev->ering);
2129 	return action;
2130 }
2131 
2132 /**
2133  *	ata_eh_worth_retry - analyze error and decide whether to retry
2134  *	@qc: qc to possibly retry
2135  *
2136  *	Look at the cause of the error and decide if a retry
2137  *	might be useful or not.  We don't want to retry media errors
2138  *	because the drive itself has probably already taken 10-30 seconds
2139  *	doing its own internal retries before reporting the failure.
2140  */
2141 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
2142 {
2143 	if (qc->err_mask & AC_ERR_MEDIA)
2144 		return 0;	/* don't retry media errors */
2145 	if (qc->flags & ATA_QCFLAG_IO)
2146 		return 1;	/* otherwise retry anything from fs stack */
2147 	if (qc->err_mask & AC_ERR_INVALID)
2148 		return 0;	/* don't retry these */
2149 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
2150 }
2151 
2152 /**
2153  *	ata_eh_link_autopsy - analyze error and determine recovery action
2154  *	@link: host link to perform autopsy on
2155  *
2156  *	Analyze why @link failed and determine which recovery actions
2157  *	are needed.  This function also sets more detailed AC_ERR_*
2158  *	values and fills sense data for ATAPI CHECK SENSE.
2159  *
2160  *	LOCKING:
2161  *	Kernel thread context (may sleep).
2162  */
2163 static void ata_eh_link_autopsy(struct ata_link *link)
2164 {
2165 	struct ata_port *ap = link->ap;
2166 	struct ata_eh_context *ehc = &link->eh_context;
2167 	struct ata_device *dev;
2168 	unsigned int all_err_mask = 0, eflags = 0;
2169 	int tag;
2170 	u32 serror;
2171 	int rc;
2172 
2173 	DPRINTK("ENTER\n");
2174 
2175 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2176 		return;
2177 
2178 	/* obtain and analyze SError */
2179 	rc = sata_scr_read(link, SCR_ERROR, &serror);
2180 	if (rc == 0) {
2181 		ehc->i.serror |= serror;
2182 		ata_eh_analyze_serror(link);
2183 	} else if (rc != -EOPNOTSUPP) {
2184 		/* SError read failed, force reset and probing */
2185 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2186 		ehc->i.action |= ATA_EH_RESET;
2187 		ehc->i.err_mask |= AC_ERR_OTHER;
2188 	}
2189 
2190 	/* analyze NCQ failure */
2191 	ata_eh_analyze_ncq_error(link);
2192 
2193 	/* any real error trumps AC_ERR_OTHER */
2194 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2195 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2196 
2197 	all_err_mask |= ehc->i.err_mask;
2198 
2199 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2200 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2201 
2202 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2203 		    ata_dev_phys_link(qc->dev) != link)
2204 			continue;
2205 
2206 		/* inherit upper level err_mask */
2207 		qc->err_mask |= ehc->i.err_mask;
2208 
2209 		/* analyze TF */
2210 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2211 
2212 		/* DEV errors are probably spurious in case of ATA_BUS error */
2213 		if (qc->err_mask & AC_ERR_ATA_BUS)
2214 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2215 					  AC_ERR_INVALID);
2216 
2217 		/* any real error trumps unknown error */
2218 		if (qc->err_mask & ~AC_ERR_OTHER)
2219 			qc->err_mask &= ~AC_ERR_OTHER;
2220 
2221 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2222 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2223 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2224 
2225 		/* determine whether the command is worth retrying */
2226 		if (ata_eh_worth_retry(qc))
2227 			qc->flags |= ATA_QCFLAG_RETRY;
2228 
2229 		/* accumulate error info */
2230 		ehc->i.dev = qc->dev;
2231 		all_err_mask |= qc->err_mask;
2232 		if (qc->flags & ATA_QCFLAG_IO)
2233 			eflags |= ATA_EFLAG_IS_IO;
2234 		trace_ata_eh_link_autopsy_qc(qc);
2235 	}
2236 
2237 	/* enforce default EH actions */
2238 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2239 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2240 		ehc->i.action |= ATA_EH_RESET;
2241 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2242 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2243 		ehc->i.action |= ATA_EH_REVALIDATE;
2244 
2245 	/* If we have offending qcs and the associated failed device,
2246 	 * perform per-dev EH action only on the offending device.
2247 	 */
2248 	if (ehc->i.dev) {
2249 		ehc->i.dev_action[ehc->i.dev->devno] |=
2250 			ehc->i.action & ATA_EH_PERDEV_MASK;
2251 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2252 	}
2253 
2254 	/* propagate timeout to host link */
2255 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2256 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2257 
2258 	/* record error and consider speeding down */
2259 	dev = ehc->i.dev;
2260 	if (!dev && ata_link_max_devices(link) == 1 &&
2261 	    ata_dev_enabled(link->device))
2262 		dev = link->device;
2263 
2264 	if (dev) {
2265 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2266 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2267 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2268 		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
2269 	}
2270 	DPRINTK("EXIT\n");
2271 }
2272 
2273 /**
2274  *	ata_eh_autopsy - analyze error and determine recovery action
2275  *	@ap: host port to perform autopsy on
2276  *
2277  *	Analyze all links of @ap and determine why they failed and
2278  *	which recovery actions are needed.
2279  *
2280  *	LOCKING:
2281  *	Kernel thread context (may sleep).
2282  */
2283 void ata_eh_autopsy(struct ata_port *ap)
2284 {
2285 	struct ata_link *link;
2286 
2287 	ata_for_each_link(link, ap, EDGE)
2288 		ata_eh_link_autopsy(link);
2289 
2290 	/* Handle the frigging slave link.  Autopsy is done similarly
2291 	 * but actions and flags are transferred over to the master
2292 	 * link and handled from there.
2293 	 */
2294 	if (ap->slave_link) {
2295 		struct ata_eh_context *mehc = &ap->link.eh_context;
2296 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2297 
2298 		/* transfer control flags from master to slave */
2299 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2300 
2301 		/* perform autopsy on the slave link */
2302 		ata_eh_link_autopsy(ap->slave_link);
2303 
2304 		/* transfer actions from slave to master and clear slave */
2305 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2306 		mehc->i.action		|= sehc->i.action;
2307 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2308 		mehc->i.flags		|= sehc->i.flags;
2309 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2310 	}
2311 
2312 	/* Autopsy of fanout ports can affect host link autopsy.
2313 	 * Perform host link autopsy last.
2314 	 */
2315 	if (sata_pmp_attached(ap))
2316 		ata_eh_link_autopsy(&ap->link);
2317 }
2318 
2319 /**
2320  *	ata_get_cmd_descript - get description for ATA command
2321  *	@command: ATA command code to get description for
2322  *
2323  *	Return a textual description of the given command, or NULL if the
2324  *	command is not known.
2325  *
2326  *	LOCKING:
2327  *	None
2328  */
2329 const char *ata_get_cmd_descript(u8 command)
2330 {
2331 #ifdef CONFIG_ATA_VERBOSE_ERROR
2332 	static const struct
2333 	{
2334 		u8 command;
2335 		const char *text;
2336 	} cmd_descr[] = {
2337 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2338 		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
2339 		{ ATA_CMD_STANDBY,		"STANDBY" },
2340 		{ ATA_CMD_IDLE,			"IDLE" },
2341 		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
2342 		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
2343 		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
2344 		{ ATA_CMD_NOP,			"NOP" },
2345 		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
2346 		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
2347 		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
2348 		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
2349 		{ ATA_CMD_SERVICE,		"SERVICE" },
2350 		{ ATA_CMD_READ,			"READ DMA" },
2351 		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
2352 		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
2353 		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
2354 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2355 		{ ATA_CMD_WRITE,		"WRITE DMA" },
2356 		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
2357 		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
2358 		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
2359 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2360 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2361 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2362 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2363 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2364 		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
2365 		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
2366 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2367 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2368 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2369 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2370 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2371 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2372 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2373 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2374 		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
2375 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2376 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2377 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2378 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2379 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2380 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2381 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2382 		{ ATA_CMD_SLEEP,		"SLEEP" },
2383 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2384 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2385 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2386 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2387 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2388 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2389 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2390 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2391 		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
2392 		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
2393 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2394 		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
2395 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2396 		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
2397 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2398 		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
2399 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2400 		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
2401 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2402 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2403 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2404 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2405 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2406 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2407 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2408 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2409 		{ ATA_CMD_SMART,		"SMART" },
2410 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2411 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2412 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
2413 		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
2414 		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
2415 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2416 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2417 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2418 		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2419 		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
2420 		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
2421 		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
2422 		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
2423 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2424 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2425 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2426 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2427 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2428 		{ 0,				NULL } /* terminate list */
2429 	};
2430 
2431 	unsigned int i;
2432 	for (i = 0; cmd_descr[i].text; i++)
2433 		if (cmd_descr[i].command == command)
2434 			return cmd_descr[i].text;
2435 #endif
2436 
2437 	return NULL;
2438 }
2439 EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
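
/*
 * Example (illustrative only): a low-level driver can feed the result
 * straight into its error messages, falling back to the raw opcode
 * when the command is unknown:
 *
 *	const char *desc = ata_get_cmd_descript(qc->tf.command);
 *
 *	if (desc)
 *		ata_dev_warn(qc->dev, "failed command: %s\n", desc);
 *	else
 *		ata_dev_warn(qc->dev, "failed command: 0x%02x\n",
 *			     qc->tf.command);
 */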
2440 
2441 /**
2442  *	ata_eh_link_report - report error handling to user
2443  *	@link: ATA link EH is going on for
2444  *
2445  *	Report EH to user.
2446  *
2447  *	LOCKING:
2448  *	None.
2449  */
2450 static void ata_eh_link_report(struct ata_link *link)
2451 {
2452 	struct ata_port *ap = link->ap;
2453 	struct ata_eh_context *ehc = &link->eh_context;
2454 	const char *frozen, *desc;
2455 	char tries_buf[6] = "";
2456 	int tag, nr_failed = 0;
2457 
2458 	if (ehc->i.flags & ATA_EHI_QUIET)
2459 		return;
2460 
2461 	desc = NULL;
2462 	if (ehc->i.desc[0] != '\0')
2463 		desc = ehc->i.desc;
2464 
2465 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2466 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2467 
2468 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2469 		    ata_dev_phys_link(qc->dev) != link ||
2470 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2471 		     qc->err_mask == AC_ERR_DEV))
2472 			continue;
2473 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2474 			continue;
2475 
2476 		nr_failed++;
2477 	}
2478 
2479 	if (!nr_failed && !ehc->i.err_mask)
2480 		return;
2481 
2482 	frozen = "";
2483 	if (ap->pflags & ATA_PFLAG_FROZEN)
2484 		frozen = " frozen";
2485 
2486 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2487 		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2488 			 ap->eh_tries);
2489 
2490 	if (ehc->i.dev) {
2491 		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
2492 			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2493 			    ehc->i.err_mask, link->sactive, ehc->i.serror,
2494 			    ehc->i.action, frozen, tries_buf);
2495 		if (desc)
2496 			ata_dev_err(ehc->i.dev, "%s\n", desc);
2497 	} else {
2498 		ata_link_err(link, "exception Emask 0x%x "
2499 			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2500 			     ehc->i.err_mask, link->sactive, ehc->i.serror,
2501 			     ehc->i.action, frozen, tries_buf);
2502 		if (desc)
2503 			ata_link_err(link, "%s\n", desc);
2504 	}
2505 
2506 #ifdef CONFIG_ATA_VERBOSE_ERROR
2507 	if (ehc->i.serror)
2508 		ata_link_err(link,
2509 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2510 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2511 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2512 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2513 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2514 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2515 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2516 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2517 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2518 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2519 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2520 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2521 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2522 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2523 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2524 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2525 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2526 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2527 #endif
2528 
2529 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2530 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2531 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2532 		char data_buf[20] = "";
2533 		char cdb_buf[70] = "";
2534 
2535 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2536 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2537 			continue;
2538 
2539 		if (qc->dma_dir != DMA_NONE) {
2540 			static const char *dma_str[] = {
2541 				[DMA_BIDIRECTIONAL]	= "bidi",
2542 				[DMA_TO_DEVICE]		= "out",
2543 				[DMA_FROM_DEVICE]	= "in",
2544 			};
2545 			const char *prot_str = NULL;
2546 
2547 			switch (qc->tf.protocol) {
2548 			case ATA_PROT_UNKNOWN:
2549 				prot_str = "unknown";
2550 				break;
2551 			case ATA_PROT_NODATA:
2552 				prot_str = "nodata";
2553 				break;
2554 			case ATA_PROT_PIO:
2555 				prot_str = "pio";
2556 				break;
2557 			case ATA_PROT_DMA:
2558 				prot_str = "dma";
2559 				break;
2560 			case ATA_PROT_NCQ:
2561 				prot_str = "ncq dma";
2562 				break;
2563 			case ATA_PROT_NCQ_NODATA:
2564 				prot_str = "ncq nodata";
2565 				break;
2566 			case ATAPI_PROT_NODATA:
2567 				prot_str = "nodata";
2568 				break;
2569 			case ATAPI_PROT_PIO:
2570 				prot_str = "pio";
2571 				break;
2572 			case ATAPI_PROT_DMA:
2573 				prot_str = "dma";
2574 				break;
2575 			}
2576 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2577 				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
2578 		}
2579 
2580 		if (ata_is_atapi(qc->tf.protocol)) {
2581 			const u8 *cdb = qc->cdb;
2582 			size_t cdb_len = qc->dev->cdb_len;
2583 
2584 			if (qc->scsicmd) {
2585 				cdb = qc->scsicmd->cmnd;
2586 				cdb_len = qc->scsicmd->cmd_len;
2587 			}
2588 			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
2589 					      cdb, cdb_len);
2590 		} else {
2591 			const char *descr = ata_get_cmd_descript(cmd->command);
2592 			if (descr)
2593 				ata_dev_err(qc->dev, "failed command: %s\n",
2594 					    descr);
2595 		}
2596 
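		/*
		 * Log one cmd/res register pair per failed command:
		 * command/feature:nsect:lbal:lbam:lbah, the HOB (48-bit)
		 * copies of those fields, then the device register.  In
		 * the "res" line the command and feature slots carry the
		 * status and error registers.
		 */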
2597 		ata_dev_err(qc->dev,
2598 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2599 			"tag %d%s\n         %s"
2600 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2601 			"Emask 0x%x (%s)%s\n",
2602 			cmd->command, cmd->feature, cmd->nsect,
2603 			cmd->lbal, cmd->lbam, cmd->lbah,
2604 			cmd->hob_feature, cmd->hob_nsect,
2605 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2606 			cmd->device, qc->tag, data_buf, cdb_buf,
2607 			res->command, res->feature, res->nsect,
2608 			res->lbal, res->lbam, res->lbah,
2609 			res->hob_feature, res->hob_nsect,
2610 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2611 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2612 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2613 
2614 #ifdef CONFIG_ATA_VERBOSE_ERROR
2615 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2616 				    ATA_SENSE | ATA_ERR)) {
2617 			if (res->command & ATA_BUSY)
2618 				ata_dev_err(qc->dev, "status: { Busy }\n");
2619 			else
2620 				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
2621 				  res->command & ATA_DRDY ? "DRDY " : "",
2622 				  res->command & ATA_DF ? "DF " : "",
2623 				  res->command & ATA_DRQ ? "DRQ " : "",
2624 				  res->command & ATA_SENSE ? "SENSE " : "",
2625 				  res->command & ATA_ERR ? "ERR " : "");
2626 		}
2627 
2628 		if (cmd->command != ATA_CMD_PACKET &&
2629 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2630 				     ATA_IDNF | ATA_ABORTED)))
2631 			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2632 			  res->feature & ATA_ICRC ? "ICRC " : "",
2633 			  res->feature & ATA_UNC ? "UNC " : "",
2634 			  res->feature & ATA_AMNF ? "AMNF " : "",
2635 			  res->feature & ATA_IDNF ? "IDNF " : "",
2636 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2637 #endif
2638 	}
2639 }
2640 
2641 /**
2642  *	ata_eh_report - report error handling to user
2643  *	@ap: ATA port to report EH about
2644  *
2645  *	Report EH to user.
2646  *
2647  *	LOCKING:
2648  *	None.
2649  */
2650 void ata_eh_report(struct ata_port *ap)
2651 {
2652 	struct ata_link *link;
2653 
2654 	ata_for_each_link(link, ap, HOST_FIRST)
2655 		ata_eh_link_report(link);
2656 }
2657 
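/*
 * Invoke @reset on @link.  When @clear_classes is set, forget the
 * previous classification first so that stale classes can't leak into
 * the fresh probe result.
 */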
2658 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2659 			unsigned int *classes, unsigned long deadline,
2660 			bool clear_classes)
2661 {
2662 	struct ata_device *dev;
2663 
2664 	if (clear_classes)
2665 		ata_for_each_dev(dev, link, ALL)
2666 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2667 
2668 	return reset(link, classes, deadline);
2669 }
2670 
2671 static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
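/*
 * Decide whether a follow-up softreset is needed after a hardreset:
 * either the hardreset explicitly asked for one (-EAGAIN, typically
 * because classification couldn't be done) or the link is a
 * PMP-capable host link where SRST is required to probe the PMP.
 * Offline links and links flagged ATA_LFLAG_NO_SRST are exempt.
 */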
2672 {
2673 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2674 		return 0;
2675 	if (rc == -EAGAIN)
2676 		return 1;
2677 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2678 		return 1;
2679 	return 0;
2680 }
2681 
2682 int ata_eh_reset(struct ata_link *link, int classify,
2683 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2684 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2685 {
2686 	struct ata_port *ap = link->ap;
2687 	struct ata_link *slave = ap->slave_link;
2688 	struct ata_eh_context *ehc = &link->eh_context;
2689 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2690 	unsigned int *classes = ehc->classes;
2691 	unsigned int lflags = link->flags;
2692 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2693 	int max_tries = 0, try = 0;
2694 	struct ata_link *failed_link;
2695 	struct ata_device *dev;
2696 	unsigned long deadline, now;
2697 	ata_reset_fn_t reset;
2698 	unsigned long flags;
2699 	u32 sstatus;
2700 	int nr_unknown, rc;
2701 
2702 	/*
2703 	 * Prepare to reset
2704 	 */
2705 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2706 		max_tries++;
2707 	if (link->flags & ATA_LFLAG_RST_ONCE)
2708 		max_tries = 1;
2709 	if (link->flags & ATA_LFLAG_NO_HRST)
2710 		hardreset = NULL;
2711 	if (link->flags & ATA_LFLAG_NO_SRST)
2712 		softreset = NULL;
2713 
2714 	/* make sure each reset attempt is at least COOL_DOWN apart */
2715 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2716 		now = jiffies;
2717 		WARN_ON(time_after(ehc->last_reset, now));
2718 		deadline = ata_deadline(ehc->last_reset,
2719 					ATA_EH_RESET_COOL_DOWN);
2720 		if (time_before(now, deadline))
2721 			schedule_timeout_uninterruptible(deadline - now);
2722 	}
2723 
2724 	spin_lock_irqsave(ap->lock, flags);
2725 	ap->pflags |= ATA_PFLAG_RESETTING;
2726 	spin_unlock_irqrestore(ap->lock, flags);
2727 
2728 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2729 
2730 	ata_for_each_dev(dev, link, ALL) {
2731 		/* If we issue an SRST then an ATA drive (not ATAPI)
2732 		 * may change configuration and be in PIO0 timing. If
2733 		 * we do a hard reset (or are coming from power on)
2734 		 * this is true for ATA or ATAPI. Until we've set a
2735 		 * suitable controller mode we should not touch the
2736 		 * bus as we may be talking too fast.
2737 		 */
2738 		dev->pio_mode = XFER_PIO_0;
2739 		dev->dma_mode = 0xff;
2740 
2741 		/* If the controller has a pio mode setup function
2742 		 * then use it to set the chipset to rights. Don't
2743 		 * touch the DMA setup as that will be dealt with when
2744 		 * configuring devices.
2745 		 */
2746 		if (ap->ops->set_piomode)
2747 			ap->ops->set_piomode(ap, dev);
2748 	}
2749 
2750 	/* prefer hardreset */
2751 	reset = NULL;
2752 	ehc->i.action &= ~ATA_EH_RESET;
2753 	if (hardreset) {
2754 		reset = hardreset;
2755 		ehc->i.action |= ATA_EH_HARDRESET;
2756 	} else if (softreset) {
2757 		reset = softreset;
2758 		ehc->i.action |= ATA_EH_SOFTRESET;
2759 	}
2760 
2761 	if (prereset) {
2762 		unsigned long deadline = ata_deadline(jiffies,
2763 						      ATA_EH_PRERESET_TIMEOUT);
2764 
2765 		if (slave) {
2766 			sehc->i.action &= ~ATA_EH_RESET;
2767 			sehc->i.action |= ehc->i.action;
2768 		}
2769 
2770 		rc = prereset(link, deadline);
2771 
2772 		/* If present, do prereset on slave link too.  Reset
2773 		 * is skipped iff both master and slave links report
2774 		 * -ENOENT or clear ATA_EH_RESET.
2775 		 */
2776 		if (slave && (rc == 0 || rc == -ENOENT)) {
2777 			int tmp;
2778 
2779 			tmp = prereset(slave, deadline);
2780 			if (tmp != -ENOENT)
2781 				rc = tmp;
2782 
2783 			ehc->i.action |= sehc->i.action;
2784 		}
2785 
2786 		if (rc) {
2787 			if (rc == -ENOENT) {
2788 				ata_link_dbg(link, "port disabled--ignoring\n");
2789 				ehc->i.action &= ~ATA_EH_RESET;
2790 
2791 				ata_for_each_dev(dev, link, ALL)
2792 					classes[dev->devno] = ATA_DEV_NONE;
2793 
2794 				rc = 0;
2795 			} else
2796 				ata_link_err(link,
2797 					     "prereset failed (errno=%d)\n",
2798 					     rc);
2799 			goto out;
2800 		}
2801 
2802 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2803 		 * bang classes, thaw and return.
2804 		 */
2805 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2806 			ata_for_each_dev(dev, link, ALL)
2807 				classes[dev->devno] = ATA_DEV_NONE;
2808 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2809 			    ata_is_host_link(link))
2810 				ata_eh_thaw_port(ap);
2811 			rc = 0;
2812 			goto out;
2813 		}
2814 	}
2815 
2816  retry:
2817 	/*
2818 	 * Perform reset
2819 	 */
2820 	if (ata_is_host_link(link))
2821 		ata_eh_freeze_port(ap);
2822 
2823 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2824 
2825 	if (reset) {
2826 		if (verbose)
2827 			ata_link_info(link, "%s resetting link\n",
2828 				      reset == softreset ? "soft" : "hard");
2829 
2830 		/* mark that this EH session started with reset */
2831 		ehc->last_reset = jiffies;
2832 		if (reset == hardreset)
2833 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2834 		else
2835 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2836 
2837 		rc = ata_do_reset(link, reset, classes, deadline, true);
2838 		if (rc && rc != -EAGAIN) {
2839 			failed_link = link;
2840 			goto fail;
2841 		}
2842 
2843 		/* hardreset slave link if existent */
2844 		if (slave && reset == hardreset) {
2845 			int tmp;
2846 
2847 			if (verbose)
2848 				ata_link_info(slave, "hard resetting link\n");
2849 
2850 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2851 			tmp = ata_do_reset(slave, reset, classes, deadline,
2852 					   false);
2853 			switch (tmp) {
2854 			case -EAGAIN:
2855 				rc = -EAGAIN;
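				/* fall through */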
2856 			case 0:
2857 				break;
2858 			default:
2859 				failed_link = slave;
2860 				rc = tmp;
2861 				goto fail;
2862 			}
2863 		}
2864 
2865 		/* perform follow-up SRST if necessary */
2866 		if (reset == hardreset &&
2867 		    ata_eh_followup_srst_needed(link, rc)) {
2868 			reset = softreset;
2869 
2870 			if (!reset) {
2871 				ata_link_err(link,
2872 	     "follow-up softreset required but no softreset available\n");
2873 				failed_link = link;
2874 				rc = -EINVAL;
2875 				goto fail;
2876 			}
2877 
2878 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2879 			rc = ata_do_reset(link, reset, classes, deadline, true);
2880 			if (rc) {
2881 				failed_link = link;
2882 				goto fail;
2883 			}
2884 		}
2885 	} else {
2886 		if (verbose)
2887 			ata_link_info(link,
2888 	"no reset method available, skipping reset\n");
2889 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2890 			lflags |= ATA_LFLAG_ASSUME_ATA;
2891 	}
2892 
2893 	/*
2894 	 * Post-reset processing
2895 	 */
2896 	ata_for_each_dev(dev, link, ALL) {
2897 		/* After the reset, the device state is PIO 0 and the
2898 		 * controller state is undefined.  Reset also wakes up
2899 		 * drives from sleeping mode.
2900 		 */
2901 		dev->pio_mode = XFER_PIO_0;
2902 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2903 
2904 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2905 			continue;
2906 
2907 		/* apply class override */
2908 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2909 			classes[dev->devno] = ATA_DEV_ATA;
2910 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2911 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2912 	}
2913 
2914 	/* record current link speed */
2915 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2916 		link->sata_spd = (sstatus >> 4) & 0xf;
2917 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2918 		slave->sata_spd = (sstatus >> 4) & 0xf;
2919 
2920 	/* thaw the port */
2921 	if (ata_is_host_link(link))
2922 		ata_eh_thaw_port(ap);
2923 
2924 	/* postreset() should clear hardware SError.  Although SError
2925 	 * is cleared during link resume, clearing SError here is
2926 	 * necessary as some PHYs raise hotplug events after SRST.
2927 	 * This introduces a race condition where hotplug can occur
2928 	 * between reset and here.  The race is handled by cross
2929 	 * checking link onlineness and the classification result later.
2930 	 */
2931 	if (postreset) {
2932 		postreset(link, classes);
2933 		if (slave)
2934 			postreset(slave, classes);
2935 	}
2936 
2937 	/*
2938 	 * Some controllers can't be frozen very well and may set spurious
2939 	 * error conditions during reset.  Clear accumulated error
2940 	 * information and re-thaw the port if frozen.  As reset is the
2941 	 * final recovery action and we cross check link onlineness against
2942 	 * device classification later, no hotplug event is lost by this.
2943 	 */
2944 	spin_lock_irqsave(link->ap->lock, flags);
2945 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2946 	if (slave)
2947 		memset(&slave->eh_info, 0, sizeof(link->eh_info));
2948 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2949 	spin_unlock_irqrestore(link->ap->lock, flags);
2950 
2951 	if (ap->pflags & ATA_PFLAG_FROZEN)
2952 		ata_eh_thaw_port(ap);
2953 
2954 	/*
2955 	 * Make sure onlineness and classification result correspond.
2956 	 * Hotplug could have happened during reset and some
2957 	 * controllers fail to wait while a drive is spinning up after
2958 	 * being hotplugged, causing misdetection.  By cross checking
2959 	 * link on/offlineness and classification result, those
2960 	 * conditions can be reliably detected and retried.
2961 	 */
2962 	nr_unknown = 0;
2963 	ata_for_each_dev(dev, link, ALL) {
2964 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2965 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2966 				ata_dev_dbg(dev, "link online but device misclassified\n");
2967 				classes[dev->devno] = ATA_DEV_NONE;
2968 				nr_unknown++;
2969 			}
2970 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2971 			if (ata_class_enabled(classes[dev->devno]))
2972 				ata_dev_dbg(dev,
2973 					    "link offline, clearing class %d to NONE\n",
2974 					    classes[dev->devno]);
2975 			classes[dev->devno] = ATA_DEV_NONE;
2976 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2977 			ata_dev_dbg(dev,
2978 				    "link status unknown, clearing UNKNOWN to NONE\n");
2979 			classes[dev->devno] = ATA_DEV_NONE;
2980 		}
2981 	}
2982 
2983 	if (classify && nr_unknown) {
2984 		if (try < max_tries) {
2985 			ata_link_warn(link,
2986 				      "link online but %d devices misclassified, retrying\n",
2987 				      nr_unknown);
2988 			failed_link = link;
2989 			rc = -EAGAIN;
2990 			goto fail;
2991 		}
2992 		ata_link_warn(link,
2993 			      "link online but %d devices misclassified, "
2994 			      "device detection might fail\n", nr_unknown);
2995 	}
2996 
2997 	/* reset successful, schedule revalidation */
2998 	ata_eh_done(link, NULL, ATA_EH_RESET);
2999 	if (slave)
3000 		ata_eh_done(slave, NULL, ATA_EH_RESET);
3001 	ehc->last_reset = jiffies;		/* update to completion time */
3002 	ehc->i.action |= ATA_EH_REVALIDATE;
3003 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
3004 
3005 	rc = 0;
3006  out:
3007 	/* clear hotplug flag */
3008 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3009 	if (slave)
3010 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
3011 
3012 	spin_lock_irqsave(ap->lock, flags);
3013 	ap->pflags &= ~ATA_PFLAG_RESETTING;
3014 	spin_unlock_irqrestore(ap->lock, flags);
3015 
3016 	return rc;
3017 
3018  fail:
3019 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
3020 	if (!ata_is_host_link(link) &&
3021 	    sata_scr_read(link, SCR_STATUS, &sstatus))
3022 		rc = -ERESTART;
3023 
3024 	if (try >= max_tries) {
3025 		/*
3026 		 * Thaw host port even if reset failed, so that the port
3027 		 * can be retried on the next phy event.  This risks
3028 		 * repeated EH runs but seems to be a better tradeoff than
3029 		 * shutting down a port after a botched hotplug attempt.
3030 		 */
3031 		if (ata_is_host_link(link))
3032 			ata_eh_thaw_port(ap);
3033 		goto out;
3034 	}
3035 
3036 	now = jiffies;
3037 	if (time_before(now, deadline)) {
3038 		unsigned long delta = deadline - now;
3039 
3040 		ata_link_warn(failed_link,
3041 			"reset failed (errno=%d), retrying in %u secs\n",
3042 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
3043 
3044 		ata_eh_release(ap);
3045 		while (delta)
3046 			delta = schedule_timeout_uninterruptible(delta);
3047 		ata_eh_acquire(ap);
3048 	}
3049 
3050 	/*
3051 	 * While disks spin up behind a PMP, some controllers fail sending SRST.
3052 	 * They need to be reset - as well as the PMP - before retrying.
3053 	 */
3054 	if (rc == -ERESTART) {
3055 		if (ata_is_host_link(link))
3056 			ata_eh_thaw_port(ap);
3057 		goto out;
3058 	}
3059 
3060 	if (try == max_tries - 1) {
3061 		sata_down_spd_limit(link, 0);
3062 		if (slave)
3063 			sata_down_spd_limit(slave, 0);
3064 	} else if (rc == -EPIPE)
3065 		sata_down_spd_limit(failed_link, 0);
3066 
3067 	if (hardreset)
3068 		reset = hardreset;
3069 	goto retry;
3070 }
3071 
3072 static inline void ata_eh_pull_park_action(struct ata_port *ap)
3073 {
3074 	struct ata_link *link;
3075 	struct ata_device *dev;
3076 	unsigned long flags;
3077 
3078 	/*
3079 	 * This function can be thought of as an extended version of
3080 	 * ata_eh_about_to_do() specially crafted to accommodate the
3081 	 * requirements of ATA_EH_PARK handling. Since the EH thread
3082 	 * does not leave the do {} while () loop in ata_eh_recover as
3083 	 * long as the timeout for a park request to *one* device on
3084 	 * the port has not expired, and since we still want to pick
3085 	 * up park requests to other devices on the same port or
3086 	 * timeout updates for the same device, we have to pull
3087 	 * ATA_EH_PARK actions from eh_info into eh_context.i
3088 	 * ourselves at the beginning of each pass over the loop.
3089 	 *
3090 	 * Additionally, all write accesses to &ap->park_req_pending
3091 	 * through reinit_completion() (see below) or complete_all()
3092 	 * (see ata_scsi_park_store()) are protected by the host lock.
3093 	 * As a result we have that park_req_pending.done is zero on
3094 	 * exit from this function, i.e. when ATA_EH_PARK actions for
3095 	 * *all* devices on port ap have been pulled into the
3096 	 * respective eh_context structs. If, and only if,
3097 	 * park_req_pending.done is non-zero by the time we reach
3098 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
3099 	 * has been scheduled for at least one of the devices on port
3100 	 * ap and we have to cycle over the do {} while () loop in
3101 	 * ata_eh_recover() again.
3102 	 */
3103 
3104 	spin_lock_irqsave(ap->lock, flags);
3105 	reinit_completion(&ap->park_req_pending);
3106 	ata_for_each_link(link, ap, EDGE) {
3107 		ata_for_each_dev(dev, link, ALL) {
3108 			struct ata_eh_info *ehi = &link->eh_info;
3109 
3110 			link->eh_context.i.dev_action[dev->devno] |=
3111 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
3112 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
3113 		}
3114 	}
3115 	spin_unlock_irqrestore(ap->lock, flags);
3116 }
3117 
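/*
 * Issue the head unload (park) or CHECK POWER MODE (unpark) command
 * to @dev.  The taskfile below is IDLE IMMEDIATE with UNLOAD FEATURE
 * per the ATA-8 spec: feature 0x44 plus the 0x4c/0x4e/0x55 LBA
 * signature request the unload, and the device acknowledges a
 * successful unload by returning 0xc4 in the LBA low register.
 */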
3118 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
3119 {
3120 	struct ata_eh_context *ehc = &dev->link->eh_context;
3121 	struct ata_taskfile tf;
3122 	unsigned int err_mask;
3123 
3124 	ata_tf_init(dev, &tf);
3125 	if (park) {
3126 		ehc->unloaded_mask |= 1 << dev->devno;
3127 		tf.command = ATA_CMD_IDLEIMMEDIATE;
3128 		tf.feature = 0x44;
3129 		tf.lbal = 0x4c;
3130 		tf.lbam = 0x4e;
3131 		tf.lbah = 0x55;
3132 	} else {
3133 		ehc->unloaded_mask &= ~(1 << dev->devno);
3134 		tf.command = ATA_CMD_CHK_POWER;
3135 	}
3136 
3137 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
3138 	tf.protocol = ATA_PROT_NODATA;
3139 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3140 	if (park && (err_mask || tf.lbal != 0xc4)) {
3141 		ata_dev_err(dev, "head unload failed!\n");
3142 		ehc->unloaded_mask &= ~(1 << dev->devno);
3143 	}
3144 }
3145 
3146 static int ata_eh_revalidate_and_attach(struct ata_link *link,
3147 					struct ata_device **r_failed_dev)
3148 {
3149 	struct ata_port *ap = link->ap;
3150 	struct ata_eh_context *ehc = &link->eh_context;
3151 	struct ata_device *dev;
3152 	unsigned int new_mask = 0;
3153 	unsigned long flags;
3154 	int rc = 0;
3155 
3156 	DPRINTK("ENTER\n");
3157 
3158 	/* For PATA drive side cable detection to work, IDENTIFY must
3159 	 * be done backwards such that PDIAG- is released by the slave
3160 	 * device before the master device is identified.
3161 	 */
3162 	ata_for_each_dev(dev, link, ALL_REVERSE) {
3163 		unsigned int action = ata_eh_dev_action(dev);
3164 		unsigned int readid_flags = 0;
3165 
3166 		if (ehc->i.flags & ATA_EHI_DID_RESET)
3167 			readid_flags |= ATA_READID_POSTRESET;
3168 
3169 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
3170 			WARN_ON(dev->class == ATA_DEV_PMP);
3171 
3172 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
3173 				rc = -EIO;
3174 				goto err;
3175 			}
3176 
3177 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
3178 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
3179 						readid_flags);
3180 			if (rc)
3181 				goto err;
3182 
3183 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
3184 
3185 			/* Configuration may have changed, reconfigure
3186 			 * transfer mode.
3187 			 */
3188 			ehc->i.flags |= ATA_EHI_SETMODE;
3189 
3190 			/* schedule the scsi_rescan_device() here */
3191 			schedule_work(&(ap->scsi_rescan_task));
3192 		} else if (dev->class == ATA_DEV_UNKNOWN &&
3193 			   ehc->tries[dev->devno] &&
3194 			   ata_class_enabled(ehc->classes[dev->devno])) {
3195 			/* Temporarily set dev->class; it will be
3196 			 * permanently set once all configurations are
3197 			 * complete.  This is necessary because new
3198 			 * device configuration is done in two
3199 			 * separate loops.
3200 			 */
3201 			dev->class = ehc->classes[dev->devno];
3202 
3203 			if (dev->class == ATA_DEV_PMP)
3204 				rc = sata_pmp_attach(dev);
3205 			else
3206 				rc = ata_dev_read_id(dev, &dev->class,
3207 						     readid_flags, dev->id);
3208 
3209 			/* read_id might have changed class, store and reset */
3210 			ehc->classes[dev->devno] = dev->class;
3211 			dev->class = ATA_DEV_UNKNOWN;
3212 
3213 			switch (rc) {
3214 			case 0:
3215 				/* clear error info accumulated during probe */
3216 				ata_ering_clear(&dev->ering);
3217 				new_mask |= 1 << dev->devno;
3218 				break;
3219 			case -ENOENT:
3220 				/* IDENTIFY was issued to non-existent
3221 				 * device.  No need to reset.  Just
3222 				 * thaw and ignore the device.
3223 				 */
3224 				ata_eh_thaw_port(ap);
3225 				break;
3226 			default:
3227 				goto err;
3228 			}
3229 		}
3230 	}
3231 
3232 	/* PDIAG- should have been released, ask cable type if post-reset */
3233 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3234 		if (ap->ops->cable_detect)
3235 			ap->cbl = ap->ops->cable_detect(ap);
3236 		ata_force_cbl(ap);
3237 	}
3238 
3239 	/* Configure new devices forward such that the user doesn't see
3240 	 * device detection messages backwards.
3241 	 */
3242 	ata_for_each_dev(dev, link, ALL) {
3243 		if (!(new_mask & (1 << dev->devno)))
3244 			continue;
3245 
3246 		dev->class = ehc->classes[dev->devno];
3247 
3248 		if (dev->class == ATA_DEV_PMP)
3249 			continue;
3250 
3251 		ehc->i.flags |= ATA_EHI_PRINTINFO;
3252 		rc = ata_dev_configure(dev);
3253 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3254 		if (rc) {
3255 			dev->class = ATA_DEV_UNKNOWN;
3256 			goto err;
3257 		}
3258 
3259 		spin_lock_irqsave(ap->lock, flags);
3260 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3261 		spin_unlock_irqrestore(ap->lock, flags);
3262 
3263 		/* new device discovered, configure xfermode */
3264 		ehc->i.flags |= ATA_EHI_SETMODE;
3265 	}
3266 
3267 	return 0;
3268 
3269  err:
3270 	*r_failed_dev = dev;
3271 	DPRINTK("EXIT rc=%d\n", rc);
3272 	return rc;
3273 }
3274 
3275 /**
3276  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3277  *	@link: link on which timings will be programmed
3278  *	@r_failed_dev: out parameter for failed device
3279  *
3280  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3281  *	ata_set_mode() fails, pointer to the failing device is
3282  *	returned in @r_failed_dev.
3283  *
3284  *	LOCKING:
3285  *	PCI/etc. bus probe sem.
3286  *
3287  *	RETURNS:
3288  *	0 on success, negative errno otherwise
3289  */
3290 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3291 {
3292 	struct ata_port *ap = link->ap;
3293 	struct ata_device *dev;
3294 	int rc;
3295 
3296 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3297 	ata_for_each_dev(dev, link, ENABLED) {
3298 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3299 			struct ata_ering_entry *ent;
3300 
3301 			ent = ata_ering_top(&dev->ering);
3302 			if (ent)
3303 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3304 		}
3305 	}
3306 
3307 	/* has private set_mode? */
3308 	if (ap->ops->set_mode)
3309 		rc = ap->ops->set_mode(link, r_failed_dev);
3310 	else
3311 		rc = ata_do_set_mode(link, r_failed_dev);
3312 
3313 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3314 	ata_for_each_dev(dev, link, ENABLED) {
3315 		struct ata_eh_context *ehc = &link->eh_context;
3316 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3317 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3318 
3319 		if (dev->xfer_mode != saved_xfer_mode ||
3320 		    ata_ncq_enabled(dev) != saved_ncq)
3321 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3322 	}
3323 
3324 	return rc;
3325 }
3326 
3327 /**
3328  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3329  *	@dev: ATAPI device to clear UA for
3330  *
3331  *	Resets and other operations can make an ATAPI device raise
3332  *	UNIT ATTENTION which causes the next operation to fail.  This
3333  *	function clears UA.
3334  *
3335  *	LOCKING:
3336  *	EH context (may sleep).
3337  *
3338  *	RETURNS:
3339  *	0 on success, -errno on failure.
3340  */
3341 static int atapi_eh_clear_ua(struct ata_device *dev)
3342 {
3343 	int i;
3344 
3345 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3346 		u8 *sense_buffer = dev->link->ap->sector_buf;
3347 		u8 sense_key = 0;
3348 		unsigned int err_mask;
3349 
3350 		err_mask = atapi_eh_tur(dev, &sense_key);
3351 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3352 			ata_dev_warn(dev,
3353 				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3354 				     err_mask);
3355 			return -EIO;
3356 		}
3357 
3358 		if (!err_mask || sense_key != UNIT_ATTENTION)
3359 			return 0;
3360 
3361 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3362 		if (err_mask) {
3363 			ata_dev_warn(dev, "failed to clear "
3364 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3365 			return -EIO;
3366 		}
3367 	}
3368 
3369 	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3370 		     ATA_EH_UA_TRIES);
3371 
3372 	return 0;
3373 }
3374 
3375 /**
3376  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3377  *	@dev: ATA device which may need FLUSH retry
3378  *
3379  *	If @dev failed FLUSH, it needs to be reported to the upper
3380  *	layer immediately as it means that @dev failed to remap and
3381  *	already lost at least a sector, and further FLUSH retries
3382  *	won't make any difference to the lost sector.  However, if
3383  *	FLUSH failed for other reasons, for example a transmission
3384  *	error, FLUSH needs to be retried.
3385  *
3386  *	This function determines whether FLUSH failure retry is
3387  *	necessary and performs it if so.
3388  *
3389  *	RETURNS:
3390  *	0 if EH can continue, -errno if EH needs to be repeated.
3391  */
3392 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3393 {
3394 	struct ata_link *link = dev->link;
3395 	struct ata_port *ap = link->ap;
3396 	struct ata_queued_cmd *qc;
3397 	struct ata_taskfile tf;
3398 	unsigned int err_mask;
3399 	int rc = 0;
3400 
3401 	/* did flush fail for this device? */
3402 	if (!ata_tag_valid(link->active_tag))
3403 		return 0;
3404 
3405 	qc = __ata_qc_from_tag(ap, link->active_tag);
3406 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3407 			       qc->tf.command != ATA_CMD_FLUSH))
3408 		return 0;
3409 
3410 	/* if the device failed it, it should be reported to upper layers */
3411 	if (qc->err_mask & AC_ERR_DEV)
3412 		return 0;
3413 
3414 	/* flush failed for some other reason, give it another shot */
3415 	ata_tf_init(dev, &tf);
3416 
3417 	tf.command = qc->tf.command;
3418 	tf.flags |= ATA_TFLAG_DEVICE;
3419 	tf.protocol = ATA_PROT_NODATA;
3420 
3421 	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
3422 		       tf.command, qc->err_mask);
3423 
3424 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3425 	if (!err_mask) {
3426 		/*
3427 		 * FLUSH is complete but there's no way to
3428 		 * successfully complete a failed command from EH.
3429 		 * Making sure a retry is allowed at least once and
3430 		 * retrying it should do the trick - whatever was in
3431 		 * the cache is already on the platter and this won't
3432 		 * cause an infinite loop.
3433 		 */
3434 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3435 	} else {
3436 		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
3437 			       err_mask);
3438 		rc = -EIO;
3439 
3440 		/* if device failed it, report it to upper layers */
3441 		if (err_mask & AC_ERR_DEV) {
3442 			qc->err_mask |= AC_ERR_DEV;
3443 			qc->result_tf = tf;
3444 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3445 				rc = 0;
3446 		}
3447 	}
3448 	return rc;
3449 }
3450 
3451 /**
3452  *	ata_eh_set_lpm - configure SATA interface power management
3453  *	@link: link to configure power management
3454  *	@policy: the link power management policy
3455  *	@r_failed_dev: out parameter for failed device
3456  *
3457  *	Enable SATA Interface power management.  This will enable
3458  *	Device Interface Power Management (DIPM) for min_power and
3459  *	medium_power_with_dipm policies, and then call driver specific
3460  *	callbacks for enabling Host Initiated Power management.
3461  *
3462  *	LOCKING:
3463  *	EH context.
3464  *
3465  *	RETURNS:
3466  *	0 on success, -errno on failure.
3467  */
3468 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3469 			  struct ata_device **r_failed_dev)
3470 {
3471 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3472 	struct ata_eh_context *ehc = &link->eh_context;
3473 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3474 	enum ata_lpm_policy old_policy = link->lpm_policy;
3475 	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
3476 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3477 	unsigned int err_mask;
3478 	int rc;
3479 
3480 	/* if the link or host doesn't do LPM, noop */
3481 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3482 		return 0;
3483 
3484 	/*
3485 	 * DIPM is enabled only for MIN_POWER and MED_POWER_WITH_DIPM
3486 	 * as some devices misbehave when the host NACKs the transition
3487 	 * to SLUMBER.  Order device and link configurations such that
3488 	 * the host always allows DIPM requests.
3489 	 */
3490 	ata_for_each_dev(dev, link, ENABLED) {
3491 		bool hipm = ata_id_has_hipm(dev->id);
3492 		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3493 
3494 		/* find the first enabled and LPM enabled devices */
3495 		if (!link_dev)
3496 			link_dev = dev;
3497 
3498 		if (!lpm_dev && (hipm || dipm))
3499 			lpm_dev = dev;
3500 
3501 		hints &= ~ATA_LPM_EMPTY;
3502 		if (!hipm)
3503 			hints &= ~ATA_LPM_HIPM;
3504 
3505 		/* disable DIPM before changing link config */
3506 		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
3507 			err_mask = ata_dev_set_feature(dev,
3508 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
3509 			if (err_mask && err_mask != AC_ERR_DEV) {
3510 				ata_dev_warn(dev,
3511 					     "failed to disable DIPM, Emask 0x%x\n",
3512 					     err_mask);
3513 				rc = -EIO;
3514 				goto fail;
3515 			}
3516 		}
3517 	}
3518 
3519 	if (ap) {
3520 		rc = ap->ops->set_lpm(link, policy, hints);
3521 		if (!rc && ap->slave_link)
3522 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3523 	} else
3524 		rc = sata_pmp_set_lpm(link, policy, hints);
3525 
3526 	/*
3527 	 * Attribute link config failure to the first (LPM) enabled
3528 	 * device on the link.
3529 	 */
3530 	if (rc) {
3531 		if (rc == -EOPNOTSUPP) {
3532 			link->flags |= ATA_LFLAG_NO_LPM;
3533 			return 0;
3534 		}
3535 		dev = lpm_dev ? lpm_dev : link_dev;
3536 		goto fail;
3537 	}
3538 
3539 	/*
3540 	 * Low level driver acked the transition.  Issue DIPM command
3541 	 * with the new policy set.
3542 	 */
3543 	link->lpm_policy = policy;
3544 	if (ap && ap->slave_link)
3545 		ap->slave_link->lpm_policy = policy;
3546 
3547 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
3548 	ata_for_each_dev(dev, link, ENABLED) {
3549 		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
3550 		    ata_id_has_dipm(dev->id)) {
3551 			err_mask = ata_dev_set_feature(dev,
3552 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
3553 			if (err_mask && err_mask != AC_ERR_DEV) {
3554 				ata_dev_warn(dev,
3555 					"failed to enable DIPM, Emask 0x%x\n",
3556 					err_mask);
3557 				rc = -EIO;
3558 				goto fail;
3559 			}
3560 		}
3561 	}
3562 
3563 	link->last_lpm_change = jiffies;
3564 	link->flags |= ATA_LFLAG_CHANGED;
3565 
3566 	return 0;
3567 
3568 fail:
3569 	/* restore the old policy */
3570 	link->lpm_policy = old_policy;
3571 	if (ap && ap->slave_link)
3572 		ap->slave_link->lpm_policy = old_policy;
3573 
3574 	/* if no device or only one more chance is left, disable LPM */
3575 	if (!dev || ehc->tries[dev->devno] <= 2) {
3576 		ata_link_warn(link, "disabling LPM on the link\n");
3577 		link->flags |= ATA_LFLAG_NO_LPM;
3578 	}
3579 	if (r_failed_dev)
3580 		*r_failed_dev = dev;
3581 	return rc;
3582 }
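
/*
 * Editorial sketch, not part of the original file: roughly what the
 * ->set_lpm hook called above can look like in a low level driver.
 * The callback receives the requested policy plus the ATA_LPM_EMPTY /
 * ATA_LPM_HIPM hints computed by ata_eh_set_lpm(), and returning
 * -EOPNOTSUPP makes EH latch ATA_LFLAG_NO_LPM on the link.  The
 * function name and the PHY helpers below are hypothetical.
 *
 *	static int example_set_lpm(struct ata_link *link,
 *				   enum ata_lpm_policy policy,
 *				   unsigned hints)
 *	{
 *		switch (policy) {
 *		case ATA_LPM_MAX_POWER:
 *			return example_phy_disable_lpm(link);
 *		case ATA_LPM_MED_POWER:
 *		case ATA_LPM_MED_POWER_WITH_DIPM:
 *			return example_phy_allow_partial(link);
 *		case ATA_LPM_MIN_POWER:
 *			return example_phy_allow_slumber(link);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */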
3583 
3584 int ata_link_nr_enabled(struct ata_link *link)
3585 {
3586 	struct ata_device *dev;
3587 	int cnt = 0;
3588 
3589 	ata_for_each_dev(dev, link, ENABLED)
3590 		cnt++;
3591 	return cnt;
3592 }
3593 
3594 static int ata_link_nr_vacant(struct ata_link *link)
3595 {
3596 	struct ata_device *dev;
3597 	int cnt = 0;
3598 
3599 	ata_for_each_dev(dev, link, ALL)
3600 		if (dev->class == ATA_DEV_UNKNOWN)
3601 			cnt++;
3602 	return cnt;
3603 }
3604 
3605 static int ata_eh_skip_recovery(struct ata_link *link)
3606 {
3607 	struct ata_port *ap = link->ap;
3608 	struct ata_eh_context *ehc = &link->eh_context;
3609 	struct ata_device *dev;
3610 
3611 	/* skip disabled links */
3612 	if (link->flags & ATA_LFLAG_DISABLED)
3613 		return 1;
3614 
3615 	/* skip if explicitly requested */
3616 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3617 		return 1;
3618 
3619 	/* thaw frozen port and recover failed devices */
3620 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3621 		return 0;
3622 
3623 	/* reset at least once if reset is requested */
3624 	if ((ehc->i.action & ATA_EH_RESET) &&
3625 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3626 		return 0;
3627 
3628 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3629 	ata_for_each_dev(dev, link, ALL) {
3630 		if (dev->class == ATA_DEV_UNKNOWN &&
3631 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3632 			return 0;
3633 	}
3634 
3635 	return 1;
3636 }
3637 
3638 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3639 {
3640 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3641 	u64 now = get_jiffies_64();
3642 	int *trials = void_arg;
3643 
3644 	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3645 	    (ent->timestamp < now - min(now, interval)))
3646 		return -1;
3647 
3648 	(*trials)++;
3649 	return 0;
3650 }
3651 
3652 static int ata_eh_schedule_probe(struct ata_device *dev)
3653 {
3654 	struct ata_eh_context *ehc = &dev->link->eh_context;
3655 	struct ata_link *link = ata_dev_phys_link(dev);
3656 	int trials = 0;
3657 
3658 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3659 	    (ehc->did_probe_mask & (1 << dev->devno)))
3660 		return 0;
3661 
3662 	ata_eh_detach_dev(dev);
3663 	ata_dev_init(dev);
3664 	ehc->did_probe_mask |= (1 << dev->devno);
3665 	ehc->i.action |= ATA_EH_RESET;
3666 	ehc->saved_xfer_mode[dev->devno] = 0;
3667 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3668 
3669 	/* the link may be in deep sleep, wake it up */
3670 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3671 		if (ata_is_host_link(link))
3672 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3673 					       ATA_LPM_EMPTY);
3674 		else
3675 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3676 					 ATA_LPM_EMPTY);
3677 	}
3678 
3679 	/* Record and count probe trials on the ering.  The specific
3680 	 * error mask used is irrelevant.  Because a successful device
3681 	 * detection clears the ering, this count accumulates only if
3682 	 * there are consecutive failed probes.
3683 	 *
3684 	 * If, counting the trial just recorded, more than
3685 	 * ATA_EH_PROBE_TRIALS probes failed within the last
3686 	 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced to 1.5Gbps.
3687 	 *
3688 	 * This is to work around cases where failed link speed
3689 	 * negotiation results in device misdetection leading to
3690 	 * infinite DEVXCHG or PHRDY CHG events.
3691 	 */
3692 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3693 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3694 
3695 	if (trials > ATA_EH_PROBE_TRIALS)
3696 		sata_down_spd_limit(link, 1);
3697 
3698 	return 1;
3699 }
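
/*
 * Worked example (editorial, using the defaults above): with
 * ATA_EH_PROBE_TRIALS == 2 and ATA_EH_PROBE_TRIAL_INTERVAL == 60s,
 * the first two consecutive failed probes record trials == 1 and
 * trials == 2 and change nothing; the third failed probe within the
 * interval makes trials == 3 > ATA_EH_PROBE_TRIALS and forces the
 * link down to 1.5Gbps via sata_down_spd_limit(link, 1).  Any
 * successful detection in between clears the ering and restarts the
 * count.
 */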
3700 
3701 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3702 {
3703 	struct ata_eh_context *ehc = &dev->link->eh_context;
3704 
3705 	/* -EAGAIN from EH routine indicates retry without prejudice.
3706 	 * The requester is responsible for ensuring forward progress.
3707 	 */
3708 	if (err != -EAGAIN)
3709 		ehc->tries[dev->devno]--;
3710 
3711 	switch (err) {
3712 	case -ENODEV:
3713 		/* device missing or wrong IDENTIFY data, schedule probing */
3714 		ehc->i.probe_mask |= (1 << dev->devno);
3715 		/* fall through */
3716 	case -EINVAL:
3717 		/* give it just one more chance */
3718 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3719 		/* fall through */
3720 	case -EIO:
3721 		if (ehc->tries[dev->devno] == 1) {
3722 			/* This is the last chance, better to slow
3723 			 * down than lose it.
3724 			 */
3725 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3726 			if (dev->pio_mode > XFER_PIO_0)
3727 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3728 		}
3729 	}
3730 
3731 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3732 		/* disable device if it has used up all its chances */
3733 		ata_dev_disable(dev);
3734 
3735 		/* detach if offline */
3736 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3737 			ata_eh_detach_dev(dev);
3738 
3739 		/* schedule probe if necessary */
3740 		if (ata_eh_schedule_probe(dev)) {
3741 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3742 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3743 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3744 		}
3745 
3746 		return 1;
3747 	} else {
3748 		ehc->i.action |= ATA_EH_RESET;
3749 		return 0;
3750 	}
3751 }
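
/*
 * Editorial note on the ladder above: the fall-throughs compose.  For
 * example, -ENODEV both schedules a probe and clamps the device to at
 * most one remaining try, and when exactly one try is left the -EIO
 * case also steps the SATA link speed and PIO mode down before that
 * final attempt.
 */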
3752 
3753 /**
3754  *	ata_eh_recover - recover host port after error
3755  *	@ap: host port to recover
3756  *	@prereset: prereset method (can be NULL)
3757  *	@softreset: softreset method (can be NULL)
3758  *	@hardreset: hardreset method (can be NULL)
3759  *	@postreset: postreset method (can be NULL)
3760  *	@r_failed_link: out parameter for failed link
3761  *
3762  *	This is the alpha and omega, eum and yang, heart and soul of
3763  *	libata exception handling.  On entry, actions required to
3764  *	recover each link and hotplug requests are recorded in the
3765  *	link's eh_context.  This function executes all the operations
3766  *	with appropriate retries and fallbacks to resurrect failed
3767  *	devices, detach goners and greet newcomers.
3768  *
3769  *	LOCKING:
3770  *	Kernel thread context (may sleep).
3771  *
3772  *	RETURNS:
3773  *	0 on success, -errno on failure.
3774  */
3775 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3776 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3777 		   ata_postreset_fn_t postreset,
3778 		   struct ata_link **r_failed_link)
3779 {
3780 	struct ata_link *link;
3781 	struct ata_device *dev;
3782 	int rc, nr_fails;
3783 	unsigned long flags, deadline;
3784 
3785 	DPRINTK("ENTER\n");
3786 
3787 	/* prep for recovery */
3788 	ata_for_each_link(link, ap, EDGE) {
3789 		struct ata_eh_context *ehc = &link->eh_context;
3790 
3791 		/* re-enable link? */
3792 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3793 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3794 			spin_lock_irqsave(ap->lock, flags);
3795 			link->flags &= ~ATA_LFLAG_DISABLED;
3796 			spin_unlock_irqrestore(ap->lock, flags);
3797 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3798 		}
3799 
3800 		ata_for_each_dev(dev, link, ALL) {
3801 			if (link->flags & ATA_LFLAG_NO_RETRY)
3802 				ehc->tries[dev->devno] = 1;
3803 			else
3804 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3805 
3806 			/* collect port action mask recorded in dev actions */
3807 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3808 					 ~ATA_EH_PERDEV_MASK;
3809 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3810 
3811 			/* process hotplug request */
3812 			if (dev->flags & ATA_DFLAG_DETACH)
3813 				ata_eh_detach_dev(dev);
3814 
3815 			/* schedule probe if necessary */
3816 			if (!ata_dev_enabled(dev))
3817 				ata_eh_schedule_probe(dev);
3818 		}
3819 	}
3820 
3821  retry:
3822 	rc = 0;
3823 
3824 	/* if UNLOADING, finish immediately */
3825 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3826 		goto out;
3827 
3828 	/* prep for EH */
3829 	ata_for_each_link(link, ap, EDGE) {
3830 		struct ata_eh_context *ehc = &link->eh_context;
3831 
3832 		/* skip EH if possible. */
3833 		if (ata_eh_skip_recovery(link))
3834 			ehc->i.action = 0;
3835 
3836 		ata_for_each_dev(dev, link, ALL)
3837 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3838 	}
3839 
3840 	/* reset */
3841 	ata_for_each_link(link, ap, EDGE) {
3842 		struct ata_eh_context *ehc = &link->eh_context;
3843 
3844 		if (!(ehc->i.action & ATA_EH_RESET))
3845 			continue;
3846 
3847 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3848 				  prereset, softreset, hardreset, postreset);
3849 		if (rc) {
3850 			ata_link_err(link, "reset failed, giving up\n");
3851 			goto out;
3852 		}
3853 	}
3854 
3855 	do {
3856 		unsigned long now;
3857 
3858 		/*
3859 		 * clears ATA_EH_PARK in eh_info and resets
3860 		 * ap->park_req_pending
3861 		 */
3862 		ata_eh_pull_park_action(ap);
3863 
3864 		deadline = jiffies;
3865 		ata_for_each_link(link, ap, EDGE) {
3866 			ata_for_each_dev(dev, link, ALL) {
3867 				struct ata_eh_context *ehc = &link->eh_context;
3868 				unsigned long tmp;
3869 
3870 				if (dev->class != ATA_DEV_ATA &&
3871 				    dev->class != ATA_DEV_ZAC)
3872 					continue;
3873 				if (!(ehc->i.dev_action[dev->devno] &
3874 				      ATA_EH_PARK))
3875 					continue;
3876 				tmp = dev->unpark_deadline;
3877 				if (time_before(deadline, tmp))
3878 					deadline = tmp;
3879 				else if (time_before_eq(tmp, jiffies))
3880 					continue;
3881 				if (ehc->unloaded_mask & (1 << dev->devno))
3882 					continue;
3883 
3884 				ata_eh_park_issue_cmd(dev, 1);
3885 			}
3886 		}
3887 
3888 		now = jiffies;
3889 		if (time_before_eq(deadline, now))
3890 			break;
3891 
3892 		ata_eh_release(ap);
3893 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3894 						       deadline - now);
3895 		ata_eh_acquire(ap);
3896 	} while (deadline);
3897 	ata_for_each_link(link, ap, EDGE) {
3898 		ata_for_each_dev(dev, link, ALL) {
3899 			if (!(link->eh_context.unloaded_mask &
3900 			      (1 << dev->devno)))
3901 				continue;
3902 
3903 			ata_eh_park_issue_cmd(dev, 0);
3904 			ata_eh_done(link, dev, ATA_EH_PARK);
3905 		}
3906 	}
3907 
3908 	/* the rest */
3909 	nr_fails = 0;
3910 	ata_for_each_link(link, ap, PMP_FIRST) {
3911 		struct ata_eh_context *ehc = &link->eh_context;
3912 
3913 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
3914 			goto config_lpm;
3915 
3916 		/* revalidate existing devices and attach new ones */
3917 		rc = ata_eh_revalidate_and_attach(link, &dev);
3918 		if (rc)
3919 			goto rest_fail;
3920 
3921 		/* if a PMP got attached, return; PMP EH will take care of it */
3922 		if (link->device->class == ATA_DEV_PMP) {
3923 			ehc->i.action = 0;
3924 			return 0;
3925 		}
3926 
3927 		/* configure transfer mode if necessary */
3928 		if (ehc->i.flags & ATA_EHI_SETMODE) {
3929 			rc = ata_set_mode(link, &dev);
3930 			if (rc)
3931 				goto rest_fail;
3932 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3933 		}
3934 
3935 		/* If reset has been issued, clear UA to avoid
3936 		 * disrupting the current users of the device.
3937 		 */
3938 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3939 			ata_for_each_dev(dev, link, ALL) {
3940 				if (dev->class != ATA_DEV_ATAPI)
3941 					continue;
3942 				rc = atapi_eh_clear_ua(dev);
3943 				if (rc)
3944 					goto rest_fail;
3945 				if (zpodd_dev_enabled(dev))
3946 					zpodd_post_poweron(dev);
3947 			}
3948 		}
3949 
3950 		/* retry flush if necessary */
3951 		ata_for_each_dev(dev, link, ALL) {
3952 			if (dev->class != ATA_DEV_ATA &&
3953 			    dev->class != ATA_DEV_ZAC)
3954 				continue;
3955 			rc = ata_eh_maybe_retry_flush(dev);
3956 			if (rc)
3957 				goto rest_fail;
3958 		}
3959 
3960 	config_lpm:
3961 		/* configure link power saving */
3962 		if (link->lpm_policy != ap->target_lpm_policy) {
3963 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3964 			if (rc)
3965 				goto rest_fail;
3966 		}
3967 
3968 		/* this link is okay now */
3969 		ehc->i.flags = 0;
3970 		continue;
3971 
3972 	rest_fail:
3973 		nr_fails++;
3974 		if (dev)
3975 			ata_eh_handle_dev_fail(dev, rc);
3976 
3977 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3978 			/* PMP reset requires working host port.
3979 			 * Can't retry if it's frozen.
3980 			 */
3981 			if (sata_pmp_attached(ap))
3982 				goto out;
3983 			break;
3984 		}
3985 	}
3986 
3987 	if (nr_fails)
3988 		goto retry;
3989 
3990  out:
3991 	if (rc && r_failed_link)
3992 		*r_failed_link = link;
3993 
3994 	DPRINTK("EXIT, rc=%d\n", rc);
3995 	return rc;
3996 }
3997 
3998 /**
3999  *	ata_eh_finish - finish up EH
4000  *	@ap: host port to finish EH for
4001  *
4002  *	Recovery is complete.  Clean up EH states and retry or finish
4003  *	failed qcs.
4004  *
4005  *	LOCKING:
4006  *	None.
4007  */
4008 void ata_eh_finish(struct ata_port *ap)
4009 {
4010 	int tag;
4011 
4012 	/* retry or finish qcs */
4013 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
4014 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
4015 
4016 		if (!(qc->flags & ATA_QCFLAG_FAILED))
4017 			continue;
4018 
4019 		if (qc->err_mask) {
4020 			/* FIXME: Once EH migration is complete,
4021 			 * generate sense data in this function,
4022 			 * considering both err_mask and tf.
4023 			 */
4024 			if (qc->flags & ATA_QCFLAG_RETRY)
4025 				ata_eh_qc_retry(qc);
4026 			else
4027 				ata_eh_qc_complete(qc);
4028 		} else {
4029 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
4030 				ata_eh_qc_complete(qc);
4031 			} else {
4032 				/* feed zero TF to sense generation */
4033 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
4034 				ata_eh_qc_retry(qc);
4035 			}
4036 		}
4037 	}
4038 
4039 	/* make sure nr_active_links is zero after EH */
4040 	WARN_ON(ap->nr_active_links);
4041 	ap->nr_active_links = 0;
4042 }
4043 
4044 /**
4045  *	ata_do_eh - do standard error handling
4046  *	@ap: host port to handle error for
4047  *
4049  *	@softreset: softreset method (can be NULL)
4050  *	@hardreset: hardreset method (can be NULL)
4051  *	@postreset: postreset method (can be NULL)
4052  *
4053  *	Perform standard error handling sequence.
4054  *
4055  *	LOCKING:
4056  *	Kernel thread context (may sleep).
4057  */
4058 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
4059 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
4060 	       ata_postreset_fn_t postreset)
4061 {
4062 	struct ata_device *dev;
4063 	int rc;
4064 
4065 	ata_eh_autopsy(ap);
4066 	ata_eh_report(ap);
4067 
4068 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
4069 			    NULL);
4070 	if (rc) {
4071 		ata_for_each_dev(dev, &ap->link, ALL)
4072 			ata_dev_disable(dev);
4073 	}
4074 
4075 	ata_eh_finish(ap);
4076 }
4077 
4078 /**
4079  *	ata_std_error_handler - standard error handler
4080  *	@ap: host port to handle error for
4081  *
4082  *	Standard error handler
4083  *
4084  *	LOCKING:
4085  *	Kernel thread context (may sleep).
4086  */
4087 void ata_std_error_handler(struct ata_port *ap)
4088 {
4089 	struct ata_port_operations *ops = ap->ops;
4090 	ata_reset_fn_t hardreset = ops->hardreset;
4091 
4092 	/* ignore built-in hardreset if SCR access is not available */
4093 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
4094 		hardreset = NULL;
4095 
4096 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
4097 }
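
/*
 * Editorial sketch, not part of the original file: a low level driver
 * typically reaches this function through its ata_port_operations,
 * either by inheriting a stock ops table whose ->error_handler already
 * points here or by setting it explicitly, e.g.:
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.error_handler	= ata_std_error_handler,
 *	};
 *
 * "example_port_ops" is hypothetical; ata_bmdma_port_ops is one of the
 * stock libata ops tables.  The SCSI EH thread then invokes this
 * handler when qcs on the port fail or time out.
 */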
4098 
4099 #ifdef CONFIG_PM
4100 /**
4101  *	ata_eh_handle_port_suspend - perform port suspend operation
4102  *	@ap: port to suspend
4103  *
4104  *	Suspend @ap.
4105  *
4106  *	LOCKING:
4107  *	Kernel thread context (may sleep).
4108  */
4109 static void ata_eh_handle_port_suspend(struct ata_port *ap)
4110 {
4111 	unsigned long flags;
4112 	int rc = 0;
4113 	struct ata_device *dev;
4114 
4115 	/* are we suspending? */
4116 	spin_lock_irqsave(ap->lock, flags);
4117 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4118 	    ap->pm_mesg.event & PM_EVENT_RESUME) {
4119 		spin_unlock_irqrestore(ap->lock, flags);
4120 		return;
4121 	}
4122 	spin_unlock_irqrestore(ap->lock, flags);
4123 
4124 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
4125 
4126 	/*
4127 	 * If we have a ZPODD attached, check its zero
4128 	 * power ready status before the port is frozen.
4129 	 * Only needed for runtime suspend.
4130 	 */
4131 	if (PMSG_IS_AUTO(ap->pm_mesg)) {
4132 		ata_for_each_dev(dev, &ap->link, ENABLED) {
4133 			if (zpodd_dev_enabled(dev))
4134 				zpodd_on_suspend(dev);
4135 		}
4136 	}
4137 
4138 	/* tell ACPI we're suspending */
4139 	rc = ata_acpi_on_suspend(ap);
4140 	if (rc)
4141 		goto out;
4142 
4143 	/* suspend */
4144 	ata_eh_freeze_port(ap);
4145 
4146 	if (ap->ops->port_suspend)
4147 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
4148 
4149 	ata_acpi_set_state(ap, ap->pm_mesg);
4150  out:
4151 	/* update the flags */
4152 	spin_lock_irqsave(ap->lock, flags);
4153 
4154 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
4155 	if (rc == 0)
4156 		ap->pflags |= ATA_PFLAG_SUSPENDED;
4157 	else if (ap->pflags & ATA_PFLAG_FROZEN)
4158 		ata_port_schedule_eh(ap);
4159 
4160 	spin_unlock_irqrestore(ap->lock, flags);
4161 
4164 
4165 /**
4166  *	ata_eh_handle_port_resume - perform port resume operation
4167  *	@ap: port to resume
4168  *
4169  *	Resume @ap.
4170  *
4171  *	LOCKING:
4172  *	Kernel thread context (may sleep).
4173  */
4174 static void ata_eh_handle_port_resume(struct ata_port *ap)
4175 {
4176 	struct ata_link *link;
4177 	struct ata_device *dev;
4178 	unsigned long flags;
4179 
4180 	/* are we resuming? */
4181 	spin_lock_irqsave(ap->lock, flags);
4182 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
4183 	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
4184 		spin_unlock_irqrestore(ap->lock, flags);
4185 		return;
4186 	}
4187 	spin_unlock_irqrestore(ap->lock, flags);
4188 
4189 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
4190 
4191 	/*
4192 	 * Error timestamps are in jiffies, which doesn't advance while
4193 	 * suspended, and PHY events during resume aren't too uncommon.
4194 	 * When the two are combined, it can lead to unnecessary speed
4195 	 * downs if the machine is suspended and resumed repeatedly.
4196 	 * Clear error history.
4197 	 */
4198 	ata_for_each_link(link, ap, HOST_FIRST)
4199 		ata_for_each_dev(dev, link, ALL)
4200 			ata_ering_clear(&dev->ering);
4201 
4202 	ata_acpi_set_state(ap, ap->pm_mesg);
4203 
4204 	if (ap->ops->port_resume)
4205 		ap->ops->port_resume(ap);
4206 
4207 	/* tell ACPI that we're resuming */
4208 	ata_acpi_on_resume(ap);
4209 
4210 	/* update the flags */
4211 	spin_lock_irqsave(ap->lock, flags);
4212 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
4213 	spin_unlock_irqrestore(ap->lock, flags);
4214 }
4215 #endif /* CONFIG_PM */
4216