xref: /openbmc/linux/drivers/ata/libata-eh.c (revision 6c8ea89cecd780faa4f4c8ed8b3b6ab88f9fa841)
1 /*
2  *  libata-eh.c - libata error handling
3  *
4  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6  *		    on emails.
7  *
8  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9  *
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License as
13  *  published by the Free Software Foundation; either version 2, or
14  *  (at your option) any later version.
15  *
16  *  This program is distributed in the hope that it will be useful,
17  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  *  General Public License for more details.
20  *
21  *  You should have received a copy of the GNU General Public License
22  *  along with this program; see the file COPYING.  If not, write to
23  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24  *  USA.
25  *
26  *
27  *  libata documentation is available via 'make {ps|pdf}docs',
28  *  as Documentation/DocBook/libata.*
29  *
30  *  Hardware documentation available from http://www.t13.org/ and
31  *  http://www.sata-io.org/
32  *
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/blkdev.h>
37 #include <linux/pci.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_host.h>
40 #include <scsi/scsi_eh.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_dbg.h>
44 #include "../scsi/scsi_transport_api.h"
45 
46 #include <linux/libata.h>
47 
48 #include "libata.h"
49 
50 enum {
51 	/* speed down verdicts */
52 	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
53 	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
54 	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
55 	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),
56 
57 	/* error flags */
58 	ATA_EFLAG_IS_IO			= (1 << 0),
59 	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
60 	ATA_EFLAG_OLD_ER                = (1 << 31),
61 
62 	/* error categories */
63 	ATA_ECAT_NONE			= 0,
64 	ATA_ECAT_ATA_BUS		= 1,
65 	ATA_ECAT_TOUT_HSM		= 2,
66 	ATA_ECAT_UNK_DEV		= 3,
67 	ATA_ECAT_DUBIOUS_NONE		= 4,
68 	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
69 	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
70 	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
71 	ATA_ECAT_NR			= 8,
72 
73 	ATA_EH_CMD_DFL_TIMEOUT		=  5000,
74 
75 	/* always put at least this amount of time between resets */
76 	ATA_EH_RESET_COOL_DOWN		=  5000,
77 
78 	/* Waiting in ->prereset can never be reliable.  It's
79 	 * sometimes nice to wait there but it can't be depended upon;
80 	 * otherwise, we wouldn't be resetting.  Just give it enough
81 	 * time for most drives to spin up.
82 	 */
83 	ATA_EH_PRERESET_TIMEOUT		= 10000,
84 	ATA_EH_FASTDRAIN_INTERVAL	=  3000,
85 
86 	ATA_EH_UA_TRIES			= 5,
87 
88 	/* probe speed down parameters, see ata_eh_schedule_probe() */
89 	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
90 	ATA_EH_PROBE_TRIALS		= 2,
91 };
92 
93 /* The following table determines how we sequence resets.  Each entry
94  * represents the timeout for that try.  The first try can be soft or
95  * hardreset.  All others are hardreset if available.  In most cases
96  * the first reset w/ 10sec timeout should succeed.  Following entries
97  * are mostly for error handling, hotplug and slow devices.
98  */
99 static const unsigned long ata_eh_reset_timeouts[] = {
100 	10000,	/* most drives spin up by 10sec */
101 	10000,	/* > 99% working drives spin up before 20sec */
102 	35000,	/* give > 30 secs of idleness for slow devices */
103 	 5000,	/* and sweet one last chance */
104 	ULONG_MAX, /* > 1 min has elapsed, give up */
105 };
106 
107 static const unsigned long ata_eh_identify_timeouts[] = {
108 	 5000,	/* covers > 99% of successes and not too boring on failures */
109 	10000,  /* combined time till here is enough even for media access */
110 	30000,	/* for exceptionally slow devices */
111 	ULONG_MAX,
112 };
113 
114 static const unsigned long ata_eh_flush_timeouts[] = {
115 	15000,	/* be generous with flush */
116 	15000,  /* ditto */
117 	30000,	/* and even more generous */
118 	ULONG_MAX,
119 };
120 
121 static const unsigned long ata_eh_other_timeouts[] = {
122 	 5000,	/* same rationale as identify timeout */
123 	10000,	/* ditto */
124 	/* but no merciful 30sec for other commands, it just isn't worth it */
125 	ULONG_MAX,
126 };
127 
128 struct ata_eh_cmd_timeout_ent {
129 	const u8		*commands;
130 	const unsigned long	*timeouts;
131 };
132 
133 /* The following table determines timeouts to use for EH internal
134  * commands.  Each table entry is a command class and matches the
135  * commands the entry applies to and the timeout table to use.
136  *
137  * On the retry after a command timed out, the next timeout value from
138  * the table is used.  If the table doesn't contain further entries,
139  * the last value is used.
140  *
141  * ehc->cmd_timeout_idx keeps track of which timeout to use per
142  * command class, so if SET_FEATURES times out on the first try, the
143  * next try will use the second timeout value only for that class.
144  */
145 #define CMDS(cmds...)	(const u8 []){ cmds, 0 }
146 static const struct ata_eh_cmd_timeout_ent
147 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
148 	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
149 	  .timeouts = ata_eh_identify_timeouts, },
150 	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
151 	  .timeouts = ata_eh_other_timeouts, },
152 	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
153 	  .timeouts = ata_eh_other_timeouts, },
154 	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
155 	  .timeouts = ata_eh_other_timeouts, },
156 	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
157 	  .timeouts = ata_eh_other_timeouts, },
158 	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
159 	  .timeouts = ata_eh_flush_timeouts },
160 };
161 #undef CMDS
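
/*
 * Worked example of the tables above: EH's first internal IDENTIFY gets
 * 5s (ata_eh_identify_timeouts[0]); if it times out, the retry for that
 * device gets 10s and a further retry 30s.  Commands not listed in the
 * table always get ATA_EH_CMD_DFL_TIMEOUT (5s).
 */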
162 
163 static void __ata_port_freeze(struct ata_port *ap);
164 #ifdef CONFIG_PM
165 static void ata_eh_handle_port_suspend(struct ata_port *ap);
166 static void ata_eh_handle_port_resume(struct ata_port *ap);
167 #else /* CONFIG_PM */
168 static void ata_eh_handle_port_suspend(struct ata_port *ap)
169 { }
170 
171 static void ata_eh_handle_port_resume(struct ata_port *ap)
172 { }
173 #endif /* CONFIG_PM */
174 
175 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
176 				 va_list args)
177 {
178 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
179 				     ATA_EH_DESC_LEN - ehi->desc_len,
180 				     fmt, args);
181 }
182 
183 /**
184  *	__ata_ehi_push_desc - push error description without adding separator
185  *	@ehi: target EHI
186  *	@fmt: printf format string
187  *
188  *	Format string according to @fmt and append it to @ehi->desc.
189  *
190  *	LOCKING:
191  *	spin_lock_irqsave(host lock)
192  */
193 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
194 {
195 	va_list args;
196 
197 	va_start(args, fmt);
198 	__ata_ehi_pushv_desc(ehi, fmt, args);
199 	va_end(args);
200 }
201 
202 /**
203  *	ata_ehi_push_desc - push error description with separator
204  *	@ehi: target EHI
205  *	@fmt: printf format string
206  *
207  *	Format string according to @fmt and append it to @ehi->desc.
208  *	If @ehi->desc is not empty, ", " is added in-between.
209  *
210  *	LOCKING:
211  *	spin_lock_irqsave(host lock)
212  */
213 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
214 {
215 	va_list args;
216 
217 	if (ehi->desc_len)
218 		__ata_ehi_push_desc(ehi, ", ");
219 
220 	va_start(args, fmt);
221 	__ata_ehi_pushv_desc(ehi, fmt, args);
222 	va_end(args);
223 }
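
/*
 * Illustrative sketch (not part of the original file): how an LLDD
 * error path might build up an EH description with the helpers above.
 * "irq_stat" is a hypothetical controller register value.
 */
static inline void example_push_error_desc(struct ata_eh_info *ehi,
					   u32 irq_stat, u32 serror)
{
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	/* the second push is separated from the first by ", " */
	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
}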
224 
225 /**
226  *	ata_ehi_clear_desc - clean error description
227  *	@ehi: target EHI
228  *
229  *	Clear @ehi->desc.
230  *
231  *	LOCKING:
232  *	spin_lock_irqsave(host lock)
233  */
234 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
235 {
236 	ehi->desc[0] = '\0';
237 	ehi->desc_len = 0;
238 }
239 
240 /**
241  *	ata_port_desc - append port description
242  *	@ap: target ATA port
243  *	@fmt: printf format string
244  *
245  *	Format string according to @fmt and append it to port
246  *	description.  If port description is not empty, " " is added
247  *	in-between.  This function is to be used while initializing
248  *	ata_host.  The description is printed on host registration.
249  *
250  *	LOCKING:
251  *	None.
252  */
253 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
254 {
255 	va_list args;
256 
257 	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
258 
259 	if (ap->link.eh_info.desc_len)
260 		__ata_ehi_push_desc(&ap->link.eh_info, " ");
261 
262 	va_start(args, fmt);
263 	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
264 	va_end(args);
265 }
266 
267 #ifdef CONFIG_PCI
268 
269 /**
270  *	ata_port_pbar_desc - append PCI BAR description
271  *	@ap: target ATA port
272  *	@bar: target PCI BAR
273  *	@offset: offset into PCI BAR
274  *	@name: name of the area
275  *
276  *	If @offset is negative, this function formats a string which
277  *	contains the name, address, size and type of the BAR and
278  *	appends it to the port description.  If @offset is zero or
279  *	positive, only the name and the offset address are appended.
280  *
281  *	LOCKING:
282  *	None.
283  */
284 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
285 			const char *name)
286 {
287 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
288 	char *type = "";
289 	unsigned long long start, len;
290 
291 	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
292 		type = "m";
293 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
294 		type = "i";
295 
296 	start = (unsigned long long)pci_resource_start(pdev, bar);
297 	len = (unsigned long long)pci_resource_len(pdev, bar);
298 
299 	if (offset < 0)
300 		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
301 	else
302 		ata_port_desc(ap, "%s 0x%llx", name,
303 				start + (unsigned long long)offset);
304 }
305 
306 #endif /* CONFIG_PCI */
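
/*
 * Illustrative sketch: a hypothetical PCI LLDD describing its port
 * during host initialization.  BAR 5, the offset and the IRQ number
 * are assumptions for illustration; the resulting description is
 * printed once on host registration.
 */
static inline void example_describe_port(struct ata_port *ap)
{
	ata_port_desc(ap, "irq %d", 14);	/* assumed IRQ */
#ifdef CONFIG_PCI
	/* negative offset: name, size and type of the whole BAR */
	ata_port_pbar_desc(ap, 5, -1, "abar");
	/* non-negative offset: name and the offset address only */
	ata_port_pbar_desc(ap, 5, 0x100, "port");
#endif
}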
307 
308 static int ata_lookup_timeout_table(u8 cmd)
309 {
310 	int i;
311 
312 	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
313 		const u8 *cur;
314 
315 		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
316 			if (*cur == cmd)
317 				return i;
318 	}
319 
320 	return -1;
321 }
322 
323 /**
324  *	ata_internal_cmd_timeout - determine timeout for an internal command
325  *	@dev: target device
326  *	@cmd: internal command to be issued
327  *
328  *	Determine timeout for internal command @cmd for @dev.
329  *
330  *	LOCKING:
331  *	EH context.
332  *
333  *	RETURNS:
334  *	Determined timeout.
335  */
336 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
337 {
338 	struct ata_eh_context *ehc = &dev->link->eh_context;
339 	int ent = ata_lookup_timeout_table(cmd);
340 	int idx;
341 
342 	if (ent < 0)
343 		return ATA_EH_CMD_DFL_TIMEOUT;
344 
345 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
346 	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
347 }
348 
349 /**
350  *	ata_internal_cmd_timed_out - notification for internal command timeout
351  *	@dev: target device
352  *	@cmd: internal command which timed out
353  *
354  *	Notify EH that internal command @cmd for @dev timed out.  This
355  *	function should be called only for commands whose timeouts are
356  *	determined using ata_internal_cmd_timeout().
357  *
358  *	LOCKING:
359  *	EH context.
360  */
361 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
362 {
363 	struct ata_eh_context *ehc = &dev->link->eh_context;
364 	int ent = ata_lookup_timeout_table(cmd);
365 	int idx;
366 
367 	if (ent < 0)
368 		return;
369 
370 	idx = ehc->cmd_timeout_idx[dev->devno][ent];
371 	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
372 		ehc->cmd_timeout_idx[dev->devno][ent]++;
373 }
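
/*
 * Illustrative sketch of how the two helpers above cooperate (a
 * simplified version of what ata_exec_internal() does): each attempt
 * runs with the timeout from ata_internal_cmd_timeout(), and a timeout
 * bumps the per-class index so the next attempt waits longer.
 */
static inline unsigned long example_next_timeout(struct ata_device *dev,
						 u8 cmd, int timed_out)
{
	if (timed_out)
		ata_internal_cmd_timed_out(dev, cmd);	/* escalate */
	return ata_internal_cmd_timeout(dev, cmd);	/* ms for next try */
}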
374 
375 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
376 			     unsigned int err_mask)
377 {
378 	struct ata_ering_entry *ent;
379 
380 	WARN_ON(!err_mask);
381 
382 	ering->cursor++;
383 	ering->cursor %= ATA_ERING_SIZE;
384 
385 	ent = &ering->ring[ering->cursor];
386 	ent->eflags = eflags;
387 	ent->err_mask = err_mask;
388 	ent->timestamp = get_jiffies_64();
389 }
390 
391 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
392 {
393 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
394 
395 	if (ent->err_mask)
396 		return ent;
397 	return NULL;
398 }
399 
400 int ata_ering_map(struct ata_ering *ering,
401 		  int (*map_fn)(struct ata_ering_entry *, void *),
402 		  void *arg)
403 {
404 	int idx, rc = 0;
405 	struct ata_ering_entry *ent;
406 
407 	idx = ering->cursor;
408 	do {
409 		ent = &ering->ring[idx];
410 		if (!ent->err_mask)
411 			break;
412 		rc = map_fn(ent, arg);
413 		if (rc)
414 			break;
415 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
416 	} while (idx != ering->cursor);
417 
418 	return rc;
419 }
420 
421 int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
422 {
423 	ent->eflags |= ATA_EFLAG_OLD_ER;
424 	return 0;
425 }
426 
427 static void ata_ering_clear(struct ata_ering *ering)
428 {
429 	ata_ering_map(ering, ata_ering_clear_cb, NULL);
430 }
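
/*
 * Illustrative sketch: a map_fn callback for ata_ering_map() that
 * counts entries newer than a caller-supplied timestamp.  Returning
 * non-zero from the callback would stop the walk early.
 */
struct example_count_arg {
	u64 since;
	int nr;
};

static inline int example_count_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct example_count_arg *arg = void_arg;

	if (!(ent->eflags & ATA_EFLAG_OLD_ER) && ent->timestamp >= arg->since)
		arg->nr++;
	return 0;	/* keep walking */
}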
431 
432 static unsigned int ata_eh_dev_action(struct ata_device *dev)
433 {
434 	struct ata_eh_context *ehc = &dev->link->eh_context;
435 
436 	return ehc->i.action | ehc->i.dev_action[dev->devno];
437 }
438 
439 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
440 				struct ata_eh_info *ehi, unsigned int action)
441 {
442 	struct ata_device *tdev;
443 
444 	if (!dev) {
445 		ehi->action &= ~action;
446 		ata_for_each_dev(tdev, link, ALL)
447 			ehi->dev_action[tdev->devno] &= ~action;
448 	} else {
449 		/* doesn't make sense for port-wide EH actions */
450 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
451 
452 		/* break ehi->action into ehi->dev_action */
453 		if (ehi->action & action) {
454 			ata_for_each_dev(tdev, link, ALL)
455 				ehi->dev_action[tdev->devno] |=
456 					ehi->action & action;
457 			ehi->action &= ~action;
458 		}
459 
460 		/* turn off the specified per-dev action */
461 		ehi->dev_action[dev->devno] &= ~action;
462 	}
463 }
464 
465 /**
466  *	ata_scsi_timed_out - SCSI layer time out callback
467  *	@cmd: timed out SCSI command
468  *
469  *	Handles SCSI layer timeout.  We race with normal completion of
470  *	the qc for @cmd.  If the qc is already gone, we lose and let
471  *	the scsi command finish (BLK_EH_HANDLED).  Otherwise, the qc has
472  *	timed out and EH should be invoked.  Prevent ata_qc_complete()
473  *	from finishing it by setting ATA_QCFLAG_EH_SCHEDULED and return
474  *	BLK_EH_NOT_HANDLED.
475  *
476  *	TODO: kill this function once old EH is gone.
477  *
478  *	LOCKING:
479  *	Called from timer context
480  *
481  *	RETURNS:
482  *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
483  */
484 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
485 {
486 	struct Scsi_Host *host = cmd->device->host;
487 	struct ata_port *ap = ata_shost_to_port(host);
488 	unsigned long flags;
489 	struct ata_queued_cmd *qc;
490 	enum blk_eh_timer_return ret;
491 
492 	DPRINTK("ENTER\n");
493 
494 	if (ap->ops->error_handler) {
495 		ret = BLK_EH_NOT_HANDLED;
496 		goto out;
497 	}
498 
499 	ret = BLK_EH_HANDLED;
500 	spin_lock_irqsave(ap->lock, flags);
501 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
502 	if (qc) {
503 		WARN_ON(qc->scsicmd != cmd);
504 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
505 		qc->err_mask |= AC_ERR_TIMEOUT;
506 		ret = BLK_EH_NOT_HANDLED;
507 	}
508 	spin_unlock_irqrestore(ap->lock, flags);
509 
510  out:
511 	DPRINTK("EXIT, ret=%d\n", ret);
512 	return ret;
513 }
514 
515 static void ata_eh_unload(struct ata_port *ap)
516 {
517 	struct ata_link *link;
518 	struct ata_device *dev;
519 	unsigned long flags;
520 
521 	/* Restore SControl IPM and SPD for the next driver and
522 	 * disable attached devices.
523 	 */
524 	ata_for_each_link(link, ap, PMP_FIRST) {
525 		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
526 		ata_for_each_dev(dev, link, ALL)
527 			ata_dev_disable(dev);
528 	}
529 
530 	/* freeze and set UNLOADED */
531 	spin_lock_irqsave(ap->lock, flags);
532 
533 	ata_port_freeze(ap);			/* won't be thawed */
534 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
535 	ap->pflags |= ATA_PFLAG_UNLOADED;
536 
537 	spin_unlock_irqrestore(ap->lock, flags);
538 }
539 
540 /**
541  *	ata_scsi_error - SCSI layer error handler callback
542  *	@host: SCSI host on which error occurred
543  *
544  *	Handles SCSI-layer-thrown error events.
545  *
546  *	LOCKING:
547  *	Inherited from SCSI layer (none, can sleep)
548  *
549  *	RETURNS:
550  *	Zero.
551  */
552 void ata_scsi_error(struct Scsi_Host *host)
553 {
554 	struct ata_port *ap = ata_shost_to_port(host);
555 	int i;
556 	unsigned long flags;
557 
558 	DPRINTK("ENTER\n");
559 
560 	/* make sure sff pio task is not running */
561 	ata_sff_flush_pio_task(ap);
562 
563 	/* synchronize with host lock and sort out timeouts */
564 
565 	/* For new EH, all qcs are finished in one of three ways -
566 	 * normal completion, error completion, and SCSI timeout.
567 	 * Both completions can race against SCSI timeout.  When normal
568 	 * completion wins, the qc never reaches EH.  When error
569 	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
570 	 *
571 	 * When SCSI timeout wins, things are a bit more complex.
572 	 * Normal or error completion can occur after the timeout but
573 	 * before this point.  In such cases, both types of
574 	 * completions are honored.  A scmd is determined to have
575 	 * timed out iff its associated qc is active and not failed.
576 	 */
577 	if (ap->ops->error_handler) {
578 		struct scsi_cmnd *scmd, *tmp;
579 		int nr_timedout = 0;
580 
581 		spin_lock_irqsave(ap->lock, flags);
582 
583 		/* This must occur under the ap->lock as we don't want
584 		 * a polled recovery to race the real interrupt handler.
585 		 *
586 		 * The lost_interrupt handler checks for any completed but
587 		 * non-notified command and completes much like an IRQ handler.
588 		 *
589 		 * We then fall into the error recovery code which will treat
590 		 * this as if normal completion won the race */
591 
592 		if (ap->ops->lost_interrupt)
593 			ap->ops->lost_interrupt(ap);
594 
595 		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
596 			struct ata_queued_cmd *qc;
597 
598 			for (i = 0; i < ATA_MAX_QUEUE; i++) {
599 				qc = __ata_qc_from_tag(ap, i);
600 				if (qc->flags & ATA_QCFLAG_ACTIVE &&
601 				    qc->scsicmd == scmd)
602 					break;
603 			}
604 
605 			if (i < ATA_MAX_QUEUE) {
606 				/* the scmd has an associated qc */
607 				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
608 					/* which hasn't failed yet, timeout */
609 					qc->err_mask |= AC_ERR_TIMEOUT;
610 					qc->flags |= ATA_QCFLAG_FAILED;
611 					nr_timedout++;
612 				}
613 			} else {
614 				/* Normal completion occurred after
615 				 * SCSI timeout but before this point.
616 				 * Successfully complete it.
617 				 */
618 				scmd->retries = scmd->allowed;
619 				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
620 			}
621 		}
622 
623 		/* If we have timed out qcs, they belong to EH from
624 		 * this point but the state of the controller is
625 		 * unknown.  Freeze the port to make sure the IRQ
626 		 * handler doesn't diddle with those qcs.  This must
627 		 * be done atomically w.r.t. setting QCFLAG_FAILED.
628 		 */
629 		if (nr_timedout)
630 			__ata_port_freeze(ap);
631 
632 		spin_unlock_irqrestore(ap->lock, flags);
633 
634 		/* initialize eh_tries */
635 		ap->eh_tries = ATA_EH_MAX_TRIES;
636 	} else
637 		spin_unlock_wait(ap->lock);
638 
639 	/* If we raced with normal completion and there is nothing to
640 	   recover (nr_timedout == 0), why exactly are we doing error recovery? */
641 
642  repeat:
643 	/* invoke error handler */
644 	if (ap->ops->error_handler) {
645 		struct ata_link *link;
646 
647 		/* kill fast drain timer */
648 		del_timer_sync(&ap->fastdrain_timer);
649 
650 		/* process port resume request */
651 		ata_eh_handle_port_resume(ap);
652 
653 		/* fetch & clear EH info */
654 		spin_lock_irqsave(ap->lock, flags);
655 
656 		ata_for_each_link(link, ap, HOST_FIRST) {
657 			struct ata_eh_context *ehc = &link->eh_context;
658 			struct ata_device *dev;
659 
660 			memset(&link->eh_context, 0, sizeof(link->eh_context));
661 			link->eh_context.i = link->eh_info;
662 			memset(&link->eh_info, 0, sizeof(link->eh_info));
663 
664 			ata_for_each_dev(dev, link, ENABLED) {
665 				int devno = dev->devno;
666 
667 				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
668 				if (ata_ncq_enabled(dev))
669 					ehc->saved_ncq_enabled |= 1 << devno;
670 			}
671 		}
672 
673 		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
674 		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
675 		ap->excl_link = NULL;	/* don't maintain exclusion over EH */
676 
677 		spin_unlock_irqrestore(ap->lock, flags);
678 
679 		/* invoke EH, skip if unloading or suspended */
680 		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
681 			ap->ops->error_handler(ap);
682 		else {
683 			/* if unloading, commence suicide */
684 			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
685 			    !(ap->pflags & ATA_PFLAG_UNLOADED))
686 				ata_eh_unload(ap);
687 			ata_eh_finish(ap);
688 		}
689 
690 		/* process port suspend request */
691 		ata_eh_handle_port_suspend(ap);
692 
693 		/* Exception might have happened after ->error_handler
694 		 * recovered the port but before this point.  Repeat
695 		 * EH in such a case.
696 		 */
697 		spin_lock_irqsave(ap->lock, flags);
698 
699 		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
700 			if (--ap->eh_tries) {
701 				spin_unlock_irqrestore(ap->lock, flags);
702 				goto repeat;
703 			}
704 			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
705 					"tries, giving up\n", ATA_EH_MAX_TRIES);
706 			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
707 		}
708 
709 		/* this run is complete, make sure EH info is clear */
710 		ata_for_each_link(link, ap, HOST_FIRST)
711 			memset(&link->eh_info, 0, sizeof(link->eh_info));
712 
713 		/* Clear host_eh_scheduled while holding ap->lock such
714 		 * that if exception occurs after this point but
715 		 * before EH completion, SCSI midlayer will
716 		 * re-initiate EH.
717 		 */
718 		host->host_eh_scheduled = 0;
719 
720 		spin_unlock_irqrestore(ap->lock, flags);
721 	} else {
722 		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
723 		ap->ops->eng_timeout(ap);
724 	}
725 
726 	/* finish or retry handled scmd's and clean up */
727 	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
728 
729 	scsi_eh_flush_done_q(&ap->eh_done_q);
730 
731 	/* clean up */
732 	spin_lock_irqsave(ap->lock, flags);
733 
734 	if (ap->pflags & ATA_PFLAG_LOADING)
735 		ap->pflags &= ~ATA_PFLAG_LOADING;
736 	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
737 		schedule_delayed_work(&ap->hotplug_task, 0);
738 
739 	if (ap->pflags & ATA_PFLAG_RECOVERED)
740 		ata_port_printk(ap, KERN_INFO, "EH complete\n");
741 
742 	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
743 
744 	/* tell wait_eh that we're done */
745 	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
746 	wake_up_all(&ap->eh_wait_q);
747 
748 	spin_unlock_irqrestore(ap->lock, flags);
749 
750 	DPRINTK("EXIT\n");
751 }
752 
753 /**
754  *	ata_port_wait_eh - Wait for the currently pending EH to complete
755  *	@ap: Port to wait EH for
756  *
757  *	Wait until the currently pending EH is complete.
758  *
759  *	LOCKING:
760  *	Kernel thread context (may sleep).
761  */
762 void ata_port_wait_eh(struct ata_port *ap)
763 {
764 	unsigned long flags;
765 	DEFINE_WAIT(wait);
766 
767  retry:
768 	spin_lock_irqsave(ap->lock, flags);
769 
770 	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
771 		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
772 		spin_unlock_irqrestore(ap->lock, flags);
773 		schedule();
774 		spin_lock_irqsave(ap->lock, flags);
775 	}
776 	finish_wait(&ap->eh_wait_q, &wait);
777 
778 	spin_unlock_irqrestore(ap->lock, flags);
779 
780 	/* make sure SCSI EH is complete */
781 	if (scsi_host_in_recovery(ap->scsi_host)) {
782 		msleep(10);
783 		goto retry;
784 	}
785 }
786 
787 static int ata_eh_nr_in_flight(struct ata_port *ap)
788 {
789 	unsigned int tag;
790 	int nr = 0;
791 
792 	/* count only non-internal commands */
793 	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
794 		if (ata_qc_from_tag(ap, tag))
795 			nr++;
796 
797 	return nr;
798 }
799 
800 void ata_eh_fastdrain_timerfn(unsigned long arg)
801 {
802 	struct ata_port *ap = (void *)arg;
803 	unsigned long flags;
804 	int cnt;
805 
806 	spin_lock_irqsave(ap->lock, flags);
807 
808 	cnt = ata_eh_nr_in_flight(ap);
809 
810 	/* are we done? */
811 	if (!cnt)
812 		goto out_unlock;
813 
814 	if (cnt == ap->fastdrain_cnt) {
815 		unsigned int tag;
816 
817 		/* No progress during the last interval, tag all
818 		 * in-flight qcs as timed out and freeze the port.
819 		 */
820 		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
821 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
822 			if (qc)
823 				qc->err_mask |= AC_ERR_TIMEOUT;
824 		}
825 
826 		ata_port_freeze(ap);
827 	} else {
828 		/* some qcs have finished, give it another chance */
829 		ap->fastdrain_cnt = cnt;
830 		ap->fastdrain_timer.expires =
831 			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
832 		add_timer(&ap->fastdrain_timer);
833 	}
834 
835  out_unlock:
836 	spin_unlock_irqrestore(ap->lock, flags);
837 }
838 
839 /**
840  *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
841  *	@ap: target ATA port
842  *	@fastdrain: activate fast drain
843  *
844  *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
845  *	is non-zero and EH wasn't pending before.  Fast drain ensures
846  *	that EH kicks in in a timely manner.
847  *
848  *	LOCKING:
849  *	spin_lock_irqsave(host lock)
850  */
851 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
852 {
853 	int cnt;
854 
855 	/* already scheduled? */
856 	if (ap->pflags & ATA_PFLAG_EH_PENDING)
857 		return;
858 
859 	ap->pflags |= ATA_PFLAG_EH_PENDING;
860 
861 	if (!fastdrain)
862 		return;
863 
864 	/* do we have in-flight qcs? */
865 	cnt = ata_eh_nr_in_flight(ap);
866 	if (!cnt)
867 		return;
868 
869 	/* activate fast drain */
870 	ap->fastdrain_cnt = cnt;
871 	ap->fastdrain_timer.expires =
872 		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
873 	add_timer(&ap->fastdrain_timer);
874 }
875 
876 /**
877  *	ata_qc_schedule_eh - schedule qc for error handling
878  *	@qc: command to schedule error handling for
879  *
880  *	Schedule error handling for @qc.  EH will kick in as soon as
881  *	other commands are drained.
882  *
883  *	LOCKING:
884  *	spin_lock_irqsave(host lock)
885  */
886 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
887 {
888 	struct ata_port *ap = qc->ap;
889 	struct request_queue *q = qc->scsicmd->device->request_queue;
890 	unsigned long flags;
891 
892 	WARN_ON(!ap->ops->error_handler);
893 
894 	qc->flags |= ATA_QCFLAG_FAILED;
895 	ata_eh_set_pending(ap, 1);
896 
897 	/* The following will fail if timeout has already expired.
898 	 * ata_scsi_error() takes care of such scmds on EH entry.
899 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
900 	 * this function completes.
901 	 */
902 	spin_lock_irqsave(q->queue_lock, flags);
903 	blk_abort_request(qc->scsicmd->request);
904 	spin_unlock_irqrestore(q->queue_lock, flags);
905 }
906 
907 /**
908  *	ata_port_schedule_eh - schedule error handling without a qc
909  *	@ap: ATA port to schedule EH for
910  *
911  *	Schedule error handling for @ap.  EH will kick in as soon as
912  *	all commands are drained.
913  *
914  *	LOCKING:
915  *	spin_lock_irqsave(host lock)
916  */
917 void ata_port_schedule_eh(struct ata_port *ap)
918 {
919 	WARN_ON(!ap->ops->error_handler);
920 
921 	if (ap->pflags & ATA_PFLAG_INITIALIZING)
922 		return;
923 
924 	ata_eh_set_pending(ap, 1);
925 	scsi_schedule_eh(ap->scsi_host);
926 
927 	DPRINTK("port EH scheduled\n");
928 }
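
/*
 * Illustrative sketch: the usual idiom for kicking EH from process
 * context and waiting for it to finish.  ata_port_schedule_eh() needs
 * the host lock; ata_port_wait_eh() must be called without it since it
 * sleeps.
 */
static inline void example_kick_and_wait_eh(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
}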
929 
930 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
931 {
932 	int tag, nr_aborted = 0;
933 
934 	WARN_ON(!ap->ops->error_handler);
935 
936 	/* we're gonna abort all commands, no need for fast drain */
937 	ata_eh_set_pending(ap, 0);
938 
939 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
940 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
941 
942 		if (qc && (!link || qc->dev->link == link)) {
943 			qc->flags |= ATA_QCFLAG_FAILED;
944 			ata_qc_complete(qc);
945 			nr_aborted++;
946 		}
947 	}
948 
949 	if (!nr_aborted)
950 		ata_port_schedule_eh(ap);
951 
952 	return nr_aborted;
953 }
954 
955 /**
956  *	ata_link_abort - abort all qc's on the link
957  *	@link: ATA link to abort qc's for
958  *
959  *	Abort all qc's active on @link and schedule EH.
960  *
961  *	LOCKING:
962  *	spin_lock_irqsave(host lock)
963  *
964  *	RETURNS:
965  *	Number of aborted qc's.
966  */
967 int ata_link_abort(struct ata_link *link)
968 {
969 	return ata_do_link_abort(link->ap, link);
970 }
971 
972 /**
973  *	ata_port_abort - abort all qc's on the port
974  *	@ap: ATA port to abort qc's for
975  *
976  *	Abort all active qc's of @ap and schedule EH.
977  *
978  *	LOCKING:
979  *	spin_lock_irqsave(host lock)
980  *
981  *	RETURNS:
982  *	Number of aborted qc's.
983  */
984 int ata_port_abort(struct ata_port *ap)
985 {
986 	return ata_do_link_abort(ap, NULL);
987 }
988 
989 /**
990  *	__ata_port_freeze - freeze port
991  *	@ap: ATA port to freeze
992  *
993  *	This function is called when HSM violation or some other
994  *	condition disrupts normal operation of the port.  A frozen port
995  *	is not allowed to perform any operation until the port is
996  *	thawed, which usually follows a successful reset.
997  *
998  *	ap->ops->freeze() callback can be used for freezing the port
999  *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
1000  *	port cannot be frozen hardware-wise, the interrupt handler
1001  *	must ack and clear interrupts unconditionally while the port
1002  *	is frozen.
1003  *
1004  *	LOCKING:
1005  *	spin_lock_irqsave(host lock)
1006  */
1007 static void __ata_port_freeze(struct ata_port *ap)
1008 {
1009 	WARN_ON(!ap->ops->error_handler);
1010 
1011 	if (ap->ops->freeze)
1012 		ap->ops->freeze(ap);
1013 
1014 	ap->pflags |= ATA_PFLAG_FROZEN;
1015 
1016 	DPRINTK("ata%u port frozen\n", ap->print_id);
1017 }
1018 
1019 /**
1020  *	ata_port_freeze - abort & freeze port
1021  *	@ap: ATA port to freeze
1022  *
1023  *	Abort and freeze @ap.  The freeze operation must be called
1024  *	first, because some hardware requires special operations
1025  *	before the taskfile registers are accessible.
1026  *
1027  *	LOCKING:
1028  *	spin_lock_irqsave(host lock)
1029  *
1030  *	RETURNS:
1031  *	Number of aborted commands.
1032  */
1033 int ata_port_freeze(struct ata_port *ap)
1034 {
1035 	int nr_aborted;
1036 
1037 	WARN_ON(!ap->ops->error_handler);
1038 
1039 	__ata_port_freeze(ap);
1040 	nr_aborted = ata_port_abort(ap);
1041 
1042 	return nr_aborted;
1043 }
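
/*
 * Illustrative sketch: a hypothetical LLDD interrupt handler reacting
 * to a fatal controller error while holding ap->lock.  Freezing aborts
 * all active qcs (ata_port_freeze() calls ata_port_abort() internally)
 * and keeps the port quiet until EH thaws it after a successful reset.
 */
static inline void example_irq_fatal_error(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}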
1044 
1045 /**
1046  *	sata_async_notification - SATA async notification handler
1047  *	@ap: ATA port where async notification is received
1048  *
1049  *	Handler to be called when async notification via SDB FIS is
1050  *	received.  This function schedules EH if necessary.
1051  *
1052  *	LOCKING:
1053  *	spin_lock_irqsave(host lock)
1054  *
1055  *	RETURNS:
1056  *	1 if EH is scheduled, 0 otherwise.
1057  */
1058 int sata_async_notification(struct ata_port *ap)
1059 {
1060 	u32 sntf;
1061 	int rc;
1062 
1063 	if (!(ap->flags & ATA_FLAG_AN))
1064 		return 0;
1065 
1066 	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1067 	if (rc == 0)
1068 		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1069 
1070 	if (!sata_pmp_attached(ap) || rc) {
1071 		/* PMP is not attached or SNTF is not available */
1072 		if (!sata_pmp_attached(ap)) {
1073 			/* PMP is not attached.  Check whether ATAPI
1074 			 * AN is configured.  If so, notify media
1075 			 * change.
1076 			 */
1077 			struct ata_device *dev = ap->link.device;
1078 
1079 			if ((dev->class == ATA_DEV_ATAPI) &&
1080 			    (dev->flags & ATA_DFLAG_AN))
1081 				ata_scsi_media_change_notify(dev);
1082 			return 0;
1083 		} else {
1084 			/* PMP is attached but SNTF is not available.
1085 			 * ATAPI async media change notification is
1086 			 * not used.  The PMP must be reporting PHY
1087 			 * status change, schedule EH.
1088 			 */
1089 			ata_port_schedule_eh(ap);
1090 			return 1;
1091 		}
1092 	} else {
1093 		/* PMP is attached and SNTF is available */
1094 		struct ata_link *link;
1095 
1096 		/* check and notify ATAPI AN */
1097 		ata_for_each_link(link, ap, EDGE) {
1098 			if (!(sntf & (1 << link->pmp)))
1099 				continue;
1100 
1101 			if ((link->device->class == ATA_DEV_ATAPI) &&
1102 			    (link->device->flags & ATA_DFLAG_AN))
1103 				ata_scsi_media_change_notify(link->device);
1104 		}
1105 
1106 		/* If PMP is reporting that PHY status of some
1107 		 * downstream ports has changed, schedule EH.
1108 		 */
1109 		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1110 			ata_port_schedule_eh(ap);
1111 			return 1;
1112 		}
1113 
1114 		return 0;
1115 	}
1116 }
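
/*
 * Illustrative sketch: an LLDD that decodes a received SDB FIS would
 * forward async notification like this.  EXAMPLE_SDB_FIS_N is an
 * assumed name and bit position for the notification bit of the FIS.
 */
#define EXAMPLE_SDB_FIS_N	(1 << 6)	/* assumption */
static inline void example_sdb_fis_rx(struct ata_port *ap, u8 fis_flags)
{
	if (fis_flags & EXAMPLE_SDB_FIS_N)
		sata_async_notification(ap);
}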
1117 
1118 /**
1119  *	ata_eh_freeze_port - EH helper to freeze port
1120  *	@ap: ATA port to freeze
1121  *
1122  *	Freeze @ap.
1123  *
1124  *	LOCKING:
1125  *	None.
1126  */
1127 void ata_eh_freeze_port(struct ata_port *ap)
1128 {
1129 	unsigned long flags;
1130 
1131 	if (!ap->ops->error_handler)
1132 		return;
1133 
1134 	spin_lock_irqsave(ap->lock, flags);
1135 	__ata_port_freeze(ap);
1136 	spin_unlock_irqrestore(ap->lock, flags);
1137 }
1138 
1139 /**
1140  *	ata_eh_thaw_port - EH helper to thaw port
1141  *	@ap: ATA port to thaw
1142  *
1143  *	Thaw frozen port @ap.
1144  *
1145  *	LOCKING:
1146  *	None.
1147  */
1148 void ata_eh_thaw_port(struct ata_port *ap)
1149 {
1150 	unsigned long flags;
1151 
1152 	if (!ap->ops->error_handler)
1153 		return;
1154 
1155 	spin_lock_irqsave(ap->lock, flags);
1156 
1157 	ap->pflags &= ~ATA_PFLAG_FROZEN;
1158 
1159 	if (ap->ops->thaw)
1160 		ap->ops->thaw(ap);
1161 
1162 	spin_unlock_irqrestore(ap->lock, flags);
1163 
1164 	DPRINTK("ata%u port thawed\n", ap->print_id);
1165 }
1166 
1167 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1168 {
1169 	/* nada */
1170 }
1171 
1172 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1173 {
1174 	struct ata_port *ap = qc->ap;
1175 	struct scsi_cmnd *scmd = qc->scsicmd;
1176 	unsigned long flags;
1177 
1178 	spin_lock_irqsave(ap->lock, flags);
1179 	qc->scsidone = ata_eh_scsidone;
1180 	__ata_qc_complete(qc);
1181 	WARN_ON(ata_tag_valid(qc->tag));
1182 	spin_unlock_irqrestore(ap->lock, flags);
1183 
1184 	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1185 }
1186 
1187 /**
1188  *	ata_eh_qc_complete - Complete an active ATA command from EH
1189  *	@qc: Command to complete
1190  *
1191  *	Indicate to the mid and upper layers that an ATA command has
1192  *	completed.  To be used from EH.
1193  */
1194 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1195 {
1196 	struct scsi_cmnd *scmd = qc->scsicmd;
1197 	scmd->retries = scmd->allowed;
1198 	__ata_eh_qc_complete(qc);
1199 }
1200 
1201 /**
1202  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1203  *	@qc: Command to retry
1204  *
1205  *	Indicate to the mid and upper layers that an ATA command
1206  *	should be retried.  To be used from EH.
1207  *
1208  *	SCSI midlayer limits the number of retries to scmd->allowed.
1209  *	scmd->retries is decremented for commands which get retried
1210  *	due to unrelated failures (qc->err_mask is zero).
1211  */
1212 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1213 {
1214 	struct scsi_cmnd *scmd = qc->scsicmd;
1215 	if (!qc->err_mask && scmd->retries)
1216 		scmd->retries--;
1217 	__ata_eh_qc_complete(qc);
1218 }
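
/*
 * Illustrative sketch of how EH typically disposes of failed qcs once
 * recovery is done (a simplified version of what ata_eh_finish()
 * does): commands that failed through no fault of their own, i.e.
 * with a zero err_mask, are retried; the rest complete with their
 * error status.
 */
static inline void example_finish_failed_qcs(struct ata_port *ap)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (!qc->err_mask)
			ata_eh_qc_retry(qc);
		else
			ata_eh_qc_complete(qc);
	}
}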
1219 
1220 /**
1221  *	ata_dev_disable - disable ATA device
1222  *	@dev: ATA device to disable
1223  *
1224  *	Disable @dev.
1225  *
1226  *	Locking:
1227  *	EH context.
1228  */
1229 void ata_dev_disable(struct ata_device *dev)
1230 {
1231 	if (!ata_dev_enabled(dev))
1232 		return;
1233 
1234 	if (ata_msg_drv(dev->link->ap))
1235 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
1236 	ata_acpi_on_disable(dev);
1237 	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1238 	dev->class++;
1239 
1240 	/* From now till the next successful probe, ering is used to
1241 	 * track probe failures.  Clear accumulated device error info.
1242 	 */
1243 	ata_ering_clear(&dev->ering);
1244 }
1245 
1246 /**
1247  *	ata_eh_detach_dev - detach ATA device
1248  *	@dev: ATA device to detach
1249  *
1250  *	Detach @dev.
1251  *
1252  *	LOCKING:
1253  *	None.
1254  */
1255 void ata_eh_detach_dev(struct ata_device *dev)
1256 {
1257 	struct ata_link *link = dev->link;
1258 	struct ata_port *ap = link->ap;
1259 	struct ata_eh_context *ehc = &link->eh_context;
1260 	unsigned long flags;
1261 
1262 	ata_dev_disable(dev);
1263 
1264 	spin_lock_irqsave(ap->lock, flags);
1265 
1266 	dev->flags &= ~ATA_DFLAG_DETACH;
1267 
1268 	if (ata_scsi_offline_dev(dev)) {
1269 		dev->flags |= ATA_DFLAG_DETACHED;
1270 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1271 	}
1272 
1273 	/* clear per-dev EH info */
1274 	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1275 	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1276 	ehc->saved_xfer_mode[dev->devno] = 0;
1277 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1278 
1279 	spin_unlock_irqrestore(ap->lock, flags);
1280 }
1281 
1282 /**
1283  *	ata_eh_about_to_do - about to perform eh_action
1284  *	@link: target ATA link
1285  *	@dev: target ATA dev for per-dev action (can be NULL)
1286  *	@action: action about to be performed
1287  *
1288  *	Called just before performing EH actions to clear related bits
1289  *	in @link->eh_info such that eh actions are not unnecessarily
1290  *	repeated.
1291  *
1292  *	LOCKING:
1293  *	None.
1294  */
1295 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1296 			unsigned int action)
1297 {
1298 	struct ata_port *ap = link->ap;
1299 	struct ata_eh_info *ehi = &link->eh_info;
1300 	struct ata_eh_context *ehc = &link->eh_context;
1301 	unsigned long flags;
1302 
1303 	spin_lock_irqsave(ap->lock, flags);
1304 
1305 	ata_eh_clear_action(link, dev, ehi, action);
1306 
1307 	/* About to take EH action, set RECOVERED.  Ignore actions on
1308 	 * slave links as master will do them again.
1309 	 */
1310 	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1311 		ap->pflags |= ATA_PFLAG_RECOVERED;
1312 
1313 	spin_unlock_irqrestore(ap->lock, flags);
1314 }
1315 
1316 /**
1317  *	ata_eh_done - EH action complete
1318  *	@link: target ATA link
1319  *	@dev: target ATA dev for per-dev action (can be NULL)
1320  *	@action: action just completed
1321  *
1322  *	Called right after performing EH actions to clear related bits
1323  *	in @link->eh_context.
1324  *
1325  *	LOCKING:
1326  *	None.
1327  */
1328 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1329 		 unsigned int action)
1330 {
1331 	struct ata_eh_context *ehc = &link->eh_context;
1332 
1333 	ata_eh_clear_action(link, dev, &ehc->i, action);
1334 }
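
/*
 * Illustrative sketch: EH actions are bracketed by the two helpers
 * above so that a repeated EH pass doesn't redo completed work (this
 * mirrors how ata_eh_reset() brackets a reset).  "do_reset" stands in
 * for an actual reset method.
 */
static inline int example_bracketed_reset(struct ata_link *link,
					  int (*do_reset)(struct ata_link *))
{
	int rc;

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
	rc = do_reset(link);
	if (rc == 0)
		ata_eh_done(link, NULL, ATA_EH_RESET);
	return rc;
}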
1335 
1336 /**
1337  *	ata_err_string - convert err_mask to descriptive string
1338  *	@err_mask: error mask to convert to string
1339  *
1340  *	Convert @err_mask to descriptive string.  Errors are
1341  *	prioritized according to severity and only the most severe
1342  *	error is reported.
1343  *
1344  *	LOCKING:
1345  *	None.
1346  *
1347  *	RETURNS:
1348  *	Descriptive string for @err_mask
1349  */
1350 static const char *ata_err_string(unsigned int err_mask)
1351 {
1352 	if (err_mask & AC_ERR_HOST_BUS)
1353 		return "host bus error";
1354 	if (err_mask & AC_ERR_ATA_BUS)
1355 		return "ATA bus error";
1356 	if (err_mask & AC_ERR_TIMEOUT)
1357 		return "timeout";
1358 	if (err_mask & AC_ERR_HSM)
1359 		return "HSM violation";
1360 	if (err_mask & AC_ERR_SYSTEM)
1361 		return "internal error";
1362 	if (err_mask & AC_ERR_MEDIA)
1363 		return "media error";
1364 	if (err_mask & AC_ERR_INVALID)
1365 		return "invalid argument";
1366 	if (err_mask & AC_ERR_DEV)
1367 		return "device error";
1368 	return "unknown error";
1369 }
1370 
1371 /**
1372  *	ata_read_log_page - read a specific log page
1373  *	@dev: target device
1374  *	@page: page to read
1375  *	@buf: buffer to store read page
1376  *	@sectors: number of sectors to read
1377  *
1378  *	Read log page using READ_LOG_EXT command.
1379  *
1380  *	LOCKING:
1381  *	Kernel thread context (may sleep).
1382  *
1383  *	RETURNS:
1384  *	0 on success, AC_ERR_* mask otherwise.
1385  */
1386 static unsigned int ata_read_log_page(struct ata_device *dev,
1387 				      u8 page, void *buf, unsigned int sectors)
1388 {
1389 	struct ata_taskfile tf;
1390 	unsigned int err_mask;
1391 
1392 	DPRINTK("read log page - page %d\n", page);
1393 
1394 	ata_tf_init(dev, &tf);
1395 	tf.command = ATA_CMD_READ_LOG_EXT;
1396 	tf.lbal = page;
1397 	tf.nsect = sectors;
1398 	tf.hob_nsect = sectors >> 8;
1399 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1400 	tf.protocol = ATA_PROT_PIO;
1401 
1402 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1403 				     buf, sectors * ATA_SECT_SIZE, 0);
1404 
1405 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
1406 	return err_mask;
1407 }
1408 
1409 /**
1410  *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
1411  *	@dev: Device to read log page 10h from
1412  *	@tag: Resulting tag of the failed command
1413  *	@tf: Resulting taskfile registers of the failed command
1414  *
1415  *	Read log page 10h to obtain NCQ error details and clear error
1416  *	condition.
1417  *
1418  *	LOCKING:
1419  *	Kernel thread context (may sleep).
1420  *
1421  *	RETURNS:
1422  *	0 on success, -errno otherwise.
1423  */
1424 static int ata_eh_read_log_10h(struct ata_device *dev,
1425 			       int *tag, struct ata_taskfile *tf)
1426 {
1427 	u8 *buf = dev->link->ap->sector_buf;
1428 	unsigned int err_mask;
1429 	u8 csum;
1430 	int i;
1431 
1432 	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
1433 	if (err_mask)
1434 		return -EIO;
1435 
1436 	csum = 0;
1437 	for (i = 0; i < ATA_SECT_SIZE; i++)
1438 		csum += buf[i];
1439 	if (csum)
1440 		ata_dev_printk(dev, KERN_WARNING,
1441 			       "invalid checksum 0x%x on log page 10h\n", csum);
1442 
1443 	if (buf[0] & 0x80)
1444 		return -ENOENT;
1445 
1446 	*tag = buf[0] & 0x1f;
1447 
1448 	tf->command = buf[2];
1449 	tf->feature = buf[3];
1450 	tf->lbal = buf[4];
1451 	tf->lbam = buf[5];
1452 	tf->lbah = buf[6];
1453 	tf->device = buf[7];
1454 	tf->hob_lbal = buf[8];
1455 	tf->hob_lbam = buf[9];
1456 	tf->hob_lbah = buf[10];
1457 	tf->nsect = buf[12];
1458 	tf->hob_nsect = buf[13];
1459 
1460 	return 0;
1461 }
1462 
1463 /**
1464  *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1465  *	@dev: target ATAPI device
1466  *	@r_sense_key: out parameter for sense_key
1467  *
1468  *	Perform ATAPI TEST_UNIT_READY.
1469  *
1470  *	LOCKING:
1471  *	EH context (may sleep).
1472  *
1473  *	RETURNS:
1474  *	0 on success, AC_ERR_* mask on failure.
1475  */
1476 static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1477 {
1478 	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1479 	struct ata_taskfile tf;
1480 	unsigned int err_mask;
1481 
1482 	ata_tf_init(dev, &tf);
1483 
1484 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1485 	tf.command = ATA_CMD_PACKET;
1486 	tf.protocol = ATAPI_PROT_NODATA;
1487 
1488 	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1489 	if (err_mask == AC_ERR_DEV)
1490 		*r_sense_key = tf.feature >> 4;
1491 	return err_mask;
1492 }
1493 
1494 /**
1495  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1496  *	@dev: device to perform REQUEST_SENSE to
1497  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1498  *	@dfl_sense_key: default sense key to use
1499  *
1500  *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
1501  *	SENSE.  This function is an EH helper.
1502  *
1503  *	LOCKING:
1504  *	Kernel thread context (may sleep).
1505  *
1506  *	RETURNS:
1507  *	0 on success, AC_ERR_* mask on failure
1508  */
1509 static unsigned int atapi_eh_request_sense(struct ata_device *dev,
1510 					   u8 *sense_buf, u8 dfl_sense_key)
1511 {
1512 	u8 cdb[ATAPI_CDB_LEN] =
1513 		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1514 	struct ata_port *ap = dev->link->ap;
1515 	struct ata_taskfile tf;
1516 
1517 	DPRINTK("ATAPI request sense\n");
1518 
1519 	/* FIXME: is this needed? */
1520 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1521 
1522 	/* initialize sense_buf with the error register,
1523 	 * for the case where they are -not- overwritten
1524 	 */
1525 	sense_buf[0] = 0x70;
1526 	sense_buf[2] = dfl_sense_key;
1527 
1528 	/* some devices time out if garbage left in tf */
1529 	ata_tf_init(dev, &tf);
1530 
1531 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1532 	tf.command = ATA_CMD_PACKET;
1533 
1534 	/* is it pointless to prefer PIO for "safety reasons"? */
1535 	if (ap->flags & ATA_FLAG_PIO_DMA) {
1536 		tf.protocol = ATAPI_PROT_DMA;
1537 		tf.feature |= ATAPI_PKT_DMA;
1538 	} else {
1539 		tf.protocol = ATAPI_PROT_PIO;
1540 		tf.lbam = SCSI_SENSE_BUFFERSIZE;
1541 		tf.lbah = 0;
1542 	}
1543 
1544 	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1545 				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1546 }
1547 
1548 /**
1549  *	ata_eh_analyze_serror - analyze SError for a failed port
1550  *	@link: ATA link to analyze SError for
1551  *
1552  *	Analyze SError if available and further determine cause of
1553  *	failure.
1554  *
1555  *	LOCKING:
1556  *	None.
1557  */
1558 static void ata_eh_analyze_serror(struct ata_link *link)
1559 {
1560 	struct ata_eh_context *ehc = &link->eh_context;
1561 	u32 serror = ehc->i.serror;
1562 	unsigned int err_mask = 0, action = 0;
1563 	u32 hotplug_mask;
1564 
1565 	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1566 		err_mask |= AC_ERR_ATA_BUS;
1567 		action |= ATA_EH_RESET;
1568 	}
1569 	if (serror & SERR_PROTOCOL) {
1570 		err_mask |= AC_ERR_HSM;
1571 		action |= ATA_EH_RESET;
1572 	}
1573 	if (serror & SERR_INTERNAL) {
1574 		err_mask |= AC_ERR_SYSTEM;
1575 		action |= ATA_EH_RESET;
1576 	}
1577 
1578 	/* Determine whether a hotplug event has occurred.  Both
1579 	 * SError.N/X are considered hotplug events for enabled or
1580 	 * host links.  For disabled PMP links, only the N bit is
1581 	 * considered, as the X bit is left at 1 for link plugging.
1582 	 */
1583 	if (link->lpm_policy != ATA_LPM_MAX_POWER)
1584 		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
1585 	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1586 		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1587 	else
1588 		hotplug_mask = SERR_PHYRDY_CHG;
1589 
1590 	if (serror & hotplug_mask)
1591 		ata_ehi_hotplugged(&ehc->i);
1592 
1593 	ehc->i.err_mask |= err_mask;
1594 	ehc->i.action |= action;
1595 }
1596 
1597 /**
1598  *	ata_eh_analyze_ncq_error - analyze NCQ error
1599  *	@link: ATA link to analyze NCQ error for
1600  *
1601  *	Read log page 10h, determine the offending qc and acquire
1602  *	error status TF.  For NCQ device errors, all LLDDs have to do
1603  *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
1604  *	care of the rest.
1605  *
1606  *	LOCKING:
1607  *	Kernel thread context (may sleep).
1608  */
1609 void ata_eh_analyze_ncq_error(struct ata_link *link)
1610 {
1611 	struct ata_port *ap = link->ap;
1612 	struct ata_eh_context *ehc = &link->eh_context;
1613 	struct ata_device *dev = link->device;
1614 	struct ata_queued_cmd *qc;
1615 	struct ata_taskfile tf;
1616 	int tag, rc;
1617 
1618 	/* if frozen, we can't do much */
1619 	if (ap->pflags & ATA_PFLAG_FROZEN)
1620 		return;
1621 
1622 	/* is it an NCQ device error? */
1623 	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1624 		return;
1625 
1626 	/* has LLDD analyzed already? */
1627 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1628 		qc = __ata_qc_from_tag(ap, tag);
1629 
1630 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1631 			continue;
1632 
1633 		if (qc->err_mask)
1634 			return;
1635 	}
1636 
1637 	/* okay, this error is ours */
1638 	memset(&tf, 0, sizeof(tf));
1639 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
1640 	if (rc) {
1641 		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1642 				"(errno=%d)\n", rc);
1643 		return;
1644 	}
1645 
1646 	if (!(link->sactive & (1 << tag))) {
1647 		ata_link_printk(link, KERN_ERR, "log page 10h reported "
1648 				"inactive tag %d\n", tag);
1649 		return;
1650 	}
1651 
1652 	/* we've got the perpetrator, condemn it */
1653 	qc = __ata_qc_from_tag(ap, tag);
1654 	memcpy(&qc->result_tf, &tf, sizeof(tf));
1655 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1656 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1657 	ehc->i.err_mask &= ~AC_ERR_DEV;
1658 }
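
/*
 * Illustrative sketch: per the comment above, an LLDD that detects an
 * NCQ device error only needs to flag AC_ERR_DEV and kick EH, here by
 * aborting the link while holding ap->lock;
 * ata_eh_analyze_ncq_error() later reads log page 10h and pins the
 * error on the offending tag.
 */
static inline void example_ncq_dev_error(struct ata_link *link)
{
	struct ata_eh_info *ehi = &link->eh_info;

	ehi->err_mask |= AC_ERR_DEV;
	ata_link_abort(link);
}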
1659 
1660 /**
1661  *	ata_eh_analyze_tf - analyze taskfile of a failed qc
1662  *	@qc: qc to analyze
1663  *	@tf: Taskfile registers to analyze
1664  *
1665  *	Analyze taskfile of @qc and further determine cause of
1666  *	failure.  This function also requests ATAPI sense data if
1667  *	available.
1668  *
1669  *	LOCKING:
1670  *	Kernel thread context (may sleep).
1671  *
1672  *	RETURNS:
1673  *	Determined recovery action
1674  */
1675 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1676 				      const struct ata_taskfile *tf)
1677 {
1678 	unsigned int tmp, action = 0;
1679 	u8 stat = tf->command, err = tf->feature;
1680 
1681 	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1682 		qc->err_mask |= AC_ERR_HSM;
1683 		return ATA_EH_RESET;
1684 	}
1685 
1686 	if (stat & (ATA_ERR | ATA_DF))
1687 		qc->err_mask |= AC_ERR_DEV;
1688 	else
1689 		return 0;
1690 
1691 	switch (qc->dev->class) {
1692 	case ATA_DEV_ATA:
1693 		if (err & ATA_ICRC)
1694 			qc->err_mask |= AC_ERR_ATA_BUS;
1695 		if (err & ATA_UNC)
1696 			qc->err_mask |= AC_ERR_MEDIA;
1697 		if (err & ATA_IDNF)
1698 			qc->err_mask |= AC_ERR_INVALID;
1699 		break;
1700 
1701 	case ATA_DEV_ATAPI:
1702 		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1703 			tmp = atapi_eh_request_sense(qc->dev,
1704 						qc->scsicmd->sense_buffer,
1705 						qc->result_tf.feature >> 4);
1706 			if (!tmp) {
1707 				/* ATA_QCFLAG_SENSE_VALID is used to
1708 				 * tell atapi_qc_complete() that sense
1709 				 * data is already valid.
1710 				 *
1711 				 * TODO: interpret sense data and set
1712 				 * appropriate err_mask.
1713 				 */
1714 				qc->flags |= ATA_QCFLAG_SENSE_VALID;
1715 			} else
1716 				qc->err_mask |= tmp;
1717 		}
1718 	}
1719 
1720 	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1721 		action |= ATA_EH_RESET;
1722 
1723 	return action;
1724 }
1725 
1726 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1727 				   int *xfer_ok)
1728 {
1729 	int base = 0;
1730 
1731 	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1732 		*xfer_ok = 1;
1733 
1734 	if (!*xfer_ok)
1735 		base = ATA_ECAT_DUBIOUS_NONE;
1736 
1737 	if (err_mask & AC_ERR_ATA_BUS)
1738 		return base + ATA_ECAT_ATA_BUS;
1739 
1740 	if (err_mask & AC_ERR_TIMEOUT)
1741 		return base + ATA_ECAT_TOUT_HSM;
1742 
1743 	if (eflags & ATA_EFLAG_IS_IO) {
1744 		if (err_mask & AC_ERR_HSM)
1745 			return base + ATA_ECAT_TOUT_HSM;
1746 		if ((err_mask &
1747 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1748 			return base + ATA_ECAT_UNK_DEV;
1749 	}
1750 
1751 	return 0;
1752 }
1753 
1754 struct speed_down_verdict_arg {
1755 	u64 since;
1756 	int xfer_ok;
1757 	int nr_errors[ATA_ECAT_NR];
1758 };
1759 
1760 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1761 {
1762 	struct speed_down_verdict_arg *arg = void_arg;
1763 	int cat;
1764 
1765 	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1766 		return -1;
1767 
1768 	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1769 				      &arg->xfer_ok);
1770 	arg->nr_errors[cat]++;
1771 
1772 	return 0;
1773 }
1774 
1775 /**
1776  *	ata_eh_speed_down_verdict - Determine speed down verdict
1777  *	@dev: Device of interest
1778  *
1779  *	This function examines the error ring of @dev and determines
1780  *	whether NCQ needs to be turned off, the transfer speed should
1781  *	be stepped down, or a fallback to PIO is necessary.
1782  *
1783  *	ECAT_ATA_BUS	: ATA_BUS error for any command
1784  *
1785  *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
1786  *			  IO commands
1787  *
1788  *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
1789  *
1790  *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
1791  *			  data transfer hasn't been verified.
1792  *
1793  *	Verdicts are
1794  *
1795  *	NCQ_OFF		: Turn off NCQ.
1796  *
1797  *	SPEED_DOWN	: Speed down transfer speed but don't fall back
1798  *			  to PIO.
1799  *
1800  *	FALLBACK_TO_PIO	: Fall back to PIO.
1801  *
1802  *	Even if multiple verdicts are returned, only one action is
1803  *	taken per error.  An action triggered by non-DUBIOUS errors
1804  *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
1805  *	This is to expedite speed down decisions right after device is
1806  *	initially configured.
1807  *
1808  *	The following are the speed down rules.  #1 and #2 deal with
1809  *	DUBIOUS errors.
1810  *
1811  *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1812  *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1813  *
1814  *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1815  *	   occurred during last 5 mins, NCQ_OFF.
1816  *
1817  *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1818  *	   occurred during last 5 mins, FALLBACK_TO_PIO.
1819  *
1820  *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1821  *	   during last 10 mins, NCQ_OFF.
1822  *
1823  *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1824  *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1825  *
1826  *	LOCKING:
1827  *	Inherited from caller.
1828  *
1829  *	RETURNS:
1830  *	OR of ATA_EH_SPDN_* flags.
1831  */
1832 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1833 {
1834 	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1835 	u64 j64 = get_jiffies_64();
1836 	struct speed_down_verdict_arg arg;
1837 	unsigned int verdict = 0;
1838 
1839 	/* scan past 5 mins of error history */
1840 	memset(&arg, 0, sizeof(arg));
1841 	arg.since = j64 - min(j64, j5mins);
1842 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1843 
1844 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1845 	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1846 		verdict |= ATA_EH_SPDN_SPEED_DOWN |
1847 			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1848 
1849 	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1850 	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1851 		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1852 
1853 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1854 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1855 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1856 		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1857 
1858 	/* scan past 10 mins of error history */
1859 	memset(&arg, 0, sizeof(arg));
1860 	arg.since = j64 - min(j64, j10mins);
1861 	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1862 
1863 	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1864 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1865 		verdict |= ATA_EH_SPDN_NCQ_OFF;
1866 
1867 	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1868 	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1869 	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1870 		verdict |= ATA_EH_SPDN_SPEED_DOWN;
1871 
1872 	return verdict;
1873 }
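
/*
 * Worked example of the rules above: two DUBIOUS_ATA_BUS errors within
 * the last 5 minutes trip rule #1, yielding SPEED_DOWN |
 * FALLBACK_TO_PIO | KEEP_ERRORS; the ering is kept so that a later
 * non-DUBIOUS error can still escalate promptly.
 */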
1874 
1875 /**
1876  *	ata_eh_speed_down - record error and speed down if necessary
1877  *	@dev: Failed device
1878  *	@eflags: mask of ATA_EFLAG_* flags
1879  *	@err_mask: err_mask of the error
1880  *
1881  *	Record error and examine error history to determine whether
1882  *	adjusting transmission speed is necessary.  It also sets
1883  *	transmission limits appropriately if such adjustment is
1884  *	necessary.
1885  *
1886  *	LOCKING:
1887  *	Kernel thread context (may sleep).
1888  *
1889  *	RETURNS:
1890  *	Determined recovery action.
1891  */
1892 static unsigned int ata_eh_speed_down(struct ata_device *dev,
1893 				unsigned int eflags, unsigned int err_mask)
1894 {
1895 	struct ata_link *link = ata_dev_phys_link(dev);
1896 	int xfer_ok = 0;
1897 	unsigned int verdict;
1898 	unsigned int action = 0;
1899 
1900 	/* don't bother if Cat-0 error */
1901 	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1902 		return 0;
1903 
1904 	/* record error and determine whether speed down is necessary */
1905 	ata_ering_record(&dev->ering, eflags, err_mask);
1906 	verdict = ata_eh_speed_down_verdict(dev);
1907 
1908 	/* turn off NCQ? */
1909 	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
1910 	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
1911 			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
1912 		dev->flags |= ATA_DFLAG_NCQ_OFF;
1913 		ata_dev_printk(dev, KERN_WARNING,
1914 			       "NCQ disabled due to excessive errors\n");
1915 		goto done;
1916 	}
1917 
1918 	/* speed down? */
1919 	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1920 		/* speed down SATA link speed if possible */
1921 		if (sata_down_spd_limit(link, 0) == 0) {
1922 			action |= ATA_EH_RESET;
1923 			goto done;
1924 		}
1925 
1926 		/* lower transfer mode */
1927 		if (dev->spdn_cnt < 2) {
1928 			static const int dma_dnxfer_sel[] =
1929 				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
1930 			static const int pio_dnxfer_sel[] =
1931 				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
1932 			int sel;
1933 
1934 			if (dev->xfer_shift != ATA_SHIFT_PIO)
1935 				sel = dma_dnxfer_sel[dev->spdn_cnt];
1936 			else
1937 				sel = pio_dnxfer_sel[dev->spdn_cnt];
1938 
1939 			dev->spdn_cnt++;
1940 
1941 			if (ata_down_xfermask_limit(dev, sel) == 0) {
1942 				action |= ATA_EH_RESET;
1943 				goto done;
1944 			}
1945 		}
1946 	}
1947 
1948 	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1949 	 * SATA ATA devices.  Consider it only for PATA and SATA ATAPI.
1950 	 */
1951 	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1952 	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
1953 	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
1954 		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1955 			dev->spdn_cnt = 0;
1956 			action |= ATA_EH_RESET;
1957 			goto done;
1958 		}
1959 	}
1960 
1961 	return 0;
1962  done:
1963 	/* device has been slowed down, blow error history */
1964 	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
1965 		ata_ering_clear(&dev->ering);
1966 	return action;
1967 }
1968 
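/*
 * A worked example of the cascade in ata_eh_speed_down() above: for an
 * NCQ-capable SATA disk that keeps timing out, an NCQ_OFF verdict is
 * honoured first and ends the pass.  On later errors a SPEED_DOWN
 * verdict first tries sata_down_spd_limit() (e.g. 3.0 -> 1.5 Gbps),
 * then up to two ata_down_xfermask_limit() steps, and only once
 * spdn_cnt >= 2 may a PATA or ATAPI device be forced down to PIO.
 */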
1969 /**
1970  *	ata_eh_link_autopsy - analyze error and determine recovery action
1971  *	@link: host link to perform autopsy on
1972  *
1973  *	Analyze why @link failed and determine which recovery actions
1974  *	are needed.  This function also sets more detailed AC_ERR_*
1975  *	values and fills sense data for ATAPI CHECK CONDITION.
1976  *
1977  *	LOCKING:
1978  *	Kernel thread context (may sleep).
1979  */
1980 static void ata_eh_link_autopsy(struct ata_link *link)
1981 {
1982 	struct ata_port *ap = link->ap;
1983 	struct ata_eh_context *ehc = &link->eh_context;
1984 	struct ata_device *dev;
1985 	unsigned int all_err_mask = 0, eflags = 0;
1986 	int tag;
1987 	u32 serror;
1988 	int rc;
1989 
1990 	DPRINTK("ENTER\n");
1991 
1992 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1993 		return;
1994 
1995 	/* obtain and analyze SError */
1996 	rc = sata_scr_read(link, SCR_ERROR, &serror);
1997 	if (rc == 0) {
1998 		ehc->i.serror |= serror;
1999 		ata_eh_analyze_serror(link);
2000 	} else if (rc != -EOPNOTSUPP) {
2001 		/* SError read failed, force reset and probing */
2002 		ehc->i.probe_mask |= ATA_ALL_DEVICES;
2003 		ehc->i.action |= ATA_EH_RESET;
2004 		ehc->i.err_mask |= AC_ERR_OTHER;
2005 	}
2006 
2007 	/* analyze NCQ failure */
2008 	ata_eh_analyze_ncq_error(link);
2009 
2010 	/* any real error trumps AC_ERR_OTHER */
2011 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2012 		ehc->i.err_mask &= ~AC_ERR_OTHER;
2013 
2014 	all_err_mask |= ehc->i.err_mask;
2015 
2016 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2017 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2018 
2019 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2020 		    ata_dev_phys_link(qc->dev) != link)
2021 			continue;
2022 
2023 		/* inherit upper level err_mask */
2024 		qc->err_mask |= ehc->i.err_mask;
2025 
2026 		/* analyze TF */
2027 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2028 
2029 		/* DEV errors are probably spurious in case of ATA_BUS error */
2030 		if (qc->err_mask & AC_ERR_ATA_BUS)
2031 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2032 					  AC_ERR_INVALID);
2033 
2034 		/* any real error trumps unknown error */
2035 		if (qc->err_mask & ~AC_ERR_OTHER)
2036 			qc->err_mask &= ~AC_ERR_OTHER;
2037 
2038 		/* SENSE_VALID trumps dev/unknown error and revalidation */
2039 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2040 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2041 
2042 		/* determine whether the command is worth retrying */
2043 		if (qc->flags & ATA_QCFLAG_IO ||
2044 		    (!(qc->err_mask & AC_ERR_INVALID) &&
2045 		     qc->err_mask != AC_ERR_DEV))
2046 			qc->flags |= ATA_QCFLAG_RETRY;
2047 
2048 		/* accumulate error info */
2049 		ehc->i.dev = qc->dev;
2050 		all_err_mask |= qc->err_mask;
2051 		if (qc->flags & ATA_QCFLAG_IO)
2052 			eflags |= ATA_EFLAG_IS_IO;
2053 	}
2054 
2055 	/* enforce default EH actions */
2056 	if (ap->pflags & ATA_PFLAG_FROZEN ||
2057 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2058 		ehc->i.action |= ATA_EH_RESET;
2059 	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2060 		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2061 		ehc->i.action |= ATA_EH_REVALIDATE;
2062 
2063 	/* If we have offending qcs and the associated failed device,
2064 	 * perform per-dev EH action only on the offending device.
2065 	 */
2066 	if (ehc->i.dev) {
2067 		ehc->i.dev_action[ehc->i.dev->devno] |=
2068 			ehc->i.action & ATA_EH_PERDEV_MASK;
2069 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2070 	}
2071 
2072 	/* propagate timeout to host link */
2073 	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2074 		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2075 
2076 	/* record error and consider speeding down */
2077 	dev = ehc->i.dev;
2078 	if (!dev && ata_link_max_devices(link) == 1 &&
2079 	    ata_dev_enabled(link->device))
2080 		dev = link->device;
2081 
2082 	if (dev) {
2083 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2084 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2085 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2086 	}
2087 
2088 	DPRINTK("EXIT\n");
2089 }
2090 
2091 /**
2092  *	ata_eh_autopsy - analyze error and determine recovery action
2093  *	@ap: host port to perform autopsy on
2094  *
2095  *	Analyze all links of @ap and determine why they failed and
2096  *	which recovery actions are needed.
2097  *
2098  *	LOCKING:
2099  *	Kernel thread context (may sleep).
2100  */
2101 void ata_eh_autopsy(struct ata_port *ap)
2102 {
2103 	struct ata_link *link;
2104 
2105 	ata_for_each_link(link, ap, EDGE)
2106 		ata_eh_link_autopsy(link);
2107 
2108 	/* Handle the frigging slave link.  Autopsy is done similarly
2109 	 * but actions and flags are transferred over to the master
2110 	 * link and handled from there.
2111 	 */
2112 	if (ap->slave_link) {
2113 		struct ata_eh_context *mehc = &ap->link.eh_context;
2114 		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2115 
2116 		/* transfer control flags from master to slave */
2117 		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2118 
2119 		/* perform autopsy on the slave link */
2120 		ata_eh_link_autopsy(ap->slave_link);
2121 
2122 		/* transfer actions from slave to master and clear slave */
2123 		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2124 		mehc->i.action		|= sehc->i.action;
2125 		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2126 		mehc->i.flags		|= sehc->i.flags;
2127 		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2128 	}
2129 
2130 	/* Autopsy of fanout ports can affect host link autopsy.
2131 	 * Perform host link autopsy last.
2132 	 */
2133 	if (sata_pmp_attached(ap))
2134 		ata_eh_link_autopsy(&ap->link);
2135 }
2136 
2137 /**
2138  *	ata_get_cmd_descript - get description for ATA command
2139  *	@command: ATA command code to get description for
2140  *
2141  *	Return a textual description of the given command, or NULL if the
2142  *	command is not known.
2143  *
2144  *	LOCKING:
2145  *	None
2146  */
2147 const char *ata_get_cmd_descript(u8 command)
2148 {
2149 #ifdef CONFIG_ATA_VERBOSE_ERROR
2150 	static const struct
2151 	{
2152 		u8 command;
2153 		const char *text;
2154 	} cmd_descr[] = {
2155 		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2156 		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
2157 		{ ATA_CMD_STANDBY, 		"STANDBY" },
2158 		{ ATA_CMD_IDLE, 		"IDLE" },
2159 		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
2160 		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
2161 		{ ATA_CMD_NOP,			"NOP" },
2162 		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
2163 		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
2164 		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
2165 		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
2166 		{ ATA_CMD_SERVICE, 		"SERVICE" },
2167 		{ ATA_CMD_READ, 		"READ DMA" },
2168 		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
2169 		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED EXT" },
2170 		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
2171 		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2172 		{ ATA_CMD_WRITE, 		"WRITE DMA" },
2173 		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
2174 		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
2175 		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
2176 		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2177 		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2178 		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2179 		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2180 		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2181 		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2182 		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2183 		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2184 		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2185 		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2186 		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2187 		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2188 		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2189 		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
2190 		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2191 		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2192 		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2193 		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2194 		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2195 		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2196 		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2197 		{ ATA_CMD_SLEEP,		"SLEEP" },
2198 		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2199 		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2200 		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2201 		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2202 		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2203 		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2204 		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2205 		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2206 		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
2207 		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2208 		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
2209 		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2210 		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
2211 		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2212 		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2213 		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2214 		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2215 		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2216 		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2217 		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2218 		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2219 		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2220 		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2221 		{ ATA_CMD_SMART,		"SMART" },
2222 		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2223 		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2224 		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
2225 		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
2226 		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
2227 		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2228 		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2229 		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2230 		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2231 		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2232 		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2233 		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2234 		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2235 		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2236 		{ 0,				NULL } /* terminate list */
2237 	};
2238 
2239 	unsigned int i;
2240 	for (i = 0; cmd_descr[i].text; i++)
2241 		if (cmd_descr[i].command == command)
2242 			return cmd_descr[i].text;
2243 #endif
2244 
2245 	return NULL;
2246 }
2247 
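/*
 * A usage sketch with a hypothetical caller: translating an opcode
 * while composing a log message.  0x25 (READ DMA EXT) is an arbitrary
 * example value; a NULL return must be handled by the caller.
 */
#if 0	/* illustrative example, not compiled */
static void example_log_cmd(struct ata_device *dev, u8 opcode)
{
	const char *name = ata_get_cmd_descript(opcode);

	ata_dev_printk(dev, KERN_INFO, "cmd 0x%02x (%s)\n",
		       opcode, name ? name : "unknown");
}
#endif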
2248 /**
2249  *	ata_eh_link_report - report error handling to user
2250  *	@link: ATA link EH is going on
2251  *
2252  *	Report EH to user.
2253  *
2254  *	LOCKING:
2255  *	None.
2256  */
2257 static void ata_eh_link_report(struct ata_link *link)
2258 {
2259 	struct ata_port *ap = link->ap;
2260 	struct ata_eh_context *ehc = &link->eh_context;
2261 	const char *frozen, *desc;
2262 	char tries_buf[6];
2263 	int tag, nr_failed = 0;
2264 
2265 	if (ehc->i.flags & ATA_EHI_QUIET)
2266 		return;
2267 
2268 	desc = NULL;
2269 	if (ehc->i.desc[0] != '\0')
2270 		desc = ehc->i.desc;
2271 
2272 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2273 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2274 
2275 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2276 		    ata_dev_phys_link(qc->dev) != link ||
2277 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2278 		     qc->err_mask == AC_ERR_DEV))
2279 			continue;
2280 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2281 			continue;
2282 
2283 		nr_failed++;
2284 	}
2285 
2286 	if (!nr_failed && !ehc->i.err_mask)
2287 		return;
2288 
2289 	frozen = "";
2290 	if (ap->pflags & ATA_PFLAG_FROZEN)
2291 		frozen = " frozen";
2292 
2293 	memset(tries_buf, 0, sizeof(tries_buf));
2294 	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2295 		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2296 			 ap->eh_tries);
2297 
2298 	if (ehc->i.dev) {
2299 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2300 			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2301 			       ehc->i.err_mask, link->sactive, ehc->i.serror,
2302 			       ehc->i.action, frozen, tries_buf);
2303 		if (desc)
2304 			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2305 	} else {
2306 		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2307 				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2308 				ehc->i.err_mask, link->sactive, ehc->i.serror,
2309 				ehc->i.action, frozen, tries_buf);
2310 		if (desc)
2311 			ata_link_printk(link, KERN_ERR, "%s\n", desc);
2312 	}
2313 
2314 #ifdef CONFIG_ATA_VERBOSE_ERROR
2315 	if (ehc->i.serror)
2316 		ata_link_printk(link, KERN_ERR,
2317 		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2318 		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2319 		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2320 		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2321 		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2322 		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2323 		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2324 		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2325 		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2326 		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2327 		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2328 		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2329 		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2330 		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2331 		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2332 		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2333 		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2334 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2335 #endif
2336 
2337 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2338 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2339 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2340 		const u8 *cdb = qc->cdb;
2341 		char data_buf[20] = "";
2342 		char cdb_buf[70] = "";
2343 
2344 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2345 		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2346 			continue;
2347 
2348 		if (qc->dma_dir != DMA_NONE) {
2349 			static const char *dma_str[] = {
2350 				[DMA_BIDIRECTIONAL]	= "bidi",
2351 				[DMA_TO_DEVICE]		= "out",
2352 				[DMA_FROM_DEVICE]	= "in",
2353 			};
2354 			static const char *prot_str[] = {
2355 				[ATA_PROT_PIO]		= "pio",
2356 				[ATA_PROT_DMA]		= "dma",
2357 				[ATA_PROT_NCQ]		= "ncq",
2358 				[ATAPI_PROT_PIO]	= "pio",
2359 				[ATAPI_PROT_DMA]	= "dma",
2360 			};
2361 
2362 			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2363 				 prot_str[qc->tf.protocol], qc->nbytes,
2364 				 dma_str[qc->dma_dir]);
2365 		}
2366 
2367 		if (ata_is_atapi(qc->tf.protocol)) {
2368 			if (qc->scsicmd)
2369 				scsi_print_command(qc->scsicmd);
2370 			else
2371 				snprintf(cdb_buf, sizeof(cdb_buf),
2372 				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2373 				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2374 				 cdb[0], cdb[1], cdb[2], cdb[3],
2375 				 cdb[4], cdb[5], cdb[6], cdb[7],
2376 				 cdb[8], cdb[9], cdb[10], cdb[11],
2377 				 cdb[12], cdb[13], cdb[14], cdb[15]);
2378 		} else {
2379 			const char *descr = ata_get_cmd_descript(cmd->command);
2380 			if (descr)
2381 				ata_dev_printk(qc->dev, KERN_ERR,
2382 					"failed command: %s\n", descr);
2383 		}
2384 
2385 		ata_dev_printk(qc->dev, KERN_ERR,
2386 			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2387 			"tag %d%s\n         %s"
2388 			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2389 			"Emask 0x%x (%s)%s\n",
2390 			cmd->command, cmd->feature, cmd->nsect,
2391 			cmd->lbal, cmd->lbam, cmd->lbah,
2392 			cmd->hob_feature, cmd->hob_nsect,
2393 			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2394 			cmd->device, qc->tag, data_buf, cdb_buf,
2395 			res->command, res->feature, res->nsect,
2396 			res->lbal, res->lbam, res->lbah,
2397 			res->hob_feature, res->hob_nsect,
2398 			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2399 			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2400 			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2401 
2402 #ifdef CONFIG_ATA_VERBOSE_ERROR
2403 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2404 				    ATA_ERR)) {
2405 			if (res->command & ATA_BUSY)
2406 				ata_dev_printk(qc->dev, KERN_ERR,
2407 				  "status: { Busy }\n");
2408 			else
2409 				ata_dev_printk(qc->dev, KERN_ERR,
2410 				  "status: { %s%s%s%s}\n",
2411 				  res->command & ATA_DRDY ? "DRDY " : "",
2412 				  res->command & ATA_DF ? "DF " : "",
2413 				  res->command & ATA_DRQ ? "DRQ " : "",
2414 				  res->command & ATA_ERR ? "ERR " : "");
2415 		}
2416 
2417 		if (cmd->command != ATA_CMD_PACKET &&
2418 		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2419 				     ATA_ABORTED)))
2420 			ata_dev_printk(qc->dev, KERN_ERR,
2421 			  "error: { %s%s%s%s}\n",
2422 			  res->feature & ATA_ICRC ? "ICRC " : "",
2423 			  res->feature & ATA_UNC ? "UNC " : "",
2424 			  res->feature & ATA_IDNF ? "IDNF " : "",
2425 			  res->feature & ATA_ABORTED ? "ABRT " : "");
2426 #endif
2427 	}
2428 }
2429 
2430 /**
2431  *	ata_eh_report - report error handling to user
2432  *	@ap: ATA port to report EH about
2433  *
2434  *	Report EH to user.
2435  *
2436  *	LOCKING:
2437  *	None.
2438  */
2439 void ata_eh_report(struct ata_port *ap)
2440 {
2441 	struct ata_link *link;
2442 
2443 	ata_for_each_link(link, ap, HOST_FIRST)
2444 		ata_eh_link_report(link);
2445 }
2446 
2447 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2448 			unsigned int *classes, unsigned long deadline,
2449 			bool clear_classes)
2450 {
2451 	struct ata_device *dev;
2452 
2453 	if (clear_classes)
2454 		ata_for_each_dev(dev, link, ALL)
2455 			classes[dev->devno] = ATA_DEV_UNKNOWN;
2456 
2457 	return reset(link, classes, deadline);
2458 }
2459 
2460 static int ata_eh_followup_srst_needed(struct ata_link *link,
2461 				       int rc, const unsigned int *classes)
2462 {
2463 	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2464 		return 0;
2465 	if (rc == -EAGAIN)
2466 		return 1;
2467 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2468 		return 1;
2469 	return 0;
2470 }
2471 
2472 int ata_eh_reset(struct ata_link *link, int classify,
2473 		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2474 		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2475 {
2476 	struct ata_port *ap = link->ap;
2477 	struct ata_link *slave = ap->slave_link;
2478 	struct ata_eh_context *ehc = &link->eh_context;
2479 	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2480 	unsigned int *classes = ehc->classes;
2481 	unsigned int lflags = link->flags;
2482 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2483 	int max_tries = 0, try = 0;
2484 	struct ata_link *failed_link;
2485 	struct ata_device *dev;
2486 	unsigned long deadline, now;
2487 	ata_reset_fn_t reset;
2488 	unsigned long flags;
2489 	u32 sstatus;
2490 	int nr_unknown, rc;
2491 
2492 	/*
2493 	 * Prepare to reset
2494 	 */
2495 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2496 		max_tries++;
2497 	if (link->flags & ATA_LFLAG_NO_HRST)
2498 		hardreset = NULL;
2499 	if (link->flags & ATA_LFLAG_NO_SRST)
2500 		softreset = NULL;
2501 
2502 	/* make sure each reset attempt is at least COOL_DOWN apart */
2503 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2504 		now = jiffies;
2505 		WARN_ON(time_after(ehc->last_reset, now));
2506 		deadline = ata_deadline(ehc->last_reset,
2507 					ATA_EH_RESET_COOL_DOWN);
2508 		if (time_before(now, deadline))
2509 			schedule_timeout_uninterruptible(deadline - now);
2510 	}
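	/* ata_deadline(from, msecs) is from + msecs_to_jiffies(msecs),
	 * so the sleep above enforces the 5 second
	 * ATA_EH_RESET_COOL_DOWN gap between consecutive resets.
	 */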
2511 
2512 	spin_lock_irqsave(ap->lock, flags);
2513 	ap->pflags |= ATA_PFLAG_RESETTING;
2514 	spin_unlock_irqrestore(ap->lock, flags);
2515 
2516 	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2517 
2518 	ata_for_each_dev(dev, link, ALL) {
2519 		/* If we issue an SRST then an ATA drive (not ATAPI)
2520 		 * may change configuration and be in PIO0 timing. If
2521 		 * we do a hard reset (or are coming from power on)
2522 		 * this is true for ATA or ATAPI. Until we've set a
2523 		 * suitable controller mode we should not touch the
2524 		 * bus as we may be talking too fast.
2525 		 */
2526 		dev->pio_mode = XFER_PIO_0;
2527 
2528 		/* If the controller has a pio mode setup function
2529 		 * then use it to set the chipset to rights. Don't
2530 		 * touch the DMA setup as that will be dealt with when
2531 		 * configuring devices.
2532 		 */
2533 		if (ap->ops->set_piomode)
2534 			ap->ops->set_piomode(ap, dev);
2535 	}
2536 
2537 	/* prefer hardreset */
2538 	reset = NULL;
2539 	ehc->i.action &= ~ATA_EH_RESET;
2540 	if (hardreset) {
2541 		reset = hardreset;
2542 		ehc->i.action |= ATA_EH_HARDRESET;
2543 	} else if (softreset) {
2544 		reset = softreset;
2545 		ehc->i.action |= ATA_EH_SOFTRESET;
2546 	}
2547 
2548 	if (prereset) {
2549 		unsigned long deadline = ata_deadline(jiffies,
2550 						      ATA_EH_PRERESET_TIMEOUT);
2551 
2552 		if (slave) {
2553 			sehc->i.action &= ~ATA_EH_RESET;
2554 			sehc->i.action |= ehc->i.action;
2555 		}
2556 
2557 		rc = prereset(link, deadline);
2558 
2559 		/* If present, do prereset on slave link too.  Reset
2560 		 * is skipped iff both master and slave links report
2561 		 * -ENOENT or clear ATA_EH_RESET.
2562 		 */
2563 		if (slave && (rc == 0 || rc == -ENOENT)) {
2564 			int tmp;
2565 
2566 			tmp = prereset(slave, deadline);
2567 			if (tmp != -ENOENT)
2568 				rc = tmp;
2569 
2570 			ehc->i.action |= sehc->i.action;
2571 		}
2572 
2573 		if (rc) {
2574 			if (rc == -ENOENT) {
2575 				ata_link_printk(link, KERN_DEBUG,
2576 						"port disabled. ignoring.\n");
2577 				ehc->i.action &= ~ATA_EH_RESET;
2578 
2579 				ata_for_each_dev(dev, link, ALL)
2580 					classes[dev->devno] = ATA_DEV_NONE;
2581 
2582 				rc = 0;
2583 			} else
2584 				ata_link_printk(link, KERN_ERR,
2585 					"prereset failed (errno=%d)\n", rc);
2586 			goto out;
2587 		}
2588 
2589 		/* prereset() might have cleared ATA_EH_RESET.  If so,
2590 		 * bang classes, thaw and return.
2591 		 */
2592 		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2593 			ata_for_each_dev(dev, link, ALL)
2594 				classes[dev->devno] = ATA_DEV_NONE;
2595 			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2596 			    ata_is_host_link(link))
2597 				ata_eh_thaw_port(ap);
2598 			rc = 0;
2599 			goto out;
2600 		}
2601 	}
2602 
2603  retry:
2604 	/*
2605 	 * Perform reset
2606 	 */
2607 	if (ata_is_host_link(link))
2608 		ata_eh_freeze_port(ap);
2609 
2610 	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2611 
2612 	if (reset) {
2613 		if (verbose)
2614 			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2615 					reset == softreset ? "soft" : "hard");
2616 
2617 		/* mark that this EH session started with reset */
2618 		ehc->last_reset = jiffies;
2619 		if (reset == hardreset)
2620 			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2621 		else
2622 			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2623 
2624 		rc = ata_do_reset(link, reset, classes, deadline, true);
2625 		if (rc && rc != -EAGAIN) {
2626 			failed_link = link;
2627 			goto fail;
2628 		}
2629 
2630 		/* hardreset slave link if existent */
2631 		if (slave && reset == hardreset) {
2632 			int tmp;
2633 
2634 			if (verbose)
2635 				ata_link_printk(slave, KERN_INFO,
2636 						"hard resetting link\n");
2637 
2638 			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2639 			tmp = ata_do_reset(slave, reset, classes, deadline,
2640 					   false);
2641 			switch (tmp) {
2642 			case -EAGAIN:
2643 				rc = -EAGAIN;
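				/* fall through */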
2644 			case 0:
2645 				break;
2646 			default:
2647 				failed_link = slave;
2648 				rc = tmp;
2649 				goto fail;
2650 			}
2651 		}
2652 
2653 		/* perform follow-up SRST if necessary */
2654 		if (reset == hardreset &&
2655 		    ata_eh_followup_srst_needed(link, rc, classes)) {
2656 			reset = softreset;
2657 
2658 			if (!reset) {
2659 				ata_link_printk(link, KERN_ERR,
2660 						"follow-up softreset required "
2661 						"but no softreset avaliable\n");
2662 				failed_link = link;
2663 				rc = -EINVAL;
2664 				goto fail;
2665 			}
2666 
2667 			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2668 			rc = ata_do_reset(link, reset, classes, deadline, true);
2669 			if (rc) {
2670 				failed_link = link;
2671 				goto fail;
2672 			}
2673 		}
2674 	} else {
2675 		if (verbose)
2676 			ata_link_printk(link, KERN_INFO, "no reset method "
2677 					"available, skipping reset\n");
2678 		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2679 			lflags |= ATA_LFLAG_ASSUME_ATA;
2680 	}
2681 
2682 	/*
2683 	 * Post-reset processing
2684 	 */
2685 	ata_for_each_dev(dev, link, ALL) {
2686 		/* After the reset, the device state is PIO 0 and the
2687 		 * controller state is undefined.  Reset also wakes up
2688 		 * drives from sleeping mode.
2689 		 */
2690 		dev->pio_mode = XFER_PIO_0;
2691 		dev->flags &= ~ATA_DFLAG_SLEEPING;
2692 
2693 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2694 			continue;
2695 
2696 		/* apply class override */
2697 		if (lflags & ATA_LFLAG_ASSUME_ATA)
2698 			classes[dev->devno] = ATA_DEV_ATA;
2699 		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2700 			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2701 	}
2702 
2703 	/* record current link speed */
2704 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2705 		link->sata_spd = (sstatus >> 4) & 0xf;
2706 	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2707 		slave->sata_spd = (sstatus >> 4) & 0xf;
2708 
2709 	/* thaw the port */
2710 	if (ata_is_host_link(link))
2711 		ata_eh_thaw_port(ap);
2712 
2713 	/* postreset() should clear hardware SError.  Although SError
2714 	 * is cleared during link resume, clearing SError here is
2715 	 * necessary as some PHYs raise hotplug events after SRST.
2716 	 * This introduces a race condition where hotplug occurs between
2717 	 * reset and here.  This race is mitigated by cross checking
2718 	 * link onlineness and classification result later.
2719 	 */
2720 	if (postreset) {
2721 		postreset(link, classes);
2722 		if (slave)
2723 			postreset(slave, classes);
2724 	}
2725 
2726 	/*
2727 	 * Some controllers can't be frozen very well and may set
2728 	 * spurious error conditions during reset.  Clear accumulated
2729 	 * error information.  As reset is the final recovery action,
2730 	 * nothing is lost by doing this.
2731 	 */
2732 	spin_lock_irqsave(link->ap->lock, flags);
2733 	memset(&link->eh_info, 0, sizeof(link->eh_info));
2734 	if (slave)
2735 		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2736 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2737 	spin_unlock_irqrestore(link->ap->lock, flags);
2738 
2739 	/*
2740 	 * Make sure onlineness and classification result correspond.
2741 	 * Hotplug could have happened during reset and some
2742 	 * controllers fail to wait while a drive is spinning up after
2743 	 * being hotplugged, causing misdetection.  By cross checking
2744 	 * link on/offlineness and classification result, those
2745 	 * conditions can be reliably detected and retried.
2746 	 */
2747 	nr_unknown = 0;
2748 	ata_for_each_dev(dev, link, ALL) {
2749 		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2750 			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2751 				ata_dev_printk(dev, KERN_DEBUG, "link online "
2752 					       "but device misclassifed\n");
2753 				classes[dev->devno] = ATA_DEV_NONE;
2754 				nr_unknown++;
2755 			}
2756 		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2757 			if (ata_class_enabled(classes[dev->devno]))
2758 				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2759 					       "clearing class %d to NONE\n",
2760 					       classes[dev->devno]);
2761 			classes[dev->devno] = ATA_DEV_NONE;
2762 		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2763 			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2764 				       "clearing UNKNOWN to NONE\n");
2765 			classes[dev->devno] = ATA_DEV_NONE;
2766 		}
2767 	}
2768 
2769 	if (classify && nr_unknown) {
2770 		if (try < max_tries) {
2771 			ata_link_printk(link, KERN_WARNING, "link online but "
2772 					"%d devices misclassified, retrying\n",
2773 					nr_unknown);
2774 			failed_link = link;
2775 			rc = -EAGAIN;
2776 			goto fail;
2777 		}
2778 		ata_link_printk(link, KERN_WARNING,
2779 				"link online but %d devices misclassified, "
2780 				"device detection might fail\n", nr_unknown);
2781 	}
2782 
2783 	/* reset successful, schedule revalidation */
2784 	ata_eh_done(link, NULL, ATA_EH_RESET);
2785 	if (slave)
2786 		ata_eh_done(slave, NULL, ATA_EH_RESET);
2787 	ehc->last_reset = jiffies;		/* update to completion time */
2788 	ehc->i.action |= ATA_EH_REVALIDATE;
2789 	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */
2790 
2791 	rc = 0;
2792  out:
2793 	/* clear hotplug flag */
2794 	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2795 	if (slave)
2796 		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2797 
2798 	spin_lock_irqsave(ap->lock, flags);
2799 	ap->pflags &= ~ATA_PFLAG_RESETTING;
2800 	spin_unlock_irqrestore(ap->lock, flags);
2801 
2802 	return rc;
2803 
2804  fail:
2805 	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2806 	if (!ata_is_host_link(link) &&
2807 	    sata_scr_read(link, SCR_STATUS, &sstatus))
2808 		rc = -ERESTART;
2809 
2810 	if (rc == -ERESTART || try >= max_tries)
2811 		goto out;
2812 
2813 	now = jiffies;
2814 	if (time_before(now, deadline)) {
2815 		unsigned long delta = deadline - now;
2816 
2817 		ata_link_printk(failed_link, KERN_WARNING,
2818 			"reset failed (errno=%d), retrying in %u secs\n",
2819 			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2820 
2821 		while (delta)
2822 			delta = schedule_timeout_uninterruptible(delta);
2823 	}
2824 
2825 	if (try == max_tries - 1) {
2826 		sata_down_spd_limit(link, 0);
2827 		if (slave)
2828 			sata_down_spd_limit(slave, 0);
2829 	} else if (rc == -EPIPE)
2830 		sata_down_spd_limit(failed_link, 0);
2831 
2832 	if (hardreset)
2833 		reset = hardreset;
2834 	goto retry;
2835 }
2836 
2837 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2838 {
2839 	struct ata_link *link;
2840 	struct ata_device *dev;
2841 	unsigned long flags;
2842 
2843 	/*
2844 	 * This function can be thought of as an extended version of
2845 	 * ata_eh_about_to_do() specially crafted to accommodate the
2846 	 * requirements of ATA_EH_PARK handling. Since the EH thread
2847 	 * does not leave the do {} while () loop in ata_eh_recover as
2848 	 * long as the timeout for a park request to *one* device on
2849 	 * the port has not expired, and since we still want to pick
2850 	 * up park requests to other devices on the same port or
2851 	 * timeout updates for the same device, we have to pull
2852 	 * ATA_EH_PARK actions from eh_info into eh_context.i
2853 	 * ourselves at the beginning of each pass over the loop.
2854 	 *
2855 	 * Additionally, all write accesses to &ap->park_req_pending
2856 	 * through INIT_COMPLETION() (see below) or complete_all()
2857 	 * (see ata_scsi_park_store()) are protected by the host lock.
2858 	 * As a result we have that park_req_pending.done is zero on
2859 	 * exit from this function, i.e. when ATA_EH_PARK actions for
2860 	 * *all* devices on port ap have been pulled into the
2861 	 * respective eh_context structs. If, and only if,
2862 	 * park_req_pending.done is non-zero by the time we reach
2863 	 * wait_for_completion_timeout(), another ATA_EH_PARK action
2864 	 * has been scheduled for at least one of the devices on port
2865 	 * ap and we have to cycle over the do {} while () loop in
2866 	 * ata_eh_recover() again.
2867 	 */
2868 
2869 	spin_lock_irqsave(ap->lock, flags);
2870 	INIT_COMPLETION(ap->park_req_pending);
2871 	ata_for_each_link(link, ap, EDGE) {
2872 		ata_for_each_dev(dev, link, ALL) {
2873 			struct ata_eh_info *ehi = &link->eh_info;
2874 
2875 			link->eh_context.i.dev_action[dev->devno] |=
2876 				ehi->dev_action[dev->devno] & ATA_EH_PARK;
2877 			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2878 		}
2879 	}
2880 	spin_unlock_irqrestore(ap->lock, flags);
2881 }
2882 
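/*
 * The handshake described above, summarized (ata_scsi_park_store() is
 * the complete_all() side):
 *
 *	sysfs writer				EH thread, each loop pass
 *	------------				-------------------------
 *	set ATA_EH_PARK in eh_info		INIT_COMPLETION(park_req_pending)
 *	complete_all(&ap->park_req_pending)	pull PARK into eh_context.i
 *						wait_for_completion_timeout()
 *
 * A completion that becomes done during a pass means another park
 * request arrived and the do {} while () loop in ata_eh_recover()
 * must run again.
 */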
2883 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2884 {
2885 	struct ata_eh_context *ehc = &dev->link->eh_context;
2886 	struct ata_taskfile tf;
2887 	unsigned int err_mask;
2888 
2889 	ata_tf_init(dev, &tf);
2890 	if (park) {
2891 		ehc->unloaded_mask |= 1 << dev->devno;
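		/* IDLE IMMEDIATE with UNLOAD FEATURE: the signature is
		 * FEATURE 0x44 with LBA 0x554e4c ("UNL"); the device
		 * acknowledges a successful unload by returning 0xc4 in
		 * LBAL, which is checked after command completion below.
		 */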
2892 		tf.command = ATA_CMD_IDLEIMMEDIATE;
2893 		tf.feature = 0x44;
2894 		tf.lbal = 0x4c;
2895 		tf.lbam = 0x4e;
2896 		tf.lbah = 0x55;
2897 	} else {
2898 		ehc->unloaded_mask &= ~(1 << dev->devno);
2899 		tf.command = ATA_CMD_CHK_POWER;
2900 	}
2901 
2902 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2903 	tf.protocol = ATA_PROT_NODATA;
2904 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2905 	if (park && (err_mask || tf.lbal != 0xc4)) {
2906 		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2907 		ehc->unloaded_mask &= ~(1 << dev->devno);
2908 	}
2909 }
2910 
2911 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2912 					struct ata_device **r_failed_dev)
2913 {
2914 	struct ata_port *ap = link->ap;
2915 	struct ata_eh_context *ehc = &link->eh_context;
2916 	struct ata_device *dev;
2917 	unsigned int new_mask = 0;
2918 	unsigned long flags;
2919 	int rc = 0;
2920 
2921 	DPRINTK("ENTER\n");
2922 
2923 	/* For PATA drive side cable detection to work, IDENTIFY must
2924 	 * be done backwards such that PDIAG- is released by the slave
2925 	 * device before the master device is identified.
2926 	 */
2927 	ata_for_each_dev(dev, link, ALL_REVERSE) {
2928 		unsigned int action = ata_eh_dev_action(dev);
2929 		unsigned int readid_flags = 0;
2930 
2931 		if (ehc->i.flags & ATA_EHI_DID_RESET)
2932 			readid_flags |= ATA_READID_POSTRESET;
2933 
2934 		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2935 			WARN_ON(dev->class == ATA_DEV_PMP);
2936 
2937 			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2938 				rc = -EIO;
2939 				goto err;
2940 			}
2941 
2942 			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2943 			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2944 						readid_flags);
2945 			if (rc)
2946 				goto err;
2947 
2948 			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2949 
2950 			/* Configuration may have changed, reconfigure
2951 			 * transfer mode.
2952 			 */
2953 			ehc->i.flags |= ATA_EHI_SETMODE;
2954 
2955 			/* schedule the scsi_rescan_device() here */
2956 			schedule_work(&(ap->scsi_rescan_task));
2957 		} else if (dev->class == ATA_DEV_UNKNOWN &&
2958 			   ehc->tries[dev->devno] &&
2959 			   ata_class_enabled(ehc->classes[dev->devno])) {
2960 			/* Temporarily set dev->class, it will be
2961 			 * permanently set once all configurations are
2962 			 * complete.  This is necessary because new
2963 			 * device configuration is done in two
2964 			 * separate loops.
2965 			 */
2966 			dev->class = ehc->classes[dev->devno];
2967 
2968 			if (dev->class == ATA_DEV_PMP)
2969 				rc = sata_pmp_attach(dev);
2970 			else
2971 				rc = ata_dev_read_id(dev, &dev->class,
2972 						     readid_flags, dev->id);
2973 
2974 			/* read_id might have changed class, store and reset */
2975 			ehc->classes[dev->devno] = dev->class;
2976 			dev->class = ATA_DEV_UNKNOWN;
2977 
2978 			switch (rc) {
2979 			case 0:
2980 				/* clear error info accumulated during probe */
2981 				ata_ering_clear(&dev->ering);
2982 				new_mask |= 1 << dev->devno;
2983 				break;
2984 			case -ENOENT:
2985 				/* IDENTIFY was issued to a non-existent
2986 				 * device.  No need to reset.  Just
2987 				 * thaw and ignore the device.
2988 				 */
2989 				ata_eh_thaw_port(ap);
2990 				break;
2991 			default:
2992 				goto err;
2993 			}
2994 		}
2995 	}
2996 
2997 	/* PDIAG- should have been released, ask cable type if post-reset */
2998 	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2999 		if (ap->ops->cable_detect)
3000 			ap->cbl = ap->ops->cable_detect(ap);
3001 		ata_force_cbl(ap);
3002 	}
3003 
3004 	/* Configure new devices forward such that the user doesn't see
3005 	 * device detection messages backwards.
3006 	 */
3007 	ata_for_each_dev(dev, link, ALL) {
3008 		if (!(new_mask & (1 << dev->devno)))
3009 			continue;
3010 
3011 		dev->class = ehc->classes[dev->devno];
3012 
3013 		if (dev->class == ATA_DEV_PMP)
3014 			continue;
3015 
3016 		ehc->i.flags |= ATA_EHI_PRINTINFO;
3017 		rc = ata_dev_configure(dev);
3018 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3019 		if (rc) {
3020 			dev->class = ATA_DEV_UNKNOWN;
3021 			goto err;
3022 		}
3023 
3024 		spin_lock_irqsave(ap->lock, flags);
3025 		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3026 		spin_unlock_irqrestore(ap->lock, flags);
3027 
3028 		/* new device discovered, configure xfermode */
3029 		ehc->i.flags |= ATA_EHI_SETMODE;
3030 	}
3031 
3032 	return 0;
3033 
3034  err:
3035 	*r_failed_dev = dev;
3036 	DPRINTK("EXIT rc=%d\n", rc);
3037 	return rc;
3038 }
3039 
3040 /**
3041  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3042  *	@link: link on which timings will be programmed
3043  *	@r_failed_dev: out parameter for failed device
3044  *
3045  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3046  *	ata_set_mode() fails, pointer to the failing device is
3047  *	returned in @r_failed_dev.
3048  *
3049  *	LOCKING:
3050  *	PCI/etc. bus probe sem.
3051  *
3052  *	RETURNS:
3053  *	0 on success, negative errno otherwise
3054  */
3055 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3056 {
3057 	struct ata_port *ap = link->ap;
3058 	struct ata_device *dev;
3059 	int rc;
3060 
3061 	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3062 	ata_for_each_dev(dev, link, ENABLED) {
3063 		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3064 			struct ata_ering_entry *ent;
3065 
3066 			ent = ata_ering_top(&dev->ering);
3067 			if (ent)
3068 				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3069 		}
3070 	}
3071 
3072 	/* has private set_mode? */
3073 	if (ap->ops->set_mode)
3074 		rc = ap->ops->set_mode(link, r_failed_dev);
3075 	else
3076 		rc = ata_do_set_mode(link, r_failed_dev);
3077 
3078 	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3079 	ata_for_each_dev(dev, link, ENABLED) {
3080 		struct ata_eh_context *ehc = &link->eh_context;
3081 		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3082 		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3083 
3084 		if (dev->xfer_mode != saved_xfer_mode ||
3085 		    ata_ncq_enabled(dev) != saved_ncq)
3086 			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3087 	}
3088 
3089 	return rc;
3090 }
3091 
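/*
 * Note on the DUBIOUS_XFER lifecycle around ata_set_mode(): a mode
 * change sets ATA_DFLAG_DUBIOUS_XFER (second loop above), any error
 * recorded while that flag is set carries ATA_EFLAG_DUBIOUS_XFER into
 * the ering (see ata_eh_link_autopsy()), and once a transfer has been
 * verified and the device flag dropped, the first loop above scrubs
 * the eflag from the newest ering entry.
 */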
3092 /**
3093  *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3094  *	@dev: ATAPI device to clear UA for
3095  *
3096  *	Resets and other operations can make an ATAPI device raise
3097  *	UNIT ATTENTION which causes the next operation to fail.  This
3098  *	function clears UA.
3099  *
3100  *	LOCKING:
3101  *	EH context (may sleep).
3102  *
3103  *	RETURNS:
3104  *	0 on success, -errno on failure.
3105  */
3106 static int atapi_eh_clear_ua(struct ata_device *dev)
3107 {
3108 	int i;
3109 
3110 	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3111 		u8 *sense_buffer = dev->link->ap->sector_buf;
3112 		u8 sense_key = 0;
3113 		unsigned int err_mask;
3114 
3115 		err_mask = atapi_eh_tur(dev, &sense_key);
3116 		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3117 			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3118 				"failed (err_mask=0x%x)\n", err_mask);
3119 			return -EIO;
3120 		}
3121 
3122 		if (!err_mask || sense_key != UNIT_ATTENTION)
3123 			return 0;
3124 
3125 		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3126 		if (err_mask) {
3127 			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3128 				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3129 			return -EIO;
3130 		}
3131 	}
3132 
3133 	ata_dev_printk(dev, KERN_WARNING,
3134 		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3135 
3136 	return 0;
3137 }
3138 
3139 /**
3140  *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3141  *	@dev: ATA device which may need FLUSH retry
3142  *
3143  *	If @dev failed FLUSH, it needs to be reported to the upper
3144  *	layer immediately as it means that @dev failed to remap and
3145  *	already lost at least a sector, and further FLUSH retries won't
3146  *	make any difference to the lost sector.  However, if FLUSH
3147  *	failed for another reason, for example a transmission error,
3148  *	FLUSH needs to be retried.
3149  *
3150  *	This function determines whether FLUSH failure retry is
3151  *	necessary and performs it if so.
3152  *
3153  *	RETURNS:
3154  *	0 if EH can continue, -errno if EH needs to be repeated.
3155  */
3156 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3157 {
3158 	struct ata_link *link = dev->link;
3159 	struct ata_port *ap = link->ap;
3160 	struct ata_queued_cmd *qc;
3161 	struct ata_taskfile tf;
3162 	unsigned int err_mask;
3163 	int rc = 0;
3164 
3165 	/* did flush fail for this device? */
3166 	if (!ata_tag_valid(link->active_tag))
3167 		return 0;
3168 
3169 	qc = __ata_qc_from_tag(ap, link->active_tag);
3170 	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3171 			       qc->tf.command != ATA_CMD_FLUSH))
3172 		return 0;
3173 
3174 	/* if the device failed it, it should be reported to upper layers */
3175 	if (qc->err_mask & AC_ERR_DEV)
3176 		return 0;
3177 
3178 	/* flush failed for some other reason, give it another shot */
3179 	ata_tf_init(dev, &tf);
3180 
3181 	tf.command = qc->tf.command;
3182 	tf.flags |= ATA_TFLAG_DEVICE;
3183 	tf.protocol = ATA_PROT_NODATA;
3184 
3185 	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3186 		       tf.command, qc->err_mask);
3187 
3188 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3189 	if (!err_mask) {
3190 		/*
3191 		 * FLUSH is complete but there's no way to
3192 		 * successfully complete a failed command from EH.
3193 		 * Making sure retry is allowed at least once and
3194 		 * retrying it should do the trick - whatever was in
3195 		 * the cache is already on the platter and this won't
3196 		 * cause an infinite loop.
3197 		 */
3198 		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3199 	} else {
3200 		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3201 			       err_mask);
3202 		rc = -EIO;
3203 
3204 		/* if device failed it, report it to upper layers */
3205 		if (err_mask & AC_ERR_DEV) {
3206 			qc->err_mask |= AC_ERR_DEV;
3207 			qc->result_tf = tf;
3208 			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3209 				rc = 0;
3210 		}
3211 	}
3212 	return rc;
3213 }
3214 
3215 /**
3216  *	ata_eh_set_lpm - configure SATA interface power management
3217  *	@link: link to configure power management
3218  *	@policy: the link power management policy
3219  *	@r_failed_dev: out parameter for failed device
3220  *
3221  *	Enable SATA interface power management.  This will enable
3222  *	Device Initiated Power Management (DIPM) for the min_power
3223  *	policy, and then call driver specific callbacks for
3224  *	enabling Host Initiated Power Management (HIPM).
3225  *
3226  *	LOCKING:
3227  *	EH context.
3228  *
3229  *	RETURNS:
3230  *	0 on success, -errno on failure.
3231  */
3232 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3233 			  struct ata_device **r_failed_dev)
3234 {
3235 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3236 	struct ata_eh_context *ehc = &link->eh_context;
3237 	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3238 	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3239 	unsigned int err_mask;
3240 	int rc;
3241 
3242 	/* if the link or host doesn't do LPM, noop */
3243 	if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3244 		return 0;
3245 
3246 	/*
3247 	 * DIPM is enabled only for MIN_POWER as some devices
3248 	 * misbehave when the host NACKs transition to SLUMBER.  Order
3249 	 * device and link configurations such that the host always
3250 	 * allows DIPM requests.
3251 	 */
3252 	ata_for_each_dev(dev, link, ENABLED) {
3253 		bool hipm = ata_id_has_hipm(dev->id);
3254 		bool dipm = ata_id_has_dipm(dev->id);
3255 
3256 		/* find the first enabled device and first LPM-capable device */
3257 		if (!link_dev)
3258 			link_dev = dev;
3259 
3260 		if (!lpm_dev && (hipm || dipm))
3261 			lpm_dev = dev;
3262 
3263 		hints &= ~ATA_LPM_EMPTY;
3264 		if (!hipm)
3265 			hints &= ~ATA_LPM_HIPM;
3266 
3267 		/* disable DIPM before changing link config */
3268 		if (policy != ATA_LPM_MIN_POWER && dipm) {
3269 			err_mask = ata_dev_set_feature(dev,
3270 					SETFEATURES_SATA_DISABLE, SATA_DIPM);
3271 			if (err_mask && err_mask != AC_ERR_DEV) {
3272 				ata_dev_printk(dev, KERN_WARNING,
3273 					"failed to disable DIPM, Emask 0x%x\n",
3274 					err_mask);
3275 				rc = -EIO;
3276 				goto fail;
3277 			}
3278 		}
3279 	}
3280 
3281 	if (ap) {
3282 		rc = ap->ops->set_lpm(link, policy, hints);
3283 		if (!rc && ap->slave_link)
3284 			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3285 	} else
3286 		rc = sata_pmp_set_lpm(link, policy, hints);
3287 
3288 	/*
3289 	 * Attribute link config failure to the first (LPM) enabled
3290 	 * device on the link.
3291 	 */
3292 	if (rc) {
3293 		if (rc == -EOPNOTSUPP) {
3294 			link->flags |= ATA_LFLAG_NO_LPM;
3295 			return 0;
3296 		}
3297 		dev = lpm_dev ? lpm_dev : link_dev;
3298 		goto fail;
3299 	}
3300 
3301 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
3302 	ata_for_each_dev(dev, link, ENABLED) {
3303 		if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
3304 			err_mask = ata_dev_set_feature(dev,
3305 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
3306 			if (err_mask && err_mask != AC_ERR_DEV) {
3307 				ata_dev_printk(dev, KERN_WARNING,
3308 					"failed to enable DIPM, Emask 0x%x\n",
3309 					err_mask);
3310 				rc = -EIO;
3311 				goto fail;
3312 			}
3313 		}
3314 	}
3315 
3316 	link->lpm_policy = policy;
3317 	if (ap && ap->slave_link)
3318 		ap->slave_link->lpm_policy = policy;
3319 	return 0;
3320 
3321 fail:
3322 	/* if no device or only one more chance is left, disable LPM */
3323 	if (!dev || ehc->tries[dev->devno] <= 2) {
3324 		ata_link_printk(link, KERN_WARNING,
3325 				"disabling LPM on the link\n");
3326 		link->flags |= ATA_LFLAG_NO_LPM;
3327 	}
3328 	if (r_failed_dev)
3329 		*r_failed_dev = dev;
3330 	return rc;
3331 }
3332 
3333 static int ata_link_nr_enabled(struct ata_link *link)
3334 {
3335 	struct ata_device *dev;
3336 	int cnt = 0;
3337 
3338 	ata_for_each_dev(dev, link, ENABLED)
3339 		cnt++;
3340 	return cnt;
3341 }
3342 
3343 static int ata_link_nr_vacant(struct ata_link *link)
3344 {
3345 	struct ata_device *dev;
3346 	int cnt = 0;
3347 
3348 	ata_for_each_dev(dev, link, ALL)
3349 		if (dev->class == ATA_DEV_UNKNOWN)
3350 			cnt++;
3351 	return cnt;
3352 }
3353 
3354 static int ata_eh_skip_recovery(struct ata_link *link)
3355 {
3356 	struct ata_port *ap = link->ap;
3357 	struct ata_eh_context *ehc = &link->eh_context;
3358 	struct ata_device *dev;
3359 
3360 	/* skip disabled links */
3361 	if (link->flags & ATA_LFLAG_DISABLED)
3362 		return 1;
3363 
3364 	/* skip if explicitly requested */
3365 	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3366 		return 1;
3367 
3368 	/* thaw frozen port and recover failed devices */
3369 	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3370 		return 0;
3371 
3372 	/* reset at least once if reset is requested */
3373 	if ((ehc->i.action & ATA_EH_RESET) &&
3374 	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3375 		return 0;
3376 
3377 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3378 	ata_for_each_dev(dev, link, ALL) {
3379 		if (dev->class == ATA_DEV_UNKNOWN &&
3380 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3381 			return 0;
3382 	}
3383 
3384 	return 1;
3385 }
3386 
3387 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3388 {
3389 	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3390 	u64 now = get_jiffies_64();
3391 	int *trials = void_arg;
3392 
3393 	if (ent->timestamp < now - min(now, interval))
3394 		return -1;
3395 
3396 	(*trials)++;
3397 	return 0;
3398 }
3399 
3400 static int ata_eh_schedule_probe(struct ata_device *dev)
3401 {
3402 	struct ata_eh_context *ehc = &dev->link->eh_context;
3403 	struct ata_link *link = ata_dev_phys_link(dev);
3404 	int trials = 0;
3405 
3406 	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3407 	    (ehc->did_probe_mask & (1 << dev->devno)))
3408 		return 0;
3409 
3410 	ata_eh_detach_dev(dev);
3411 	ata_dev_init(dev);
3412 	ehc->did_probe_mask |= (1 << dev->devno);
3413 	ehc->i.action |= ATA_EH_RESET;
3414 	ehc->saved_xfer_mode[dev->devno] = 0;
3415 	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3416 
3417 	/* the link may be in deep sleep, wake it up */
3418 	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3419 		if (ata_is_host_link(link))
3420 			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3421 					       ATA_LPM_EMPTY);
3422 		else
3423 			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3424 					 ATA_LPM_EMPTY);
3425 	}
3426 
3427 	/* Record and count probe trials on the ering.  The specific
3428 	 * error mask used is irrelevant.  Because a successful device
3429 	 * detection clears the ering, this count accumulates only if
3430 	 * there are consecutive failed probes.
3431 	 *
3432 	 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3433 	 * ATA_EH_PROBE_TRIAL_INTERVAL, link speed is forced down to
3434 	 * 1.5Gbps.
3435 	 *
3436 	 * This is to work around cases where failed link speed
3437 	 * negotiation results in device misdetection leading to
3438 	 * infinite DEVXCHG or PHRDY CHG events.
3439 	 */
3440 	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3441 	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3442 
3443 	if (trials > ATA_EH_PROBE_TRIALS)
3444 		sata_down_spd_limit(link, 1);
3445 
3446 	return 1;
3447 }
3448 
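/*
 * A standalone sketch (hypothetical types, not driver code) of the
 * interval counting done by ata_count_probe_trials_cb() above: the
 * ering is visited newest-first, so stopping at the first entry older
 * than the cutoff is sufficient.
 */
#if 0	/* illustrative example, not compiled */
#include <stdint.h>

struct entry { uint64_t timestamp; };

static int count_recent(const struct entry *e, int n,
			uint64_t now, uint64_t interval)
{
	uint64_t cutoff = now - (interval < now ? interval : now);
	int i, trials = 0;

	for (i = 0; i < n; i++) {	/* e[0] is the newest entry */
		if (e[i].timestamp < cutoff)
			break;
		trials++;
	}
	return trials;
}
#endif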
3449 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3450 {
3451 	struct ata_eh_context *ehc = &dev->link->eh_context;
3452 
3453 	/* -EAGAIN from EH routine indicates retry without prejudice.
3454 	 * The requester is responsible for ensuring forward progress.
3455 	 */
3456 	if (err != -EAGAIN)
3457 		ehc->tries[dev->devno]--;
3458 
3459 	switch (err) {
3460 	case -ENODEV:
3461 		/* device missing or wrong IDENTIFY data, schedule probing */
3462 		ehc->i.probe_mask |= (1 << dev->devno);
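		/* fall through */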
3463 	case -EINVAL:
3464 		/* give it just one more chance */
3465 		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
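		/* fall through */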
3466 	case -EIO:
3467 		if (ehc->tries[dev->devno] == 1) {
3468 			/* This is the last chance, better to slow
3469 			 * down than lose it.
3470 			 */
3471 			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3472 			if (dev->pio_mode > XFER_PIO_0)
3473 				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3474 		}
3475 	}
3476 
3477 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3478 		/* disable device if it has used up all its chances */
3479 		ata_dev_disable(dev);
3480 
3481 		/* detach if offline */
3482 		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3483 			ata_eh_detach_dev(dev);
3484 
3485 		/* schedule probe if necessary */
3486 		if (ata_eh_schedule_probe(dev)) {
3487 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3488 			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3489 			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3490 		}
3491 
3492 		return 1;
3493 	} else {
3494 		ehc->i.action |= ATA_EH_RESET;
3495 		return 0;
3496 	}
3497 }
3498 
3499 /**
3500  *	ata_eh_recover - recover host port after error
3501  *	@ap: host port to recover
3502  *	@prereset: prereset method (can be NULL)
3503  *	@softreset: softreset method (can be NULL)
3504  *	@hardreset: hardreset method (can be NULL)
3505  *	@postreset: postreset method (can be NULL)
3506  *	@r_failed_link: out parameter for failed link
3507  *
3508  *	This is the alpha and omega, eum and yang, heart and soul of
3509  *	libata exception handling.  On entry, actions required to
3510  *	recover each link and hotplug requests are recorded in the
3511  *	link's eh_context.  This function executes all the operations
3512  *	with appropriate retries and fallbacks to resurrect failed
3513  *	devices, detach goners and greet newcomers.
3514  *
3515  *	LOCKING:
3516  *	Kernel thread context (may sleep).
3517  *
3518  *	RETURNS:
3519  *	0 on success, -errno on failure.
3520  */
3521 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3522 		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3523 		   ata_postreset_fn_t postreset,
3524 		   struct ata_link **r_failed_link)
3525 {
3526 	struct ata_link *link;
3527 	struct ata_device *dev;
3528 	int rc, nr_fails;
3529 	unsigned long flags, deadline;
3530 
3531 	DPRINTK("ENTER\n");
3532 
3533 	/* prep for recovery */
3534 	ata_for_each_link(link, ap, EDGE) {
3535 		struct ata_eh_context *ehc = &link->eh_context;
3536 
3537 		/* re-enable link? */
3538 		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3539 			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3540 			spin_lock_irqsave(ap->lock, flags);
3541 			link->flags &= ~ATA_LFLAG_DISABLED;
3542 			spin_unlock_irqrestore(ap->lock, flags);
3543 			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3544 		}
3545 
3546 		ata_for_each_dev(dev, link, ALL) {
3547 			if (link->flags & ATA_LFLAG_NO_RETRY)
3548 				ehc->tries[dev->devno] = 1;
3549 			else
3550 				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3551 
3552 			/* collect port action mask recorded in dev actions */
3553 			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3554 					 ~ATA_EH_PERDEV_MASK;
3555 			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3556 
3557 			/* process hotplug request */
3558 			if (dev->flags & ATA_DFLAG_DETACH)
3559 				ata_eh_detach_dev(dev);
3560 
3561 			/* schedule probe if necessary */
3562 			if (!ata_dev_enabled(dev))
3563 				ata_eh_schedule_probe(dev);
3564 		}
3565 	}
3566 
3567  retry:
3568 	rc = 0;
3569 
3570 	/* if UNLOADING, finish immediately */
3571 	if (ap->pflags & ATA_PFLAG_UNLOADING)
3572 		goto out;
3573 
3574 	/* prep for EH */
3575 	ata_for_each_link(link, ap, EDGE) {
3576 		struct ata_eh_context *ehc = &link->eh_context;
3577 
3578 		/* skip EH if possible. */
3579 		if (ata_eh_skip_recovery(link))
3580 			ehc->i.action = 0;
3581 
3582 		ata_for_each_dev(dev, link, ALL)
3583 			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3584 	}
3585 
3586 	/* reset */
3587 	ata_for_each_link(link, ap, EDGE) {
3588 		struct ata_eh_context *ehc = &link->eh_context;
3589 
3590 		if (!(ehc->i.action & ATA_EH_RESET))
3591 			continue;
3592 
3593 		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3594 				  prereset, softreset, hardreset, postreset);
3595 		if (rc) {
3596 			ata_link_printk(link, KERN_ERR,
3597 					"reset failed, giving up\n");
3598 			goto out;
3599 		}
3600 	}
3601 
3602 	do {
3603 		unsigned long now;
3604 
3605 		/*
3606 		 * clears ATA_EH_PARK in eh_info and resets
3607 		 * ap->park_req_pending
3608 		 */
3609 		ata_eh_pull_park_action(ap);
3610 
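		/* issue park commands and track the latest unpark deadline */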
3611 		deadline = jiffies;
3612 		ata_for_each_link(link, ap, EDGE) {
3613 			ata_for_each_dev(dev, link, ALL) {
3614 				struct ata_eh_context *ehc = &link->eh_context;
3615 				unsigned long tmp;
3616 
3617 				if (dev->class != ATA_DEV_ATA)
3618 					continue;
3619 				if (!(ehc->i.dev_action[dev->devno] &
3620 				      ATA_EH_PARK))
3621 					continue;
3622 				tmp = dev->unpark_deadline;
3623 				if (time_before(deadline, tmp))
3624 					deadline = tmp;
3625 				else if (time_before_eq(tmp, jiffies))
3626 					continue;
3627 				if (ehc->unloaded_mask & (1 << dev->devno))
3628 					continue;
3629 
3630 				ata_eh_park_issue_cmd(dev, 1);
3631 			}
3632 		}
3633 
3634 		now = jiffies;
3635 		if (time_before_eq(deadline, now))
3636 			break;
3637 
3638 		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3639 						       deadline - now);
3640 	} while (deadline);
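	/* now unpark (resume) any heads that were parked above */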
3641 	ata_for_each_link(link, ap, EDGE) {
3642 		ata_for_each_dev(dev, link, ALL) {
3643 			if (!(link->eh_context.unloaded_mask &
3644 			      (1 << dev->devno)))
3645 				continue;
3646 
3647 			ata_eh_park_issue_cmd(dev, 0);
3648 			ata_eh_done(link, dev, ATA_EH_PARK);
3649 		}
3650 	}
3651 
3652 	/* the rest */
3653 	nr_fails = 0;
3654 	ata_for_each_link(link, ap, PMP_FIRST) {
3655 		struct ata_eh_context *ehc = &link->eh_context;
3656 
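		/* on the host link of a PMP-attached port, only LPM config is done here */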
3657 		if (sata_pmp_attached(ap) && ata_is_host_link(link))
3658 			goto config_lpm;
3659 
3660 		/* revalidate existing devices and attach new ones */
3661 		rc = ata_eh_revalidate_and_attach(link, &dev);
3662 		if (rc)
3663 			goto rest_fail;
3664 
3665 		/* if a PMP got attached, return; PMP EH will take care of it */
3666 		if (link->device->class == ATA_DEV_PMP) {
3667 			ehc->i.action = 0;
3668 			return 0;
3669 		}
3670 
3671 		/* configure transfer mode if necessary */
3672 		if (ehc->i.flags & ATA_EHI_SETMODE) {
3673 			rc = ata_set_mode(link, &dev);
3674 			if (rc)
3675 				goto rest_fail;
3676 			ehc->i.flags &= ~ATA_EHI_SETMODE;
3677 		}
3678 
3679 		/* If reset has been issued, clear UA to avoid
3680 		 * disrupting the current users of the device.
3681 		 */
3682 		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3683 			ata_for_each_dev(dev, link, ALL) {
3684 				if (dev->class != ATA_DEV_ATAPI)
3685 					continue;
3686 				rc = atapi_eh_clear_ua(dev);
3687 				if (rc)
3688 					goto rest_fail;
3689 			}
3690 		}
3691 
3692 		/* retry flush if necessary */
3693 		ata_for_each_dev(dev, link, ALL) {
3694 			if (dev->class != ATA_DEV_ATA)
3695 				continue;
3696 			rc = ata_eh_maybe_retry_flush(dev);
3697 			if (rc)
3698 				goto rest_fail;
3699 		}
3700 
3701 	config_lpm:
3702 		/* configure link power saving */
3703 		if (link->lpm_policy != ap->target_lpm_policy) {
3704 			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3705 			if (rc)
3706 				goto rest_fail;
3707 		}
3708 
3709 		/* this link is okay now */
3710 		ehc->i.flags = 0;
3711 		continue;
3712 
3713 	rest_fail:
3714 		nr_fails++;
3715 		if (dev)
3716 			ata_eh_handle_dev_fail(dev, rc);
3717 
3718 		if (ap->pflags & ATA_PFLAG_FROZEN) {
3719 			/* PMP reset requires a working host port.
3720 			 * Can't retry if the port is frozen.
3721 			 */
3722 			if (sata_pmp_attached(ap))
3723 				goto out;
3724 			break;
3725 		}
3726 	}
3727 
3728 	if (nr_fails)
3729 		goto retry;
3730 
3731  out:
3732 	if (rc && r_failed_link)
3733 		*r_failed_link = link;
3734 
3735 	DPRINTK("EXIT, rc=%d\n", rc);
3736 	return rc;
3737 }
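
/*
 * Usage sketch (illustrative only, not part of libata): a driver with a
 * custom ->error_handler can drive the same autopsy/report/recover/finish
 * sequence that ata_do_eh() below uses, substituting its own reset
 * methods and inspecting the failed link on error.  The foo_* symbols
 * are hypothetical placeholders, not real libata functions.
 */
#if 0
static void foo_error_handler(struct ata_port *ap)
{
	struct ata_link *failed_link = NULL;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, foo_prereset, NULL, foo_hardreset,
			    foo_postreset, &failed_link);
	if (rc && failed_link)
		ata_link_printk(failed_link, KERN_ERR,
				"recovery failed, rc=%d\n", rc);

	ata_eh_finish(ap);
}
#endif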
3738 
3739 /**
3740  *	ata_eh_finish - finish up EH
3741  *	@ap: host port to finish EH for
3742  *
3743  *	Recovery is complete.  Clean up EH states and retry or finish
3744  *	failed qcs.
3745  *
3746  *	LOCKING:
3747  *	None.
3748  */
3749 void ata_eh_finish(struct ata_port *ap)
3750 {
3751 	int tag;
3752 
3753 	/* retry or finish qcs */
3754 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3755 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3756 
3757 		if (!(qc->flags & ATA_QCFLAG_FAILED))
3758 			continue;
3759 
3760 		if (qc->err_mask) {
3761 			/* FIXME: Once EH migration is complete,
3762 			 * generate sense data in this function,
3763 			 * considering both err_mask and tf.
3764 			 */
3765 			if (qc->flags & ATA_QCFLAG_RETRY)
3766 				ata_eh_qc_retry(qc);
3767 			else
3768 				ata_eh_qc_complete(qc);
3769 		} else {
3770 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3771 				ata_eh_qc_complete(qc);
3772 			} else {
3773 				/* feed zero TF to sense generation */
3774 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3775 				ata_eh_qc_retry(qc);
3776 			}
3777 		}
3778 	}
3779 
3780 	/* make sure nr_active_links is zero after EH */
3781 	WARN_ON(ap->nr_active_links);
3782 	ap->nr_active_links = 0;
3783 }
3784 
3785 /**
3786  *	ata_do_eh - do standard error handling
3787  *	@ap: host port to handle error for
3789  *	@prereset: prereset method (can be NULL)
3790  *	@softreset: softreset method (can be NULL)
3791  *	@hardreset: hardreset method (can be NULL)
3792  *	@postreset: postreset method (can be NULL)
3793  *
3794  *	Perform standard error handling sequence.
3795  *
3796  *	LOCKING:
3797  *	Kernel thread context (may sleep).
3798  */
3799 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3800 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3801 	       ata_postreset_fn_t postreset)
3802 {
3803 	struct ata_device *dev;
3804 	int rc;
3805 
3806 	ata_eh_autopsy(ap);
3807 	ata_eh_report(ap);
3808 
3809 	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3810 			    NULL);
3811 	if (rc) {
3812 		ata_for_each_dev(dev, &ap->link, ALL)
3813 			ata_dev_disable(dev);
3814 	}
3815 
3816 	ata_eh_finish(ap);
3817 }
3818 
3819 /**
3820  *	ata_std_error_handler - standard error handler
3821  *	@ap: host port to handle error for
3822  *
3823  *	Standard error handler
3824  *
3825  *	LOCKING:
3826  *	Kernel thread context (may sleep).
3827  */
3828 void ata_std_error_handler(struct ata_port *ap)
3829 {
3830 	struct ata_port_operations *ops = ap->ops;
3831 	ata_reset_fn_t hardreset = ops->hardreset;
3832 
3833 	/* ignore built-in hardreset if SCR access is not available */
3834 	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3835 		hardreset = NULL;
3836 
3837 	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3838 }
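
/*
 * Usage sketch (illustrative only): most drivers pick up this handler by
 * inheriting the generic SATA port ops rather than setting it by hand.
 * foo_port_ops and foo_hardreset are hypothetical placeholders.
 */
#if 0
static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,	/* provides ->error_handler */
	.hardreset	= foo_hardreset,	/* driver-specific override */
};
#endif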
3839 
3840 #ifdef CONFIG_PM
3841 /**
3842  *	ata_eh_handle_port_suspend - perform port suspend operation
3843  *	@ap: port to suspend
3844  *
3845  *	Suspend @ap.
3846  *
3847  *	LOCKING:
3848  *	Kernel thread context (may sleep).
3849  */
3850 static void ata_eh_handle_port_suspend(struct ata_port *ap)
3851 {
3852 	unsigned long flags;
3853 	int rc = 0;
3854 
3855 	/* are we suspending? */
3856 	spin_lock_irqsave(ap->lock, flags);
3857 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3858 	    ap->pm_mesg.event == PM_EVENT_ON) {
3859 		spin_unlock_irqrestore(ap->lock, flags);
3860 		return;
3861 	}
3862 	spin_unlock_irqrestore(ap->lock, flags);
3863 
3864 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3865 
3866 	/* tell ACPI we're suspending */
3867 	rc = ata_acpi_on_suspend(ap);
3868 	if (rc)
3869 		goto out;
3870 
3871 	/* suspend */
3872 	ata_eh_freeze_port(ap);
3873 
3874 	if (ap->ops->port_suspend)
3875 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3876 
3877 	ata_acpi_set_state(ap, PMSG_SUSPEND);
3878  out:
3879 	/* report result */
3880 	spin_lock_irqsave(ap->lock, flags);
3881 
3882 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3883 	if (rc == 0)
3884 		ap->pflags |= ATA_PFLAG_SUSPENDED;
3885 	else if (ap->pflags & ATA_PFLAG_FROZEN)
3886 		ata_port_schedule_eh(ap);
3887 
3888 	if (ap->pm_result) {
3889 		*ap->pm_result = rc;
3890 		ap->pm_result = NULL;
3891 	}
3892 
3893 	spin_unlock_irqrestore(ap->lock, flags);
3896 }
3897 
3898 /**
3899  *	ata_eh_handle_port_resume - perform port resume operation
3900  *	@ap: port to resume
3901  *
3902  *	Resume @ap.
3903  *
3904  *	LOCKING:
3905  *	Kernel thread context (may sleep).
3906  */
3907 static void ata_eh_handle_port_resume(struct ata_port *ap)
3908 {
3909 	struct ata_link *link;
3910 	struct ata_device *dev;
3911 	unsigned long flags;
3912 	int rc = 0;
3913 
3914 	/* are we resuming? */
3915 	spin_lock_irqsave(ap->lock, flags);
3916 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3917 	    ap->pm_mesg.event != PM_EVENT_ON) {
3918 		spin_unlock_irqrestore(ap->lock, flags);
3919 		return;
3920 	}
3921 	spin_unlock_irqrestore(ap->lock, flags);
3922 
3923 	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3924 
3925 	/*
3926 	 * Error timestamps are in jiffies, which don't advance while
3927 	 * suspended, and PHY events during resume aren't uncommon.
3928 	 * When the two are combined, it can lead to unnecessary speed
3929 	 * downs if the machine is suspended and resumed repeatedly.
3930 	 * Clear error history.
3931 	 */
3932 	ata_for_each_link(link, ap, HOST_FIRST)
3933 		ata_for_each_dev(dev, link, ALL)
3934 			ata_ering_clear(&dev->ering);
3935 
3936 	ata_acpi_set_state(ap, PMSG_ON);
3937 
3938 	if (ap->ops->port_resume)
3939 		rc = ap->ops->port_resume(ap);
3940 
3941 	/* tell ACPI that we're resuming */
3942 	ata_acpi_on_resume(ap);
3943 
3944 	/* report result */
3945 	spin_lock_irqsave(ap->lock, flags);
3946 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3947 	if (ap->pm_result) {
3948 		*ap->pm_result = rc;
3949 		ap->pm_result = NULL;
3950 	}
3951 	spin_unlock_irqrestore(ap->lock, flags);
3952 }
3953 #endif /* CONFIG_PM */
3954