xref: /openbmc/linux/drivers/ata/libata-eh.c (revision c6fd280766a050b13360d7c2d59a3d6bd3a27d9a)
1*c6fd2807SJeff Garzik /*
2*c6fd2807SJeff Garzik  *  libata-eh.c - libata error handling
3*c6fd2807SJeff Garzik  *
4*c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5*c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6*c6fd2807SJeff Garzik  *		    on emails.
7*c6fd2807SJeff Garzik  *
8*c6fd2807SJeff Garzik  *  Copyright 2006 Tejun Heo <htejun@gmail.com>
9*c6fd2807SJeff Garzik  *
10*c6fd2807SJeff Garzik  *
11*c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or
12*c6fd2807SJeff Garzik  *  modify it under the terms of the GNU General Public License as
13*c6fd2807SJeff Garzik  *  published by the Free Software Foundation; either version 2, or
14*c6fd2807SJeff Garzik  *  (at your option) any later version.
15*c6fd2807SJeff Garzik  *
16*c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
17*c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
18*c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19*c6fd2807SJeff Garzik  *  General Public License for more details.
20*c6fd2807SJeff Garzik  *
21*c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
22*c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
23*c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24*c6fd2807SJeff Garzik  *  USA.
25*c6fd2807SJeff Garzik  *
26*c6fd2807SJeff Garzik  *
27*c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28*c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29*c6fd2807SJeff Garzik  *
30*c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31*c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32*c6fd2807SJeff Garzik  *
33*c6fd2807SJeff Garzik  */
34*c6fd2807SJeff Garzik 
35*c6fd2807SJeff Garzik #include <linux/config.h>
36*c6fd2807SJeff Garzik #include <linux/kernel.h>
37*c6fd2807SJeff Garzik #include <scsi/scsi.h>
38*c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
39*c6fd2807SJeff Garzik #include <scsi/scsi_eh.h>
40*c6fd2807SJeff Garzik #include <scsi/scsi_device.h>
41*c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
42*c6fd2807SJeff Garzik #include "../scsi/scsi_transport_api.h"
43*c6fd2807SJeff Garzik 
44*c6fd2807SJeff Garzik #include <linux/libata.h>
45*c6fd2807SJeff Garzik 
46*c6fd2807SJeff Garzik #include "libata.h"
47*c6fd2807SJeff Garzik 
48*c6fd2807SJeff Garzik static void __ata_port_freeze(struct ata_port *ap);
49*c6fd2807SJeff Garzik static void ata_eh_finish(struct ata_port *ap);
50*c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap);
51*c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap);
52*c6fd2807SJeff Garzik 
53*c6fd2807SJeff Garzik static void ata_ering_record(struct ata_ering *ering, int is_io,
54*c6fd2807SJeff Garzik 			     unsigned int err_mask)
55*c6fd2807SJeff Garzik {
56*c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
57*c6fd2807SJeff Garzik 
58*c6fd2807SJeff Garzik 	WARN_ON(!err_mask);
59*c6fd2807SJeff Garzik 
60*c6fd2807SJeff Garzik 	ering->cursor++;
61*c6fd2807SJeff Garzik 	ering->cursor %= ATA_ERING_SIZE;
62*c6fd2807SJeff Garzik 
63*c6fd2807SJeff Garzik 	ent = &ering->ring[ering->cursor];
64*c6fd2807SJeff Garzik 	ent->is_io = is_io;
65*c6fd2807SJeff Garzik 	ent->err_mask = err_mask;
66*c6fd2807SJeff Garzik 	ent->timestamp = get_jiffies_64();
67*c6fd2807SJeff Garzik }
68*c6fd2807SJeff Garzik 
69*c6fd2807SJeff Garzik static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
70*c6fd2807SJeff Garzik {
71*c6fd2807SJeff Garzik 	struct ata_ering_entry *ent = &ering->ring[ering->cursor];
72*c6fd2807SJeff Garzik 	if (!ent->err_mask)
73*c6fd2807SJeff Garzik 		return NULL;
74*c6fd2807SJeff Garzik 	return ent;
75*c6fd2807SJeff Garzik }
76*c6fd2807SJeff Garzik 
77*c6fd2807SJeff Garzik static int ata_ering_map(struct ata_ering *ering,
78*c6fd2807SJeff Garzik 			 int (*map_fn)(struct ata_ering_entry *, void *),
79*c6fd2807SJeff Garzik 			 void *arg)
80*c6fd2807SJeff Garzik {
81*c6fd2807SJeff Garzik 	int idx, rc = 0;
82*c6fd2807SJeff Garzik 	struct ata_ering_entry *ent;
83*c6fd2807SJeff Garzik 
84*c6fd2807SJeff Garzik 	idx = ering->cursor;
85*c6fd2807SJeff Garzik 	do {
86*c6fd2807SJeff Garzik 		ent = &ering->ring[idx];
87*c6fd2807SJeff Garzik 		if (!ent->err_mask)
88*c6fd2807SJeff Garzik 			break;
89*c6fd2807SJeff Garzik 		rc = map_fn(ent, arg);
90*c6fd2807SJeff Garzik 		if (rc)
91*c6fd2807SJeff Garzik 			break;
92*c6fd2807SJeff Garzik 		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
93*c6fd2807SJeff Garzik 	} while (idx != ering->cursor);
94*c6fd2807SJeff Garzik 
95*c6fd2807SJeff Garzik 	return rc;
96*c6fd2807SJeff Garzik }
97*c6fd2807SJeff Garzik 
98*c6fd2807SJeff Garzik static unsigned int ata_eh_dev_action(struct ata_device *dev)
99*c6fd2807SJeff Garzik {
100*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &dev->ap->eh_context;
101*c6fd2807SJeff Garzik 
102*c6fd2807SJeff Garzik 	return ehc->i.action | ehc->i.dev_action[dev->devno];
103*c6fd2807SJeff Garzik }
104*c6fd2807SJeff Garzik 
105*c6fd2807SJeff Garzik static void ata_eh_clear_action(struct ata_device *dev,
106*c6fd2807SJeff Garzik 				struct ata_eh_info *ehi, unsigned int action)
107*c6fd2807SJeff Garzik {
108*c6fd2807SJeff Garzik 	int i;
109*c6fd2807SJeff Garzik 
110*c6fd2807SJeff Garzik 	if (!dev) {
111*c6fd2807SJeff Garzik 		ehi->action &= ~action;
112*c6fd2807SJeff Garzik 		for (i = 0; i < ATA_MAX_DEVICES; i++)
113*c6fd2807SJeff Garzik 			ehi->dev_action[i] &= ~action;
114*c6fd2807SJeff Garzik 	} else {
115*c6fd2807SJeff Garzik 		/* doesn't make sense for port-wide EH actions */
116*c6fd2807SJeff Garzik 		WARN_ON(!(action & ATA_EH_PERDEV_MASK));
117*c6fd2807SJeff Garzik 
118*c6fd2807SJeff Garzik 		/* break ehi->action into ehi->dev_action */
119*c6fd2807SJeff Garzik 		if (ehi->action & action) {
120*c6fd2807SJeff Garzik 			for (i = 0; i < ATA_MAX_DEVICES; i++)
121*c6fd2807SJeff Garzik 				ehi->dev_action[i] |= ehi->action & action;
122*c6fd2807SJeff Garzik 			ehi->action &= ~action;
123*c6fd2807SJeff Garzik 		}
124*c6fd2807SJeff Garzik 
125*c6fd2807SJeff Garzik 		/* turn off the specified per-dev action */
126*c6fd2807SJeff Garzik 		ehi->dev_action[dev->devno] &= ~action;
127*c6fd2807SJeff Garzik 	}
128*c6fd2807SJeff Garzik }
129*c6fd2807SJeff Garzik 
/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum scsi_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new-EH drivers handle timeouts in ->error_handler; just let
	 * the SCSI midlayer invoke EH for them
	 */
	if (ap->ops->error_handler) {
		ret = EH_NOT_HANDLED;
		goto out;
	}

	/* old EH: look up the active qc under ap->lock so that a
	 * racing ata_qc_complete() cannot finish it behind our back
	 */
	ret = EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* block normal completion and record the timeout */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
179*c6fd2807SJeff Garzik 
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.  For new-EH ports this
 *	sorts out timed out commands, freezes the port if necessary and
 *	drives ap->ops->error_handler(), repeating up to
 *	ATA_EH_MAX_REPEAT times if new exceptions arrive during
 *	recovery.  For old-EH ports it simply delegates to
 *	ap->ops->eng_timeout().
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i, repeat_cnt = ATA_EH_MAX_REPEAT;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host_set lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* walk the scmds SCSI EH handed us and match each one
		 * to its active qc, if it still has one
		 */
		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);
	} else
		/* old EH: just wait until no one else holds the lock */
		spin_unlock_wait(ap->lock);

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info: snapshot accumulated eh_info
		 * into eh_context and reset eh_info so that new events
		 * accumulate separately while this run is in progress
		 */
		spin_lock_irqsave(ap->lock, flags);

		memset(&ap->eh_context, 0, sizeof(ap->eh_context));
		ap->eh_context.i = ap->eh_info;
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else
			ata_eh_finish(ap);

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--repeat_cnt) {
				ata_port_printk(ap, KERN_INFO,
					"EH pending after completion, "
					"repeating EH (cnt=%d)\n", repeat_cnt);
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			/* repeat budget exhausted; give up on this run */
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_REPEAT);
		}

		/* this run is complete, make sure EH info is clear */
		memset(&ap->eh_info, 0, sizeof(ap->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		/* old EH: delegate handling of the active qc */
		WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		/* a hotplug scan was requested during EH; run it from
		 * the auxiliary workqueue
		 */
		queue_work(ata_aux_wq, &ap->hotplug_task);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
351*c6fd2807SJeff Garzik 
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither EH-pending nor EH-in-progress is set;
	 * prepare_to_wait() before dropping the lock closes the
	 * window for a missed wakeup from ata_scsi_error()
	 */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->host)) {
		msleep(10);
		goto retry;
	}
}
385*c6fd2807SJeff Garzik 
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* force the port task state machine back to idle */
	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(ap->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		/* grab BMDMA status before stopping the engine */
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		/* host_stat is 0 here for non-DMA protocols */
		ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
			       "stat 0x%x host_stat 0x%x\n",
			       qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= AC_ERR_TIMEOUT;
		break;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
451*c6fd2807SJeff Garzik 
452*c6fd2807SJeff Garzik /**
453*c6fd2807SJeff Garzik  *	ata_eng_timeout - Handle timeout of queued command
454*c6fd2807SJeff Garzik  *	@ap: Port on which timed-out command is active
455*c6fd2807SJeff Garzik  *
456*c6fd2807SJeff Garzik  *	Some part of the kernel (currently, only the SCSI layer)
457*c6fd2807SJeff Garzik  *	has noticed that the active command on port @ap has not
458*c6fd2807SJeff Garzik  *	completed after a specified length of time.  Handle this
459*c6fd2807SJeff Garzik  *	condition by disabling DMA (if necessary) and completing
460*c6fd2807SJeff Garzik  *	transactions, with error if necessary.
461*c6fd2807SJeff Garzik  *
462*c6fd2807SJeff Garzik  *	This also handles the case of the "lost interrupt", where
463*c6fd2807SJeff Garzik  *	for some reason (possibly hardware bug, possibly driver bug)
464*c6fd2807SJeff Garzik  *	an interrupt was not delivered to the driver, even though the
465*c6fd2807SJeff Garzik  *	transaction completed successfully.
466*c6fd2807SJeff Garzik  *
467*c6fd2807SJeff Garzik  *	TODO: kill this function once old EH is gone.
468*c6fd2807SJeff Garzik  *
469*c6fd2807SJeff Garzik  *	LOCKING:
470*c6fd2807SJeff Garzik  *	Inherited from SCSI layer (none, can sleep)
471*c6fd2807SJeff Garzik  */
472*c6fd2807SJeff Garzik void ata_eng_timeout(struct ata_port *ap)
473*c6fd2807SJeff Garzik {
474*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
475*c6fd2807SJeff Garzik 
476*c6fd2807SJeff Garzik 	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
477*c6fd2807SJeff Garzik 
478*c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
479*c6fd2807SJeff Garzik }
480*c6fd2807SJeff Garzik 
481*c6fd2807SJeff Garzik /**
482*c6fd2807SJeff Garzik  *	ata_qc_schedule_eh - schedule qc for error handling
483*c6fd2807SJeff Garzik  *	@qc: command to schedule error handling for
484*c6fd2807SJeff Garzik  *
485*c6fd2807SJeff Garzik  *	Schedule error handling for @qc.  EH will kick in as soon as
486*c6fd2807SJeff Garzik  *	other commands are drained.
487*c6fd2807SJeff Garzik  *
488*c6fd2807SJeff Garzik  *	LOCKING:
489*c6fd2807SJeff Garzik  *	spin_lock_irqsave(host_set lock)
490*c6fd2807SJeff Garzik  */
491*c6fd2807SJeff Garzik void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
492*c6fd2807SJeff Garzik {
493*c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
494*c6fd2807SJeff Garzik 
495*c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
496*c6fd2807SJeff Garzik 
497*c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_FAILED;
498*c6fd2807SJeff Garzik 	qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
499*c6fd2807SJeff Garzik 
500*c6fd2807SJeff Garzik 	/* The following will fail if timeout has already expired.
501*c6fd2807SJeff Garzik 	 * ata_scsi_error() takes care of such scmds on EH entry.
502*c6fd2807SJeff Garzik 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
503*c6fd2807SJeff Garzik 	 * this function completes.
504*c6fd2807SJeff Garzik 	 */
505*c6fd2807SJeff Garzik 	scsi_req_abort_cmd(qc->scsicmd);
506*c6fd2807SJeff Garzik }
507*c6fd2807SJeff Garzik 
/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* mark EH pending before asking the SCSI midlayer to run EH */
	ap->pflags |= ATA_PFLAG_EH_PENDING;
	scsi_schedule_eh(ap->host);

	DPRINTK("port EH scheduled\n");
}
527*c6fd2807SJeff Garzik 
528*c6fd2807SJeff Garzik /**
529*c6fd2807SJeff Garzik  *	ata_port_abort - abort all qc's on the port
530*c6fd2807SJeff Garzik  *	@ap: ATA port to abort qc's for
531*c6fd2807SJeff Garzik  *
532*c6fd2807SJeff Garzik  *	Abort all active qc's of @ap and schedule EH.
533*c6fd2807SJeff Garzik  *
534*c6fd2807SJeff Garzik  *	LOCKING:
535*c6fd2807SJeff Garzik  *	spin_lock_irqsave(host_set lock)
536*c6fd2807SJeff Garzik  *
537*c6fd2807SJeff Garzik  *	RETURNS:
538*c6fd2807SJeff Garzik  *	Number of aborted qc's.
539*c6fd2807SJeff Garzik  */
540*c6fd2807SJeff Garzik int ata_port_abort(struct ata_port *ap)
541*c6fd2807SJeff Garzik {
542*c6fd2807SJeff Garzik 	int tag, nr_aborted = 0;
543*c6fd2807SJeff Garzik 
544*c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
545*c6fd2807SJeff Garzik 
546*c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
547*c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
548*c6fd2807SJeff Garzik 
549*c6fd2807SJeff Garzik 		if (qc) {
550*c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
551*c6fd2807SJeff Garzik 			ata_qc_complete(qc);
552*c6fd2807SJeff Garzik 			nr_aborted++;
553*c6fd2807SJeff Garzik 		}
554*c6fd2807SJeff Garzik 	}
555*c6fd2807SJeff Garzik 
556*c6fd2807SJeff Garzik 	if (!nr_aborted)
557*c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
558*c6fd2807SJeff Garzik 
559*c6fd2807SJeff Garzik 	return nr_aborted;
560*c6fd2807SJeff Garzik }
561*c6fd2807SJeff Garzik 
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* hardware freeze is optional; flag below is authoritative */
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->id);
}
591*c6fd2807SJeff Garzik 
592*c6fd2807SJeff Garzik /**
593*c6fd2807SJeff Garzik  *	ata_port_freeze - abort & freeze port
594*c6fd2807SJeff Garzik  *	@ap: ATA port to freeze
595*c6fd2807SJeff Garzik  *
596*c6fd2807SJeff Garzik  *	Abort and freeze @ap.
597*c6fd2807SJeff Garzik  *
598*c6fd2807SJeff Garzik  *	LOCKING:
599*c6fd2807SJeff Garzik  *	spin_lock_irqsave(host_set lock)
600*c6fd2807SJeff Garzik  *
601*c6fd2807SJeff Garzik  *	RETURNS:
602*c6fd2807SJeff Garzik  *	Number of aborted commands.
603*c6fd2807SJeff Garzik  */
604*c6fd2807SJeff Garzik int ata_port_freeze(struct ata_port *ap)
605*c6fd2807SJeff Garzik {
606*c6fd2807SJeff Garzik 	int nr_aborted;
607*c6fd2807SJeff Garzik 
608*c6fd2807SJeff Garzik 	WARN_ON(!ap->ops->error_handler);
609*c6fd2807SJeff Garzik 
610*c6fd2807SJeff Garzik 	nr_aborted = ata_port_abort(ap);
611*c6fd2807SJeff Garzik 	__ata_port_freeze(ap);
612*c6fd2807SJeff Garzik 
613*c6fd2807SJeff Garzik 	return nr_aborted;
614*c6fd2807SJeff Garzik }
615*c6fd2807SJeff Garzik 
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.  No-op for old-EH (no ->error_handler) ports.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	/* __ata_port_freeze() must run under ap->lock */
	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
636*c6fd2807SJeff Garzik 
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.  No-op for old-EH (no ->error_handler)
 *	ports.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	/* hardware thaw is optional, mirroring ->freeze */
	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->id);
}
664*c6fd2807SJeff Garzik 
/* No-op scsi_cmnd completion callback.  Installed on a qc by
 * __ata_eh_qc_complete() so that completing the qc does not signal the
 * SCSI midlayer; the scmd is handed to the EH done queue instead.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
669*c6fd2807SJeff Garzik 
/* Complete @qc from EH context.  The qc is completed under the port
 * lock with a no-op scsidone callback, then the scmd is queued on the
 * port's EH done queue for the SCSI EH to finish.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	/* suppress normal SCSI completion; scsi_eh_finish_cmd() below
	 * hands the scmd back instead
	 */
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* the qc must have been freed (tag invalidated) by now */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
684*c6fd2807SJeff Garzik 
685*c6fd2807SJeff Garzik /**
686*c6fd2807SJeff Garzik  *	ata_eh_qc_complete - Complete an active ATA command from EH
687*c6fd2807SJeff Garzik  *	@qc: Command to complete
688*c6fd2807SJeff Garzik  *
689*c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command has
690*c6fd2807SJeff Garzik  *	completed.  To be used from EH.
691*c6fd2807SJeff Garzik  */
692*c6fd2807SJeff Garzik void ata_eh_qc_complete(struct ata_queued_cmd *qc)
693*c6fd2807SJeff Garzik {
694*c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
695*c6fd2807SJeff Garzik 	scmd->retries = scmd->allowed;
696*c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
697*c6fd2807SJeff Garzik }
698*c6fd2807SJeff Garzik 
699*c6fd2807SJeff Garzik /**
700*c6fd2807SJeff Garzik  *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
701*c6fd2807SJeff Garzik  *	@qc: Command to retry
702*c6fd2807SJeff Garzik  *
703*c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA command
704*c6fd2807SJeff Garzik  *	should be retried.  To be used from EH.
705*c6fd2807SJeff Garzik  *
706*c6fd2807SJeff Garzik  *	SCSI midlayer limits the number of retries to scmd->allowed.
707*c6fd2807SJeff Garzik  *	scmd->retries is decremented for commands which get retried
708*c6fd2807SJeff Garzik  *	due to unrelated failures (qc->err_mask is zero).
709*c6fd2807SJeff Garzik  */
710*c6fd2807SJeff Garzik void ata_eh_qc_retry(struct ata_queued_cmd *qc)
711*c6fd2807SJeff Garzik {
712*c6fd2807SJeff Garzik 	struct scsi_cmnd *scmd = qc->scsicmd;
713*c6fd2807SJeff Garzik 	if (!qc->err_mask && scmd->retries)
714*c6fd2807SJeff Garzik 		scmd->retries--;
715*c6fd2807SJeff Garzik 	__ata_eh_qc_complete(qc);
716*c6fd2807SJeff Garzik }
717*c6fd2807SJeff Garzik 
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* disable the device before manipulating detach state */
	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	/* the detach request is being serviced; clear it */
	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if the SCSI side could be taken offline, mark the device
	 * detached and request SCSI-side hotplug processing
	 */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH actions */
	ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);

	spin_unlock_irqrestore(ap->lock, flags);
}
749*c6fd2807SJeff Garzik 
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@ap: target ATA port
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @ap->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
			       unsigned int action)
{
	unsigned long flags;
	struct ata_eh_info *ehi = &ap->eh_info;
	struct ata_eh_context *ehc = &ap->eh_context;

	spin_lock_irqsave(ap->lock, flags);

	/* Reset is represented by combination of actions and EHI
	 * flags.  Suck in all related bits before clearing eh_info to
	 * avoid losing requested action.
	 */
	if (action & ATA_EH_RESET_MASK) {
		ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
		ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;

		/* make sure all reset actions are cleared & clear EHI flags */
		action |= ATA_EH_RESET_MASK;
		ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
	}

	/* clear the bits about to be handled from eh_info */
	ata_eh_clear_action(dev, ehi, action);

	/* record that EH did real work, unless asked to stay quiet */
	if (!(ehc->i.flags & ATA_EHI_QUIET))
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
792*c6fd2807SJeff Garzik 
793*c6fd2807SJeff Garzik /**
794*c6fd2807SJeff Garzik  *	ata_eh_done - EH action complete
795*c6fd2807SJeff Garzik  *	@ap: target ATA port
796*c6fd2807SJeff Garzik  *	@dev: target ATA dev for per-dev action (can be NULL)
797*c6fd2807SJeff Garzik  *	@action: action just completed
798*c6fd2807SJeff Garzik  *
799*c6fd2807SJeff Garzik  *	Called right after performing EH actions to clear related bits
800*c6fd2807SJeff Garzik  *	in @ap->eh_context.
801*c6fd2807SJeff Garzik  *
802*c6fd2807SJeff Garzik  *	LOCKING:
803*c6fd2807SJeff Garzik  *	None.
804*c6fd2807SJeff Garzik  */
805*c6fd2807SJeff Garzik static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
806*c6fd2807SJeff Garzik 			unsigned int action)
807*c6fd2807SJeff Garzik {
808*c6fd2807SJeff Garzik 	/* if reset is complete, clear all reset actions & reset modifier */
809*c6fd2807SJeff Garzik 	if (action & ATA_EH_RESET_MASK) {
810*c6fd2807SJeff Garzik 		action |= ATA_EH_RESET_MASK;
811*c6fd2807SJeff Garzik 		ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
812*c6fd2807SJeff Garzik 	}
813*c6fd2807SJeff Garzik 
814*c6fd2807SJeff Garzik 	ata_eh_clear_action(dev, &ap->eh_context.i, action);
815*c6fd2807SJeff Garzik }
816*c6fd2807SJeff Garzik 
817*c6fd2807SJeff Garzik /**
818*c6fd2807SJeff Garzik  *	ata_err_string - convert err_mask to descriptive string
819*c6fd2807SJeff Garzik  *	@err_mask: error mask to convert to string
820*c6fd2807SJeff Garzik  *
821*c6fd2807SJeff Garzik  *	Convert @err_mask to descriptive string.  Errors are
822*c6fd2807SJeff Garzik  *	prioritized according to severity and only the most severe
823*c6fd2807SJeff Garzik  *	error is reported.
824*c6fd2807SJeff Garzik  *
825*c6fd2807SJeff Garzik  *	LOCKING:
826*c6fd2807SJeff Garzik  *	None.
827*c6fd2807SJeff Garzik  *
828*c6fd2807SJeff Garzik  *	RETURNS:
829*c6fd2807SJeff Garzik  *	Descriptive string for @err_mask
830*c6fd2807SJeff Garzik  */
831*c6fd2807SJeff Garzik static const char * ata_err_string(unsigned int err_mask)
832*c6fd2807SJeff Garzik {
833*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HOST_BUS)
834*c6fd2807SJeff Garzik 		return "host bus error";
835*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_ATA_BUS)
836*c6fd2807SJeff Garzik 		return "ATA bus error";
837*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_TIMEOUT)
838*c6fd2807SJeff Garzik 		return "timeout";
839*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_HSM)
840*c6fd2807SJeff Garzik 		return "HSM violation";
841*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_SYSTEM)
842*c6fd2807SJeff Garzik 		return "internal error";
843*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_MEDIA)
844*c6fd2807SJeff Garzik 		return "media error";
845*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_INVALID)
846*c6fd2807SJeff Garzik 		return "invalid argument";
847*c6fd2807SJeff Garzik 	if (err_mask & AC_ERR_DEV)
848*c6fd2807SJeff Garzik 		return "device error";
849*c6fd2807SJeff Garzik 	return "unknown error";
850*c6fd2807SJeff Garzik }
851*c6fd2807SJeff Garzik 
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;			/* log page number goes in LBA low */
	tf.nsect = sectors;		/* 16-bit sector count, split over */
	tf.hob_nsect = sectors >> 8;	/* nsect and hob_nsect (LBA48) */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	/* issue synchronously; data flows device -> host */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
889*c6fd2807SJeff Garzik 
/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	/* byte-sum of the whole sector should be zero (the page
	 * carries a checksum byte); a mismatch is only warned about,
	 * the page is still used
	 */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	/* bit 7 of byte 0 set means no valid queued-command tag
	 * (NOTE(review): per NCQ error log layout — confirm against
	 * the SATA spec); report "not found" in that case
	 */
	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;	/* failed command's tag, bits 4:0 */

	/* extract the failed command's taskfile image from the page */
	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
943*c6fd2807SJeff Garzik 
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   unsigned char *sense_buf)
{
	struct ata_port *ap = dev->ap;
	struct ata_taskfile tf;
	u8 cdb[ATAPI_CDB_LEN];

	DPRINTK("ATAPI request sense\n");

	ata_tf_init(dev, &tf);

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* XXX: why tf_read here? */
	ap->ops->tf_read(ap, &tf);

	/* fill these in, for the case where they are -not- overwritten */
	sense_buf[0] = 0x70;		/* fixed-format sense data header */
	sense_buf[2] = tf.feature >> 4;	/* high nibble of error reg —
					 * presumably the sense key; verify
					 * against ATAPI spec */

	memset(cdb, 0, ATAPI_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = SCSI_SENSE_BUFFERSIZE;	/* allocation length */

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATA_PROT_ATAPI_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATA_PROT_ATAPI;
		/* byte count limit for the PIO transfer */
		tf.lbam = (8 * 1024) & 0xff;
		tf.lbah = (8 * 1024) >> 8;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE);
}
999*c6fd2807SJeff Garzik 
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@ap: ATA port to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;

	/* persistent communication error warrants a hard reset */
	if (serror & SERR_PERSISTENT) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
	}
	/* data integrity errors (recovered or not) -> soft reset */
	if (serror &
	    (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_SOFTRESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_SOFTRESET;
	}
	/* PHY-ready change or device exchange -> hotplug event */
	if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
		ata_ehi_hotplugged(&ehc->i);

	/* fold results into the EH context */
	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
1039*c6fd2807SJeff Garzik 
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@ap: ATA port to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev = ap->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		/* a failed qc already has err_mask set: the LLDD did
		 * its own analysis, nothing for us to do
		 */
		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	/* sanity check: reported tag must be among outstanding NCQ tags */
	if (!(ap->sactive & (1 << tag))) {
		ata_port_printk(ap, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->err_mask |= AC_ERR_DEV;
	/* error attributed to a specific qc; drop the port-wide bit */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1099*c6fd2807SJeff Garzik 
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	avaliable.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* status must show DRDY alone; BSY or DRQ set (or DRDY clear)
	 * indicates a host state machine violation
	 */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_SOFTRESET;
	}

	/* the per-class analysis below applies only to device errors */
	if (!(qc->err_mask & AC_ERR_DEV))
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* decode ERROR register bits into err_mask */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		/* fetch sense data from the device */
		tmp = atapi_eh_request_sense(qc->dev,
					     qc->scsicmd->sense_buffer);
		if (!tmp) {
			/* ATA_QCFLAG_SENSE_VALID is used to tell
			 * atapi_qc_complete() that sense data is
			 * already valid.
			 *
			 * TODO: interpret sense data and set
			 * appropriate err_mask.
			 */
			qc->flags |= ATA_QCFLAG_SENSE_VALID;
		} else
			qc->err_mask |= tmp;
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_SOFTRESET;

	return action;
}
1160*c6fd2807SJeff Garzik 
1161*c6fd2807SJeff Garzik static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
1162*c6fd2807SJeff Garzik {
1163*c6fd2807SJeff Garzik 	if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
1164*c6fd2807SJeff Garzik 		return 1;
1165*c6fd2807SJeff Garzik 
1166*c6fd2807SJeff Garzik 	if (ent->is_io) {
1167*c6fd2807SJeff Garzik 		if (ent->err_mask & AC_ERR_HSM)
1168*c6fd2807SJeff Garzik 			return 1;
1169*c6fd2807SJeff Garzik 		if ((ent->err_mask &
1170*c6fd2807SJeff Garzik 		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1171*c6fd2807SJeff Garzik 			return 2;
1172*c6fd2807SJeff Garzik 	}
1173*c6fd2807SJeff Garzik 
1174*c6fd2807SJeff Garzik 	return 0;
1175*c6fd2807SJeff Garzik }
1176*c6fd2807SJeff Garzik 
/* Accumulator for speed_down_needed_cb(): counts errors per category
 * (index = ata_eh_categorize_ering_entry() result) over error-ring
 * entries whose timestamp is >= @since (jiffies).
 */
struct speed_down_needed_arg {
	u64 since;
	int nr_errors[3];
};
1181*c6fd2807SJeff Garzik 
1182*c6fd2807SJeff Garzik static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
1183*c6fd2807SJeff Garzik {
1184*c6fd2807SJeff Garzik 	struct speed_down_needed_arg *arg = void_arg;
1185*c6fd2807SJeff Garzik 
1186*c6fd2807SJeff Garzik 	if (ent->timestamp < arg->since)
1187*c6fd2807SJeff Garzik 		return -1;
1188*c6fd2807SJeff Garzik 
1189*c6fd2807SJeff Garzik 	arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
1190*c6fd2807SJeff Garzik 	return 0;
1191*c6fd2807SJeff Garzik }
1192*c6fd2807SJeff Garzik 
/**
 *	ata_eh_speed_down_needed - Determine wheter speed down is necessary
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether speed down is necessary.  Speed down is necessary if
 *	there have been more than 3 of Cat-1 errors or 10 of Cat-2
 *	errors during last 15 minutes.
 *
 *	Cat-1 errors are ATA_BUS, TIMEOUT for any command and HSM
 *	violation for known supported commands.
 *
 *	Cat-2 errors are unclassified DEV error for known supported
 *	command.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if speed down is necessary, 0 otherwise
 */
static int ata_eh_speed_down_needed(struct ata_device *dev)
{
	const u64 interval = 15LLU * 60 * HZ;	/* 15 minutes in jiffies */
	/* err_limits[0] is -1: category-0 entries never trigger */
	static const int err_limits[3] = { -1, 3, 10 };
	struct speed_down_needed_arg arg;
	struct ata_ering_entry *ent;
	int err_cat;
	u64 j64;

	/* the most recent error decides which category's limit applies */
	ent = ata_ering_top(&dev->ering);
	if (!ent)
		return 0;

	err_cat = ata_eh_categorize_ering_entry(ent);
	if (err_cat == 0)
		return 0;

	memset(&arg, 0, sizeof(arg));

	/* count errors within the last interval, guarding against
	 * underflow early after boot
	 */
	j64 = get_jiffies_64();
	if (j64 >= interval)
		arg.since = j64 - interval;
	else
		arg.since = 0;

	ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);

	return arg.nr_errors[err_cat] > err_limits[err_cat];
}
1243*c6fd2807SJeff Garzik 
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@is_io: Did the device fail during normal IO?
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action (ATA_EH_HARDRESET/SOFTRESET) if a
 *	speed down was performed, 0 otherwise.
 */
static int ata_eh_speed_down(struct ata_device *dev, int is_io,
			     unsigned int err_mask)
{
	if (!err_mask)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, is_io, err_mask);

	if (!ata_eh_speed_down_needed(dev))
		return 0;

	/* speed down SATA link speed if possible */
	if (sata_down_spd_limit(dev->ap) == 0)
		return ATA_EH_HARDRESET;

	/* lower transfer mode */
	if (ata_down_xfermask_limit(dev, 0) == 0)
		return ATA_EH_SOFTRESET;

	/* nothing left to lower */
	ata_dev_printk(dev, KERN_ERR,
		       "speed down requested but no transfer mode left\n");
	return 0;
}
1285*c6fd2807SJeff Garzik 
1286*c6fd2807SJeff Garzik /**
1287*c6fd2807SJeff Garzik  *	ata_eh_autopsy - analyze error and determine recovery action
1288*c6fd2807SJeff Garzik  *	@ap: ATA port to perform autopsy on
1289*c6fd2807SJeff Garzik  *
1290*c6fd2807SJeff Garzik  *	Analyze why @ap failed and determine which recovery action is
1291*c6fd2807SJeff Garzik  *	needed.  This function also sets more detailed AC_ERR_* values
1292*c6fd2807SJeff Garzik  *	and fills sense data for ATAPI CHECK SENSE.
1293*c6fd2807SJeff Garzik  *
1294*c6fd2807SJeff Garzik  *	LOCKING:
1295*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1296*c6fd2807SJeff Garzik  */
1297*c6fd2807SJeff Garzik static void ata_eh_autopsy(struct ata_port *ap)
1298*c6fd2807SJeff Garzik {
1299*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
1300*c6fd2807SJeff Garzik 	unsigned int all_err_mask = 0;
1301*c6fd2807SJeff Garzik 	int tag, is_io = 0;
1302*c6fd2807SJeff Garzik 	u32 serror;
1303*c6fd2807SJeff Garzik 	int rc;
1304*c6fd2807SJeff Garzik 
1305*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1306*c6fd2807SJeff Garzik 
1307*c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1308*c6fd2807SJeff Garzik 		return;
1309*c6fd2807SJeff Garzik 
1310*c6fd2807SJeff Garzik 	/* obtain and analyze SError */
1311*c6fd2807SJeff Garzik 	rc = sata_scr_read(ap, SCR_ERROR, &serror);
1312*c6fd2807SJeff Garzik 	if (rc == 0) {
1313*c6fd2807SJeff Garzik 		ehc->i.serror |= serror;
1314*c6fd2807SJeff Garzik 		ata_eh_analyze_serror(ap);
1315*c6fd2807SJeff Garzik 	} else if (rc != -EOPNOTSUPP)
1316*c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
1317*c6fd2807SJeff Garzik 
1318*c6fd2807SJeff Garzik 	/* analyze NCQ failure */
1319*c6fd2807SJeff Garzik 	ata_eh_analyze_ncq_error(ap);
1320*c6fd2807SJeff Garzik 
1321*c6fd2807SJeff Garzik 	/* any real error trumps AC_ERR_OTHER */
1322*c6fd2807SJeff Garzik 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
1323*c6fd2807SJeff Garzik 		ehc->i.err_mask &= ~AC_ERR_OTHER;
1324*c6fd2807SJeff Garzik 
1325*c6fd2807SJeff Garzik 	all_err_mask |= ehc->i.err_mask;
1326*c6fd2807SJeff Garzik 
1327*c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1328*c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1329*c6fd2807SJeff Garzik 
1330*c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1331*c6fd2807SJeff Garzik 			continue;
1332*c6fd2807SJeff Garzik 
1333*c6fd2807SJeff Garzik 		/* inherit upper level err_mask */
1334*c6fd2807SJeff Garzik 		qc->err_mask |= ehc->i.err_mask;
1335*c6fd2807SJeff Garzik 
1336*c6fd2807SJeff Garzik 		/* analyze TF */
1337*c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
1338*c6fd2807SJeff Garzik 
1339*c6fd2807SJeff Garzik 		/* DEV errors are probably spurious in case of ATA_BUS error */
1340*c6fd2807SJeff Garzik 		if (qc->err_mask & AC_ERR_ATA_BUS)
1341*c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
1342*c6fd2807SJeff Garzik 					  AC_ERR_INVALID);
1343*c6fd2807SJeff Garzik 
1344*c6fd2807SJeff Garzik 		/* any real error trumps unknown error */
1345*c6fd2807SJeff Garzik 		if (qc->err_mask & ~AC_ERR_OTHER)
1346*c6fd2807SJeff Garzik 			qc->err_mask &= ~AC_ERR_OTHER;
1347*c6fd2807SJeff Garzik 
1348*c6fd2807SJeff Garzik 		/* SENSE_VALID trumps dev/unknown error and revalidation */
1349*c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
1350*c6fd2807SJeff Garzik 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
1351*c6fd2807SJeff Garzik 			ehc->i.action &= ~ATA_EH_REVALIDATE;
1352*c6fd2807SJeff Garzik 		}
1353*c6fd2807SJeff Garzik 
1354*c6fd2807SJeff Garzik 		/* accumulate error info */
1355*c6fd2807SJeff Garzik 		ehc->i.dev = qc->dev;
1356*c6fd2807SJeff Garzik 		all_err_mask |= qc->err_mask;
1357*c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_IO)
1358*c6fd2807SJeff Garzik 			is_io = 1;
1359*c6fd2807SJeff Garzik 	}
1360*c6fd2807SJeff Garzik 
1361*c6fd2807SJeff Garzik 	/* enforce default EH actions */
1362*c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN ||
1363*c6fd2807SJeff Garzik 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
1364*c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_SOFTRESET;
1365*c6fd2807SJeff Garzik 	else if (all_err_mask)
1366*c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
1367*c6fd2807SJeff Garzik 
1368*c6fd2807SJeff Garzik 	/* if we have offending qcs and the associated failed device */
1369*c6fd2807SJeff Garzik 	if (ehc->i.dev) {
1370*c6fd2807SJeff Garzik 		/* speed down */
1371*c6fd2807SJeff Garzik 		ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
1372*c6fd2807SJeff Garzik 						   all_err_mask);
1373*c6fd2807SJeff Garzik 
1374*c6fd2807SJeff Garzik 		/* perform per-dev EH action only on the offending device */
1375*c6fd2807SJeff Garzik 		ehc->i.dev_action[ehc->i.dev->devno] |=
1376*c6fd2807SJeff Garzik 			ehc->i.action & ATA_EH_PERDEV_MASK;
1377*c6fd2807SJeff Garzik 		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
1378*c6fd2807SJeff Garzik 	}
1379*c6fd2807SJeff Garzik 
1380*c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
1381*c6fd2807SJeff Garzik }
1382*c6fd2807SJeff Garzik 
1383*c6fd2807SJeff Garzik /**
1384*c6fd2807SJeff Garzik  *	ata_eh_report - report error handling to user
1385*c6fd2807SJeff Garzik  *	@ap: ATA port EH is going on
1386*c6fd2807SJeff Garzik  *
1387*c6fd2807SJeff Garzik  *	Report EH to user.
1388*c6fd2807SJeff Garzik  *
1389*c6fd2807SJeff Garzik  *	LOCKING:
1390*c6fd2807SJeff Garzik  *	None.
1391*c6fd2807SJeff Garzik  */
1392*c6fd2807SJeff Garzik static void ata_eh_report(struct ata_port *ap)
1393*c6fd2807SJeff Garzik {
1394*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
1395*c6fd2807SJeff Garzik 	const char *frozen, *desc;
1396*c6fd2807SJeff Garzik 	int tag, nr_failed = 0;
1397*c6fd2807SJeff Garzik 
1398*c6fd2807SJeff Garzik 	desc = NULL;
1399*c6fd2807SJeff Garzik 	if (ehc->i.desc[0] != '\0')
1400*c6fd2807SJeff Garzik 		desc = ehc->i.desc;
1401*c6fd2807SJeff Garzik 
1402*c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1403*c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1404*c6fd2807SJeff Garzik 
1405*c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
1406*c6fd2807SJeff Garzik 			continue;
1407*c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
1408*c6fd2807SJeff Garzik 			continue;
1409*c6fd2807SJeff Garzik 
1410*c6fd2807SJeff Garzik 		nr_failed++;
1411*c6fd2807SJeff Garzik 	}
1412*c6fd2807SJeff Garzik 
1413*c6fd2807SJeff Garzik 	if (!nr_failed && !ehc->i.err_mask)
1414*c6fd2807SJeff Garzik 		return;
1415*c6fd2807SJeff Garzik 
1416*c6fd2807SJeff Garzik 	frozen = "";
1417*c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN)
1418*c6fd2807SJeff Garzik 		frozen = " frozen";
1419*c6fd2807SJeff Garzik 
1420*c6fd2807SJeff Garzik 	if (ehc->i.dev) {
1421*c6fd2807SJeff Garzik 		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
1422*c6fd2807SJeff Garzik 			       "SAct 0x%x SErr 0x%x action 0x%x%s\n",
1423*c6fd2807SJeff Garzik 			       ehc->i.err_mask, ap->sactive, ehc->i.serror,
1424*c6fd2807SJeff Garzik 			       ehc->i.action, frozen);
1425*c6fd2807SJeff Garzik 		if (desc)
1426*c6fd2807SJeff Garzik 			ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
1427*c6fd2807SJeff Garzik 	} else {
1428*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
1429*c6fd2807SJeff Garzik 				"SAct 0x%x SErr 0x%x action 0x%x%s\n",
1430*c6fd2807SJeff Garzik 				ehc->i.err_mask, ap->sactive, ehc->i.serror,
1431*c6fd2807SJeff Garzik 				ehc->i.action, frozen);
1432*c6fd2807SJeff Garzik 		if (desc)
1433*c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
1434*c6fd2807SJeff Garzik 	}
1435*c6fd2807SJeff Garzik 
1436*c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1437*c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1438*c6fd2807SJeff Garzik 
1439*c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1440*c6fd2807SJeff Garzik 			continue;
1441*c6fd2807SJeff Garzik 
1442*c6fd2807SJeff Garzik 		ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
1443*c6fd2807SJeff Garzik 			       "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
1444*c6fd2807SJeff Garzik 			       qc->tag, qc->tf.command, qc->err_mask,
1445*c6fd2807SJeff Garzik 			       qc->result_tf.command, qc->result_tf.feature,
1446*c6fd2807SJeff Garzik 			       ata_err_string(qc->err_mask));
1447*c6fd2807SJeff Garzik 	}
1448*c6fd2807SJeff Garzik }
1449*c6fd2807SJeff Garzik 
1450*c6fd2807SJeff Garzik static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
1451*c6fd2807SJeff Garzik 			unsigned int *classes)
1452*c6fd2807SJeff Garzik {
1453*c6fd2807SJeff Garzik 	int i, rc;
1454*c6fd2807SJeff Garzik 
1455*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
1456*c6fd2807SJeff Garzik 		classes[i] = ATA_DEV_UNKNOWN;
1457*c6fd2807SJeff Garzik 
1458*c6fd2807SJeff Garzik 	rc = reset(ap, classes);
1459*c6fd2807SJeff Garzik 	if (rc)
1460*c6fd2807SJeff Garzik 		return rc;
1461*c6fd2807SJeff Garzik 
1462*c6fd2807SJeff Garzik 	/* If any class isn't ATA_DEV_UNKNOWN, consider classification
1463*c6fd2807SJeff Garzik 	 * is complete and convert all ATA_DEV_UNKNOWN to
1464*c6fd2807SJeff Garzik 	 * ATA_DEV_NONE.
1465*c6fd2807SJeff Garzik 	 */
1466*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
1467*c6fd2807SJeff Garzik 		if (classes[i] != ATA_DEV_UNKNOWN)
1468*c6fd2807SJeff Garzik 			break;
1469*c6fd2807SJeff Garzik 
1470*c6fd2807SJeff Garzik 	if (i < ATA_MAX_DEVICES)
1471*c6fd2807SJeff Garzik 		for (i = 0; i < ATA_MAX_DEVICES; i++)
1472*c6fd2807SJeff Garzik 			if (classes[i] == ATA_DEV_UNKNOWN)
1473*c6fd2807SJeff Garzik 				classes[i] = ATA_DEV_NONE;
1474*c6fd2807SJeff Garzik 
1475*c6fd2807SJeff Garzik 	return 0;
1476*c6fd2807SJeff Garzik }
1477*c6fd2807SJeff Garzik 
1478*c6fd2807SJeff Garzik static int ata_eh_followup_srst_needed(int rc, int classify,
1479*c6fd2807SJeff Garzik 				       const unsigned int *classes)
1480*c6fd2807SJeff Garzik {
1481*c6fd2807SJeff Garzik 	if (rc == -EAGAIN)
1482*c6fd2807SJeff Garzik 		return 1;
1483*c6fd2807SJeff Garzik 	if (rc != 0)
1484*c6fd2807SJeff Garzik 		return 0;
1485*c6fd2807SJeff Garzik 	if (classify && classes[0] == ATA_DEV_UNKNOWN)
1486*c6fd2807SJeff Garzik 		return 1;
1487*c6fd2807SJeff Garzik 	return 0;
1488*c6fd2807SJeff Garzik }
1489*c6fd2807SJeff Garzik 
1490*c6fd2807SJeff Garzik static int ata_eh_reset(struct ata_port *ap, int classify,
1491*c6fd2807SJeff Garzik 			ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
1492*c6fd2807SJeff Garzik 			ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
1493*c6fd2807SJeff Garzik {
1494*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
1495*c6fd2807SJeff Garzik 	unsigned int *classes = ehc->classes;
1496*c6fd2807SJeff Garzik 	int tries = ATA_EH_RESET_TRIES;
1497*c6fd2807SJeff Garzik 	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
1498*c6fd2807SJeff Garzik 	unsigned int action;
1499*c6fd2807SJeff Garzik 	ata_reset_fn_t reset;
1500*c6fd2807SJeff Garzik 	int i, did_followup_srst, rc;
1501*c6fd2807SJeff Garzik 
1502*c6fd2807SJeff Garzik 	/* about to reset */
1503*c6fd2807SJeff Garzik 	ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1504*c6fd2807SJeff Garzik 
1505*c6fd2807SJeff Garzik 	/* Determine which reset to use and record in ehc->i.action.
1506*c6fd2807SJeff Garzik 	 * prereset() may examine and modify it.
1507*c6fd2807SJeff Garzik 	 */
1508*c6fd2807SJeff Garzik 	action = ehc->i.action;
1509*c6fd2807SJeff Garzik 	ehc->i.action &= ~ATA_EH_RESET_MASK;
1510*c6fd2807SJeff Garzik 	if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
1511*c6fd2807SJeff Garzik 					 !(action & ATA_EH_HARDRESET))))
1512*c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_SOFTRESET;
1513*c6fd2807SJeff Garzik 	else
1514*c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
1515*c6fd2807SJeff Garzik 
1516*c6fd2807SJeff Garzik 	if (prereset) {
1517*c6fd2807SJeff Garzik 		rc = prereset(ap);
1518*c6fd2807SJeff Garzik 		if (rc) {
1519*c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR,
1520*c6fd2807SJeff Garzik 					"prereset failed (errno=%d)\n", rc);
1521*c6fd2807SJeff Garzik 			return rc;
1522*c6fd2807SJeff Garzik 		}
1523*c6fd2807SJeff Garzik 	}
1524*c6fd2807SJeff Garzik 
1525*c6fd2807SJeff Garzik 	/* prereset() might have modified ehc->i.action */
1526*c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
1527*c6fd2807SJeff Garzik 		reset = hardreset;
1528*c6fd2807SJeff Garzik 	else if (ehc->i.action & ATA_EH_SOFTRESET)
1529*c6fd2807SJeff Garzik 		reset = softreset;
1530*c6fd2807SJeff Garzik 	else {
1531*c6fd2807SJeff Garzik 		/* prereset told us not to reset, bang classes and return */
1532*c6fd2807SJeff Garzik 		for (i = 0; i < ATA_MAX_DEVICES; i++)
1533*c6fd2807SJeff Garzik 			classes[i] = ATA_DEV_NONE;
1534*c6fd2807SJeff Garzik 		return 0;
1535*c6fd2807SJeff Garzik 	}
1536*c6fd2807SJeff Garzik 
1537*c6fd2807SJeff Garzik 	/* did prereset() screw up?  if so, fix up to avoid oopsing */
1538*c6fd2807SJeff Garzik 	if (!reset) {
1539*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
1540*c6fd2807SJeff Garzik 				"invalid reset type\n");
1541*c6fd2807SJeff Garzik 		if (softreset)
1542*c6fd2807SJeff Garzik 			reset = softreset;
1543*c6fd2807SJeff Garzik 		else
1544*c6fd2807SJeff Garzik 			reset = hardreset;
1545*c6fd2807SJeff Garzik 	}
1546*c6fd2807SJeff Garzik 
1547*c6fd2807SJeff Garzik  retry:
1548*c6fd2807SJeff Garzik 	/* shut up during boot probing */
1549*c6fd2807SJeff Garzik 	if (verbose)
1550*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
1551*c6fd2807SJeff Garzik 				reset == softreset ? "soft" : "hard");
1552*c6fd2807SJeff Garzik 
1553*c6fd2807SJeff Garzik 	/* mark that this EH session started with reset */
1554*c6fd2807SJeff Garzik 	ehc->i.flags |= ATA_EHI_DID_RESET;
1555*c6fd2807SJeff Garzik 
1556*c6fd2807SJeff Garzik 	rc = ata_do_reset(ap, reset, classes);
1557*c6fd2807SJeff Garzik 
1558*c6fd2807SJeff Garzik 	did_followup_srst = 0;
1559*c6fd2807SJeff Garzik 	if (reset == hardreset &&
1560*c6fd2807SJeff Garzik 	    ata_eh_followup_srst_needed(rc, classify, classes)) {
1561*c6fd2807SJeff Garzik 		/* okay, let's do follow-up softreset */
1562*c6fd2807SJeff Garzik 		did_followup_srst = 1;
1563*c6fd2807SJeff Garzik 		reset = softreset;
1564*c6fd2807SJeff Garzik 
1565*c6fd2807SJeff Garzik 		if (!reset) {
1566*c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR,
1567*c6fd2807SJeff Garzik 					"follow-up softreset required "
1568*c6fd2807SJeff Garzik 					"but no softreset avaliable\n");
1569*c6fd2807SJeff Garzik 			return -EINVAL;
1570*c6fd2807SJeff Garzik 		}
1571*c6fd2807SJeff Garzik 
1572*c6fd2807SJeff Garzik 		ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
1573*c6fd2807SJeff Garzik 		rc = ata_do_reset(ap, reset, classes);
1574*c6fd2807SJeff Garzik 
1575*c6fd2807SJeff Garzik 		if (rc == 0 && classify &&
1576*c6fd2807SJeff Garzik 		    classes[0] == ATA_DEV_UNKNOWN) {
1577*c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR,
1578*c6fd2807SJeff Garzik 					"classification failed\n");
1579*c6fd2807SJeff Garzik 			return -EINVAL;
1580*c6fd2807SJeff Garzik 		}
1581*c6fd2807SJeff Garzik 	}
1582*c6fd2807SJeff Garzik 
1583*c6fd2807SJeff Garzik 	if (rc && --tries) {
1584*c6fd2807SJeff Garzik 		const char *type;
1585*c6fd2807SJeff Garzik 
1586*c6fd2807SJeff Garzik 		if (reset == softreset) {
1587*c6fd2807SJeff Garzik 			if (did_followup_srst)
1588*c6fd2807SJeff Garzik 				type = "follow-up soft";
1589*c6fd2807SJeff Garzik 			else
1590*c6fd2807SJeff Garzik 				type = "soft";
1591*c6fd2807SJeff Garzik 		} else
1592*c6fd2807SJeff Garzik 			type = "hard";
1593*c6fd2807SJeff Garzik 
1594*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
1595*c6fd2807SJeff Garzik 				"%sreset failed, retrying in 5 secs\n", type);
1596*c6fd2807SJeff Garzik 		ssleep(5);
1597*c6fd2807SJeff Garzik 
1598*c6fd2807SJeff Garzik 		if (reset == hardreset)
1599*c6fd2807SJeff Garzik 			sata_down_spd_limit(ap);
1600*c6fd2807SJeff Garzik 		if (hardreset)
1601*c6fd2807SJeff Garzik 			reset = hardreset;
1602*c6fd2807SJeff Garzik 		goto retry;
1603*c6fd2807SJeff Garzik 	}
1604*c6fd2807SJeff Garzik 
1605*c6fd2807SJeff Garzik 	if (rc == 0) {
1606*c6fd2807SJeff Garzik 		/* After the reset, the device state is PIO 0 and the
1607*c6fd2807SJeff Garzik 		 * controller state is undefined.  Record the mode.
1608*c6fd2807SJeff Garzik 		 */
1609*c6fd2807SJeff Garzik 		for (i = 0; i < ATA_MAX_DEVICES; i++)
1610*c6fd2807SJeff Garzik 			ap->device[i].pio_mode = XFER_PIO_0;
1611*c6fd2807SJeff Garzik 
1612*c6fd2807SJeff Garzik 		if (postreset)
1613*c6fd2807SJeff Garzik 			postreset(ap, classes);
1614*c6fd2807SJeff Garzik 
1615*c6fd2807SJeff Garzik 		/* reset successful, schedule revalidation */
1616*c6fd2807SJeff Garzik 		ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
1617*c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_REVALIDATE;
1618*c6fd2807SJeff Garzik 	}
1619*c6fd2807SJeff Garzik 
1620*c6fd2807SJeff Garzik 	return rc;
1621*c6fd2807SJeff Garzik }
1622*c6fd2807SJeff Garzik 
1623*c6fd2807SJeff Garzik static int ata_eh_revalidate_and_attach(struct ata_port *ap,
1624*c6fd2807SJeff Garzik 					struct ata_device **r_failed_dev)
1625*c6fd2807SJeff Garzik {
1626*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
1627*c6fd2807SJeff Garzik 	struct ata_device *dev;
1628*c6fd2807SJeff Garzik 	unsigned long flags;
1629*c6fd2807SJeff Garzik 	int i, rc = 0;
1630*c6fd2807SJeff Garzik 
1631*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1632*c6fd2807SJeff Garzik 
1633*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1634*c6fd2807SJeff Garzik 		unsigned int action;
1635*c6fd2807SJeff Garzik 
1636*c6fd2807SJeff Garzik 		dev = &ap->device[i];
1637*c6fd2807SJeff Garzik 		action = ata_eh_dev_action(dev);
1638*c6fd2807SJeff Garzik 
1639*c6fd2807SJeff Garzik 		if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
1640*c6fd2807SJeff Garzik 			if (ata_port_offline(ap)) {
1641*c6fd2807SJeff Garzik 				rc = -EIO;
1642*c6fd2807SJeff Garzik 				break;
1643*c6fd2807SJeff Garzik 			}
1644*c6fd2807SJeff Garzik 
1645*c6fd2807SJeff Garzik 			ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
1646*c6fd2807SJeff Garzik 			rc = ata_dev_revalidate(dev,
1647*c6fd2807SJeff Garzik 					ehc->i.flags & ATA_EHI_DID_RESET);
1648*c6fd2807SJeff Garzik 			if (rc)
1649*c6fd2807SJeff Garzik 				break;
1650*c6fd2807SJeff Garzik 
1651*c6fd2807SJeff Garzik 			ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
1652*c6fd2807SJeff Garzik 
1653*c6fd2807SJeff Garzik 			/* schedule the scsi_rescan_device() here */
1654*c6fd2807SJeff Garzik 			queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
1655*c6fd2807SJeff Garzik 		} else if (dev->class == ATA_DEV_UNKNOWN &&
1656*c6fd2807SJeff Garzik 			   ehc->tries[dev->devno] &&
1657*c6fd2807SJeff Garzik 			   ata_class_enabled(ehc->classes[dev->devno])) {
1658*c6fd2807SJeff Garzik 			dev->class = ehc->classes[dev->devno];
1659*c6fd2807SJeff Garzik 
1660*c6fd2807SJeff Garzik 			rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1661*c6fd2807SJeff Garzik 			if (rc == 0)
1662*c6fd2807SJeff Garzik 				rc = ata_dev_configure(dev, 1);
1663*c6fd2807SJeff Garzik 
1664*c6fd2807SJeff Garzik 			if (rc) {
1665*c6fd2807SJeff Garzik 				dev->class = ATA_DEV_UNKNOWN;
1666*c6fd2807SJeff Garzik 				break;
1667*c6fd2807SJeff Garzik 			}
1668*c6fd2807SJeff Garzik 
1669*c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
1670*c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1671*c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
1672*c6fd2807SJeff Garzik 		}
1673*c6fd2807SJeff Garzik 	}
1674*c6fd2807SJeff Garzik 
1675*c6fd2807SJeff Garzik 	if (rc)
1676*c6fd2807SJeff Garzik 		*r_failed_dev = dev;
1677*c6fd2807SJeff Garzik 
1678*c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
1679*c6fd2807SJeff Garzik 	return rc;
1680*c6fd2807SJeff Garzik }
1681*c6fd2807SJeff Garzik 
1682*c6fd2807SJeff Garzik /**
1683*c6fd2807SJeff Garzik  *	ata_eh_suspend - handle suspend EH action
1684*c6fd2807SJeff Garzik  *	@ap: target host port
1685*c6fd2807SJeff Garzik  *	@r_failed_dev: result parameter to indicate failing device
1686*c6fd2807SJeff Garzik  *
1687*c6fd2807SJeff Garzik  *	Handle suspend EH action.  Disk devices are spinned down and
1688*c6fd2807SJeff Garzik  *	other types of devices are just marked suspended.  Once
1689*c6fd2807SJeff Garzik  *	suspended, no EH action to the device is allowed until it is
1690*c6fd2807SJeff Garzik  *	resumed.
1691*c6fd2807SJeff Garzik  *
1692*c6fd2807SJeff Garzik  *	LOCKING:
1693*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1694*c6fd2807SJeff Garzik  *
1695*c6fd2807SJeff Garzik  *	RETURNS:
1696*c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1697*c6fd2807SJeff Garzik  */
1698*c6fd2807SJeff Garzik static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
1699*c6fd2807SJeff Garzik {
1700*c6fd2807SJeff Garzik 	struct ata_device *dev;
1701*c6fd2807SJeff Garzik 	int i, rc = 0;
1702*c6fd2807SJeff Garzik 
1703*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1704*c6fd2807SJeff Garzik 
1705*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1706*c6fd2807SJeff Garzik 		unsigned long flags;
1707*c6fd2807SJeff Garzik 		unsigned int action, err_mask;
1708*c6fd2807SJeff Garzik 
1709*c6fd2807SJeff Garzik 		dev = &ap->device[i];
1710*c6fd2807SJeff Garzik 		action = ata_eh_dev_action(dev);
1711*c6fd2807SJeff Garzik 
1712*c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
1713*c6fd2807SJeff Garzik 			continue;
1714*c6fd2807SJeff Garzik 
1715*c6fd2807SJeff Garzik 		WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
1716*c6fd2807SJeff Garzik 
1717*c6fd2807SJeff Garzik 		ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
1718*c6fd2807SJeff Garzik 
1719*c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1720*c6fd2807SJeff Garzik 			/* flush cache */
1721*c6fd2807SJeff Garzik 			rc = ata_flush_cache(dev);
1722*c6fd2807SJeff Garzik 			if (rc)
1723*c6fd2807SJeff Garzik 				break;
1724*c6fd2807SJeff Garzik 
1725*c6fd2807SJeff Garzik 			/* spin down */
1726*c6fd2807SJeff Garzik 			err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
1727*c6fd2807SJeff Garzik 			if (err_mask) {
1728*c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_ERR, "failed to "
1729*c6fd2807SJeff Garzik 					       "spin down (err_mask=0x%x)\n",
1730*c6fd2807SJeff Garzik 					       err_mask);
1731*c6fd2807SJeff Garzik 				rc = -EIO;
1732*c6fd2807SJeff Garzik 				break;
1733*c6fd2807SJeff Garzik 			}
1734*c6fd2807SJeff Garzik 		}
1735*c6fd2807SJeff Garzik 
1736*c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1737*c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_SUSPENDED;
1738*c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1739*c6fd2807SJeff Garzik 
1740*c6fd2807SJeff Garzik 		ata_eh_done(ap, dev, ATA_EH_SUSPEND);
1741*c6fd2807SJeff Garzik 	}
1742*c6fd2807SJeff Garzik 
1743*c6fd2807SJeff Garzik 	if (rc)
1744*c6fd2807SJeff Garzik 		*r_failed_dev = dev;
1745*c6fd2807SJeff Garzik 
1746*c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
1747*c6fd2807SJeff Garzik 	return 0;
1748*c6fd2807SJeff Garzik }
1749*c6fd2807SJeff Garzik 
1750*c6fd2807SJeff Garzik /**
1751*c6fd2807SJeff Garzik  *	ata_eh_prep_resume - prep for resume EH action
1752*c6fd2807SJeff Garzik  *	@ap: target host port
1753*c6fd2807SJeff Garzik  *
1754*c6fd2807SJeff Garzik  *	Clear SUSPENDED in preparation for scheduled resume actions.
1755*c6fd2807SJeff Garzik  *	This allows other parts of EH to access the devices being
1756*c6fd2807SJeff Garzik  *	resumed.
1757*c6fd2807SJeff Garzik  *
1758*c6fd2807SJeff Garzik  *	LOCKING:
1759*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1760*c6fd2807SJeff Garzik  */
1761*c6fd2807SJeff Garzik static void ata_eh_prep_resume(struct ata_port *ap)
1762*c6fd2807SJeff Garzik {
1763*c6fd2807SJeff Garzik 	struct ata_device *dev;
1764*c6fd2807SJeff Garzik 	unsigned long flags;
1765*c6fd2807SJeff Garzik 	int i;
1766*c6fd2807SJeff Garzik 
1767*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1768*c6fd2807SJeff Garzik 
1769*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1770*c6fd2807SJeff Garzik 		unsigned int action;
1771*c6fd2807SJeff Garzik 
1772*c6fd2807SJeff Garzik 		dev = &ap->device[i];
1773*c6fd2807SJeff Garzik 		action = ata_eh_dev_action(dev);
1774*c6fd2807SJeff Garzik 
1775*c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1776*c6fd2807SJeff Garzik 			continue;
1777*c6fd2807SJeff Garzik 
1778*c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1779*c6fd2807SJeff Garzik 		dev->flags &= ~ATA_DFLAG_SUSPENDED;
1780*c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1781*c6fd2807SJeff Garzik 	}
1782*c6fd2807SJeff Garzik 
1783*c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
1784*c6fd2807SJeff Garzik }
1785*c6fd2807SJeff Garzik 
1786*c6fd2807SJeff Garzik /**
1787*c6fd2807SJeff Garzik  *	ata_eh_resume - handle resume EH action
1788*c6fd2807SJeff Garzik  *	@ap: target host port
1789*c6fd2807SJeff Garzik  *	@r_failed_dev: result parameter to indicate failing device
1790*c6fd2807SJeff Garzik  *
1791*c6fd2807SJeff Garzik  *	Handle resume EH action.  Target devices are already reset and
1792*c6fd2807SJeff Garzik  *	revalidated.  Spinning up is the only operation left.
1793*c6fd2807SJeff Garzik  *
1794*c6fd2807SJeff Garzik  *	LOCKING:
1795*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1796*c6fd2807SJeff Garzik  *
1797*c6fd2807SJeff Garzik  *	RETURNS:
1798*c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1799*c6fd2807SJeff Garzik  */
1800*c6fd2807SJeff Garzik static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
1801*c6fd2807SJeff Garzik {
1802*c6fd2807SJeff Garzik 	struct ata_device *dev;
1803*c6fd2807SJeff Garzik 	int i, rc = 0;
1804*c6fd2807SJeff Garzik 
1805*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1806*c6fd2807SJeff Garzik 
1807*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1808*c6fd2807SJeff Garzik 		unsigned int action, err_mask;
1809*c6fd2807SJeff Garzik 
1810*c6fd2807SJeff Garzik 		dev = &ap->device[i];
1811*c6fd2807SJeff Garzik 		action = ata_eh_dev_action(dev);
1812*c6fd2807SJeff Garzik 
1813*c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
1814*c6fd2807SJeff Garzik 			continue;
1815*c6fd2807SJeff Garzik 
1816*c6fd2807SJeff Garzik 		ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
1817*c6fd2807SJeff Garzik 
1818*c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
1819*c6fd2807SJeff Garzik 			err_mask = ata_do_simple_cmd(dev,
1820*c6fd2807SJeff Garzik 						     ATA_CMD_IDLEIMMEDIATE);
1821*c6fd2807SJeff Garzik 			if (err_mask) {
1822*c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_ERR, "failed to "
1823*c6fd2807SJeff Garzik 					       "spin up (err_mask=0x%x)\n",
1824*c6fd2807SJeff Garzik 					       err_mask);
1825*c6fd2807SJeff Garzik 				rc = -EIO;
1826*c6fd2807SJeff Garzik 				break;
1827*c6fd2807SJeff Garzik 			}
1828*c6fd2807SJeff Garzik 		}
1829*c6fd2807SJeff Garzik 
1830*c6fd2807SJeff Garzik 		ata_eh_done(ap, dev, ATA_EH_RESUME);
1831*c6fd2807SJeff Garzik 	}
1832*c6fd2807SJeff Garzik 
1833*c6fd2807SJeff Garzik 	if (rc)
1834*c6fd2807SJeff Garzik 		*r_failed_dev = dev;
1835*c6fd2807SJeff Garzik 
1836*c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
1837*c6fd2807SJeff Garzik 	return 0;
1838*c6fd2807SJeff Garzik }
1839*c6fd2807SJeff Garzik 
1840*c6fd2807SJeff Garzik static int ata_port_nr_enabled(struct ata_port *ap)
1841*c6fd2807SJeff Garzik {
1842*c6fd2807SJeff Garzik 	int i, cnt = 0;
1843*c6fd2807SJeff Garzik 
1844*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
1845*c6fd2807SJeff Garzik 		if (ata_dev_enabled(&ap->device[i]))
1846*c6fd2807SJeff Garzik 			cnt++;
1847*c6fd2807SJeff Garzik 	return cnt;
1848*c6fd2807SJeff Garzik }
1849*c6fd2807SJeff Garzik 
1850*c6fd2807SJeff Garzik static int ata_port_nr_vacant(struct ata_port *ap)
1851*c6fd2807SJeff Garzik {
1852*c6fd2807SJeff Garzik 	int i, cnt = 0;
1853*c6fd2807SJeff Garzik 
1854*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
1855*c6fd2807SJeff Garzik 		if (ap->device[i].class == ATA_DEV_UNKNOWN)
1856*c6fd2807SJeff Garzik 			cnt++;
1857*c6fd2807SJeff Garzik 	return cnt;
1858*c6fd2807SJeff Garzik }
1859*c6fd2807SJeff Garzik 
1860*c6fd2807SJeff Garzik static int ata_eh_skip_recovery(struct ata_port *ap)
1861*c6fd2807SJeff Garzik {
1862*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
1863*c6fd2807SJeff Garzik 	int i;
1864*c6fd2807SJeff Garzik 
1865*c6fd2807SJeff Garzik 	/* skip if all possible devices are suspended */
1866*c6fd2807SJeff Garzik 	for (i = 0; i < ata_port_max_devices(ap); i++) {
1867*c6fd2807SJeff Garzik 		struct ata_device *dev = &ap->device[i];
1868*c6fd2807SJeff Garzik 
1869*c6fd2807SJeff Garzik 		if (!(dev->flags & ATA_DFLAG_SUSPENDED))
1870*c6fd2807SJeff Garzik 			break;
1871*c6fd2807SJeff Garzik 	}
1872*c6fd2807SJeff Garzik 
1873*c6fd2807SJeff Garzik 	if (i == ata_port_max_devices(ap))
1874*c6fd2807SJeff Garzik 		return 1;
1875*c6fd2807SJeff Garzik 
1876*c6fd2807SJeff Garzik 	/* thaw frozen port, resume link and recover failed devices */
1877*c6fd2807SJeff Garzik 	if ((ap->pflags & ATA_PFLAG_FROZEN) ||
1878*c6fd2807SJeff Garzik 	    (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
1879*c6fd2807SJeff Garzik 		return 0;
1880*c6fd2807SJeff Garzik 
1881*c6fd2807SJeff Garzik 	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
1882*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1883*c6fd2807SJeff Garzik 		struct ata_device *dev = &ap->device[i];
1884*c6fd2807SJeff Garzik 
1885*c6fd2807SJeff Garzik 		if (dev->class == ATA_DEV_UNKNOWN &&
1886*c6fd2807SJeff Garzik 		    ehc->classes[dev->devno] != ATA_DEV_NONE)
1887*c6fd2807SJeff Garzik 			return 0;
1888*c6fd2807SJeff Garzik 	}
1889*c6fd2807SJeff Garzik 
1890*c6fd2807SJeff Garzik 	return 1;
1891*c6fd2807SJeff Garzik }
1892*c6fd2807SJeff Garzik 
1893*c6fd2807SJeff Garzik /**
1894*c6fd2807SJeff Garzik  *	ata_eh_recover - recover host port after error
1895*c6fd2807SJeff Garzik  *	@ap: host port to recover
1896*c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
1897*c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
1898*c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
1899*c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
1900*c6fd2807SJeff Garzik  *
1901*c6fd2807SJeff Garzik  *	This is the alpha and omega, eum and yang, heart and soul of
1902*c6fd2807SJeff Garzik  *	libata exception handling.  On entry, actions required to
1903*c6fd2807SJeff Garzik  *	recover the port and hotplug requests are recorded in
1904*c6fd2807SJeff Garzik  *	eh_context.  This function executes all the operations with
1905*c6fd2807SJeff Garzik  *	appropriate retrials and fallbacks to resurrect failed
1906*c6fd2807SJeff Garzik  *	devices, detach goners and greet newcomers.
1907*c6fd2807SJeff Garzik  *
1908*c6fd2807SJeff Garzik  *	LOCKING:
1909*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1910*c6fd2807SJeff Garzik  *
1911*c6fd2807SJeff Garzik  *	RETURNS:
1912*c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
1913*c6fd2807SJeff Garzik  */
1914*c6fd2807SJeff Garzik static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
1915*c6fd2807SJeff Garzik 			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1916*c6fd2807SJeff Garzik 			  ata_postreset_fn_t postreset)
1917*c6fd2807SJeff Garzik {
1918*c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
1919*c6fd2807SJeff Garzik 	struct ata_device *dev;
1920*c6fd2807SJeff Garzik 	int down_xfermask, i, rc;
1921*c6fd2807SJeff Garzik 
1922*c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1923*c6fd2807SJeff Garzik 
1924*c6fd2807SJeff Garzik 	/* prep for recovery */
1925*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
1926*c6fd2807SJeff Garzik 		dev = &ap->device[i];
1927*c6fd2807SJeff Garzik 
1928*c6fd2807SJeff Garzik 		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
1929*c6fd2807SJeff Garzik 
1930*c6fd2807SJeff Garzik 		/* process hotplug request */
1931*c6fd2807SJeff Garzik 		if (dev->flags & ATA_DFLAG_DETACH)
1932*c6fd2807SJeff Garzik 			ata_eh_detach_dev(dev);
1933*c6fd2807SJeff Garzik 
1934*c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) &&
1935*c6fd2807SJeff Garzik 		    ((ehc->i.probe_mask & (1 << dev->devno)) &&
1936*c6fd2807SJeff Garzik 		     !(ehc->did_probe_mask & (1 << dev->devno)))) {
1937*c6fd2807SJeff Garzik 			ata_eh_detach_dev(dev);
1938*c6fd2807SJeff Garzik 			ata_dev_init(dev);
1939*c6fd2807SJeff Garzik 			ehc->did_probe_mask |= (1 << dev->devno);
1940*c6fd2807SJeff Garzik 			ehc->i.action |= ATA_EH_SOFTRESET;
1941*c6fd2807SJeff Garzik 		}
1942*c6fd2807SJeff Garzik 	}
1943*c6fd2807SJeff Garzik 
1944*c6fd2807SJeff Garzik  retry:
1945*c6fd2807SJeff Garzik 	down_xfermask = 0;
1946*c6fd2807SJeff Garzik 	rc = 0;
1947*c6fd2807SJeff Garzik 
1948*c6fd2807SJeff Garzik 	/* if UNLOADING, finish immediately */
1949*c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_UNLOADING)
1950*c6fd2807SJeff Garzik 		goto out;
1951*c6fd2807SJeff Garzik 
1952*c6fd2807SJeff Garzik 	/* prep for resume */
1953*c6fd2807SJeff Garzik 	ata_eh_prep_resume(ap);
1954*c6fd2807SJeff Garzik 
1955*c6fd2807SJeff Garzik 	/* skip EH if possible. */
1956*c6fd2807SJeff Garzik 	if (ata_eh_skip_recovery(ap))
1957*c6fd2807SJeff Garzik 		ehc->i.action = 0;
1958*c6fd2807SJeff Garzik 
1959*c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
1960*c6fd2807SJeff Garzik 		ehc->classes[i] = ATA_DEV_UNKNOWN;
1961*c6fd2807SJeff Garzik 
1962*c6fd2807SJeff Garzik 	/* reset */
1963*c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_RESET_MASK) {
1964*c6fd2807SJeff Garzik 		ata_eh_freeze_port(ap);
1965*c6fd2807SJeff Garzik 
1966*c6fd2807SJeff Garzik 		rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
1967*c6fd2807SJeff Garzik 				  softreset, hardreset, postreset);
1968*c6fd2807SJeff Garzik 		if (rc) {
1969*c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_ERR,
1970*c6fd2807SJeff Garzik 					"reset failed, giving up\n");
1971*c6fd2807SJeff Garzik 			goto out;
1972*c6fd2807SJeff Garzik 		}
1973*c6fd2807SJeff Garzik 
1974*c6fd2807SJeff Garzik 		ata_eh_thaw_port(ap);
1975*c6fd2807SJeff Garzik 	}
1976*c6fd2807SJeff Garzik 
1977*c6fd2807SJeff Garzik 	/* revalidate existing devices and attach new ones */
1978*c6fd2807SJeff Garzik 	rc = ata_eh_revalidate_and_attach(ap, &dev);
1979*c6fd2807SJeff Garzik 	if (rc)
1980*c6fd2807SJeff Garzik 		goto dev_fail;
1981*c6fd2807SJeff Garzik 
1982*c6fd2807SJeff Garzik 	/* resume devices */
1983*c6fd2807SJeff Garzik 	rc = ata_eh_resume(ap, &dev);
1984*c6fd2807SJeff Garzik 	if (rc)
1985*c6fd2807SJeff Garzik 		goto dev_fail;
1986*c6fd2807SJeff Garzik 
1987*c6fd2807SJeff Garzik 	/* configure transfer mode if the port has been reset */
1988*c6fd2807SJeff Garzik 	if (ehc->i.flags & ATA_EHI_DID_RESET) {
1989*c6fd2807SJeff Garzik 		rc = ata_set_mode(ap, &dev);
1990*c6fd2807SJeff Garzik 		if (rc) {
1991*c6fd2807SJeff Garzik 			down_xfermask = 1;
1992*c6fd2807SJeff Garzik 			goto dev_fail;
1993*c6fd2807SJeff Garzik 		}
1994*c6fd2807SJeff Garzik 	}
1995*c6fd2807SJeff Garzik 
1996*c6fd2807SJeff Garzik 	/* suspend devices */
1997*c6fd2807SJeff Garzik 	rc = ata_eh_suspend(ap, &dev);
1998*c6fd2807SJeff Garzik 	if (rc)
1999*c6fd2807SJeff Garzik 		goto dev_fail;
2000*c6fd2807SJeff Garzik 
2001*c6fd2807SJeff Garzik 	goto out;
2002*c6fd2807SJeff Garzik 
2003*c6fd2807SJeff Garzik  dev_fail:
2004*c6fd2807SJeff Garzik 	switch (rc) {
2005*c6fd2807SJeff Garzik 	case -ENODEV:
2006*c6fd2807SJeff Garzik 		/* device missing, schedule probing */
2007*c6fd2807SJeff Garzik 		ehc->i.probe_mask |= (1 << dev->devno);
2008*c6fd2807SJeff Garzik 	case -EINVAL:
2009*c6fd2807SJeff Garzik 		ehc->tries[dev->devno] = 0;
2010*c6fd2807SJeff Garzik 		break;
2011*c6fd2807SJeff Garzik 	case -EIO:
2012*c6fd2807SJeff Garzik 		sata_down_spd_limit(ap);
2013*c6fd2807SJeff Garzik 	default:
2014*c6fd2807SJeff Garzik 		ehc->tries[dev->devno]--;
2015*c6fd2807SJeff Garzik 		if (down_xfermask &&
2016*c6fd2807SJeff Garzik 		    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
2017*c6fd2807SJeff Garzik 			ehc->tries[dev->devno] = 0;
2018*c6fd2807SJeff Garzik 	}
2019*c6fd2807SJeff Garzik 
2020*c6fd2807SJeff Garzik 	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
2021*c6fd2807SJeff Garzik 		/* disable device if it has used up all its chances */
2022*c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2023*c6fd2807SJeff Garzik 
2024*c6fd2807SJeff Garzik 		/* detach if offline */
2025*c6fd2807SJeff Garzik 		if (ata_port_offline(ap))
2026*c6fd2807SJeff Garzik 			ata_eh_detach_dev(dev);
2027*c6fd2807SJeff Garzik 
2028*c6fd2807SJeff Garzik 		/* probe if requested */
2029*c6fd2807SJeff Garzik 		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
2030*c6fd2807SJeff Garzik 		    !(ehc->did_probe_mask & (1 << dev->devno))) {
2031*c6fd2807SJeff Garzik 			ata_eh_detach_dev(dev);
2032*c6fd2807SJeff Garzik 			ata_dev_init(dev);
2033*c6fd2807SJeff Garzik 
2034*c6fd2807SJeff Garzik 			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
2035*c6fd2807SJeff Garzik 			ehc->did_probe_mask |= (1 << dev->devno);
2036*c6fd2807SJeff Garzik 			ehc->i.action |= ATA_EH_SOFTRESET;
2037*c6fd2807SJeff Garzik 		}
2038*c6fd2807SJeff Garzik 	} else {
2039*c6fd2807SJeff Garzik 		/* soft didn't work?  be haaaaard */
2040*c6fd2807SJeff Garzik 		if (ehc->i.flags & ATA_EHI_DID_RESET)
2041*c6fd2807SJeff Garzik 			ehc->i.action |= ATA_EH_HARDRESET;
2042*c6fd2807SJeff Garzik 		else
2043*c6fd2807SJeff Garzik 			ehc->i.action |= ATA_EH_SOFTRESET;
2044*c6fd2807SJeff Garzik 	}
2045*c6fd2807SJeff Garzik 
2046*c6fd2807SJeff Garzik 	if (ata_port_nr_enabled(ap)) {
2047*c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
2048*c6fd2807SJeff Garzik 				"devices, retrying in 5 secs\n");
2049*c6fd2807SJeff Garzik 		ssleep(5);
2050*c6fd2807SJeff Garzik 	} else {
2051*c6fd2807SJeff Garzik 		/* no device left, repeat fast */
2052*c6fd2807SJeff Garzik 		msleep(500);
2053*c6fd2807SJeff Garzik 	}
2054*c6fd2807SJeff Garzik 
2055*c6fd2807SJeff Garzik 	goto retry;
2056*c6fd2807SJeff Garzik 
2057*c6fd2807SJeff Garzik  out:
2058*c6fd2807SJeff Garzik 	if (rc) {
2059*c6fd2807SJeff Garzik 		for (i = 0; i < ATA_MAX_DEVICES; i++)
2060*c6fd2807SJeff Garzik 			ata_dev_disable(&ap->device[i]);
2061*c6fd2807SJeff Garzik 	}
2062*c6fd2807SJeff Garzik 
2063*c6fd2807SJeff Garzik 	DPRINTK("EXIT, rc=%d\n", rc);
2064*c6fd2807SJeff Garzik 	return rc;
2065*c6fd2807SJeff Garzik }
2066*c6fd2807SJeff Garzik 
2067*c6fd2807SJeff Garzik /**
2068*c6fd2807SJeff Garzik  *	ata_eh_finish - finish up EH
2069*c6fd2807SJeff Garzik  *	@ap: host port to finish EH for
2070*c6fd2807SJeff Garzik  *
2071*c6fd2807SJeff Garzik  *	Recovery is complete.  Clean up EH states and retry or finish
2072*c6fd2807SJeff Garzik  *	failed qcs.
2073*c6fd2807SJeff Garzik  *
2074*c6fd2807SJeff Garzik  *	LOCKING:
2075*c6fd2807SJeff Garzik  *	None.
2076*c6fd2807SJeff Garzik  */
2077*c6fd2807SJeff Garzik static void ata_eh_finish(struct ata_port *ap)
2078*c6fd2807SJeff Garzik {
2079*c6fd2807SJeff Garzik 	int tag;
2080*c6fd2807SJeff Garzik 
2081*c6fd2807SJeff Garzik 	/* retry or finish qcs */
2082*c6fd2807SJeff Garzik 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2083*c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2084*c6fd2807SJeff Garzik 
2085*c6fd2807SJeff Garzik 		if (!(qc->flags & ATA_QCFLAG_FAILED))
2086*c6fd2807SJeff Garzik 			continue;
2087*c6fd2807SJeff Garzik 
2088*c6fd2807SJeff Garzik 		if (qc->err_mask) {
2089*c6fd2807SJeff Garzik 			/* FIXME: Once EH migration is complete,
2090*c6fd2807SJeff Garzik 			 * generate sense data in this function,
2091*c6fd2807SJeff Garzik 			 * considering both err_mask and tf.
2092*c6fd2807SJeff Garzik 			 */
2093*c6fd2807SJeff Garzik 			if (qc->err_mask & AC_ERR_INVALID)
2094*c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
2095*c6fd2807SJeff Garzik 			else
2096*c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
2097*c6fd2807SJeff Garzik 		} else {
2098*c6fd2807SJeff Garzik 			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
2099*c6fd2807SJeff Garzik 				ata_eh_qc_complete(qc);
2100*c6fd2807SJeff Garzik 			} else {
2101*c6fd2807SJeff Garzik 				/* feed zero TF to sense generation */
2102*c6fd2807SJeff Garzik 				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
2103*c6fd2807SJeff Garzik 				ata_eh_qc_retry(qc);
2104*c6fd2807SJeff Garzik 			}
2105*c6fd2807SJeff Garzik 		}
2106*c6fd2807SJeff Garzik 	}
2107*c6fd2807SJeff Garzik }
2108*c6fd2807SJeff Garzik 
2109*c6fd2807SJeff Garzik /**
2110*c6fd2807SJeff Garzik  *	ata_do_eh - do standard error handling
2111*c6fd2807SJeff Garzik  *	@ap: host port to handle error for
2112*c6fd2807SJeff Garzik  *	@prereset: prereset method (can be NULL)
2113*c6fd2807SJeff Garzik  *	@softreset: softreset method (can be NULL)
2114*c6fd2807SJeff Garzik  *	@hardreset: hardreset method (can be NULL)
2115*c6fd2807SJeff Garzik  *	@postreset: postreset method (can be NULL)
2116*c6fd2807SJeff Garzik  *
2117*c6fd2807SJeff Garzik  *	Perform standard error handling sequence.
2118*c6fd2807SJeff Garzik  *
2119*c6fd2807SJeff Garzik  *	LOCKING:
2120*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2121*c6fd2807SJeff Garzik  */
2122*c6fd2807SJeff Garzik void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
2123*c6fd2807SJeff Garzik 	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2124*c6fd2807SJeff Garzik 	       ata_postreset_fn_t postreset)
2125*c6fd2807SJeff Garzik {
2126*c6fd2807SJeff Garzik 	ata_eh_autopsy(ap);
2127*c6fd2807SJeff Garzik 	ata_eh_report(ap);
2128*c6fd2807SJeff Garzik 	ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
2129*c6fd2807SJeff Garzik 	ata_eh_finish(ap);
2130*c6fd2807SJeff Garzik }
2131*c6fd2807SJeff Garzik 
2132*c6fd2807SJeff Garzik /**
2133*c6fd2807SJeff Garzik  *	ata_eh_handle_port_suspend - perform port suspend operation
2134*c6fd2807SJeff Garzik  *	@ap: port to suspend
2135*c6fd2807SJeff Garzik  *
2136*c6fd2807SJeff Garzik  *	Suspend @ap.
2137*c6fd2807SJeff Garzik  *
2138*c6fd2807SJeff Garzik  *	LOCKING:
2139*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2140*c6fd2807SJeff Garzik  */
2141*c6fd2807SJeff Garzik static void ata_eh_handle_port_suspend(struct ata_port *ap)
2142*c6fd2807SJeff Garzik {
2143*c6fd2807SJeff Garzik 	unsigned long flags;
2144*c6fd2807SJeff Garzik 	int rc = 0;
2145*c6fd2807SJeff Garzik 
2146*c6fd2807SJeff Garzik 	/* are we suspending? */
2147*c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
2148*c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2149*c6fd2807SJeff Garzik 	    ap->pm_mesg.event == PM_EVENT_ON) {
2150*c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
2151*c6fd2807SJeff Garzik 		return;
2152*c6fd2807SJeff Garzik 	}
2153*c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
2154*c6fd2807SJeff Garzik 
2155*c6fd2807SJeff Garzik 	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
2156*c6fd2807SJeff Garzik 
2157*c6fd2807SJeff Garzik 	/* suspend */
2158*c6fd2807SJeff Garzik 	ata_eh_freeze_port(ap);
2159*c6fd2807SJeff Garzik 
2160*c6fd2807SJeff Garzik 	if (ap->ops->port_suspend)
2161*c6fd2807SJeff Garzik 		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
2162*c6fd2807SJeff Garzik 
2163*c6fd2807SJeff Garzik 	/* report result */
2164*c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
2165*c6fd2807SJeff Garzik 
2166*c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
2167*c6fd2807SJeff Garzik 	if (rc == 0)
2168*c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_SUSPENDED;
2169*c6fd2807SJeff Garzik 	else
2170*c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
2171*c6fd2807SJeff Garzik 
2172*c6fd2807SJeff Garzik 	if (ap->pm_result) {
2173*c6fd2807SJeff Garzik 		*ap->pm_result = rc;
2174*c6fd2807SJeff Garzik 		ap->pm_result = NULL;
2175*c6fd2807SJeff Garzik 	}
2176*c6fd2807SJeff Garzik 
2177*c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
2178*c6fd2807SJeff Garzik 
2179*c6fd2807SJeff Garzik 	return;
2180*c6fd2807SJeff Garzik }
2181*c6fd2807SJeff Garzik 
2182*c6fd2807SJeff Garzik /**
2183*c6fd2807SJeff Garzik  *	ata_eh_handle_port_resume - perform port resume operation
2184*c6fd2807SJeff Garzik  *	@ap: port to resume
2185*c6fd2807SJeff Garzik  *
2186*c6fd2807SJeff Garzik  *	Resume @ap.
2187*c6fd2807SJeff Garzik  *
2188*c6fd2807SJeff Garzik  *	This function also waits upto one second until all devices
2189*c6fd2807SJeff Garzik  *	hanging off this port requests resume EH action.  This is to
2190*c6fd2807SJeff Garzik  *	prevent invoking EH and thus reset multiple times on resume.
2191*c6fd2807SJeff Garzik  *
2192*c6fd2807SJeff Garzik  *	On DPM resume, where some of devices might not be resumed
2193*c6fd2807SJeff Garzik  *	together, this may delay port resume upto one second, but such
2194*c6fd2807SJeff Garzik  *	DPM resumes are rare and 1 sec delay isn't too bad.
2195*c6fd2807SJeff Garzik  *
2196*c6fd2807SJeff Garzik  *	LOCKING:
2197*c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
2198*c6fd2807SJeff Garzik  */
2199*c6fd2807SJeff Garzik static void ata_eh_handle_port_resume(struct ata_port *ap)
2200*c6fd2807SJeff Garzik {
2201*c6fd2807SJeff Garzik 	unsigned long timeout;
2202*c6fd2807SJeff Garzik 	unsigned long flags;
2203*c6fd2807SJeff Garzik 	int i, rc = 0;
2204*c6fd2807SJeff Garzik 
2205*c6fd2807SJeff Garzik 	/* are we resuming? */
2206*c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
2207*c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
2208*c6fd2807SJeff Garzik 	    ap->pm_mesg.event != PM_EVENT_ON) {
2209*c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
2210*c6fd2807SJeff Garzik 		return;
2211*c6fd2807SJeff Garzik 	}
2212*c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
2213*c6fd2807SJeff Garzik 
2214*c6fd2807SJeff Garzik 	/* spurious? */
2215*c6fd2807SJeff Garzik 	if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
2216*c6fd2807SJeff Garzik 		goto done;
2217*c6fd2807SJeff Garzik 
2218*c6fd2807SJeff Garzik 	if (ap->ops->port_resume)
2219*c6fd2807SJeff Garzik 		rc = ap->ops->port_resume(ap);
2220*c6fd2807SJeff Garzik 
2221*c6fd2807SJeff Garzik 	/* give devices time to request EH */
2222*c6fd2807SJeff Garzik 	timeout = jiffies + HZ; /* 1s max */
2223*c6fd2807SJeff Garzik 	while (1) {
2224*c6fd2807SJeff Garzik 		for (i = 0; i < ATA_MAX_DEVICES; i++) {
2225*c6fd2807SJeff Garzik 			struct ata_device *dev = &ap->device[i];
2226*c6fd2807SJeff Garzik 			unsigned int action = ata_eh_dev_action(dev);
2227*c6fd2807SJeff Garzik 
2228*c6fd2807SJeff Garzik 			if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
2229*c6fd2807SJeff Garzik 			    !(action & ATA_EH_RESUME))
2230*c6fd2807SJeff Garzik 				break;
2231*c6fd2807SJeff Garzik 		}
2232*c6fd2807SJeff Garzik 
2233*c6fd2807SJeff Garzik 		if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
2234*c6fd2807SJeff Garzik 			break;
2235*c6fd2807SJeff Garzik 		msleep(10);
2236*c6fd2807SJeff Garzik 	}
2237*c6fd2807SJeff Garzik 
2238*c6fd2807SJeff Garzik  done:
2239*c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
2240*c6fd2807SJeff Garzik 	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
2241*c6fd2807SJeff Garzik 	if (ap->pm_result) {
2242*c6fd2807SJeff Garzik 		*ap->pm_result = rc;
2243*c6fd2807SJeff Garzik 		ap->pm_result = NULL;
2244*c6fd2807SJeff Garzik 	}
2245*c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
2246*c6fd2807SJeff Garzik }
2247