/*
 * Serial Attached SCSI (SAS) class SCSI Host glue.
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>

#include "sas_internal.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"

#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>

/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
	struct task_status_struct *ts = &task->task_status;
	int hs = 0, stat = 0;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
		/* transport error */
		hs = DID_NO_CONNECT;
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		switch (ts->stat) {
		case SAS_DEV_NO_RESPONSE:
		case SAS_INTERRUPTED:
		case SAS_PHY_DOWN:
		case SAS_NAK_R_ERR:
		case SAS_OPEN_TO:
			hs = DID_NO_CONNECT;
			break;
		case SAS_DATA_UNDERRUN:
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
				hs = DID_ERROR;
			break;
		case SAS_DATA_OVERRUN:
			hs = DID_ERROR;
			break;
		case SAS_QUEUE_FULL:
			hs = DID_SOFT_ERROR; /* retry */
			break;
		case SAS_DEVICE_UNKNOWN:
			hs = DID_BAD_TARGET;
			break;
		case SAS_SG_ERR:
			hs = DID_PARITY;
			break;
		case SAS_OPEN_REJECT:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
			else
				hs = DID_ERROR;
			break;
		case SAS_PROTO_RESPONSE:
			SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
				    "task; please report this\n",
				    task->dev->port->ha->sas_ha_name);
			break;
		case SAS_ABORTED_TASK:
			hs = DID_ABORT;
			break;
		case SAM_STAT_CHECK_CONDITION:
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAM_STAT_CHECK_CONDITION;
			break;
		default:
			stat = ts->stat;
			break;
		}
	}

	sc->result = (hs << 16) | stat;
	ASSIGN_SAS_TASK(sc, NULL);
	list_del_init(&task->list);
	sas_free_task(task);
}

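/* LLDD completion callback for an SSP command.  If the ha has been
 * frozen for error handling, leave the task for the eh to reap;
 * otherwise detach the task from the command and complete it back
 * to the SCSI midlayer.
 */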
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;
	else
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		SAS_DPRINTK("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		SAS_DPRINTK("task_done called with nonexistent SCSI cmnd!\n");
		list_del_init(&task->list);
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	sc->scsi_done(sc);
}

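/* Allocate an SSP sas_task for a SCSI command and cross-link the two
 * (task->uldd_task and ASSIGN_SAS_TASK()).  Returns NULL if task
 * allocation fails.
 */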
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
					gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;

	return task;
}

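/* Task Collector Mode: park the task on the scsi_core task queue and
 * wake the collector thread.  Returns -SAS_QUEUE_FULL once the queue
 * has grown to the LLDD queue size.
 */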
int sas_queue_up(struct sas_task *task)
{
	struct sas_ha_struct *sas_ha = task->dev->port->ha;
	struct scsi_core *core = &sas_ha->core;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&core->task_queue_lock, flags);
	if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	list_add_tail(&task->list, &core->task_queue);
	core->task_queue_size += 1;
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
	wake_up_process(core->queue_thread);

	return 0;
}

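/* scsi_host_template .queuecommand entry point.  SATA/STP commands are
 * handed to libata; SSP commands are wrapped in a sas_task and either
 * executed directly or queued to the Task Collector, depending on
 * lldd_max_execute_num.
 */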
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Queue up, Direct Mode or Task Collector Mode. */
	if (sas_ha->lldd_max_execute_num < 2)
		res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
	else
		res = sas_queue_up(task);

	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	cmd->scsi_done(cmd);
	return 0;
}

static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}

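/* For SATA devices the timed-out command has to be reported to libata,
 * so it is parked on ha->eh_ata_q instead of being finished here;
 * everything else is finished right away via sas_eh_finish_cmd().
 */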
static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
{
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_task *task = TO_SAS_TASK(cmd);

	if (!dev_is_sata(dev)) {
		sas_eh_finish_cmd(cmd);
		return;
	}

	/* report the timeout to libata */
	sas_end_task(cmd, task);
	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
}

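/* Flush from the eh queue all commands addressed to the same LU as
 * @my_cmd once that LU has been recovered.
 */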
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
		    cmd->device->lun == my_cmd->device->lun)
			sas_eh_defer_cmd(cmd);
	}
}

static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
				     struct domain_device *dev)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *x = cmd_to_domain_dev(cmd);

		if (x == dev)
			sas_eh_finish_cmd(cmd);
	}
}

static void sas_scsi_clear_queue_port(struct list_head *error_q,
				      struct asd_sas_port *port)
{
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct asd_sas_port *x = dev->port;

		if (x == port)
			sas_eh_finish_cmd(cmd);
	}
}

enum task_disposition {
	TASK_IS_DONE,
	TASK_IS_ABORTED,
	TASK_IS_AT_LU,
	TASK_IS_NOT_AT_HA,
	TASK_IS_NOT_AT_LU,
	TASK_ABORT_FAILED,
};

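/* Locate a timed-out task: pull it off the Task Collector queue if it
 * never reached the LLDD; otherwise ask the LLDD to abort it (up to
 * five attempts) and query where the task ended up.
 */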
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	struct sas_ha_struct *ha = task->dev->port->ha;
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);

	if (ha->lldd_max_execute_num > 1) {
		struct scsi_core *core = &ha->core;
		struct sas_task *t, *n;

		mutex_lock(&core->task_queue_flush);
		spin_lock_irqsave(&core->task_queue_lock, flags);
		list_for_each_entry_safe(t, n, &core->task_queue, list)
			if (task == t) {
				list_del_init(&t->list);
				break;
			}
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		mutex_unlock(&core->task_queue_flush);

		if (task == t)
			return TASK_IS_NOT_AT_HA;
	}

	for (i = 0; i < 5; i++) {
		SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
				    task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			SAS_DPRINTK("%s: querying task 0x%p\n",
				    __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				SAS_DPRINTK("%s: task 0x%p at LU\n",
					    __func__, task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				SAS_DPRINTK("%s: task 0x%p not at LU\n",
					    __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				SAS_DPRINTK("%s: task 0x%p failed to abort\n",
					    __func__, task);
				return TASK_ABORT_FAILED;
			}
		}
	}
	return res;
}

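/* Escalate recovery at the LU: abort task set, then clear task set,
 * then LU reset, stopping at the first method that succeeds.
 */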
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct scsi_lun lun;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	int_to_scsilun(cmd->device->lun, &lun);

	SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
		    SAS_ADDR(dev->sas_addr),
		    cmd->device->lun);

	if (i->dft->lldd_abort_task_set)
		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_clear_task_set)
			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
	}

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_lu_reset)
			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	}

	return res;
}

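/* Recover the I_T nexus by asking the LLDD for an I_T nexus reset. */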
static int sas_recover_I_T(struct domain_device *dev)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
		    SAS_ADDR(dev->sas_addr));

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);

	return res;
}

/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_phy *phy;
	unsigned long flags;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	BUG_ON(!dev->phy);

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	phy = dev->phy;
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);

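/* Wait for any error handling pending against @dev to complete.  SATA
 * devices defer to libata's ata_port_wait_eh().
 */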
static void sas_wait_eh(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	DEFINE_WAIT(wait);

	if (dev_is_sata(dev)) {
		ata_port_wait_eh(dev->sata_dev.ap);
		return;
	}
 retry:
	spin_lock_irq(&ha->lock);

	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ha->lock);
		schedule();
		spin_lock_irq(&ha->lock);
	}
	finish_wait(&ha->eh_wait_q, &wait);

	spin_unlock_irq(&ha->lock);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ha->core.shost)) {
		msleep(10);
		goto retry;
	}
}
EXPORT_SYMBOL(sas_wait_eh);

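/* Record a LU or I_T nexus reset against @dev and schedule the host's
 * error handler to carry it out (see sas_eh_handle_resets()),
 * optionally waiting for completion.  SATA devices are promoted to a
 * libata port reset instead.
 */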
static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		if (wait)
			sas_ata_wait_eh(dev);
		return SUCCESS;
	}

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->core.shost);
		}
		spin_unlock_irq(&ha->lock);

		if (wait)
			sas_wait_eh(dev);

		if (scheduled)
			return SUCCESS;
	}

	SAS_DPRINTK("%s reset of %s failed\n",
		    reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		    dev_name(&dev->rphy->dev));

	return FAILED;
}

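/* scsi_host_template .eh_abort_handler: ask the LLDD to abort a single
 * task.  Only valid when called from the host's error-handler thread.
 */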
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return FAILED;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	res = i->dft->lldd_abort_task(task);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);

/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct scsi_lun lun;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);

	int_to_scsilun(cmd->device->lun, &lun);

	if (!i->dft->lldd_lu_reset)
		return FAILED;

	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}

int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	int res;
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);

	if (!i->dft->lldd_I_T_nexus_reset)
		return FAILED;

	res = i->dft->lldd_I_T_nexus_reset(dev);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
	    res == -ENODEV)
		return SUCCESS;

	return FAILED;
}

/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
{
	int res;
	struct Scsi_Host *shost = cmd->device->host;

	if (!shost->hostt->eh_device_reset_handler)
		goto try_bus_reset;

	res = shost->hostt->eh_device_reset_handler(cmd);
	if (res == SUCCESS)
		return res;

try_bus_reset:
	if (shost->hostt->eh_bus_reset_handler)
		return shost->hostt->eh_bus_reset_handler(cmd);

	return FAILED;
}

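/* Core of the SAS error-handling strategy: for each timed-out command
 * on @work_q, locate its sas_task and escalate recovery from task
 * abort to LU reset, I_T nexus reset, clear nexus port and finally
 * clear nexus ha, flushing related commands from the queue whenever a
 * wider-scope recovery succeeds.
 */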
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (need_reset) {
			SAS_DPRINTK("%s: task 0x%p requests reset\n",
				    __func__, task);
			goto reset;
		}

		SAS_DPRINTK("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		cmd->eh_eflags = 0;

		switch (res) {
		case TASK_IS_NOT_AT_HA:
			SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
				    __func__, task,
				    cmd->retries ? "retry" : "aborted");
			if (cmd->retries)
				cmd->retries--;
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_DONE:
			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_defer_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
				    __func__, task);
			sas_eh_defer_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("dev %016llx LU %x is "
					    "recovered\n",
					    SAS_ADDR(task->dev->sas_addr),
					    cmd->device->lun);
				sas_eh_defer_cmd(cmd);
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			/* fallthrough */
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
				    task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				SAS_DPRINTK("I_T %016llx recovered\n",
					    SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				SAS_DPRINTK("clearing nexus for port:%d\n",
					    port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus port:%d "
						    "succeeded\n", port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				SAS_DPRINTK("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus ha "
						    "succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors.  Quite
			 * possibly the HA just disappeared.
			 */
			SAS_DPRINTK("error from device %llx, LUN %x "
				    "couldn't be recovered in any way\n",
				    SAS_ADDR(task->dev->sas_addr),
				    cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}

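/* Carry out the LU and I_T nexus resets queued up by sas_queue_reset(). */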
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}

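/**
 * sas_scsi_recover_host -- SAS/SATA error-handling strategy routine
 * @shost: host to recover
 *
 * Freezes sas_task completions, handles SAS errors, hands any leftover
 * commands to the generic SCSI eh helpers, performs queued device
 * resets and links into libata eh, retrying while new eh work arrives.
 */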
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
		    __func__, shost->host_busy, shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism).
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	if (ha->lldd_max_execute_num > 1)
		wake_up_process(ha->core.queue_thread);

	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
		    __func__, shost->host_busy, shost->host_failed, tries);
}

enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
	scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);

	return BLK_EH_NOT_HANDLED;
}

int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);

	if (dev_is_sata(dev))
		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);

	return -EINVAL;
}

struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}

int sas_target_alloc(struct scsi_target *starget)
{
	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);

	if (!found_dev)
		return -ENODEV;

	kref_get(&found_dev->kref);
	starget->hostdata = found_dev;
	return 0;
}

#define SAS_DEF_QD 256

int sas_slave_configure(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	struct sas_ha_struct *sas_ha;

	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

	if (dev_is_sata(dev)) {
		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
		return 0;
	}

	sas_ha = dev->port->ha;

	sas_read_port_mode_page(scsi_dev);

	if (scsi_dev->tagged_supported) {
		scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
		scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
	} else {
		SAS_DPRINTK("device %llx, LUN %x doesn't support "
			    "TCQ\n", SAS_ADDR(dev->sas_addr),
			    scsi_dev->lun);
		scsi_dev->tagged_supported = 0;
		scsi_set_tag_type(scsi_dev, 0);
		scsi_deactivate_tcq(scsi_dev, 1);
	}

	scsi_dev->allow_restart = 1;

	return 0;
}

int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);

	if (dev_is_sata(dev))
		return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth,
						reason);

	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		if (!sdev->tagged_supported)
			depth = 1;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return depth;
}

int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

	if (dev_is_sata(dev))
		return -EINVAL;

	if (!scsi_dev->tagged_supported)
		return 0;

	scsi_deactivate_tcq(scsi_dev, 1);

	scsi_set_tag_type(scsi_dev, qt);
	scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);

	return qt;
}

int sas_bios_param(struct scsi_device *scsi_dev,
		   struct block_device *bdev,
		   sector_t capacity, int *hsc)
{
	hsc[0] = 255;
	hsc[1] = 63;
	sector_div(capacity, 255*63);
	hsc[2] = capacity;

	return 0;
}

/* ---------- Task Collector Thread implementation ---------- */

static void sas_queue(struct sas_ha_struct *sas_ha)
{
	struct scsi_core *core = &sas_ha->core;
	unsigned long flags;
	LIST_HEAD(q);
	int can_queue;
	int res;
	struct sas_internal *i = to_sas_internal(core->shost->transportt);

	mutex_lock(&core->task_queue_flush);
	spin_lock_irqsave(&core->task_queue_lock, flags);
	while (!kthread_should_stop() &&
	       !list_empty(&core->task_queue) &&
	       !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {

		can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
		if (can_queue >= 0) {
			can_queue = core->task_queue_size;
			list_splice_init(&core->task_queue, &q);
		} else {
			struct list_head *a, *n;

			can_queue = sas_ha->lldd_queue_size;
			list_for_each_safe(a, n, &core->task_queue) {
				list_move_tail(a, &q);
				if (--can_queue == 0)
					break;
			}
			can_queue = sas_ha->lldd_queue_size;
		}
		core->task_queue_size -= can_queue;
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		{
			struct sas_task *task = list_entry(q.next,
							   struct sas_task,
							   list);
			list_del_init(&q);
			res = i->dft->lldd_execute_task(task, can_queue,
							GFP_KERNEL);
			if (unlikely(res))
				__list_add(&q, task->list.prev, &task->list);
		}
		spin_lock_irqsave(&core->task_queue_lock, flags);
		if (res) {
			list_splice_init(&q, &core->task_queue); /*at head*/
			core->task_queue_size += can_queue;
		}
	}
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
	mutex_unlock(&core->task_queue_flush);
}

/**
 * sas_queue_thread -- The Task Collector thread
 * @_sas_ha: pointer to struct sas_ha
 */
static int sas_queue_thread(void *_sas_ha)
{
	struct sas_ha_struct *sas_ha = _sas_ha;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		sas_queue(sas_ha);
		if (kthread_should_stop())
			break;
	}

	return 0;
}

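/* Set up the Task Collector queue and start its kernel thread. */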
int sas_init_queue(struct sas_ha_struct *sas_ha)
{
	struct scsi_core *core = &sas_ha->core;

	spin_lock_init(&core->task_queue_lock);
	mutex_init(&core->task_queue_flush);
	core->task_queue_size = 0;
	INIT_LIST_HEAD(&core->task_queue);

	core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
					 "sas_queue_%d", core->shost->host_no);
	if (IS_ERR(core->queue_thread))
		return PTR_ERR(core->queue_thread);
	return 0;
}

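/* Stop the Task Collector thread and fail any tasks still sitting on
 * the queue, completing their commands with DID_ABORT.
 */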
void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
{
	unsigned long flags;
	struct scsi_core *core = &sas_ha->core;
	struct sas_task *task, *n;

	kthread_stop(core->queue_thread);

	if (!list_empty(&core->task_queue))
		SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
			    SAS_ADDR(sas_ha->sas_addr));

	spin_lock_irqsave(&core->task_queue_lock, flags);
	list_for_each_entry_safe(task, n, &core->task_queue, list) {
		struct scsi_cmnd *cmd = task->uldd_task;

		list_del_init(&task->list);

		ASSIGN_SAS_TASK(cmd, NULL);
		sas_free_task(task);
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);
	}
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
}

/*
 * Tell an upper layer that it needs to initiate an abort for a given task.
 * This should only ever be called by an LLDD.
 */
void sas_task_abort(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
	if (!sc) {
		struct sas_task_slow *slow = task->slow_task;

		if (!slow)
			return;
		if (!del_timer(&slow->timer))
			return;
		slow->timer.function(slow->timer.data);
		return;
	}

	if (dev_is_sata(task->dev)) {
		sas_ata_task_abort(task);
	} else {
		struct request_queue *q = sc->device->request_queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_abort_request(sc->request);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

void sas_target_destroy(struct scsi_target *starget)
{
	struct domain_device *found_dev = starget->hostdata;

	if (!found_dev)
		return;

	starget->hostdata = NULL;
	sas_put_device(found_dev);
}

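/* Convert an ASCII hex SAS address into its 8-byte binary form.  The
 * input is expected to look like "5005043000000001" (example value
 * only), i.e. the contents of the "sas_addr" firmware file consumed by
 * sas_request_addr() below.
 */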
static void sas_parse_addr(u8 *sas_addr, const char *p)
{
	int i;
	for (i = 0; i < SAS_ADDR_SIZE; i++) {
		u8 h, l;
		if (!*p)
			break;
		h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
		p++;
		l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
		p++;
		sas_addr[i] = (h<<4) | l;
	}
}

#define SAS_STRING_ADDR_SIZE	16

int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
{
	int res;
	const struct firmware *fw;

	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
	if (res)
		return res;

	if (fw->size < SAS_STRING_ADDR_SIZE) {
		res = -ENODEV;
		goto out;
	}

	sas_parse_addr(addr, fw->data);

out:
	release_firmware(fw);
	return res;
}
EXPORT_SYMBOL_GPL(sas_request_addr);

EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);