xref: /openbmc/linux/drivers/scsi/libsas/sas_scsi_host.c (revision 2be6bc48df59c99d35aab16a51d4a814e9bb8c35)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Serial Attached SCSI (SAS) class SCSI Host glue.
4  *
5  * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
6  * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7  */
8 
9 #include <linux/kthread.h>
10 #include <linux/firmware.h>
11 #include <linux/export.h>
12 #include <linux/ctype.h>
13 #include <linux/kernel.h>
14 
15 #include "sas_internal.h"
16 
17 #include <scsi/scsi_host.h>
18 #include <scsi/scsi_device.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_eh.h>
22 #include <scsi/scsi_transport.h>
23 #include <scsi/scsi_transport_sas.h>
24 #include <scsi/sas_ata.h>
25 #include "scsi_sas_internal.h"
26 #include "scsi_transport_api.h"
27 #include "scsi_priv.h"
28 
29 #include <linux/err.h>
30 #include <linux/blkdev.h>
31 #include <linux/freezer.h>
32 #include <linux/gfp.h>
33 #include <linux/scatterlist.h>
34 #include <linux/libata.h>
35 
36 /* record final status and free the task */
37 static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
38 {
39 	struct task_status_struct *ts = &task->task_status;
40 	enum scsi_host_status hs = DID_OK;
41 	enum exec_status stat = SAS_SAM_STAT_GOOD;
42 
43 	if (ts->resp == SAS_TASK_UNDELIVERED) {
44 		/* transport error */
45 		hs = DID_NO_CONNECT;
46 	} else { /* ts->resp == SAS_TASK_COMPLETE */
47 		/* task delivered, what happened afterwards? */
48 		switch (ts->stat) {
49 		case SAS_DEV_NO_RESPONSE:
50 		case SAS_INTERRUPTED:
51 		case SAS_PHY_DOWN:
52 		case SAS_NAK_R_ERR:
53 		case SAS_OPEN_TO:
54 			hs = DID_NO_CONNECT;
55 			break;
56 		case SAS_DATA_UNDERRUN:
57 			scsi_set_resid(sc, ts->residual);
58 			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
59 				hs = DID_ERROR;
60 			break;
61 		case SAS_DATA_OVERRUN:
62 			hs = DID_ERROR;
63 			break;
64 		case SAS_QUEUE_FULL:
65 			hs = DID_SOFT_ERROR; /* retry */
66 			break;
67 		case SAS_DEVICE_UNKNOWN:
68 			hs = DID_BAD_TARGET;
69 			break;
70 		case SAS_OPEN_REJECT:
71 			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
72 				hs = DID_SOFT_ERROR; /* retry */
73 			else
74 				hs = DID_ERROR;
75 			break;
76 		case SAS_PROTO_RESPONSE:
77 			pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
78 				  task->dev->port->ha->sas_ha_name);
79 			break;
80 		case SAS_ABORTED_TASK:
81 			hs = DID_ABORT;
82 			break;
83 		case SAS_SAM_STAT_CHECK_CONDITION:
84 			memcpy(sc->sense_buffer, ts->buf,
85 			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
86 			stat = SAS_SAM_STAT_CHECK_CONDITION;
87 			break;
88 		default:
89 			stat = ts->stat;
90 			break;
91 		}
92 	}
93 
94 	sc->result = (hs << 16) | stat;
95 	ASSIGN_SAS_TASK(sc, NULL);
96 	sas_free_task(task);
97 }
98 
/*
 * Completion callback installed on every sas_task created by
 * sas_create_task().  Resolves the race between normal completion and
 * error handling: if the HA is frozen (eh owns all tasks), completion
 * is deferred to the error handler.
 */
static void sas_scsi_task_done(struct sas_task *task)
{
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	/* done_lock arbitrates with sas_eh_handle_sas_errors(), which reads
	 * the cmd->task linkage under the same lock
	 */
	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		task = NULL;	/* eh wins; leave the task to the error handler */
	else
		ASSIGN_SAS_TASK(sc, NULL);	/* we win; detach task from cmd */
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		pr_debug("task done but aborted\n");
		return;
	}

	if (unlikely(!sc)) {
		pr_debug("task_done called with non existing SCSI cmnd!\n");
		sas_free_task(task);
		return;
	}

	sas_end_task(sc, task);
	scsi_done(sc);
}
128 
129 static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
130 					       struct domain_device *dev,
131 					       gfp_t gfp_flags)
132 {
133 	struct sas_task *task = sas_alloc_task(gfp_flags);
134 	struct scsi_lun lun;
135 
136 	if (!task)
137 		return NULL;
138 
139 	task->uldd_task = cmd;
140 	ASSIGN_SAS_TASK(cmd, task);
141 
142 	task->dev = dev;
143 	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
144 
145 	int_to_scsilun(cmd->device->lun, &lun);
146 	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
147 	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
148 	task->ssp_task.cmd = cmd;
149 
150 	task->scatter = scsi_sglist(cmd);
151 	task->num_scatter = scsi_sg_count(cmd);
152 	task->total_xfer_len = scsi_bufflen(cmd);
153 	task->data_dir = cmd->sc_data_direction;
154 
155 	task->task_done = sas_scsi_task_done;
156 
157 	return task;
158 }
159 
/*
 * queuecommand entry point for libsas hosts.  SATA commands are handed
 * to libata; SSP commands are wrapped in a sas_task and passed to the
 * LLDD's execute hook.
 */
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task;
	int res = 0;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	if (dev_is_sata(dev)) {
		/* libata requires the ap->lock held across qc issue */
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);
		return res;
	}

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
	if (!task)
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry */

	res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (res)
		goto out_free_task;
	return 0;

out_free_task:
	/* LLDD refused the task: unlink, free, and complete with an
	 * appropriate (possibly retryable) result
	 */
	pr_debug("lldd_execute_task returned: %d\n", res);
	ASSIGN_SAS_TASK(cmd, NULL);
	sas_free_task(task);
	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
	else
		cmd->result = DID_ERROR << 16;
out_done:
	scsi_done(cmd);
	return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
202 
/*
 * Finalize a command during error handling.  The task must already be
 * aborted, so there is no race with LLDD completion.  SATA commands are
 * deferred to libata's EH; SSP commands go on the eh_done_q.
 */
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD.  Task is freed after this.
	 */
	sas_end_task(cmd, task);

	if (dev_is_sata(dev)) {
		/* defer commands to libata so that libata EH can
		 * handle ata qcs correctly
		 */
		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
		return;
	}

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
229 
230 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
231 {
232 	struct scsi_cmnd *cmd, *n;
233 
234 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
235 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
236 		    cmd->device->lun == my_cmd->device->lun)
237 			sas_eh_finish_cmd(cmd);
238 	}
239 }
240 
241 static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
242 				     struct domain_device *dev)
243 {
244 	struct scsi_cmnd *cmd, *n;
245 
246 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
247 		struct domain_device *x = cmd_to_domain_dev(cmd);
248 
249 		if (x == dev)
250 			sas_eh_finish_cmd(cmd);
251 	}
252 }
253 
254 static void sas_scsi_clear_queue_port(struct list_head *error_q,
255 				      struct asd_sas_port *port)
256 {
257 	struct scsi_cmnd *cmd, *n;
258 
259 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
260 		struct domain_device *dev = cmd_to_domain_dev(cmd);
261 		struct asd_sas_port *x = dev->port;
262 
263 		if (x == port)
264 			sas_eh_finish_cmd(cmd);
265 	}
266 }
267 
/* Outcome of trying to abort/locate a task during error recovery */
enum task_disposition {
	TASK_IS_DONE,		/* task completed while we were aborting it */
	TASK_IS_ABORTED,	/* abort TMF completed successfully */
	TASK_IS_AT_LU,		/* task is still queued at the LU */
	TASK_IS_NOT_AT_LU,	/* task is no longer at the LU */
	TASK_ABORT_FAILED,	/* all abort/query attempts exhausted */
};
275 
/*
 * Try (up to 5 times) to abort @task via the LLDD, querying the LLDD for
 * the task's whereabouts when the abort does not complete.  Returns the
 * task's disposition for the EH strategy in sas_eh_handle_sas_errors().
 */
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
	unsigned long flags;
	int i, res;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->shost->transportt);

	for (i = 0; i < 5; i++) {
		pr_notice("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		/* the task may have completed while the abort was in flight */
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			pr_debug("%s: task 0x%p is done\n", __func__, task);
			return TASK_IS_DONE;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			/* abort inconclusive: ask the LLDD where the task is */
			pr_notice("%s: querying task 0x%p\n", __func__, task);
			res = si->dft->lldd_query_task(task);
			switch (res) {
			case TMF_RESP_FUNC_SUCC:
				pr_notice("%s: task 0x%p at LU\n", __func__,
					  task);
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				pr_notice("%s: task 0x%p not at LU\n",
					  __func__, task);
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				pr_notice("%s: task 0x%p failed to abort\n",
					  __func__, task);
				return TASK_ABORT_FAILED;
			default:
				/* unknown query result: retry the abort */
				pr_notice("%s: task 0x%p result code %d not handled\n",
					  __func__, task, res);
			}
		}
	}
	return TASK_ABORT_FAILED;
}
323 
324 static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
325 {
326 	int res = TMF_RESP_FUNC_FAILED;
327 	struct scsi_lun lun;
328 	struct sas_internal *i =
329 		to_sas_internal(dev->port->ha->shost->transportt);
330 
331 	int_to_scsilun(cmd->device->lun, &lun);
332 
333 	pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
334 		  SAS_ADDR(dev->sas_addr),
335 		  cmd->device->lun);
336 
337 	if (i->dft->lldd_abort_task_set)
338 		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
339 
340 	if (res == TMF_RESP_FUNC_FAILED) {
341 		if (i->dft->lldd_clear_task_set)
342 			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
343 	}
344 
345 	if (res == TMF_RESP_FUNC_FAILED) {
346 		if (i->dft->lldd_lu_reset)
347 			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
348 	}
349 
350 	return res;
351 }
352 
353 static int sas_recover_I_T(struct domain_device *dev)
354 {
355 	int res = TMF_RESP_FUNC_FAILED;
356 	struct sas_internal *i =
357 		to_sas_internal(dev->port->ha->shost->transportt);
358 
359 	pr_notice("I_T nexus reset for dev %016llx\n",
360 		  SAS_ADDR(dev->sas_addr));
361 
362 	if (i->dft->lldd_I_T_nexus_reset)
363 		res = i->dft->lldd_I_T_nexus_reset(dev);
364 
365 	return res;
366 }
367 
368 /* take a reference on the last known good phy for this device */
369 struct sas_phy *sas_get_local_phy(struct domain_device *dev)
370 {
371 	struct sas_ha_struct *ha = dev->port->ha;
372 	struct sas_phy *phy;
373 	unsigned long flags;
374 
375 	/* a published domain device always has a valid phy, it may be
376 	 * stale, but it is never NULL
377 	 */
378 	BUG_ON(!dev->phy);
379 
380 	spin_lock_irqsave(&ha->phy_port_lock, flags);
381 	phy = dev->phy;
382 	get_device(&phy->dev);
383 	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
384 
385 	return phy;
386 }
387 EXPORT_SYMBOL_GPL(sas_get_local_phy);
388 
/*
 * Block until error handling for @dev has finished.  For SATA devices
 * this delegates to libata; for SSP devices it waits for the
 * SAS_DEV_EH_PENDING bit to clear and then for SCSI EH to drain.
 */
static void sas_wait_eh(struct domain_device *dev)
{
	struct sas_ha_struct *ha = dev->port->ha;
	DEFINE_WAIT(wait);

	if (dev_is_sata(dev)) {
		ata_port_wait_eh(dev->sata_dev.ap);
		return;
	}
 retry:
	spin_lock_irq(&ha->lock);

	/* standard prepare/schedule/finish wait loop on ha->eh_wait_q */
	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ha->lock);
		schedule();
		spin_lock_irq(&ha->lock);
	}
	finish_wait(&ha->eh_wait_q, &wait);

	spin_unlock_irq(&ha->lock);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ha->shost)) {
		msleep(10);
		goto retry;
	}
}
417 
/*
 * Queue a LU or I_T reset of @dev to be executed by the EH thread
 * (sas_eh_handle_resets()).  @reset_type is SAS_DEV_LU_RESET or
 * SAS_DEV_RESET; @wait blocks until EH has processed the request.
 * Returns SUCCESS if the reset was scheduled, FAILED otherwise.
 */
static int sas_queue_reset(struct domain_device *dev, int reset_type,
			   u64 lun, int wait)
{
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		if (wait)
			sas_ata_wait_eh(dev);
		return SUCCESS;
	}

	/* retry until we can queue the request; a reset of the same type
	 * may already be pending for this device
	 */
	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			scheduled = 1;
			ha->eh_active++;
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->shost);
		}
		spin_unlock_irq(&ha->lock);

		if (wait)
			sas_wait_eh(dev);

		if (scheduled)
			return SUCCESS;
	}

	pr_warn("%s reset of %s failed\n",
		reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		dev_name(&dev->rphy->dev));

	return FAILED;
}
459 
/*
 * SCSI EH abort handler: ask the LLDD to abort the sas_task behind @cmd.
 * SATA aborts are only attempted when host EH is already scheduled,
 * since async aborts are not possible for ATA devices.
 */
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);
	unsigned long flags;

	if (!i->dft->lldd_abort_task)
		return FAILED;

	/* host_lock stabilizes host_eh_scheduled for the SATA check */
	spin_lock_irqsave(host->host_lock, flags);
	/* We cannot do async aborts for SATA devices */
	if (dev_is_sata(dev) && !host->host_eh_scheduled) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return FAILED;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	if (task)
		res = i->dft->lldd_abort_task(task);
	else
		pr_notice("no task to abort\n");	/* already completed */
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
		return SUCCESS;

	return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
490 
491 /* Attempt to send a LUN reset message to a device */
492 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
493 {
494 	int res;
495 	struct scsi_lun lun;
496 	struct Scsi_Host *host = cmd->device->host;
497 	struct domain_device *dev = cmd_to_domain_dev(cmd);
498 	struct sas_internal *i = to_sas_internal(host->transportt);
499 
500 	if (current != host->ehandler)
501 		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
502 
503 	int_to_scsilun(cmd->device->lun, &lun);
504 
505 	if (!i->dft->lldd_lu_reset)
506 		return FAILED;
507 
508 	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
509 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
510 		return SUCCESS;
511 
512 	return FAILED;
513 }
514 EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
515 
516 int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
517 {
518 	int res;
519 	struct Scsi_Host *host = cmd->device->host;
520 	struct domain_device *dev = cmd_to_domain_dev(cmd);
521 	struct sas_internal *i = to_sas_internal(host->transportt);
522 
523 	if (current != host->ehandler)
524 		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
525 
526 	if (!i->dft->lldd_I_T_nexus_reset)
527 		return FAILED;
528 
529 	res = i->dft->lldd_I_T_nexus_reset(dev);
530 	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
531 	    res == -ENODEV)
532 		return SUCCESS;
533 
534 	return FAILED;
535 }
536 EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
537 
538 /* Try to reset a device */
539 static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
540 {
541 	int res;
542 	struct Scsi_Host *shost = cmd->device->host;
543 
544 	if (!shost->hostt->eh_device_reset_handler)
545 		goto try_target_reset;
546 
547 	res = shost->hostt->eh_device_reset_handler(cmd);
548 	if (res == SUCCESS)
549 		return res;
550 
551 try_target_reset:
552 	if (shost->hostt->eh_target_reset_handler)
553 		return shost->hostt->eh_target_reset_handler(cmd);
554 
555 	return FAILED;
556 }
557 
/*
 * Core libsas error-handling strategy.  For each failed command still
 * carrying a sas_task, try to abort it; on failure escalate through LU
 * recovery, I_T nexus recovery, port nexus clear, and finally HA nexus
 * clear.  Commands resolved here are moved off @work_q; whatever
 * remains is left for the generic SCSI EH in the caller.
 */
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	unsigned long flags;
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(done);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

		if (!task)
			list_move_tail(&cmd->eh_entry, &done);
	}

 Again:
	/* restarted from the top whenever a recovery action may have
	 * resolved other commands still on work_q
	 */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (need_reset) {
			/* LLDD asked for a device reset: skip the abort */
			pr_notice("%s: task 0x%p requests reset\n",
				  __func__, task);
			goto reset;
		}

		pr_debug("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		switch (res) {
		case TASK_IS_DONE:
			pr_notice("%s: task 0x%p is done\n", __func__,
				    task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_ABORTED:
			pr_notice("%s: task 0x%p is aborted\n",
				  __func__, task);
			sas_eh_finish_cmd(cmd);
			continue;
		case TASK_IS_AT_LU:
			pr_info("task 0x%p is at LU: lu recover\n", task);
 reset:
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				pr_notice("dev %016llx LU 0x%llx is recovered\n",
					  SAS_ADDR(task->dev),
					  cmd->device->lun);
				sas_eh_finish_cmd(cmd);
				/* LU recovery may have cleared sibling cmds */
				sas_scsi_clear_queue_lu(work_q, cmd);
				goto Again;
			}
			fallthrough;
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			/* escalate to I_T nexus recovery */
			pr_notice("task 0x%p is not at LU: I_T recover\n",
				  task);
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				pr_notice("I_T %016llx recovered\n",
					  SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
				goto Again;
			}
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				pr_debug("clearing nexus for port:%d\n",
					  port->id);
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus port:%d succeeded\n",
						  port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
								  port);
					goto Again;
				}
			}
			if (i->dft->lldd_clear_nexus_ha) {
				pr_debug("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					pr_notice("clear nexus ha succeeded\n");
					sas_eh_finish_cmd(cmd);
					goto clear_q;
				}
			}
			/* If we are here -- this means that no amount
			 * of effort could recover from errors.  Quite
			 * possibly the HA just disappeared.
			 */
			pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
			       SAS_ADDR(task->dev->sas_addr),
			       cmd->device->lun);

			sas_eh_finish_cmd(cmd);
			goto clear_q;
		}
	}
 out:
	/* return race-resolved and deferred ata commands to the caller */
	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);
	return;

 clear_q:
	/* HA-wide nexus clear: every remaining command is finished */
	pr_debug("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
	goto out;
}
690 
/*
 * Execute directed device resets queued via sas_queue_reset().  Runs on
 * the EH thread; ha->lock is dropped around the (sleeping) LLDD calls
 * and the device is pinned with a kref across that window.
 */
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);	/* keep dev alive while unlocked */
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		sas_put_device(dev);
		spin_lock_irq(&ha->lock);
		/* clearing EH_PENDING lets sas_wait_eh() waiters proceed */
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);
}
723 
724 
/*
 * Top-level error recovery for a libsas host, called from the SCSI EH
 * thread.  Drains shost->eh_cmd_q, runs the libsas strategy, then the
 * generic SCSI EH, directed resets, and libata EH, looping until no new
 * EH work was scheduled during the pass.
 */
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);
	int tries = 0;
	bool retry;

retry:
	tries++;
	retry = true;
	/* take ownership of the currently failed commands */
	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	pr_notice("Enter %s busy: %d failed: %d\n",
		  __func__, scsi_host_busy(shost), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
		goto out;

	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

out:
	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
		retry = false;
	}
	spin_unlock_irq(&ha->lock);

	if (retry)
		goto retry;

	pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
		  __func__, scsi_host_busy(shost),
		  shost->host_failed, tries);
}
785 
786 int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
787 {
788 	struct domain_device *dev = sdev_to_domain_dev(sdev);
789 
790 	if (dev_is_sata(dev))
791 		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
792 
793 	return -EINVAL;
794 }
795 EXPORT_SYMBOL_GPL(sas_ioctl);
796 
/*
 * Walk every port's device list looking for the domain device that owns
 * @rphy.  Returns the device or NULL; no reference is taken — callers
 * that keep the pointer must grab their own kref.
 */
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;
	int i;
	unsigned long flags;

	/* phy_port_lock (outer) protects the port array; each port's
	 * dev_list_lock (inner) protects its device list
	 */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				found_dev = dev;
				spin_unlock(&port->dev_list_lock);
				goto found;
			}
		}
		spin_unlock(&port->dev_list_lock);
	}
 found:
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

	return found_dev;
}
825 
826 int sas_target_alloc(struct scsi_target *starget)
827 {
828 	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
829 	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
830 
831 	if (!found_dev)
832 		return -ENODEV;
833 
834 	kref_get(&found_dev->kref);
835 	starget->hostdata = found_dev;
836 	return 0;
837 }
838 EXPORT_SYMBOL_GPL(sas_target_alloc);
839 
840 #define SAS_DEF_QD 256
841 
842 int sas_slave_configure(struct scsi_device *scsi_dev)
843 {
844 	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
845 
846 	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
847 
848 	if (dev_is_sata(dev)) {
849 		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
850 		return 0;
851 	}
852 
853 	sas_read_port_mode_page(scsi_dev);
854 
855 	if (scsi_dev->tagged_supported) {
856 		scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
857 	} else {
858 		pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
859 			  SAS_ADDR(dev->sas_addr), scsi_dev->lun);
860 		scsi_change_queue_depth(scsi_dev, 1);
861 	}
862 
863 	scsi_dev->allow_restart = 1;
864 
865 	return 0;
866 }
867 EXPORT_SYMBOL_GPL(sas_slave_configure);
868 
869 int sas_change_queue_depth(struct scsi_device *sdev, int depth)
870 {
871 	struct domain_device *dev = sdev_to_domain_dev(sdev);
872 
873 	if (dev_is_sata(dev))
874 		return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
875 
876 	if (!sdev->tagged_supported)
877 		depth = 1;
878 	return scsi_change_queue_depth(sdev, depth);
879 }
880 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
881 
/*
 * Report a synthetic 255-head / 63-sector geometry, deriving the
 * cylinder count from @capacity.  @hsc is {heads, sectors, cylinders}.
 */
int sas_bios_param(struct scsi_device *scsi_dev,
			  struct block_device *bdev,
			  sector_t capacity, int *hsc)
{
	hsc[0] = 255;	/* heads */
	hsc[1] = 63;	/* sectors per track */
	sector_div(capacity, 255*63);	/* sector_div divides capacity in place */
	hsc[2] = capacity;	/* cylinders */

	return 0;
}
EXPORT_SYMBOL_GPL(sas_bios_param);
894 
/* Completion callback for internal (slow) tasks: cancel the watchdog
 * timer before waking the waiter in sas_execute_tmf()/internal abort.
 */
void sas_task_internal_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}
900 
/* Watchdog for internal tasks: mark the task aborted and complete it,
 * unless it already finished (in which case task_done won the race).
 */
void sas_task_internal_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	bool is_completed = true;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* only complete if task_done hasn't already done so */
	if (!is_completed)
		complete(&task->slow_task->completion);
}
918 
/* Per-attempt timeout and retry budget for internal TMF/abort tasks */
#define TASK_TIMEOUT			(20 * HZ)
#define TASK_RETRY			3
921 
/*
 * Build and execute an internal-abort task (single tag or whole device)
 * through the LLDD, retrying up to TASK_RETRY times with a TASK_TIMEOUT
 * watchdog per attempt.  Returns a TMF_* code, -ENOMEM, -EIO on
 * unrecovered timeout, or the LLDD's execute error.
 */
static int sas_execute_internal_abort(struct domain_device *device,
				      enum sas_internal_abort type, u16 tag,
				      unsigned int qid, void *data)
{
	struct sas_ha_struct *ha = device->port->ha;
	struct sas_internal *i = to_sas_internal(ha->shost->transportt);
	struct sas_task *task = NULL;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
		task->task_done = sas_task_internal_done;
		/* watchdog fires sas_task_internal_timedout on expiry */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		task->abort_task.tag = tag;
		task->abort_task.type = type;
		task->abort_task.qid = qid;

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			pr_err("Executing internal abort failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;

		/* Even if the internal abort timed out, return direct. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			bool quit = true;

			/* let the LLDD decide whether a timeout is fatal */
			if (i->dft->lldd_abort_timeout)
				quit = i->dft->lldd_abort_timeout(task, data);
			else
				pr_err("Internal abort: timeout %016llx\n",
				       SAS_ADDR(device->sas_addr));
			res = -EIO;
			if (quit)
				break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		/* unexpected response: free this task and retry */
		pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
		       SAS_ADDR(device->sas_addr), task->task_status.resp,
		       task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
	/* exhausting all retries must leave task == NULL (freed above) */
	BUG_ON(retry == TASK_RETRY && task != NULL);
	sas_free_task(task);
	return res;
}
994 
/**
 * sas_execute_internal_abort_single - abort one outstanding command
 * @device: the device the command was issued to
 * @tag: tag identifying the command to abort
 * @qid: queue the command was issued on
 * @data: opaque cookie passed to the LLDD's abort-timeout hook
 */
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
				      unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
					  tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
1002 
/**
 * sas_execute_internal_abort_dev - abort all outstanding commands on a device
 * @device: the device whose commands should be aborted
 * @qid: queue to issue the abort on
 * @data: opaque cookie passed to the LLDD's abort-timeout hook
 */
int sas_execute_internal_abort_dev(struct domain_device *device,
				   unsigned int qid, void *data)
{
	return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
					  SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
1010 
/**
 * sas_execute_tmf - issue a task management function and wait for it
 * @device: domain device the TMF is addressed to
 * @parameter: frame to send - a FIS for SATA devices, otherwise a
 *             struct sas_ssp_task (the caller picks the matching @para_len)
 * @para_len: length in bytes of @parameter
 * @force_phy_id: if >= 0, force the frame out of this phy (SATA only)
 * @tmf: task management parameters handed to the LLDD with the task
 *
 * Allocates a slow task, arms a TASK_TIMEOUT timer, hands the task to the
 * LLDD and sleeps on its completion.  The whole exchange is retried up to
 * TASK_RETRY times unless a definitive response (or a timeout that never
 * completed) ends the loop early.
 *
 * Return: TMF_RESP_FUNC_COMPLETE or TMF_RESP_FUNC_SUCC on success,
 * TMF_RESP_FUNC_FAILED on timeout/unrecognized status, a negative errno
 * for delivery/overrun/open-reject failures, or the residual byte count
 * on a data underrun.
 */
int sas_execute_tmf(struct domain_device *device, void *parameter,
		    int para_len, int force_phy_id,
		    struct sas_tmf_task *tmf)
{
	struct sas_task *task;
	struct sas_internal *i =
		to_sas_internal(device->port->ha->shost->transportt);
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		/* a fresh task per attempt; freed at loop bottom on retry */
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			if (force_phy_id >= 0) {
				task->ata_task.force_phy = true;
				task->ata_task.force_phy_id = force_phy_id;
			}
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}

		task->task_done = sas_task_internal_done;
		task->tmf = tmf;

		/* arm the timeout before handing the task to the LLDD */
		task->slow_task->timer.function = sas_task_internal_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = i->dft->lldd_execute_task(task, GFP_KERNEL);
		if (res) {
			/* never delivered - disarm the timer and bail out */
			del_timer_sync(&task->slow_task->timer);
			pr_err("executing TMF task failed %016llx (%d)\n",
			       SAS_ADDR(device->sas_addr), res);
			break;
		}

		wait_for_completion(&task->slow_task->completion);

		if (i->dft->lldd_tmf_exec_complete)
			i->dft->lldd_tmf_exec_complete(device);

		/* pessimistic default; overwritten by the checks below */
		res = TMF_RESP_FUNC_FAILED;

		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				/*
				 * Timed out and the LLDD never completed it -
				 * give the LLDD a chance to clean up, then
				 * stop retrying.
				 */
				pr_err("TMF task timeout for %016llx and not done\n",
				       SAS_ADDR(device->sas_addr));
				if (i->dft->lldd_tmf_aborted)
					i->dft->lldd_tmf_aborted(task);
				break;
			}
			/* timer fired but the task did complete; keep going */
			pr_warn("TMF task timeout for %016llx and done\n",
				SAS_ADDR(device->sas_addr));
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			pr_warn("TMF task blocked task error %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			/* open reject: note the failure but fall through to
			 * the retry path below (no break)
			 */
			pr_warn("TMF task open reject failed  %016llx\n",
				SAS_ADDR(device->sas_addr));
			res = -EIO;
		} else {
			pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
				SAS_ADDR(device->sas_addr),
				task->task_status.resp,
				task->task_status.stat);
		}
		/* retrying: release this attempt's task before looping */
		sas_free_task(task);
		task = NULL;
	}

	if (retry == TASK_RETRY)
		pr_warn("executing TMF for %016llx failed after %d attempts!\n",
			SAS_ADDR(device->sas_addr), TASK_RETRY);
	/* NULL on the retry-exhausted path; sas_free_task handles that */
	sas_free_task(task);

	return res;
}
1128 
1129 static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
1130 			       struct sas_tmf_task *tmf)
1131 {
1132 	struct sas_ssp_task ssp_task;
1133 
1134 	if (!(device->tproto & SAS_PROTOCOL_SSP))
1135 		return TMF_RESP_FUNC_ESUPP;
1136 
1137 	memcpy(ssp_task.LUN, lun, 8);
1138 
1139 	return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
1140 }
1141 
1142 int sas_abort_task_set(struct domain_device *dev, u8 *lun)
1143 {
1144 	struct sas_tmf_task tmf_task = {
1145 		.tmf = TMF_ABORT_TASK_SET,
1146 	};
1147 
1148 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1149 }
1150 EXPORT_SYMBOL_GPL(sas_abort_task_set);
1151 
1152 int sas_clear_task_set(struct domain_device *dev, u8 *lun)
1153 {
1154 	struct sas_tmf_task tmf_task = {
1155 		.tmf = TMF_CLEAR_TASK_SET,
1156 	};
1157 
1158 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1159 }
1160 EXPORT_SYMBOL_GPL(sas_clear_task_set);
1161 
1162 int sas_lu_reset(struct domain_device *dev, u8 *lun)
1163 {
1164 	struct sas_tmf_task tmf_task = {
1165 		.tmf = TMF_LU_RESET,
1166 	};
1167 
1168 	return sas_execute_ssp_tmf(dev, lun, &tmf_task);
1169 }
1170 EXPORT_SYMBOL_GPL(sas_lu_reset);
1171 
1172 int sas_query_task(struct sas_task *task, u16 tag)
1173 {
1174 	struct sas_tmf_task tmf_task = {
1175 		.tmf = TMF_QUERY_TASK,
1176 		.tag_of_task_to_be_managed = tag,
1177 	};
1178 	struct scsi_cmnd *cmnd = task->uldd_task;
1179 	struct domain_device *dev = task->dev;
1180 	struct scsi_lun lun;
1181 
1182 	int_to_scsilun(cmnd->device->lun, &lun);
1183 
1184 	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1185 }
1186 EXPORT_SYMBOL_GPL(sas_query_task);
1187 
1188 int sas_abort_task(struct sas_task *task, u16 tag)
1189 {
1190 	struct sas_tmf_task tmf_task = {
1191 		.tmf = TMF_ABORT_TASK,
1192 		.tag_of_task_to_be_managed = tag,
1193 	};
1194 	struct scsi_cmnd *cmnd = task->uldd_task;
1195 	struct domain_device *dev = task->dev;
1196 	struct scsi_lun lun;
1197 
1198 	int_to_scsilun(cmnd->device->lun, &lun);
1199 
1200 	return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1201 }
1202 EXPORT_SYMBOL_GPL(sas_abort_task);
1203 
1204 /*
1205  * Tell an upper layer that it needs to initiate an abort for a given task.
1206  * This should only ever be called by an LLDD.
1207  */
1208 void sas_task_abort(struct sas_task *task)
1209 {
1210 	struct scsi_cmnd *sc = task->uldd_task;
1211 
1212 	/* Escape for libsas internal commands */
1213 	if (!sc) {
1214 		struct sas_task_slow *slow = task->slow_task;
1215 
1216 		if (!slow)
1217 			return;
1218 		if (!del_timer(&slow->timer))
1219 			return;
1220 		slow->timer.function(&slow->timer);
1221 		return;
1222 	}
1223 
1224 	if (dev_is_sata(task->dev))
1225 		sas_ata_task_abort(task);
1226 	else
1227 		blk_abort_request(scsi_cmd_to_rq(sc));
1228 }
1229 EXPORT_SYMBOL_GPL(sas_task_abort);
1230 
1231 int sas_slave_alloc(struct scsi_device *sdev)
1232 {
1233 	if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
1234 		return -ENXIO;
1235 
1236 	return 0;
1237 }
1238 EXPORT_SYMBOL_GPL(sas_slave_alloc);
1239 
1240 void sas_target_destroy(struct scsi_target *starget)
1241 {
1242 	struct domain_device *found_dev = starget->hostdata;
1243 
1244 	if (!found_dev)
1245 		return;
1246 
1247 	starget->hostdata = NULL;
1248 	sas_put_device(found_dev);
1249 }
1250 EXPORT_SYMBOL_GPL(sas_target_destroy);
1251 
1252 #define SAS_STRING_ADDR_SIZE	16
1253 
1254 int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
1255 {
1256 	int res;
1257 	const struct firmware *fw;
1258 
1259 	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
1260 	if (res)
1261 		return res;
1262 
1263 	if (fw->size < SAS_STRING_ADDR_SIZE) {
1264 		res = -ENODEV;
1265 		goto out;
1266 	}
1267 
1268 	res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
1269 	if (res)
1270 		goto out;
1271 
1272 out:
1273 	release_firmware(fw);
1274 	return res;
1275 }
1276 EXPORT_SYMBOL_GPL(sas_request_addr);
1277 
1278