// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"

#define enable_tsend_auto_resp(efct)	1
#define enable_treceive_auto_resp(efct)	0

#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"

#define scsi_io_printf(io, fmt, ...) \
	efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
		io->node->display_name, io->instance_index, \
		io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)

#define EFCT_LOG_ENABLE_SCSI_TRACE(efct)                \
		(((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)

#define scsi_io_trace(io, fmt, ...) \
	do { \
		if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
			scsi_io_printf(io, fmt, ##__VA_ARGS__); \
	} while (0)

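/*
 * efct_scsi_io_alloc() - Allocate a SCSI IO context for a node.
 *
 * Allocates an IO from the transport IO pool, takes a reference on the
 * node, and links the IO onto the node's active_ios list. Returns the IO,
 * or NULL if the pool is exhausted (in which case io_alloc_failed_count
 * is incremented).
 */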
struct efct_io *
efct_scsi_io_alloc(struct efct_node *node)
{
	struct efct *efct;
	struct efct_xport *xport;
	struct efct_io *io;
	unsigned long flags = 0;

	efct = node->efct;

	xport = efct->xport;

	spin_lock_irqsave(&node->active_ios_lock, flags);

	io = efct_io_pool_io_alloc(efct->xport->io_pool);
	if (!io) {
		efc_log_err(efct, "IO alloc Failed\n");
		atomic_add_return(1, &xport->io_alloc_failed_count);
		spin_unlock_irqrestore(&node->active_ios_lock, flags);
		return NULL;
	}

	/* initialize refcount */
	kref_init(&io->ref);
	io->release = _efct_scsi_io_free;

	/* set generic fields */
	io->efct = efct;
	io->node = node;
	kref_get(&node->ref);

	/* set type and name */
	io->io_type = EFCT_IO_TYPE_IO;
	io->display_name = "scsi_io";

	io->cmd_ini = false;
	io->cmd_tgt = true;

	/* Add to node's active_ios list */
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, &node->active_ios);

	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	return io;
}

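/*
 * _efct_scsi_io_free() - kref release handler for a SCSI IO.
 *
 * Called via kref_put() when the last reference is dropped: unlinks the IO
 * from the node's active_ios list, drops the node reference taken in
 * efct_scsi_io_alloc(), and returns the IO to the transport IO pool.
 */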
void
_efct_scsi_io_free(struct kref *arg)
{
	struct efct_io *io = container_of(arg, struct efct_io, ref);
	struct efct *efct = io->efct;
	struct efct_node *node = io->node;
	unsigned long flags = 0;

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);

	if (io->io_free) {
		efc_log_err(efct, "IO already freed.\n");
		return;
	}

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_del_init(&io->list_entry);
	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	kref_put(&node->ref, node->release);
	io->node = NULL;
	efct_io_pool_io_free(efct->xport->io_pool, io);
}

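/*
 * efct_scsi_io_free() - Drop the caller's reference on a SCSI IO; the IO
 * is released by _efct_scsi_io_free() once the refcount reaches zero.
 */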
void
efct_scsi_io_free(struct efct_io *io)
{
	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	WARN_ON(!refcount_read(&io->ref.refcount));
	kref_put(&io->ref, io->release);
}

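/*
 * efct_target_io_cb() - HW completion handler for target data and
 * response IOs.
 *
 * Translates the SLI4 WCQE status/extended status into an
 * enum efct_scsi_io_status and invokes the saved target-server callback
 * with EFCT_SCSI_IO_CMPL_RSP_SENT when an auto-good-response was sent, or
 * EFCT_SCSI_IO_CMPL otherwise. The pending-IO list is kicked afterwards.
 */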
static void
efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
		  u32 ext_status, void *app)
{
	u32 flags = 0;
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
	efct_scsi_io_cb_t cb;

	if (!io || !io->efct) {
		pr_err("%s: IO can not be NULL\n", __func__);
		return;
	}

	scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

	efct = io->efct;

	io->transferred += length;

	if (!io->scsi_tgt_cb) {
		efct_scsi_check_pending(efct);
		return;
	}

	/* Call target server completion */
	cb = io->scsi_tgt_cb;

	/* Clear the callback before invoking the callback */
	io->scsi_tgt_cb = NULL;

	/* If the status was good and auto-good-response was set, call back
	 * the target server with IO_CMPL_RSP_SENT; otherwise send IO_CMPL.
	 */
	if (status == 0 && io->auto_resp)
		flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
	else
		flags |= EFCT_SCSI_IO_CMPL;

	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_stat = EFCT_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_DI_ERROR:
		if (ext_status & SLI4_FC_DI_ERROR_GE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
		else if (ext_status & SLI4_FC_DI_ERROR_AE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
		else if (ext_status & SLI4_FC_DI_ERROR_RE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
		else
			scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
			scsi_stat = EFCT_SCSI_STATUS_ABORTED;
			break;
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
			scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
			break;
		case SLI4_FC_LOCAL_REJECT_NO_XRI:
			scsi_stat = EFCT_SCSI_STATUS_NO_IO;
			break;
		default:
			/* we have seen 0x0d (TX_DMA_FAILED err) */
			scsi_stat = EFCT_SCSI_STATUS_ERROR;
			break;
		}
		break;

	case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
		/* target IO timed out */
		scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
		break;

	case SLI4_FC_WCQE_STATUS_SHUTDOWN:
		/* Target IO cancelled by HW */
		scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
		break;

	default:
		scsi_stat = EFCT_SCSI_STATUS_ERROR;
		break;
	}

	cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);

	efct_scsi_check_pending(efct);
}

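/*
 * efct_scsi_build_sgls() - Load a scatter/gather list into a HW IO.
 *
 * Initializes the HW SGL for the given IO type, then adds one SGE per
 * efct_scsi_sgl entry. Returns 0 on success, negative errno on failure.
 */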
static int
efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
		     struct efct_scsi_sgl *sgl, u32 sgl_count,
		     enum efct_hw_io_type type)
{
	int rc;
	u32 i;
	struct efct *efct = hw->os;

	/* Initialize HW SGL */
	rc = efct_hw_io_init_sges(hw, hio, type);
	if (rc) {
		efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
		return -EIO;
	}

	for (i = 0; i < sgl_count; i++) {
		/* Add data SGE */
		rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
		if (rc) {
			efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
				    sgl_count, rc);
			return rc;
		}
	}

	return 0;
}

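/*
 * Dump an IO's default SGL to the SCSI trace log, stopping at the SGE
 * whose "last" bit (bit 31 of word 2) is set.
 */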
static void efc_log_sgl(struct efct_io *io)
{
	struct efct_hw_io *hio = io->hio;
	struct sli4_sge *data = NULL;
	u32 *dword = NULL;
	u32 i;
	u32 n_sge;

	scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
		      upper_32_bits(hio->def_sgl.phys),
		      lower_32_bits(hio->def_sgl.phys));
	n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
	for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
		dword = (u32 *)data;

		scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
			      i, dword[0], dword[1], dword[2], dword[3]);

		if (dword[2] & (1U << 31))
			break;
	}
}

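/*
 * efct_scsi_check_pending_async_cb() - Deferred failure completion.
 *
 * Runs in the NOP mailbox completion context (scheduled via
 * efct_hw_async_call()) when a pending IO could not be dispatched, and
 * completes the IO with SLI4_FC_WCQE_STATUS_DISPATCH_ERROR.
 */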
static void
efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
				 u8 *mqe, void *arg)
{
	struct efct_io *io = arg;

	if (io) {
		efct_hw_done_t cb = io->hw_cb;

		if (!io->hw_cb)
			return;

		io->hw_cb = NULL;
		(cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
	}
}

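/*
 * efct_scsi_io_dispatch_hw_io() - Dispatch an IO that owns a HW IO.
 *
 * Binds the HW IO to the SCSI IO (task tags, request tag, EQ, WQ
 * steering), builds the SGLs, fills the FCP target parameters from the
 * node, and sends the IO to the hardware.
 */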
static int
efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
{
	int rc = 0;
	struct efct *efct = io->efct;

	/* Got a HW IO;
	 * update ini/tgt_task_tag with HW IO info and dispatch
	 */
	io->hio = hio;
	if (io->cmd_tgt)
		io->tgt_task_tag = hio->indicator;
	else if (io->cmd_ini)
		io->init_task_tag = hio->indicator;
	io->hw_tag = hio->reqtag;

	hio->eq = io->hw_priv;

	/* Copy WQ steering */
	switch (io->wq_steering) {
	case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
		break;
	case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
		break;
	case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
		break;
	}

	switch (io->io_type) {
	case EFCT_IO_TYPE_IO:
		rc = efct_scsi_build_sgls(&efct->hw, io->hio,
					  io->sgl, io->sgl_count, io->hio_type);
		if (rc)
			break;

		if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
			efc_log_sgl(io);

		if (io->app_id)
			io->iparam.fcp_tgt.app_id = io->app_id;

		io->iparam.fcp_tgt.vpi = io->node->vpi;
		io->iparam.fcp_tgt.rpi = io->node->rpi;
		io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
		io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
		io->iparam.fcp_tgt.xmit_len = io->wire_len;

		rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
				     &io->iparam, io->hw_cb, io);
		break;
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -EIO;
		break;
	}
	return rc;
}

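/*
 * efct_scsi_io_dispatch_no_hw_io() - Dispatch an IO that needs no HW IO.
 *
 * Only abort requests take this path. If the IO to abort no longer has an
 * associated HW IO, the abort completes immediately with success;
 * otherwise the abort is issued through efct_hw_io_abort().
 */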
static int
efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
{
	int rc;

	switch (io->io_type) {
	case EFCT_IO_TYPE_ABORT: {
		struct efct_hw_io *hio_to_abort = NULL;

		hio_to_abort = io->io_to_abort->hio;

		if (!hio_to_abort) {
			/*
			 * If "IO to abort" does not have an
			 * associated HW IO, immediately make callback with
			 * success. The command must have been sent to
			 * the backend, but the data phase has not yet
			 * started, so we don't have a HW IO.
			 *
			 * Note: since the backend shims should be
			 * taking a reference on io_to_abort, it should not
			 * be possible to have been completed and freed by
			 * the backend before the abort got here.
			 */
			scsi_io_printf(io, "IO: not active\n");
			((efct_hw_done_t)io->hw_cb)(io->hio, 0,
					SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
			rc = 0;
			break;
		}

		/* HW IO is valid, abort it */
		scsi_io_printf(io, "aborting\n");
		rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
				      io->send_abts, io->hw_cb, io);
		if (rc) {
			int status = SLI4_FC_WCQE_STATUS_SUCCESS;
			efct_hw_done_t cb = io->hw_cb;

			if (rc != -ENOENT && rc != -EINPROGRESS) {
				status = -1;
				scsi_io_printf(io, "Failed to abort IO rc=%d\n",
					       rc);
			}
			cb(io->hio, 0, status, 0, io);
			rc = 0;
		}

		break;
	}
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -EIO;
		break;
	}
	return rc;
}

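/*
 * efct_scsi_dispatch_pending() - Dispatch the head of the pending list.
 *
 * Pops the first pending IO and, unless it is an abort (which needs no
 * HW IO), allocates a HW IO for it before dispatching. If no HW IO is
 * available the IO is returned to the front of the list. Returns the
 * dispatched IO, or NULL if nothing could be dispatched.
 */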
static struct efct_io *
efct_scsi_dispatch_pending(struct efct *efct)
{
	struct efct_xport *xport = efct->xport;
	struct efct_io *io = NULL;
	struct efct_hw_io *hio;
	unsigned long flags = 0;
	int status;

	spin_lock_irqsave(&xport->io_pending_lock, flags);

	if (!list_empty(&xport->io_pending_list)) {
		io = list_first_entry(&xport->io_pending_list, struct efct_io,
				      io_pending_link);
		list_del_init(&io->io_pending_link);
	}

	if (!io) {
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		return NULL;
	}

	if (io->io_type == EFCT_IO_TYPE_ABORT) {
		hio = NULL;
	} else {
		hio = efct_hw_io_alloc(&efct->hw);
		if (!hio) {
			/*
			 * No HW IO available. Put the IO back on
			 * the front of the pending list.
			 */
			list_add(&io->io_pending_link, &xport->io_pending_list);
			io = NULL;
		} else {
			hio->eq = io->hw_priv;
		}
	}

	/* Must drop the lock before dispatching the IO */
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	if (!io)
		return NULL;

	/*
	 * We pulled an IO off the pending list,
	 * and either got an HW IO or don't need one
	 */
	atomic_sub_return(1, &xport->io_pending_count);
	if (!hio)
		status = efct_scsi_io_dispatch_no_hw_io(io);
	else
		status = efct_scsi_io_dispatch_hw_io(io, hio);
	if (status) {
		/*
		 * Invoke the HW callback, but do so in a separate
		 * execution context, provided by the NOP mailbox
		 * completion processing context, by using
		 * efct_hw_async_call().
		 */
		if (efct_hw_async_call(&efct->hw,
				       efct_scsi_check_pending_async_cb, io)) {
			efc_log_debug(efct, "call hw async failed\n");
		}
	}

	return io;
}

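/*
 * efct_scsi_check_pending() - Drain the transport's pending-IO list.
 *
 * An atomic counter flattens recursion, since completions run from the
 * dispatch path can call back into this function. If nothing could be
 * dispatched, the list is scanned for an abort of an active IO, which can
 * always be dispatched without allocating a HW IO.
 */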
void
efct_scsi_check_pending(struct efct *efct)
{
	struct efct_xport *xport = efct->xport;
	struct efct_io *io = NULL;
	int count = 0;
	unsigned long flags = 0;
	int dispatch = 0;

	/* Guard against recursion */
	if (atomic_add_return(1, &xport->io_pending_recursing) > 1) {
		/* This function is already running.  Decrement and return. */
		atomic_sub_return(1, &xport->io_pending_recursing);
		return;
	}

	while (efct_scsi_dispatch_pending(efct))
		count++;

	if (count) {
		atomic_sub_return(1, &xport->io_pending_recursing);
		return;
	}

	/*
	 * If nothing was removed from the list,
	 * we might be in a case where we need to abort an
	 * active IO and the abort is on the pending list.
	 * Look for an abort we can dispatch.
	 */

	spin_lock_irqsave(&xport->io_pending_lock, flags);

	list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
		if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
			/* This IO has a HW IO, so it is
			 * active.  Dispatch the abort.
			 */
			dispatch = 1;
			list_del_init(&io->io_pending_link);
			atomic_sub_return(1, &xport->io_pending_count);
			break;
		}
	}

	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	if (dispatch) {
		if (efct_scsi_io_dispatch_no_hw_io(io)) {
			if (efct_hw_async_call(&efct->hw,
				efct_scsi_check_pending_async_cb, io)) {
				efc_log_debug(efct, "hw async failed\n");
			}
		}
	}

	atomic_sub_return(1, &xport->io_pending_recursing);
}

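/*
 * efct_scsi_io_dispatch() - Send an IO to the hardware, queueing it on
 * the pending list when necessary.
 *
 * An IO that already owns a HW IO (a later phase of the same exchange) is
 * dispatched directly. Otherwise the IO is queued whenever the pending
 * list is non-empty (to preserve ordering) or no HW IO can be allocated.
 * Callers in this file pass efct_target_io_cb as the completion:
 *
 *	rc = efct_scsi_io_dispatch(io, efct_target_io_cb);
 */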
int
efct_scsi_io_dispatch(struct efct_io *io, void *cb)
{
	struct efct_hw_io *hio;
	struct efct *efct = io->efct;
	struct efct_xport *xport = efct->xport;
	unsigned long flags = 0;

	io->hw_cb = cb;

	/*
	 * If this IO already has a HW IO, then this is not the first
	 * phase of the IO. Send it to the HW.
	 */
	if (io->hio)
		return efct_scsi_io_dispatch_hw_io(io, io->hio);

	/*
	 * We don't already have a HW IO associated with the IO. First check
	 * the pending list. If not empty, add IO to the tail and process the
	 * pending list.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	if (!list_empty(&xport->io_pending_list)) {
		/*
		 * If this is a low latency request,
		 * then put it at the front of the IO pending
		 * queue, otherwise put it at the end of the queue.
		 */
		if (io->low_latency) {
			INIT_LIST_HEAD(&io->io_pending_link);
			list_add(&io->io_pending_link, &xport->io_pending_list);
		} else {
			INIT_LIST_HEAD(&io->io_pending_link);
			list_add_tail(&io->io_pending_link,
				      &xport->io_pending_list);
		}
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		atomic_add_return(1, &xport->io_pending_count);
		atomic_add_return(1, &xport->io_total_pending);

		/* process pending list */
		efct_scsi_check_pending(efct);
		return 0;
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	/*
	 * We don't have a HW IO associated with the IO and there's nothing
	 * on the pending list. Attempt to allocate a HW IO and dispatch it.
	 */
	hio = efct_hw_io_alloc(&io->efct->hw);
	if (!hio) {
		/* Couldn't get a HW IO. Save this IO on the pending list */
		spin_lock_irqsave(&xport->io_pending_lock, flags);
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link, &xport->io_pending_list);
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);

		atomic_add_return(1, &xport->io_total_pending);
		atomic_add_return(1, &xport->io_pending_count);
		return 0;
	}

	/* We successfully allocated a HW IO; dispatch to HW */
	return efct_scsi_io_dispatch_hw_io(io, hio);
}

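/*
 * efct_scsi_io_dispatch_abort() - Dispatch an abort request.
 *
 * Aborts need no HW IO of their own but still flow through the pending
 * list when it is non-empty, so that ordering with queued IOs is
 * preserved.
 */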
int
efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
{
	struct efct *efct = io->efct;
	struct efct_xport *xport = efct->xport;
	unsigned long flags = 0;

	io->hw_cb = cb;

	/*
	 * For aborts, we don't need a HW IO, but we still want
	 * to pass through the pending list to preserve ordering.
	 * Thus, if the pending list is not empty, add this abort
	 * to the pending list and process the pending list.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	if (!list_empty(&xport->io_pending_list)) {
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link, &xport->io_pending_list);
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		atomic_add_return(1, &xport->io_pending_count);
		atomic_add_return(1, &xport->io_total_pending);

		/* process pending list */
		efct_scsi_check_pending(efct);
		return 0;
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	/* nothing on pending list, dispatch abort */
	return efct_scsi_io_dispatch_no_hw_io(io);
}

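/*
 * efct_scsi_xfer_data() - Start a target data phase (read or write).
 *
 * Clamps the wire length to the remaining expected transfer, trims the
 * SGL to account for any overrun, enables auto-good-response for an exact
 * final data phase when permitted, updates the FCP statistics, and
 * dispatches the IO.
 */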
static inline int
efct_scsi_xfer_data(struct efct_io *io, u32 flags,
		    struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
		    enum efct_hw_io_type type, int enable_ar,
		    efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	size_t residual = 0;

	io->sgl_count = sgl_count;

	efct = io->efct;

	scsi_io_trace(io, "%s wire_len %llu\n",
		      (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
		      xwire_len);

	io->hio_type = type;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	residual = io->exp_xfer_len - io->transferred;
	io->wire_len = (xwire_len < residual) ? xwire_len : residual;
	residual = (xwire_len - io->wire_len);

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = io->transferred;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* if this is the last data phase and there is no residual, enable
	 * auto-good-response
	 */
	if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
	    ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
	    (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
		io->auto_resp = true;
	} else {
		io->auto_resp = false;
	}

	/* save this transfer length */
	io->xfer_req = io->wire_len;

	/* Adjust the transferred count to account for overrun
	 * when the residual is calculated in efct_scsi_send_resp
	 */
	io->transferred += residual;

	/* Adjust the SGL size if there is overrun */
	if (residual) {
		struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];

		while (residual) {
			size_t len = sgl_ptr->len;

			if (len > residual) {
				sgl_ptr->len = len - residual;
				residual = 0;
			} else {
				sgl_ptr->len = 0;
				residual -= len;
				io->sgl_count--;
			}
			sgl_ptr--;
		}
	}

	/* Set latency and WQ steering */
	io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
				EFCT_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
				EFCT_SCSI_WQ_CLASS_SHIFT;

	if (efct->xport) {
		struct efct_xport *xport = efct->xport;

		if (type == EFCT_HW_IO_TARGET_READ) {
			xport->fcp_stats.input_requests++;
			xport->fcp_stats.input_bytes += xwire_len;
		} else if (type == EFCT_HW_IO_TARGET_WRITE) {
			xport->fcp_stats.output_requests++;
			xport->fcp_stats.output_bytes += xwire_len;
		}
	}
	return efct_scsi_io_dispatch(io, efct_target_io_cb);
}

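/*
 * Data-phase entry points for the target server: a target read sends data
 * to the initiator (EFCT_HW_IO_TARGET_READ) and a target write receives
 * data from it (EFCT_HW_IO_TARGET_WRITE). Auto-response is enabled only
 * on the send side, per the enable_*_auto_resp() settings above.
 */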
int
efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
		       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
		       efct_scsi_io_cb_t cb, void *arg)
{
	return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
				   len, EFCT_HW_IO_TARGET_READ,
				   enable_tsend_auto_resp(io->efct), cb, arg);
}

int
efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
		       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
		       efct_scsi_io_cb_t cb, void *arg)
{
	return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
				   EFCT_HW_IO_TARGET_WRITE,
				   enable_treceive_auto_resp(io->efct), cb, arg);
}

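/*
 * efct_scsi_send_resp() - Send the FCP response for a target IO.
 *
 * A good status with no residual and no sense data is sent as an
 * auto-good-response. Otherwise an explicit fcp_resp payload (status,
 * residual under/overrun, optional sense data) is built in the IO's
 * response buffer and transmitted as EFCT_HW_IO_TARGET_RSP.
 */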
int
efct_scsi_send_resp(struct efct_io *io, u32 flags,
		    struct efct_scsi_cmd_resp *rsp,
		    efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	int residual;
	/* Always try auto resp */
	bool auto_resp = true;
	u8 scsi_status = 0;
	u16 scsi_status_qualifier = 0;
	u8 *sense_data = NULL;
	u32 sense_data_length = 0;

	efct = io->efct;

	if (rsp) {
		scsi_status = rsp->scsi_status;
		scsi_status_qualifier = rsp->scsi_status_qualifier;
		sense_data = rsp->sense_data;
		sense_data_length = rsp->sense_data_length;
		residual = rsp->residual;
	} else {
		residual = io->exp_xfer_len - io->transferred;
	}

	io->wire_len = 0;
	io->hio_type = EFCT_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* Set low latency queueing request */
	io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
				EFCT_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
				EFCT_SCSI_WQ_CLASS_SHIFT;

	if (scsi_status != 0 || residual || sense_data_length) {
		struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
		u8 *sns_data;

		if (!fcprsp) {
			efc_log_err(efct, "NULL response buffer\n");
			return -EIO;
		}

		sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);

		auto_resp = false;

		memset(fcprsp, 0, sizeof(*fcprsp));

		io->wire_len += sizeof(*fcprsp);

		fcprsp->resp.fr_status = scsi_status;
		fcprsp->resp.fr_retry_delay =
			cpu_to_be16(scsi_status_qualifier);

		/* set residual status if necessary */
		if (residual != 0) {
			/* FCP: if the data transferred is less than the
			 * amount expected, then this is an underflow.
			 * If the data transferred would have been greater
			 * than the amount expected, this is an overflow.
			 */
			if (residual > 0) {
				fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
				fcprsp->ext.fr_resid = cpu_to_be32(residual);
			} else {
				fcprsp->resp.fr_flags |= FCP_RESID_OVER;
				fcprsp->ext.fr_resid = cpu_to_be32(-residual);
			}
		}

		if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
				efc_log_err(efct, "Sense exceeds max size.\n");
				return -EIO;
			}

			fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
			memcpy(sns_data, sense_data, sense_data_length);
			fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
			io->wire_len += sense_data_length;
		}

		io->sgl[0].addr = io->rspbuf.phys;
		io->sgl[0].dif_addr = 0;
		io->sgl[0].len = io->wire_len;
		io->sgl_count = 1;
	}

	if (auto_resp)
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;

	return efct_scsi_io_dispatch(io, efct_target_io_cb);
}

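/*
 * efct_target_bls_resp_cb() - HW completion handler for a BLS response.
 *
 * Maps the HW status onto a SCSI-style status for the target server's
 * callback, then kicks the pending-IO list.
 */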
static int
efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
			u32 ext_status, void *app)
{
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status bls_status;

	efct = io->efct;

	/* BLS isn't really a "SCSI" concept, but use SCSI status */
	if (status) {
		io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
		bls_status = EFCT_SCSI_STATUS_ERROR;
	} else {
		bls_status = EFCT_SCSI_STATUS_GOOD;
	}

	if (io->bls_cb) {
		efct_scsi_io_cb_t bls_cb = io->bls_cb;
		void *bls_cb_arg = io->bls_cb_arg;

		io->bls_cb = NULL;
		io->bls_cb_arg = NULL;

		/* invoke callback */
		bls_cb(io, bls_status, 0, bls_cb_arg);
	}

	efct_scsi_check_pending(efct);
	return 0;
}

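/*
 * efct_target_send_bls_resp() - Send a BA_ACC for a handled ABTS.
 *
 * Builds the BA_ACC payload from the exchange IDs saved on the IO and
 * sends it via efct_hw_bls_send().
 */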
static int
efct_target_send_bls_resp(struct efct_io *io,
			  efct_scsi_io_cb_t cb, void *arg)
{
	struct efct_node *node = io->node;
	struct sli_bls_params *bls = &io->iparam.bls;
	struct efct *efct = node->efct;
	struct fc_ba_acc *acc;
	int rc;

	/* fill out IO structure with everything needed to send BA_ACC */
	memset(&io->iparam, 0, sizeof(io->iparam));
	bls->ox_id = io->init_task_tag;
	bls->rx_id = io->abort_rx_id;
	bls->vpi = io->node->vpi;
	bls->rpi = io->node->rpi;
	bls->s_id = U32_MAX;
	bls->d_id = io->node->node_fc_id;
	bls->rpi_registered = true;

	acc = (void *)bls->payload;
	acc->ba_ox_id = cpu_to_be16(bls->ox_id);
	acc->ba_rx_id = cpu_to_be16(bls->rx_id);
	acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

	/* generic io fields have already been populated */

	/* set type and BLS-specific fields */
	io->io_type = EFCT_IO_TYPE_BLS_RESP;
	io->display_name = "bls_rsp";
	io->hio_type = EFCT_HW_BLS_ACC;
	io->bls_cb = cb;
	io->bls_cb_arg = arg;

	/* dispatch IO */
	rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
			      efct_target_bls_resp_cb, io);
	return rc;
}

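/* Completion handler for a BA_RJT: the reject IO is simply freed. */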
static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
				u32 ext_status, void *app)
{
	struct efct_io *io = app;

	efct_scsi_io_free(io);
	return 0;
}

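/*
 * efct_bls_send_rjt() - Send a BA_RJT for an ABTS we cannot honor.
 *
 * The reject (ELS_RJT_UNAB/ELS_EXPL_NONE) is built from the received
 * frame header. On failure the IO is freed and NULL is returned.
 */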
struct efct_io *
efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
{
	struct efct_node *node = io->node;
	struct sli_bls_params *bls = &io->iparam.bls;
	struct efct *efct = node->efct;
	struct fc_ba_rjt *acc;
	int rc;

	/* fill out BLS Response-specific fields */
	io->io_type = EFCT_IO_TYPE_BLS_RESP;
	io->display_name = "ba_rjt";
	io->hio_type = EFCT_HW_BLS_RJT;
	io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);

	/* fill out iparam fields */
	memset(&io->iparam, 0, sizeof(io->iparam));
	bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
	bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
	bls->vpi = io->node->vpi;
	bls->rpi = io->node->rpi;
	bls->s_id = U32_MAX;
	bls->d_id = io->node->node_fc_id;
	bls->rpi_registered = true;

	acc = (void *)bls->payload;
	acc->br_reason = ELS_RJT_UNAB;
	acc->br_explan = ELS_EXPL_NONE;

	rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
			      io);
	if (rc) {
		efc_log_err(efct, "efct_hw_bls_send() failed: %d\n", rc);
		efct_scsi_io_free(io);
		io = NULL;
	}
	return io;
}

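/*
 * efct_scsi_send_tmf_resp() - Send the response for a task management
 * function.
 *
 * ABORT_TASK is answered with a BLS BA_ACC; all other TMFs are answered
 * with an FCP response whose rsp_info carries the translated FCP TMF
 * response code.
 */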
int
efct_scsi_send_tmf_resp(struct efct_io *io,
			enum efct_scsi_tmf_resp rspcode,
			u8 addl_rsp_info[3],
			efct_scsi_io_cb_t cb, void *arg)
{
	int rc;
	struct {
		struct fcp_resp_with_ext rsp_ext;
		struct fcp_resp_rsp_info info;
	} *fcprsp;
	u8 fcp_rspcode;

	io->wire_len = 0;

	switch (rspcode) {
	case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
	case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
	case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
		fcp_rspcode = FCP_TMF_CMPL;
		break;
	case EFCT_SCSI_TMF_FUNCTION_REJECTED:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
		fcp_rspcode = FCP_TMF_INVALID_LUN;
		break;
	case EFCT_SCSI_TMF_SERVICE_DELIVERY:
		fcp_rspcode = FCP_TMF_FAILED;
		break;
	default:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	}

	io->hio_type = EFCT_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
		rc = efct_target_send_bls_resp(io, cb, arg);
		return rc;
	}

	/* populate the FCP TMF response */
	fcprsp = io->rspbuf.virt;
	memset(fcprsp, 0, sizeof(*fcprsp));

	fcprsp->rsp_ext.resp.fr_flags |= FCP_RSP_LEN_VAL;

	if (addl_rsp_info) {
		memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
		       sizeof(fcprsp->info._fr_resvd));
	}
	fcprsp->info.rsp_code = fcp_rspcode;

	io->wire_len = sizeof(*fcprsp);

	fcprsp->rsp_ext.ext.fr_rsp_len =
			cpu_to_be32(sizeof(struct fcp_resp_rsp_info));

	io->sgl[0].addr = io->rspbuf.phys;
	io->sgl[0].dif_addr = 0;
	io->sgl[0].len = io->wire_len;
	io->sgl_count = 1;

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	rc = efct_scsi_io_dispatch(io, efct_target_io_cb);

	return rc;
}

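/*
 * efct_target_abort_cb() - Completion handler for a target-issued abort.
 *
 * Translates the abort status for the target server's callback, drops the
 * reference taken on the aborted IO in efct_scsi_tgt_abort_io(), frees
 * the abort IO itself, and kicks the pending-IO list.
 */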
static int
efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
		     u32 ext_status, void *app)
{
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status scsi_status;
	efct_scsi_io_cb_t abort_cb;
	void *abort_cb_arg;

	efct = io->efct;

	if (!io->abort_cb)
		goto done;

	abort_cb = io->abort_cb;
	abort_cb_arg = io->abort_cb_arg;

	io->abort_cb = NULL;
	io->abort_cb_arg = NULL;

	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_status = EFCT_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_NO_XRI:
			scsi_status = EFCT_SCSI_STATUS_NO_IO;
			break;
		case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
			scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
			break;
		default:
			/* we have seen 0x15 (abort in progress) */
			scsi_status = EFCT_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
		scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
		break;
	default:
		scsi_status = EFCT_SCSI_STATUS_ERROR;
		break;
	}
	/* invoke callback */
	abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);

done:
	/* done with IO to abort; drop the reference taken in
	 * efct_scsi_tgt_abort_io()
	 */
	kref_put(&io->io_to_abort->ref, io->io_to_abort->release);

	efct_io_pool_io_free(efct->xport->io_pool, io);

	efct_scsi_check_pending(efct);
	return 0;
}

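/*
 * efct_scsi_tgt_abort_io() - Abort a target IO on behalf of the backend.
 *
 * Takes a reference on the IO being aborted (dropped on failure or in
 * efct_target_abort_cb()), allocates a dedicated abort IO straight from
 * the pool, and dispatches it with send_abts set to false, i.e. without
 * putting an ABTS on the wire.
 */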
int
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	struct efct_xport *xport;
	int rc;
	struct efct_io *abort_io = NULL;

	efct = io->efct;
	xport = efct->xport;

	/* take a reference on IO being aborted */
	if (kref_get_unless_zero(&io->ref) == 0) {
		/* command no longer active */
		scsi_io_printf(io, "command no longer active\n");
		return -EIO;
	}

	/*
	 * Allocate a new IO to send the abort request. Use
	 * efct_io_pool_io_alloc() directly, as we need an IO object that
	 * will not fail allocation due to allocations being disabled (in
	 * efct_scsi_io_alloc()).
	 */
	abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
	if (!abort_io) {
		atomic_add_return(1, &xport->io_alloc_failed_count);
		kref_put(&io->ref, io->release);
		return -EIO;
	}

	/* Save the target server callback and argument */
	/* set generic fields */
	abort_io->cmd_tgt = true;
	abort_io->node = io->node;

	/* set type and abort-specific fields */
	abort_io->io_type = EFCT_IO_TYPE_ABORT;
	abort_io->display_name = "tgt_abort";
	abort_io->io_to_abort = io;
	abort_io->send_abts = false;
	abort_io->abort_cb = cb;
	abort_io->abort_cb_arg = arg;

	/* now dispatch IO */
	rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
	if (rc)
		kref_put(&io->ref, io->release);
	return rc;
}

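/*
 * efct_scsi_io_complete() - Backend notification that an IO is done;
 * drops the backend's reference on the IO.
 */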
void
efct_scsi_io_complete(struct efct_io *io)
{
	if (io->io_free) {
		efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
			      io->tag);
		return;
	}

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	kref_put(&io->ref, io->release);
}