1 /*
2  *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3  *
4  *  based on qla2x00t.c code:
5  *
6  *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7  *  Copyright (C) 2004 - 2005 Leonid Stoljar
8  *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9  *  Copyright (C) 2006 - 2010 ID7 Ltd.
10  *
11  *  Forward port and refactoring to modern qla2xxx and target/configfs
12  *
13  *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
14  *
15  *  This program is free software; you can redistribute it and/or
16  *  modify it under the terms of the GNU General Public License
17  *  as published by the Free Software Foundation, version 2
18  *  of the License.
19  *
20  *  This program is distributed in the hope that it will be useful,
21  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
22  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23  *  GNU General Public License for more details.
24  */
25 
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/blkdev.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/delay.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <asm/unaligned.h>
36 #include <scsi/scsi.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <target/target_core_base.h>
40 #include <target/target_core_fabric.h>
41 
42 #include "qla_def.h"
43 #include "qla_target.h"
44 
45 static int ql2xtgt_tape_enable;
46 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
47 MODULE_PARM_DESC(ql2xtgt_tape_enable,
48 		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
49 
50 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
51 module_param(qlini_mode, charp, S_IRUGO);
52 MODULE_PARM_DESC(qlini_mode,
53 	"Determines when initiator mode will be enabled. Possible values: "
54 	"\"exclusive\" - initiator mode will be enabled on load, "
55 	"disabled on enabling target mode and then on disabling target mode "
56 	"enabled back; "
57 	"\"disabled\" - initiator mode will never be enabled; "
58 	"\"enabled\" (default) - initiator mode will always stay enabled.");
59 
60 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
61 
62 static int temp_sam_status = SAM_STAT_BUSY;
63 
64 /*
65  * From scsi/fc/fc_fcp.h
66  */
67 enum fcp_resp_rsp_codes {
68 	FCP_TMF_CMPL = 0,
69 	FCP_DATA_LEN_INVALID = 1,
70 	FCP_CMND_FIELDS_INVALID = 2,
71 	FCP_DATA_PARAM_MISMATCH = 3,
72 	FCP_TMF_REJECTED = 4,
73 	FCP_TMF_FAILED = 5,
74 	FCP_TMF_INVALID_LUN = 9,
75 };
76 
77 /*
78  * fc_pri_ta from scsi/fc/fc_fcp.h
79  */
80 #define FCP_PTA_SIMPLE      0   /* simple task attribute */
81 #define FCP_PTA_HEADQ       1   /* head of queue task attribute */
82 #define FCP_PTA_ORDERED     2   /* ordered task attribute */
83 #define FCP_PTA_ACA         4   /* auto. contingent allegiance */
84 #define FCP_PTA_MASK        7   /* mask for task attribute field */
85 #define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
86 #define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
87 
88 /*
89  * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
90  * must be called under HW lock and could unlock/lock it inside.
91  * It isn't an issue, since in the current implementation on the time when
92  * those functions are called:
93  *
94  *   - Either context is IRQ and only IRQ handler can modify HW data,
95  *     including rings related fields,
96  *
97  *   - Or access to target mode variables from struct qla_tgt doesn't
98  *     cross those functions boundaries, except tgt_stop, which
99  *     additionally protected by irq_cmd_count.
100  */
101 /* Predefs for callbacks handed to qla2xxx LLD */
102 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
103 	struct atio_from_isp *pkt, uint8_t);
104 static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 	int fn, void *iocb, int flags);
107 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
108 	*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
109 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
110 	struct qla_tgt_srr_imm *imm, int ha_lock);
111 static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
112 	struct qla_tgt_cmd *cmd);
113 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 	struct atio_from_isp *atio, uint16_t status, int qfull);
115 static void qlt_disable_vha(struct scsi_qla_host *vha);
116 static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 	struct imm_ntfy_from_isp *ntfy,
119 	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
121 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
122 	struct imm_ntfy_from_isp *imm, int ha_locked);
123 /*
124  * Global Variables
125  */
126 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
127 static struct kmem_cache *qla_tgt_plogi_cachep;
128 static mempool_t *qla_tgt_mgmt_cmd_mempool;
129 static struct workqueue_struct *qla_tgt_wq;
130 static DEFINE_MUTEX(qla_tgt_mutex);
131 static LIST_HEAD(qla_tgt_glist);
132 
133 /* This API intentionally takes dest as a parameter, rather than returning
134  * int value to avoid caller forgetting to issue wmb() after the store */
135 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
136 {
137 	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
138 	*dest = atomic_inc_return(&base_vha->generation_tick);
139 	/* memory barrier */
140 	wmb();
141 }
142 
143 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
144 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
145 	struct qla_tgt *tgt,
146 	const uint8_t *port_name)
147 {
148 	struct qla_tgt_sess *sess;
149 
150 	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
151 		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
152 			return sess;
153 	}
154 
155 	return NULL;
156 }
157 
158 /* Might release hw lock, then reaquire!! */
159 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
160 {
161 	/* Send marker if required */
162 	if (unlikely(vha->marker_needed != 0)) {
163 		int rc = qla2x00_issue_marker(vha, vha_locked);
164 		if (rc != QLA_SUCCESS) {
165 			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
166 			    "qla_target(%d): issue_marker() failed\n",
167 			    vha->vp_idx);
168 		}
169 		return rc;
170 	}
171 	return QLA_SUCCESS;
172 }
173 
174 static inline
175 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
176 	uint8_t *d_id)
177 {
178 	struct qla_hw_data *ha = vha->hw;
179 	uint8_t vp_idx;
180 
181 	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
182 		return NULL;
183 
184 	if (vha->d_id.b.al_pa == d_id[2])
185 		return vha;
186 
187 	BUG_ON(ha->tgt.tgt_vp_map == NULL);
188 	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
189 	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
190 		return ha->tgt.tgt_vp_map[vp_idx].vha;
191 
192 	return NULL;
193 }
194 
195 static inline
196 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
197 	uint16_t vp_idx)
198 {
199 	struct qla_hw_data *ha = vha->hw;
200 
201 	if (vha->vp_idx == vp_idx)
202 		return vha;
203 
204 	BUG_ON(ha->tgt.tgt_vp_map == NULL);
205 	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
206 		return ha->tgt.tgt_vp_map[vp_idx].vha;
207 
208 	return NULL;
209 }
210 
211 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
212 {
213 	unsigned long flags;
214 
215 	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
216 
217 	vha->hw->tgt.num_pend_cmds++;
218 	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
219 		vha->hw->qla_stats.stat_max_pend_cmds =
220 			vha->hw->tgt.num_pend_cmds;
221 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
222 }
223 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
224 {
225 	unsigned long flags;
226 
227 	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
228 	vha->hw->tgt.num_pend_cmds--;
229 	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
230 }
231 
232 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
233 	struct atio_from_isp *atio, uint8_t ha_locked)
234 {
235 	ql_dbg(ql_dbg_tgt, vha, 0xe072,
236 		"%s: qla_target(%d): type %x ox_id %04x\n",
237 		__func__, vha->vp_idx, atio->u.raw.entry_type,
238 		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
239 
240 	switch (atio->u.raw.entry_type) {
241 	case ATIO_TYPE7:
242 	{
243 		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
244 		    atio->u.isp24.fcp_hdr.d_id);
245 		if (unlikely(NULL == host)) {
246 			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
247 			    "qla_target(%d): Received ATIO_TYPE7 "
248 			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
249 			    atio->u.isp24.fcp_hdr.d_id[0],
250 			    atio->u.isp24.fcp_hdr.d_id[1],
251 			    atio->u.isp24.fcp_hdr.d_id[2]);
252 			break;
253 		}
254 		qlt_24xx_atio_pkt(host, atio, ha_locked);
255 		break;
256 	}
257 
258 	case IMMED_NOTIFY_TYPE:
259 	{
260 		struct scsi_qla_host *host = vha;
261 		struct imm_ntfy_from_isp *entry =
262 		    (struct imm_ntfy_from_isp *)atio;
263 
264 		if ((entry->u.isp24.vp_index != 0xFF) &&
265 		    (entry->u.isp24.nport_handle != 0xFFFF)) {
266 			host = qlt_find_host_by_vp_idx(vha,
267 			    entry->u.isp24.vp_index);
268 			if (unlikely(!host)) {
269 				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
270 				    "qla_target(%d): Received "
271 				    "ATIO (IMMED_NOTIFY_TYPE) "
272 				    "with unknown vp_index %d\n",
273 				    vha->vp_idx, entry->u.isp24.vp_index);
274 				break;
275 			}
276 		}
277 		qlt_24xx_atio_pkt(host, atio, ha_locked);
278 		break;
279 	}
280 
281 	default:
282 		ql_dbg(ql_dbg_tgt, vha, 0xe040,
283 		    "qla_target(%d): Received unknown ATIO atio "
284 		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
285 		break;
286 	}
287 
288 	return false;
289 }
290 
291 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
292 {
293 	switch (pkt->entry_type) {
294 	case CTIO_CRC2:
295 		ql_dbg(ql_dbg_tgt, vha, 0xe073,
296 			"qla_target(%d):%s: CRC2 Response pkt\n",
297 			vha->vp_idx, __func__);
298 	case CTIO_TYPE7:
299 	{
300 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
301 		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
302 		    entry->vp_index);
303 		if (unlikely(!host)) {
304 			ql_dbg(ql_dbg_tgt, vha, 0xe041,
305 			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
306 			    "received, with unknown vp_index %d\n",
307 			    vha->vp_idx, entry->vp_index);
308 			break;
309 		}
310 		qlt_response_pkt(host, pkt);
311 		break;
312 	}
313 
314 	case IMMED_NOTIFY_TYPE:
315 	{
316 		struct scsi_qla_host *host = vha;
317 		struct imm_ntfy_from_isp *entry =
318 		    (struct imm_ntfy_from_isp *)pkt;
319 
320 		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
321 		if (unlikely(!host)) {
322 			ql_dbg(ql_dbg_tgt, vha, 0xe042,
323 			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
324 			    "received, with unknown vp_index %d\n",
325 			    vha->vp_idx, entry->u.isp24.vp_index);
326 			break;
327 		}
328 		qlt_response_pkt(host, pkt);
329 		break;
330 	}
331 
332 	case NOTIFY_ACK_TYPE:
333 	{
334 		struct scsi_qla_host *host = vha;
335 		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
336 
337 		if (0xFF != entry->u.isp24.vp_index) {
338 			host = qlt_find_host_by_vp_idx(vha,
339 			    entry->u.isp24.vp_index);
340 			if (unlikely(!host)) {
341 				ql_dbg(ql_dbg_tgt, vha, 0xe043,
342 				    "qla_target(%d): Response "
343 				    "pkt (NOTIFY_ACK_TYPE) "
344 				    "received, with unknown "
345 				    "vp_index %d\n", vha->vp_idx,
346 				    entry->u.isp24.vp_index);
347 				break;
348 			}
349 		}
350 		qlt_response_pkt(host, pkt);
351 		break;
352 	}
353 
354 	case ABTS_RECV_24XX:
355 	{
356 		struct abts_recv_from_24xx *entry =
357 		    (struct abts_recv_from_24xx *)pkt;
358 		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
359 		    entry->vp_index);
360 		if (unlikely(!host)) {
361 			ql_dbg(ql_dbg_tgt, vha, 0xe044,
362 			    "qla_target(%d): Response pkt "
363 			    "(ABTS_RECV_24XX) received, with unknown "
364 			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
365 			break;
366 		}
367 		qlt_response_pkt(host, pkt);
368 		break;
369 	}
370 
371 	case ABTS_RESP_24XX:
372 	{
373 		struct abts_resp_to_24xx *entry =
374 		    (struct abts_resp_to_24xx *)pkt;
375 		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
376 		    entry->vp_index);
377 		if (unlikely(!host)) {
378 			ql_dbg(ql_dbg_tgt, vha, 0xe045,
379 			    "qla_target(%d): Response pkt "
380 			    "(ABTS_RECV_24XX) received, with unknown "
381 			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
382 			break;
383 		}
384 		qlt_response_pkt(host, pkt);
385 		break;
386 	}
387 
388 	default:
389 		qlt_response_pkt(vha, pkt);
390 		break;
391 	}
392 
393 }
394 
395 /*
396  * All qlt_plogi_ack_t operations are protected by hardware_lock
397  */
398 
399 /*
400  * This is a zero-base ref-counting solution, since hardware_lock
401  * guarantees that ref_count is not modified concurrently.
402  * Upon successful return content of iocb is undefined
403  */
404 static qlt_plogi_ack_t *
405 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
406 		       struct imm_ntfy_from_isp *iocb)
407 {
408 	qlt_plogi_ack_t *pla;
409 
410 	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
411 		if (pla->id.b24 == id->b24) {
412 			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
413 			pla->iocb = *iocb;
414 			return pla;
415 		}
416 	}
417 
418 	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
419 	if (!pla) {
420 		ql_dbg(ql_dbg_async, vha, 0x5088,
421 		       "qla_target(%d): Allocation of plogi_ack failed\n",
422 		       vha->vp_idx);
423 		return NULL;
424 	}
425 
426 	pla->iocb = *iocb;
427 	pla->id = *id;
428 	list_add_tail(&pla->list, &vha->plogi_ack_list);
429 
430 	return pla;
431 }
432 
433 static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
434 {
435 	BUG_ON(!pla->ref_count);
436 	pla->ref_count--;
437 
438 	if (pla->ref_count)
439 		return;
440 
441 	ql_dbg(ql_dbg_async, vha, 0x5089,
442 	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
443 	    " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
444 	    pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
445 	    pla->iocb.u.isp24.port_id[0],
446 	    le16_to_cpu(pla->iocb.u.isp24.nport_handle),
447 	    pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
448 	qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
449 
450 	list_del(&pla->list);
451 	kmem_cache_free(qla_tgt_plogi_cachep, pla);
452 }
453 
454 static void
455 qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
456     struct qla_tgt_sess *sess, qlt_plogi_link_t link)
457 {
458 	/* Inc ref_count first because link might already be pointing at pla */
459 	pla->ref_count++;
460 
461 	if (sess->plogi_link[link])
462 		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
463 
464 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
465 	    "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
466 	    " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
467 	    pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
468 	    pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
469 	    pla->ref_count);
470 
471 	sess->plogi_link[link] = pla;
472 }
473 
474 typedef struct {
475 	/* These fields must be initialized by the caller */
476 	port_id_t id;
477 	/*
478 	 * number of cmds dropped while we were waiting for
479 	 * initiator to ack LOGO initialize to 1 if LOGO is
480 	 * triggered by a command, otherwise, to 0
481 	 */
482 	int cmd_count;
483 
484 	/* These fields are used by callee */
485 	struct list_head list;
486 } qlt_port_logo_t;
487 
488 static void
489 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
490 {
491 	qlt_port_logo_t *tmp;
492 	int res;
493 
494 	mutex_lock(&vha->vha_tgt.tgt_mutex);
495 
496 	list_for_each_entry(tmp, &vha->logo_list, list) {
497 		if (tmp->id.b24 == logo->id.b24) {
498 			tmp->cmd_count += logo->cmd_count;
499 			mutex_unlock(&vha->vha_tgt.tgt_mutex);
500 			return;
501 		}
502 	}
503 
504 	list_add_tail(&logo->list, &vha->logo_list);
505 
506 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
507 
508 	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
509 
510 	mutex_lock(&vha->vha_tgt.tgt_mutex);
511 	list_del(&logo->list);
512 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
513 
514 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
515 	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
516 	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
517 	    logo->cmd_count, res);
518 }
519 
520 static void qlt_free_session_done(struct work_struct *work)
521 {
522 	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
523 	    free_work);
524 	struct qla_tgt *tgt = sess->tgt;
525 	struct scsi_qla_host *vha = sess->vha;
526 	struct qla_hw_data *ha = vha->hw;
527 	unsigned long flags;
528 	bool logout_started = false;
529 	fc_port_t fcport;
530 
531 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
532 		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
533 		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
534 		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
535 		sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
536 		sess->logout_on_delete, sess->keep_nport_handle,
537 		sess->send_els_logo);
538 
539 	BUG_ON(!tgt);
540 
541 	if (sess->send_els_logo) {
542 		qlt_port_logo_t logo;
543 		logo.id = sess->s_id;
544 		logo.cmd_count = 0;
545 		qlt_send_first_logo(vha, &logo);
546 	}
547 
548 	if (sess->logout_on_delete) {
549 		int rc;
550 
551 		memset(&fcport, 0, sizeof(fcport));
552 		fcport.loop_id = sess->loop_id;
553 		fcport.d_id = sess->s_id;
554 		memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
555 		fcport.vha = vha;
556 		fcport.tgt_session = sess;
557 
558 		rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
559 		if (rc != QLA_SUCCESS)
560 			ql_log(ql_log_warn, vha, 0xf085,
561 			       "Schedule logo failed sess %p rc %d\n",
562 			       sess, rc);
563 		else
564 			logout_started = true;
565 	}
566 
567 	/*
568 	 * Release the target session for FC Nexus from fabric module code.
569 	 */
570 	if (sess->se_sess != NULL)
571 		ha->tgt.tgt_ops->free_session(sess);
572 
573 	if (logout_started) {
574 		bool traced = false;
575 
576 		while (!ACCESS_ONCE(sess->logout_completed)) {
577 			if (!traced) {
578 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
579 					"%s: waiting for sess %p logout\n",
580 					__func__, sess);
581 				traced = true;
582 			}
583 			msleep(100);
584 		}
585 
586 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
587 			"%s: sess %p logout completed\n",
588 			__func__, sess);
589 	}
590 
591 	spin_lock_irqsave(&ha->hardware_lock, flags);
592 
593 	{
594 		qlt_plogi_ack_t *own =
595 		    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
596 		qlt_plogi_ack_t *con =
597 		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
598 
599 		if (con) {
600 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
601 			    "se_sess %p / sess %p port %8phC is gone,"
602 			    " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
603 			    sess->se_sess, sess, sess->port_name,
604 			    own ? "releasing own PLOGI" :
605 			    "no own PLOGI pending",
606 			    own ? own->ref_count : -1,
607 			    con->iocb.u.isp24.port_name, con->ref_count);
608 			qlt_plogi_ack_unref(vha, con);
609 		} else {
610 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
611 			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
612 			    sess->se_sess, sess, sess->port_name,
613 			    own ? "releasing own PLOGI" :
614 			    "no own PLOGI pending",
615 			    own ? own->ref_count : -1);
616 		}
617 
618 		if (own)
619 			qlt_plogi_ack_unref(vha, own);
620 	}
621 
622 	list_del(&sess->sess_list_entry);
623 
624 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
625 
626 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
627 	    "Unregistration of sess %p finished\n", sess);
628 
629 	kfree(sess);
630 	/*
631 	 * We need to protect against race, when tgt is freed before or
632 	 * inside wake_up()
633 	 */
634 	tgt->sess_count--;
635 	if (tgt->sess_count == 0)
636 		wake_up_all(&tgt->waitQ);
637 }
638 
639 /* ha->tgt.sess_lock supposed to be held on entry */
640 void qlt_unreg_sess(struct qla_tgt_sess *sess)
641 {
642 	struct scsi_qla_host *vha = sess->vha;
643 
644 	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
645 
646 	if (!list_empty(&sess->del_list_entry))
647 		list_del_init(&sess->del_list_entry);
648 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
649 
650 	INIT_WORK(&sess->free_work, qlt_free_session_done);
651 	schedule_work(&sess->free_work);
652 }
653 EXPORT_SYMBOL(qlt_unreg_sess);
654 
655 
656 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
657 {
658 	struct qla_hw_data *ha = vha->hw;
659 	struct qla_tgt_sess *sess = NULL;
660 	uint32_t unpacked_lun, lun = 0;
661 	uint16_t loop_id;
662 	int res = 0;
663 	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
664 	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
665 	unsigned long flags;
666 
667 	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
668 	if (loop_id == 0xFFFF) {
669 		/* Global event */
670 		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
671 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
672 		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
673 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
674 #if 0 /* FIXME: do we need to choose a session here? */
675 		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
676 			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
677 			    typeof(*sess), sess_list_entry);
678 			switch (mcmd) {
679 			case QLA_TGT_NEXUS_LOSS_SESS:
680 				mcmd = QLA_TGT_NEXUS_LOSS;
681 				break;
682 			case QLA_TGT_ABORT_ALL_SESS:
683 				mcmd = QLA_TGT_ABORT_ALL;
684 				break;
685 			case QLA_TGT_NEXUS_LOSS:
686 			case QLA_TGT_ABORT_ALL:
687 				break;
688 			default:
689 				ql_dbg(ql_dbg_tgt, vha, 0xe046,
690 				    "qla_target(%d): Not allowed "
691 				    "command %x in %s", vha->vp_idx,
692 				    mcmd, __func__);
693 				sess = NULL;
694 				break;
695 			}
696 		} else
697 			sess = NULL;
698 #endif
699 	} else {
700 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
701 		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
702 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
703 	}
704 
705 	ql_dbg(ql_dbg_tgt, vha, 0xe000,
706 	    "Using sess for qla_tgt_reset: %p\n", sess);
707 	if (!sess) {
708 		res = -ESRCH;
709 		return res;
710 	}
711 
712 	ql_dbg(ql_dbg_tgt, vha, 0xe047,
713 	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
714 	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
715 	    mcmd, loop_id);
716 
717 	lun = a->u.isp24.fcp_cmnd.lun;
718 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
719 
720 	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
721 	    iocb, QLA24XX_MGMT_SEND_NACK);
722 }
723 
724 /* ha->tgt.sess_lock supposed to be held on entry */
725 static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
726 	bool immediate)
727 {
728 	struct qla_tgt *tgt = sess->tgt;
729 	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
730 
731 	if (sess->deleted) {
732 		/* Upgrade to unconditional deletion in case it was temporary */
733 		if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
734 			list_del(&sess->del_list_entry);
735 		else
736 			return;
737 	}
738 
739 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
740 	    "Scheduling sess %p for deletion\n", sess);
741 
742 	if (immediate) {
743 		dev_loss_tmo = 0;
744 		sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
745 		list_add(&sess->del_list_entry, &tgt->del_sess_list);
746 	} else {
747 		sess->deleted = QLA_SESS_DELETION_PENDING;
748 		list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
749 	}
750 
751 	sess->expires = jiffies + dev_loss_tmo * HZ;
752 
753 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
754 	    "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
755 	    " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
756 	    sess->vha->vp_idx, sess->port_name, sess->loop_id,
757 	    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
758 	    dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
759 	    sess->generation);
760 
761 	if (immediate)
762 		mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
763 	else
764 		schedule_delayed_work(&tgt->sess_del_work,
765 		    sess->expires - jiffies);
766 }
767 
768 /* ha->tgt.sess_lock supposed to be held on entry */
769 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
770 {
771 	struct qla_tgt_sess *sess;
772 
773 	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
774 		qlt_schedule_sess_for_deletion(sess, true);
775 
776 	/* At this point tgt could be already dead */
777 }
778 
779 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
780 	uint16_t *loop_id)
781 {
782 	struct qla_hw_data *ha = vha->hw;
783 	dma_addr_t gid_list_dma;
784 	struct gid_list_info *gid_list;
785 	char *id_iter;
786 	int res, rc, i;
787 	uint16_t entries;
788 
789 	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
790 	    &gid_list_dma, GFP_KERNEL);
791 	if (!gid_list) {
792 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
793 		    "qla_target(%d): DMA Alloc failed of %u\n",
794 		    vha->vp_idx, qla2x00_gid_list_size(ha));
795 		return -ENOMEM;
796 	}
797 
798 	/* Get list of logged in devices */
799 	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
800 	if (rc != QLA_SUCCESS) {
801 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
802 		    "qla_target(%d): get_id_list() failed: %x\n",
803 		    vha->vp_idx, rc);
804 		res = -EBUSY;
805 		goto out_free_id_list;
806 	}
807 
808 	id_iter = (char *)gid_list;
809 	res = -ENOENT;
810 	for (i = 0; i < entries; i++) {
811 		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
812 		if ((gid->al_pa == s_id[2]) &&
813 		    (gid->area == s_id[1]) &&
814 		    (gid->domain == s_id[0])) {
815 			*loop_id = le16_to_cpu(gid->loop_id);
816 			res = 0;
817 			break;
818 		}
819 		id_iter += ha->gid_list_info_size;
820 	}
821 
822 out_free_id_list:
823 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
824 	    gid_list, gid_list_dma);
825 	return res;
826 }
827 
828 /* ha->tgt.sess_lock supposed to be held on entry */
829 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
830 {
831 	BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
832 
833 	list_del_init(&sess->del_list_entry);
834 	sess->deleted = 0;
835 }
836 
837 static void qlt_del_sess_work_fn(struct delayed_work *work)
838 {
839 	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
840 	    sess_del_work);
841 	struct scsi_qla_host *vha = tgt->vha;
842 	struct qla_hw_data *ha = vha->hw;
843 	struct qla_tgt_sess *sess;
844 	unsigned long flags, elapsed;
845 
846 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
847 	while (!list_empty(&tgt->del_sess_list)) {
848 		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
849 		    del_list_entry);
850 		elapsed = jiffies;
851 		if (time_after_eq(elapsed, sess->expires)) {
852 			/* No turning back */
853 			list_del_init(&sess->del_list_entry);
854 			sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
855 
856 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
857 			    "Timeout: sess %p about to be deleted\n",
858 			    sess);
859 			ha->tgt.tgt_ops->shutdown_sess(sess);
860 			ha->tgt.tgt_ops->put_sess(sess);
861 		} else {
862 			schedule_delayed_work(&tgt->sess_del_work,
863 			    sess->expires - elapsed);
864 			break;
865 		}
866 	}
867 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
868 }
869 
870 /*
871  * Adds an extra ref to allow to drop hw lock after adding sess to the list.
872  * Caller must put it.
873  */
874 static struct qla_tgt_sess *qlt_create_sess(
875 	struct scsi_qla_host *vha,
876 	fc_port_t *fcport,
877 	bool local)
878 {
879 	struct qla_hw_data *ha = vha->hw;
880 	struct qla_tgt_sess *sess;
881 	unsigned long flags;
882 	unsigned char be_sid[3];
883 
884 	/* Check to avoid double sessions */
885 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
886 	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
887 				sess_list_entry) {
888 		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
889 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
890 			    "Double sess %p found (s_id %x:%x:%x, "
891 			    "loop_id %d), updating to d_id %x:%x:%x, "
892 			    "loop_id %d", sess, sess->s_id.b.domain,
893 			    sess->s_id.b.al_pa, sess->s_id.b.area,
894 			    sess->loop_id, fcport->d_id.b.domain,
895 			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
896 			    fcport->loop_id);
897 
898 			/* Cannot undelete at this point */
899 			if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
900 				spin_unlock_irqrestore(&ha->tgt.sess_lock,
901 				    flags);
902 				return NULL;
903 			}
904 
905 			if (sess->deleted)
906 				qlt_undelete_sess(sess);
907 
908 			kref_get(&sess->se_sess->sess_kref);
909 			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
910 						(fcport->flags & FCF_CONF_COMP_SUPPORTED));
911 
912 			if (sess->local && !local)
913 				sess->local = 0;
914 
915 			qlt_do_generation_tick(vha, &sess->generation);
916 
917 			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
918 
919 			return sess;
920 		}
921 	}
922 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
923 
924 	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
925 	if (!sess) {
926 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
927 		    "qla_target(%u): session allocation failed, all commands "
928 		    "from port %8phC will be refused", vha->vp_idx,
929 		    fcport->port_name);
930 
931 		return NULL;
932 	}
933 	sess->tgt = vha->vha_tgt.qla_tgt;
934 	sess->vha = vha;
935 	sess->s_id = fcport->d_id;
936 	sess->loop_id = fcport->loop_id;
937 	sess->local = local;
938 	INIT_LIST_HEAD(&sess->del_list_entry);
939 
940 	/* Under normal circumstances we want to logout from firmware when
941 	 * session eventually ends and release corresponding nport handle.
942 	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
943 	 * code will adjust these flags as necessary. */
944 	sess->logout_on_delete = 1;
945 	sess->keep_nport_handle = 0;
946 
947 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
948 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
949 	    sess, vha->vha_tgt.qla_tgt);
950 
951 	be_sid[0] = sess->s_id.b.domain;
952 	be_sid[1] = sess->s_id.b.area;
953 	be_sid[2] = sess->s_id.b.al_pa;
954 	/*
955 	 * Determine if this fc_port->port_name is allowed to access
956 	 * target mode using explict NodeACLs+MappedLUNs, or using
957 	 * TPG demo mode.  If this is successful a target mode FC nexus
958 	 * is created.
959 	 */
960 	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
961 	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
962 		kfree(sess);
963 		return NULL;
964 	}
965 	/*
966 	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
967 	 * access across ->tgt.sess_lock reaquire.
968 	 */
969 	kref_get(&sess->se_sess->sess_kref);
970 
971 	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
972 	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
973 	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
974 
975 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
976 	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
977 	vha->vha_tgt.qla_tgt->sess_count++;
978 	qlt_do_generation_tick(vha, &sess->generation);
979 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
980 
981 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
982 	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
983 	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
984 	    vha->vp_idx, local ?  "local " : "", fcport->port_name,
985 	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
986 	    sess->s_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");
987 
988 	return sess;
989 }
990 
991 /*
992  * Called from qla2x00_reg_remote_port()
993  */
994 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
995 {
996 	struct qla_hw_data *ha = vha->hw;
997 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
998 	struct qla_tgt_sess *sess;
999 	unsigned long flags;
1000 
1001 	if (!vha->hw->tgt.tgt_ops)
1002 		return;
1003 
1004 	if (!tgt || (fcport->port_type != FCT_INITIATOR))
1005 		return;
1006 
1007 	if (qla_ini_mode_enabled(vha))
1008 		return;
1009 
1010 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1011 	if (tgt->tgt_stop) {
1012 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1013 		return;
1014 	}
1015 	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
1016 	if (!sess) {
1017 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1018 
1019 		mutex_lock(&vha->vha_tgt.tgt_mutex);
1020 		sess = qlt_create_sess(vha, fcport, false);
1021 		mutex_unlock(&vha->vha_tgt.tgt_mutex);
1022 
1023 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1024 	} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1025 		/* Point of no return */
1026 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1027 		return;
1028 	} else {
1029 		kref_get(&sess->se_sess->sess_kref);
1030 
1031 		if (sess->deleted) {
1032 			qlt_undelete_sess(sess);
1033 
1034 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
1035 			    "qla_target(%u): %ssession for port %8phC "
1036 			    "(loop ID %d) reappeared\n", vha->vp_idx,
1037 			    sess->local ? "local " : "", sess->port_name,
1038 			    sess->loop_id);
1039 
1040 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
1041 			    "Reappeared sess %p\n", sess);
1042 		}
1043 		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
1044 					(fcport->flags & FCF_CONF_COMP_SUPPORTED));
1045 	}
1046 
1047 	if (sess && sess->local) {
1048 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
1049 		    "qla_target(%u): local session for "
1050 		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
1051 		    fcport->port_name, sess->loop_id);
1052 		sess->local = 0;
1053 	}
1054 	ha->tgt.tgt_ops->put_sess(sess);
1055 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1056 }
1057 
1058 /*
1059  * max_gen - specifies maximum session generation
1060  * at which this deletion requestion is still valid
1061  */
1062 void
1063 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1064 {
1065 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1066 	struct qla_tgt_sess *sess;
1067 	unsigned long flags;
1068 
1069 	if (!vha->hw->tgt.tgt_ops)
1070 		return;
1071 
1072 	if (!tgt)
1073 		return;
1074 
1075 	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1076 	if (tgt->tgt_stop) {
1077 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1078 		return;
1079 	}
1080 	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
1081 	if (!sess) {
1082 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1083 		return;
1084 	}
1085 
1086 	if (max_gen - sess->generation < 0) {
1087 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1088 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1089 		    "Ignoring stale deletion request for se_sess %p / sess %p"
1090 		    " for port %8phC, req_gen %d, sess_gen %d\n",
1091 		    sess->se_sess, sess, sess->port_name, max_gen,
1092 		    sess->generation);
1093 		return;
1094 	}
1095 
1096 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1097 
1098 	sess->local = 1;
1099 	qlt_schedule_sess_for_deletion(sess, false);
1100 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1101 }
1102 
1103 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1104 {
1105 	struct qla_hw_data *ha = tgt->ha;
1106 	unsigned long flags;
1107 	int res;
1108 	/*
1109 	 * We need to protect against race, when tgt is freed before or
1110 	 * inside wake_up()
1111 	 */
1112 	spin_lock_irqsave(&ha->hardware_lock, flags);
1113 	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1114 	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
1115 	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
1116 	res = (tgt->sess_count == 0);
1117 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1118 
1119 	return res;
1120 }
1121 
1122 /* Called by tcm_qla2xxx configfs code */
1123 int qlt_stop_phase1(struct qla_tgt *tgt)
1124 {
1125 	struct scsi_qla_host *vha = tgt->vha;
1126 	struct qla_hw_data *ha = tgt->ha;
1127 	unsigned long flags;
1128 
1129 	mutex_lock(&qla_tgt_mutex);
1130 	if (!vha->fc_vport) {
1131 		struct Scsi_Host *sh = vha->host;
1132 		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
1133 		bool npiv_vports;
1134 
1135 		spin_lock_irqsave(sh->host_lock, flags);
1136 		npiv_vports = (fc_host->npiv_vports_inuse);
1137 		spin_unlock_irqrestore(sh->host_lock, flags);
1138 
1139 		if (npiv_vports) {
1140 			mutex_unlock(&qla_tgt_mutex);
1141 			return -EPERM;
1142 		}
1143 	}
1144 	if (tgt->tgt_stop || tgt->tgt_stopped) {
1145 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1146 		    "Already in tgt->tgt_stop or tgt_stopped state\n");
1147 		mutex_unlock(&qla_tgt_mutex);
1148 		return -EPERM;
1149 	}
1150 
1151 	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1152 	    vha->host_no, vha);
1153 	/*
1154 	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
1155 	 * Lock is needed, because we still can get an incoming packet.
1156 	 */
1157 	mutex_lock(&vha->vha_tgt.tgt_mutex);
1158 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1159 	tgt->tgt_stop = 1;
1160 	qlt_clear_tgt_db(tgt);
1161 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1162 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
1163 	mutex_unlock(&qla_tgt_mutex);
1164 
1165 	flush_delayed_work(&tgt->sess_del_work);
1166 
1167 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
1168 	    "Waiting for sess works (tgt %p)", tgt);
1169 	spin_lock_irqsave(&tgt->sess_work_lock, flags);
1170 	while (!list_empty(&tgt->sess_works_list)) {
1171 		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1172 		flush_scheduled_work();
1173 		spin_lock_irqsave(&tgt->sess_work_lock, flags);
1174 	}
1175 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1176 
1177 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1178 	    "Waiting for tgt %p: list_empty(sess_list)=%d "
1179 	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
1180 	    tgt->sess_count);
1181 
1182 	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1183 
1184 	/* Big hammer */
1185 	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
1186 		qlt_disable_vha(vha);
1187 
1188 	/* Wait for sessions to clear out (just in case) */
1189 	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
1190 	return 0;
1191 }
1192 EXPORT_SYMBOL(qlt_stop_phase1);
1193 
1194 /* Called by tcm_qla2xxx configfs code */
1195 void qlt_stop_phase2(struct qla_tgt *tgt)
1196 {
1197 	struct qla_hw_data *ha = tgt->ha;
1198 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1199 	unsigned long flags;
1200 
1201 	if (tgt->tgt_stopped) {
1202 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1203 		    "Already in tgt->tgt_stopped state\n");
1204 		dump_stack();
1205 		return;
1206 	}
1207 
1208 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1209 	    "Waiting for %d IRQ commands to complete (tgt %p)",
1210 	    tgt->irq_cmd_count, tgt);
1211 
1212 	mutex_lock(&vha->vha_tgt.tgt_mutex);
1213 	spin_lock_irqsave(&ha->hardware_lock, flags);
1214 	while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
1215 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1216 		udelay(2);
1217 		spin_lock_irqsave(&ha->hardware_lock, flags);
1218 	}
1219 	tgt->tgt_stop = 0;
1220 	tgt->tgt_stopped = 1;
1221 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1222 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
1223 
1224 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
1225 	    tgt);
1226 }
1227 EXPORT_SYMBOL(qlt_stop_phase2);
1228 
1229 /* Called from qlt_remove_target() -> qla2x00_remove_one() */
1230 static void qlt_release(struct qla_tgt *tgt)
1231 {
1232 	scsi_qla_host_t *vha = tgt->vha;
1233 
1234 	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1235 		qlt_stop_phase2(tgt);
1236 
1237 	vha->vha_tgt.qla_tgt = NULL;
1238 
1239 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1240 	    "Release of tgt %p finished\n", tgt);
1241 
1242 	kfree(tgt);
1243 }
1244 
1245 /* ha->hardware_lock supposed to be held on entry */
1246 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1247 	const void *param, unsigned int param_size)
1248 {
1249 	struct qla_tgt_sess_work_param *prm;
1250 	unsigned long flags;
1251 
1252 	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1253 	if (!prm) {
1254 		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1255 		    "qla_target(%d): Unable to create session "
1256 		    "work, command will be refused", 0);
1257 		return -ENOMEM;
1258 	}
1259 
1260 	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1261 	    "Scheduling work (type %d, prm %p)"
1262 	    " to find session for param %p (size %d, tgt %p)\n",
1263 	    type, prm, param, param_size, tgt);
1264 
1265 	prm->type = type;
1266 	memcpy(&prm->tm_iocb, param, param_size);
1267 
1268 	spin_lock_irqsave(&tgt->sess_work_lock, flags);
1269 	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1270 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1271 
1272 	schedule_work(&tgt->sess_work);
1273 
1274 	return 0;
1275 }
1276 
1277 /*
1278  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1279  */
1280 static void qlt_send_notify_ack(struct scsi_qla_host *vha,
1281 	struct imm_ntfy_from_isp *ntfy,
1282 	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1283 	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1284 {
1285 	struct qla_hw_data *ha = vha->hw;
1286 	request_t *pkt;
1287 	struct nack_to_isp *nack;
1288 
1289 	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1290 
1291 	/* Send marker if required */
1292 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1293 		return;
1294 
1295 	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
1296 	if (!pkt) {
1297 		ql_dbg(ql_dbg_tgt, vha, 0xe049,
1298 		    "qla_target(%d): %s failed: unable to allocate "
1299 		    "request packet\n", vha->vp_idx, __func__);
1300 		return;
1301 	}
1302 
1303 	if (vha->vha_tgt.qla_tgt != NULL)
1304 		vha->vha_tgt.qla_tgt->notify_ack_expected++;
1305 
1306 	pkt->entry_type = NOTIFY_ACK_TYPE;
1307 	pkt->entry_count = 1;
1308 
1309 	nack = (struct nack_to_isp *)pkt;
1310 	nack->ox_id = ntfy->ox_id;
1311 
1312 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1313 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1314 		nack->u.isp24.flags = ntfy->u.isp24.flags &
1315 			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1316 	}
1317 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1318 	nack->u.isp24.status = ntfy->u.isp24.status;
1319 	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1320 	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
1321 	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1322 	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1323 	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1324 	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1325 	nack->u.isp24.srr_reject_code = srr_reject_code;
1326 	nack->u.isp24.srr_reject_code_expl = srr_explan;
1327 	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1328 
1329 	ql_dbg(ql_dbg_tgt, vha, 0xe005,
1330 	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
1331 	    vha->vp_idx, nack->u.isp24.status);
1332 
1333 	/* Memory Barrier */
1334 	wmb();
1335 	qla2x00_start_iocbs(vha, vha->req);
1336 }
1337 
1338 /*
1339  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1340  */
1341 static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
1342 	struct abts_recv_from_24xx *abts, uint32_t status,
1343 	bool ids_reversed)
1344 {
1345 	struct qla_hw_data *ha = vha->hw;
1346 	struct abts_resp_to_24xx *resp;
1347 	uint32_t f_ctl;
1348 	uint8_t *p;
1349 
1350 	ql_dbg(ql_dbg_tgt, vha, 0xe006,
1351 	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1352 	    ha, abts, status);
1353 
1354 	/* Send marker if required */
1355 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1356 		return;
1357 
1358 	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1359 	if (!resp) {
1360 		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1361 		    "qla_target(%d): %s failed: unable to allocate "
1362 		    "request packet", vha->vp_idx, __func__);
1363 		return;
1364 	}
1365 
1366 	resp->entry_type = ABTS_RESP_24XX;
1367 	resp->entry_count = 1;
1368 	resp->nport_handle = abts->nport_handle;
1369 	resp->vp_index = vha->vp_idx;
1370 	resp->sof_type = abts->sof_type;
1371 	resp->exchange_address = abts->exchange_address;
1372 	resp->fcp_hdr_le = abts->fcp_hdr_le;
1373 	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1374 	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1375 	    F_CTL_SEQ_INITIATIVE);
1376 	p = (uint8_t *)&f_ctl;
1377 	resp->fcp_hdr_le.f_ctl[0] = *p++;
1378 	resp->fcp_hdr_le.f_ctl[1] = *p++;
1379 	resp->fcp_hdr_le.f_ctl[2] = *p;
1380 	if (ids_reversed) {
1381 		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
1382 		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
1383 		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1384 		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1385 		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1386 		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1387 	} else {
1388 		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1389 		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1390 		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1391 		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1392 		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1393 		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1394 	}
1395 	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1396 	if (status == FCP_TMF_CMPL) {
1397 		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1398 		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1399 		resp->payload.ba_acct.low_seq_cnt = 0x0000;
1400 		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1401 		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1402 		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1403 	} else {
1404 		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1405 		resp->payload.ba_rjt.reason_code =
1406 			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1407 		/* Other bytes are zero */
1408 	}
1409 
1410 	vha->vha_tgt.qla_tgt->abts_resp_expected++;
1411 
1412 	/* Memory Barrier */
1413 	wmb();
1414 	qla2x00_start_iocbs(vha, vha->req);
1415 }
1416 
1417 /*
1418  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1419  */
1420 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1421 	struct abts_resp_from_24xx_fw *entry)
1422 {
1423 	struct ctio7_to_24xx *ctio;
1424 
1425 	ql_dbg(ql_dbg_tgt, vha, 0xe007,
1426 	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1427 	/* Send marker if required */
1428 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1429 		return;
1430 
1431 	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
1432 	if (ctio == NULL) {
1433 		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1434 		    "qla_target(%d): %s failed: unable to allocate "
1435 		    "request packet\n", vha->vp_idx, __func__);
1436 		return;
1437 	}
1438 
1439 	/*
1440 	 * We've got on entrance firmware's response on by us generated
1441 	 * ABTS response. So, in it ID fields are reversed.
1442 	 */
1443 
1444 	ctio->entry_type = CTIO_TYPE7;
1445 	ctio->entry_count = 1;
1446 	ctio->nport_handle = entry->nport_handle;
1447 	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
1448 	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1449 	ctio->vp_index = vha->vp_idx;
1450 	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1451 	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1452 	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1453 	ctio->exchange_addr = entry->exchange_addr_to_abort;
1454 	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1455 					    CTIO7_FLAGS_TERMINATE);
1456 	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1457 
1458 	/* Memory Barrier */
1459 	wmb();
1460 	qla2x00_start_iocbs(vha, vha->req);
1461 
1462 	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
1463 	    FCP_TMF_CMPL, true);
1464 }
1465 
1466 static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1467 {
1468 	struct qla_tgt_sess_op *op;
1469 	struct qla_tgt_cmd *cmd;
1470 
1471 	spin_lock(&vha->cmd_list_lock);
1472 
1473 	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1474 		if (tag == op->atio.u.isp24.exchange_addr) {
1475 			op->aborted = true;
1476 			spin_unlock(&vha->cmd_list_lock);
1477 			return 1;
1478 		}
1479 	}
1480 
1481 	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1482 		if (tag == cmd->atio.u.isp24.exchange_addr) {
1483 			cmd->aborted = 1;
1484 			spin_unlock(&vha->cmd_list_lock);
1485 			return 1;
1486 		}
1487 	}
1488 
1489 	spin_unlock(&vha->cmd_list_lock);
1490 	return 0;
1491 }
1492 
1493 /* drop cmds for the given lun
1494  * XXX only looks for cmds on the port through which lun reset was recieved
1495  * XXX does not go through the list of other port (which may have cmds
1496  *     for the same lun)
1497  */
1498 static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1499 				uint32_t lun, uint8_t *s_id)
1500 {
1501 	struct qla_tgt_sess_op *op;
1502 	struct qla_tgt_cmd *cmd;
1503 	uint32_t key;
1504 
1505 	key = sid_to_key(s_id);
1506 	spin_lock(&vha->cmd_list_lock);
1507 	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1508 		uint32_t op_key;
1509 		uint32_t op_lun;
1510 
1511 		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1512 		op_lun = scsilun_to_int(
1513 			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1514 		if (op_key == key && op_lun == lun)
1515 			op->aborted = true;
1516 	}
1517 	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1518 		uint32_t cmd_key;
1519 		uint32_t cmd_lun;
1520 
1521 		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1522 		cmd_lun = scsilun_to_int(
1523 			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1524 		if (cmd_key == key && cmd_lun == lun)
1525 			cmd->aborted = 1;
1526 	}
1527 	spin_unlock(&vha->cmd_list_lock);
1528 }
1529 
1530 /* ha->hardware_lock supposed to be held on entry */
1531 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1532 	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
1533 {
1534 	struct qla_hw_data *ha = vha->hw;
1535 	struct se_session *se_sess = sess->se_sess;
1536 	struct qla_tgt_mgmt_cmd *mcmd;
1537 	struct se_cmd *se_cmd;
1538 	u32 lun = 0;
1539 	int rc;
1540 	bool found_lun = false;
1541 
1542 	spin_lock(&se_sess->sess_cmd_lock);
1543 	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1544 		struct qla_tgt_cmd *cmd =
1545 			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
1546 		if (se_cmd->tag == abts->exchange_addr_to_abort) {
1547 			lun = cmd->unpacked_lun;
1548 			found_lun = true;
1549 			break;
1550 		}
1551 	}
1552 	spin_unlock(&se_sess->sess_cmd_lock);
1553 
1554 	/* cmd not in LIO lists, look in qla list */
1555 	if (!found_lun) {
1556 		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1557 			/* send TASK_ABORT response immediately */
1558 			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1559 			return 0;
1560 		} else {
1561 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1562 			    "unable to find cmd in driver or LIO for tag 0x%x\n",
1563 			    abts->exchange_addr_to_abort);
1564 			return -ENOENT;
1565 		}
1566 	}
1567 
1568 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1569 	    "qla_target(%d): task abort (tag=%d)\n",
1570 	    vha->vp_idx, abts->exchange_addr_to_abort);
1571 
1572 	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
1573 	if (mcmd == NULL) {
1574 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
1575 		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
1576 		    vha->vp_idx, __func__);
1577 		return -ENOMEM;
1578 	}
1579 	memset(mcmd, 0, sizeof(*mcmd));
1580 
1581 	mcmd->sess = sess;
1582 	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1583 	mcmd->reset_count = vha->hw->chip_reset;
1584 
1585 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
1586 	    abts->exchange_addr_to_abort);
1587 	if (rc != 0) {
1588 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
1589 		    "qla_target(%d):  tgt_ops->handle_tmr()"
1590 		    " failed: %d", vha->vp_idx, rc);
1591 		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1592 		return -EFAULT;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 /*
1599  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1600  */
1601 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1602 	struct abts_recv_from_24xx *abts)
1603 {
1604 	struct qla_hw_data *ha = vha->hw;
1605 	struct qla_tgt_sess *sess;
1606 	uint32_t tag = abts->exchange_addr_to_abort;
1607 	uint8_t s_id[3];
1608 	int rc;
1609 	unsigned long flags;
1610 
1611 	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1612 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
1613 		    "qla_target(%d): ABTS: Abort Sequence not "
1614 		    "supported\n", vha->vp_idx);
1615 		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1616 		return;
1617 	}
1618 
1619 	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1620 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
1621 		    "qla_target(%d): ABTS: Unknown Exchange "
1622 		    "Address received\n", vha->vp_idx);
1623 		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1624 		return;
1625 	}
1626 
1627 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
1628 	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
1629 	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
1630 	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
1631 	    le32_to_cpu(abts->fcp_hdr_le.parameter));
1632 
1633 	s_id[0] = abts->fcp_hdr_le.s_id[2];
1634 	s_id[1] = abts->fcp_hdr_le.s_id[1];
1635 	s_id[2] = abts->fcp_hdr_le.s_id[0];
1636 
1637 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1638 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1639 	if (!sess) {
1640 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
1641 		    "qla_target(%d): task abort for non-existant session\n",
1642 		    vha->vp_idx);
1643 		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
1644 		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1645 
1646 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1647 
1648 		if (rc != 0) {
1649 			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
1650 			    false);
1651 		}
1652 		return;
1653 	}
1654 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1655 
1656 
1657 	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1658 		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1659 		return;
1660 	}
1661 
1662 	rc = __qlt_24xx_handle_abts(vha, abts, sess);
1663 	if (rc != 0) {
1664 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
1665 		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1666 		    vha->vp_idx, rc);
1667 		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1668 		return;
1669 	}
1670 }
1671 
1672 /*
1673  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1674  */
1675 static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1676 	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
1677 {
1678 	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1679 	struct ctio7_to_24xx *ctio;
1680 	uint16_t temp;
1681 
1682 	ql_dbg(ql_dbg_tgt, ha, 0xe008,
1683 	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
1684 	    ha, atio, resp_code);
1685 
1686 	/* Send marker if required */
1687 	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1688 		return;
1689 
1690 	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1691 	if (ctio == NULL) {
1692 		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1693 		    "qla_target(%d): %s failed: unable to allocate "
1694 		    "request packet\n", ha->vp_idx, __func__);
1695 		return;
1696 	}
1697 
1698 	ctio->entry_type = CTIO_TYPE7;
1699 	ctio->entry_count = 1;
1700 	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1701 	ctio->nport_handle = mcmd->sess->loop_id;
1702 	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1703 	ctio->vp_index = ha->vp_idx;
1704 	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1705 	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1706 	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1707 	ctio->exchange_addr = atio->u.isp24.exchange_addr;
1708 	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1709 	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
1710 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1711 	ctio->u.status1.ox_id = cpu_to_le16(temp);
1712 	ctio->u.status1.scsi_status =
1713 	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1714 	ctio->u.status1.response_len = cpu_to_le16(8);
1715 	ctio->u.status1.sense_data[0] = resp_code;
1716 
1717 	/* Memory Barrier */
1718 	wmb();
1719 	qla2x00_start_iocbs(ha, ha->req);
1720 }
1721 
1722 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
1723 {
1724 	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1725 }
1726 EXPORT_SYMBOL(qlt_free_mcmd);
1727 
1728 /* callback from target fabric module code */
1729 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1730 {
1731 	struct scsi_qla_host *vha = mcmd->sess->vha;
1732 	struct qla_hw_data *ha = vha->hw;
1733 	unsigned long flags;
1734 
1735 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
1736 	    "TM response mcmd (%p) status %#x state %#x",
1737 	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);
1738 
1739 	spin_lock_irqsave(&ha->hardware_lock, flags);
1740 
1741 	if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
1742 		/*
1743 		 * Either the port is not online or this request was from
1744 		 * previous life, just abort the processing.
1745 		 */
1746 		ql_dbg(ql_dbg_async, vha, 0xe100,
1747 			"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
1748 			vha->flags.online, qla2x00_reset_active(vha),
1749 			mcmd->reset_count, ha->chip_reset);
1750 		ha->tgt.tgt_ops->free_mcmd(mcmd);
1751 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1752 		return;
1753 	}
1754 
1755 	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
1756 		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1757 		    0, 0, 0, 0, 0, 0);
1758 	else {
1759 		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
1760 			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1761 			    mcmd->fc_tm_rsp, false);
1762 		else
1763 			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
1764 			    mcmd->fc_tm_rsp);
1765 	}
1766 	/*
1767 	 * Make the callback for ->free_mcmd() to queue_work() and invoke
1768 	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
1769 	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1770 	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1771 	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1772 	 * qlt_xmit_tm_rsp() returns here..
1773 	 */
1774 	ha->tgt.tgt_ops->free_mcmd(mcmd);
1775 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1776 }
1777 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
1778 
1779 /* No locks */
1780 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
1781 {
1782 	struct qla_tgt_cmd *cmd = prm->cmd;
1783 
1784 	BUG_ON(cmd->sg_cnt == 0);
1785 
1786 	prm->sg = (struct scatterlist *)cmd->sg;
1787 	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
1788 	    cmd->sg_cnt, cmd->dma_data_direction);
1789 	if (unlikely(prm->seg_cnt == 0))
1790 		goto out_err;
1791 
1792 	prm->cmd->sg_mapped = 1;
1793 
1794 	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
1795 		/*
1796 		 * If greater than four sg entries then we need to allocate
1797 		 * the continuation entries
1798 		 */
1799 		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
1800 			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
1801 			prm->tgt->datasegs_per_cmd,
1802 			prm->tgt->datasegs_per_cont);
1803 	} else {
1804 		/* DIF */
1805 		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1806 		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
1807 			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
1808 			prm->tot_dsds = prm->seg_cnt;
1809 		} else
1810 			prm->tot_dsds = prm->seg_cnt;
1811 
1812 		if (cmd->prot_sg_cnt) {
1813 			prm->prot_sg      = cmd->prot_sg;
1814 			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
1815 				cmd->prot_sg, cmd->prot_sg_cnt,
1816 				cmd->dma_data_direction);
1817 			if (unlikely(prm->prot_seg_cnt == 0))
1818 				goto out_err;
1819 
1820 			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
1821 			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
1822 				/* Dif Bundling not support here */
1823 				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
1824 								cmd->blk_sz);
1825 				prm->tot_dsds += prm->prot_seg_cnt;
1826 			} else
1827 				prm->tot_dsds += prm->prot_seg_cnt;
1828 		}
1829 	}
1830 
1831 	return 0;
1832 
1833 out_err:
1834 	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
1835 	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1836 	    0, prm->cmd->sg_cnt);
1837 	return -1;
1838 }
1839 
1840 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
1841 {
1842 	struct qla_hw_data *ha = vha->hw;
1843 
1844 	if (!cmd->sg_mapped)
1845 		return;
1846 
1847 	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1848 	cmd->sg_mapped = 0;
1849 
1850 	if (cmd->prot_sg_cnt)
1851 		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
1852 			cmd->dma_data_direction);
1853 
1854 	if (cmd->ctx_dsd_alloced)
1855 		qla2x00_clean_dsd_pool(ha, NULL, cmd);
1856 
1857 	if (cmd->ctx)
1858 		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
1859 }
1860 
1861 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1862 	uint32_t req_cnt)
1863 {
1864 	uint32_t cnt, cnt_in;
1865 
1866 	if (vha->req->cnt < (req_cnt + 2)) {
1867 		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
1868 		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
1869 
1870 		if  (vha->req->ring_index < cnt)
1871 			vha->req->cnt = cnt - vha->req->ring_index;
1872 		else
1873 			vha->req->cnt = vha->req->length -
1874 			    (vha->req->ring_index - cnt);
1875 	}
1876 
1877 	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1878 		ql_dbg(ql_dbg_io, vha, 0x305a,
1879 		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
1880 		    vha->vp_idx, vha->req->ring_index,
1881 		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
1882 		return -EAGAIN;
1883 	}
1884 	vha->req->cnt -= req_cnt;
1885 
1886 	return 0;
1887 }
1888 
1889 /*
1890  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
1891  */
1892 static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1893 {
1894 	/* Adjust ring index. */
1895 	vha->req->ring_index++;
1896 	if (vha->req->ring_index == vha->req->length) {
1897 		vha->req->ring_index = 0;
1898 		vha->req->ring_ptr = vha->req->ring;
1899 	} else {
1900 		vha->req->ring_ptr++;
1901 	}
1902 	return (cont_entry_t *)vha->req->ring_ptr;
1903 }
1904 
1905 /* ha->hardware_lock supposed to be held on entry */
1906 static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
1907 {
1908 	struct qla_hw_data *ha = vha->hw;
1909 	uint32_t h;
1910 
1911 	h = ha->tgt.current_handle;
1912 	/* always increment cmd handle */
1913 	do {
1914 		++h;
1915 		if (h > DEFAULT_OUTSTANDING_COMMANDS)
1916 			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
1917 		if (h == ha->tgt.current_handle) {
1918 			ql_dbg(ql_dbg_io, vha, 0x305b,
1919 			    "qla_target(%d): Ran out of "
1920 			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1921 			h = QLA_TGT_NULL_HANDLE;
1922 			break;
1923 		}
1924 	} while ((h == QLA_TGT_NULL_HANDLE) ||
1925 	    (h == QLA_TGT_SKIP_HANDLE) ||
1926 	    (ha->tgt.cmds[h-1] != NULL));
1927 
1928 	if (h != QLA_TGT_NULL_HANDLE)
1929 		ha->tgt.current_handle = h;
1930 
1931 	return h;
1932 }
1933 
1934 /* ha->hardware_lock supposed to be held on entry */
1935 static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1936 	struct scsi_qla_host *vha)
1937 {
1938 	uint32_t h;
1939 	struct ctio7_to_24xx *pkt;
1940 	struct qla_hw_data *ha = vha->hw;
1941 	struct atio_from_isp *atio = &prm->cmd->atio;
1942 	uint16_t temp;
1943 
1944 	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1945 	prm->pkt = pkt;
1946 	memset(pkt, 0, sizeof(*pkt));
1947 
1948 	pkt->entry_type = CTIO_TYPE7;
1949 	pkt->entry_count = (uint8_t)prm->req_cnt;
1950 	pkt->vp_index = vha->vp_idx;
1951 
1952 	h = qlt_make_handle(vha);
1953 	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1954 		/*
1955 		 * CTIO type 7 from the firmware doesn't provide a way to
1956 		 * know the initiator's LOOP ID, hence we can't find
1957 		 * the session and, so, the command.
1958 		 */
1959 		return -EAGAIN;
1960 	} else
1961 		ha->tgt.cmds[h-1] = prm->cmd;
1962 
1963 	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
1964 	pkt->nport_handle = prm->cmd->loop_id;
1965 	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1966 	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1967 	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1968 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1969 	pkt->exchange_addr = atio->u.isp24.exchange_addr;
1970 	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1971 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1972 	pkt->u.status0.ox_id = cpu_to_le16(temp);
1973 	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
1974 
1975 	return 0;
1976 }
1977 
1978 /*
1979  * ha->hardware_lock supposed to be held on entry. We have already made sure
1980  * that there is sufficient amount of request entries to not drop it.
1981  */
1982 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
1983 	struct scsi_qla_host *vha)
1984 {
1985 	int cnt;
1986 	uint32_t *dword_ptr;
1987 	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1988 
1989 	/* Build continuation packets */
1990 	while (prm->seg_cnt > 0) {
1991 		cont_a64_entry_t *cont_pkt64 =
1992 			(cont_a64_entry_t *)qlt_get_req_pkt(vha);
1993 
1994 		/*
1995 		 * Make sure that from cont_pkt64 none of
1996 		 * 64-bit specific fields used for 32-bit
1997 		 * addressing. Cast to (cont_entry_t *) for
1998 		 * that.
1999 		 */
2000 
2001 		memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2002 
2003 		cont_pkt64->entry_count = 1;
2004 		cont_pkt64->sys_define = 0;
2005 
2006 		if (enable_64bit_addressing) {
2007 			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2008 			dword_ptr =
2009 			    (uint32_t *)&cont_pkt64->dseg_0_address;
2010 		} else {
2011 			cont_pkt64->entry_type = CONTINUE_TYPE;
2012 			dword_ptr =
2013 			    (uint32_t *)&((cont_entry_t *)
2014 				cont_pkt64)->dseg_0_address;
2015 		}
2016 
2017 		/* Load continuation entry data segments */
2018 		for (cnt = 0;
2019 		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
2020 		    cnt++, prm->seg_cnt--) {
2021 			*dword_ptr++ =
2022 			    cpu_to_le32(pci_dma_lo32
2023 				(sg_dma_address(prm->sg)));
2024 			if (enable_64bit_addressing) {
2025 				*dword_ptr++ =
2026 				    cpu_to_le32(pci_dma_hi32
2027 					(sg_dma_address
2028 					(prm->sg)));
2029 			}
2030 			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2031 
2032 			prm->sg = sg_next(prm->sg);
2033 		}
2034 	}
2035 }
2036 
2037 /*
2038  * ha->hardware_lock supposed to be held on entry. We have already made sure
2039  * that there is sufficient amount of request entries to not drop it.
2040  */
2041 static void qlt_load_data_segments(struct qla_tgt_prm *prm,
2042 	struct scsi_qla_host *vha)
2043 {
2044 	int cnt;
2045 	uint32_t *dword_ptr;
2046 	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
2047 	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2048 
2049 	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2050 
2051 	/* Setup packet address segment pointer */
2052 	dword_ptr = pkt24->u.status0.dseg_0_address;
2053 
2054 	/* Set total data segment count */
2055 	if (prm->seg_cnt)
2056 		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2057 
2058 	if (prm->seg_cnt == 0) {
2059 		/* No data transfer */
2060 		*dword_ptr++ = 0;
2061 		*dword_ptr = 0;
2062 		return;
2063 	}
2064 
2065 	/* If scatter gather */
2066 
2067 	/* Load command entry data segments */
2068 	for (cnt = 0;
2069 	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
2070 	    cnt++, prm->seg_cnt--) {
2071 		*dword_ptr++ =
2072 		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
2073 		if (enable_64bit_addressing) {
2074 			*dword_ptr++ =
2075 			    cpu_to_le32(pci_dma_hi32(
2076 				sg_dma_address(prm->sg)));
2077 		}
2078 		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
2079 
2080 		prm->sg = sg_next(prm->sg);
2081 	}
2082 
2083 	qlt_load_cont_data_segments(prm, vha);
2084 }
2085 
2086 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2087 {
2088 	return cmd->bufflen > 0;
2089 }
2090 
2091 /*
2092  * Called without ha->hardware_lock held
2093  */
2094 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2095 	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2096 	uint32_t *full_req_cnt)
2097 {
2098 	struct qla_tgt *tgt = cmd->tgt;
2099 	struct scsi_qla_host *vha = tgt->vha;
2100 	struct qla_hw_data *ha = vha->hw;
2101 	struct se_cmd *se_cmd = &cmd->se_cmd;
2102 
2103 	prm->cmd = cmd;
2104 	prm->tgt = tgt;
2105 	prm->rq_result = scsi_status;
2106 	prm->sense_buffer = &cmd->sense_buffer[0];
2107 	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2108 	prm->sg = NULL;
2109 	prm->seg_cnt = -1;
2110 	prm->req_cnt = 1;
2111 	prm->add_status_pkt = 0;
2112 
2113 	/* Send marker if required */
2114 	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2115 		return -EFAULT;
2116 
2117 	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2118 		if  (qlt_pci_map_calc_cnt(prm) != 0)
2119 			return -EAGAIN;
2120 	}
2121 
2122 	*full_req_cnt = prm->req_cnt;
2123 
2124 	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2125 		prm->residual = se_cmd->residual_count;
2126 		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
2127 		    "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2128 		       prm->residual, se_cmd->tag,
2129 		       se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2130 		       cmd->bufflen, prm->rq_result);
2131 		prm->rq_result |= SS_RESIDUAL_UNDER;
2132 	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2133 		prm->residual = se_cmd->residual_count;
2134 		ql_dbg(ql_dbg_io, vha, 0x305d,
2135 		    "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2136 		       prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2137 		       se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2138 		prm->rq_result |= SS_RESIDUAL_OVER;
2139 	}
2140 
2141 	if (xmit_type & QLA_TGT_XMIT_STATUS) {
2142 		/*
2143 		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2144 		 * ignored in *xmit_response() below
2145 		 */
2146 		if (qlt_has_data(cmd)) {
2147 			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2148 			    (IS_FWI2_CAPABLE(ha) &&
2149 			    (prm->rq_result != 0))) {
2150 				prm->add_status_pkt = 1;
2151 				(*full_req_cnt)++;
2152 			}
2153 		}
2154 	}
2155 
2156 	return 0;
2157 }
2158 
2159 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
2160 	struct qla_tgt_cmd *cmd, int sending_sense)
2161 {
2162 	if (ha->tgt.enable_class_2)
2163 		return 0;
2164 
2165 	if (sending_sense)
2166 		return cmd->conf_compl_supported;
2167 	else
2168 		return ha->tgt.enable_explicit_conf &&
2169 		    cmd->conf_compl_supported;
2170 }
2171 
2172 #ifdef CONFIG_QLA_TGT_DEBUG_SRR
2173 /*
2174  *  Original taken from the XFS code
2175  */
2176 static unsigned long qlt_srr_random(void)
2177 {
2178 	static int Inited;
2179 	static unsigned long RandomValue;
2180 	static DEFINE_SPINLOCK(lock);
2181 	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
2182 	register long rv;
2183 	register long lo;
2184 	register long hi;
2185 	unsigned long flags;
2186 
2187 	spin_lock_irqsave(&lock, flags);
2188 	if (!Inited) {
2189 		RandomValue = jiffies;
2190 		Inited = 1;
2191 	}
2192 	rv = RandomValue;
2193 	hi = rv / 127773;
2194 	lo = rv % 127773;
2195 	rv = 16807 * lo - 2836 * hi;
2196 	if (rv <= 0)
2197 		rv += 2147483647;
2198 	RandomValue = rv;
2199 	spin_unlock_irqrestore(&lock, flags);
2200 	return rv;
2201 }
2202 
2203 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
2204 {
2205 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */
2206 	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
2207 	    == 50) {
2208 		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
2209 		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
2210 		    "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
2211 	}
2212 #endif
2213 	/*
2214 	 * It's currently not possible to simulate SRRs for FCP_WRITE without
2215 	 * a physical link layer failure, so don't even try here..
2216 	 */
2217 	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
2218 		return;
2219 
2220 	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
2221 	    ((qlt_srr_random() % 100) == 20)) {
2222 		int i, leave = 0;
2223 		unsigned int tot_len = 0;
2224 
2225 		while (leave == 0)
2226 			leave = qlt_srr_random() % cmd->sg_cnt;
2227 
2228 		for (i = 0; i < leave; i++)
2229 			tot_len += cmd->sg[i].length;
2230 
2231 		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
2232 		    "Cutting cmd %p (tag %d) buffer"
2233 		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
2234 		    " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
2235 		    cmd->bufflen, cmd->sg_cnt);
2236 
2237 		cmd->bufflen = tot_len;
2238 		cmd->sg_cnt = leave;
2239 	}
2240 
2241 	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
2242 		unsigned int offset = qlt_srr_random() % cmd->bufflen;
2243 
2244 		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
2245 		    "Cutting cmd %p (tag %d) buffer head "
2246 		    "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
2247 		    cmd->bufflen);
2248 		if (offset == 0)
2249 			*xmit_type &= ~QLA_TGT_XMIT_DATA;
2250 		else if (qlt_set_data_offset(cmd, offset)) {
2251 			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
2252 			    "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
2253 		}
2254 	}
2255 }
2256 #else
2257 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
2258 {}
2259 #endif
2260 
2261 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2262 	struct qla_tgt_prm *prm)
2263 {
2264 	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2265 	    (uint32_t)sizeof(ctio->u.status1.sense_data));
2266 	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2267 	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
2268 		ctio->u.status0.flags |= cpu_to_le16(
2269 		    CTIO7_FLAGS_EXPLICIT_CONFORM |
2270 		    CTIO7_FLAGS_CONFORM_REQ);
2271 	}
2272 	ctio->u.status0.residual = cpu_to_le32(prm->residual);
2273 	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2274 	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2275 		int i;
2276 
2277 		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
2278 			if (prm->cmd->se_cmd.scsi_status != 0) {
2279 				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
2280 				    "Skipping EXPLICIT_CONFORM and "
2281 				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2282 				    "non GOOD status\n");
2283 				goto skip_explict_conf;
2284 			}
2285 			ctio->u.status1.flags |= cpu_to_le16(
2286 			    CTIO7_FLAGS_EXPLICIT_CONFORM |
2287 			    CTIO7_FLAGS_CONFORM_REQ);
2288 		}
2289 skip_explict_conf:
2290 		ctio->u.status1.flags &=
2291 		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2292 		ctio->u.status1.flags |=
2293 		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2294 		ctio->u.status1.scsi_status |=
2295 		    cpu_to_le16(SS_SENSE_LEN_VALID);
2296 		ctio->u.status1.sense_length =
2297 		    cpu_to_le16(prm->sense_buffer_len);
2298 		for (i = 0; i < prm->sense_buffer_len/4; i++)
2299 			((uint32_t *)ctio->u.status1.sense_data)[i] =
2300 				cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
2301 #if 0
2302 		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
2303 			static int q;
2304 			if (q < 10) {
2305 				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
2306 				    "qla_target(%d): %d bytes of sense "
2307 				    "lost", prm->tgt->ha->vp_idx,
2308 				    prm->sense_buffer_len % 4);
2309 				q++;
2310 			}
2311 		}
2312 #endif
2313 	} else {
2314 		ctio->u.status1.flags &=
2315 		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2316 		ctio->u.status1.flags |=
2317 		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2318 		ctio->u.status1.sense_length = 0;
2319 		memset(ctio->u.status1.sense_data, 0,
2320 		    sizeof(ctio->u.status1.sense_data));
2321 	}
2322 
2323 	/* Sense with len > 24, is it possible ??? */
2324 }
2325 
2326 
2327 
2328 /* diff  */
2329 static inline int
2330 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2331 {
2332 	/*
2333 	 * Uncomment when corresponding SCSI changes are done.
2334 	 *
2335 	 if (!sp->cmd->prot_chk)
2336 	 return 0;
2337 	 *
2338 	 */
2339 	switch (se_cmd->prot_op) {
2340 	case TARGET_PROT_DOUT_INSERT:
2341 	case TARGET_PROT_DIN_STRIP:
2342 		if (ql2xenablehba_err_chk >= 1)
2343 			return 1;
2344 		break;
2345 	case TARGET_PROT_DOUT_PASS:
2346 	case TARGET_PROT_DIN_PASS:
2347 		if (ql2xenablehba_err_chk >= 2)
2348 			return 1;
2349 		break;
2350 	case TARGET_PROT_DIN_INSERT:
2351 	case TARGET_PROT_DOUT_STRIP:
2352 		return 1;
2353 	default:
2354 		break;
2355 	}
2356 	return 0;
2357 }
2358 
2359 /*
2360  * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
2361  *
2362  */
2363 static inline void
2364 qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
2365 {
2366 	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2367 
2368 	/* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
2369 	 * have been immplemented by TCM, before AppTag is avail.
2370 	 * Look for modesense_handlers[]
2371 	 */
2372 	ctx->app_tag = 0;
2373 	ctx->app_tag_mask[0] = 0x0;
2374 	ctx->app_tag_mask[1] = 0x0;
2375 
2376 	switch (se_cmd->prot_type) {
2377 	case TARGET_DIF_TYPE0_PROT:
2378 		/*
2379 		 * No check for ql2xenablehba_err_chk, as it would be an
2380 		 * I/O error if hba tag generation is not done.
2381 		 */
2382 		ctx->ref_tag = cpu_to_le32(lba);
2383 
2384 		if (!qlt_hba_err_chk_enabled(se_cmd))
2385 			break;
2386 
2387 		/* enable ALL bytes of the ref tag */
2388 		ctx->ref_tag_mask[0] = 0xff;
2389 		ctx->ref_tag_mask[1] = 0xff;
2390 		ctx->ref_tag_mask[2] = 0xff;
2391 		ctx->ref_tag_mask[3] = 0xff;
2392 		break;
2393 	/*
2394 	 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
2395 	 * 16 bit app tag.
2396 	 */
2397 	case TARGET_DIF_TYPE1_PROT:
2398 		ctx->ref_tag = cpu_to_le32(lba);
2399 
2400 		if (!qlt_hba_err_chk_enabled(se_cmd))
2401 			break;
2402 
2403 		/* enable ALL bytes of the ref tag */
2404 		ctx->ref_tag_mask[0] = 0xff;
2405 		ctx->ref_tag_mask[1] = 0xff;
2406 		ctx->ref_tag_mask[2] = 0xff;
2407 		ctx->ref_tag_mask[3] = 0xff;
2408 		break;
2409 	/*
2410 	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
2411 	 * match LBA in CDB + N
2412 	 */
2413 	case TARGET_DIF_TYPE2_PROT:
2414 		ctx->ref_tag = cpu_to_le32(lba);
2415 
2416 		if (!qlt_hba_err_chk_enabled(se_cmd))
2417 			break;
2418 
2419 		/* enable ALL bytes of the ref tag */
2420 		ctx->ref_tag_mask[0] = 0xff;
2421 		ctx->ref_tag_mask[1] = 0xff;
2422 		ctx->ref_tag_mask[2] = 0xff;
2423 		ctx->ref_tag_mask[3] = 0xff;
2424 		break;
2425 
2426 	/* For Type 3 protection: 16 bit GUARD only */
2427 	case TARGET_DIF_TYPE3_PROT:
2428 		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2429 			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2430 		break;
2431 	}
2432 }
2433 
2434 
2435 static inline int
2436 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2437 {
2438 	uint32_t		*cur_dsd;
2439 	uint32_t		transfer_length = 0;
2440 	uint32_t		data_bytes;
2441 	uint32_t		dif_bytes;
2442 	uint8_t			bundling = 1;
2443 	uint8_t			*clr_ptr;
2444 	struct crc_context	*crc_ctx_pkt = NULL;
2445 	struct qla_hw_data	*ha;
2446 	struct ctio_crc2_to_fw	*pkt;
2447 	dma_addr_t		crc_ctx_dma;
2448 	uint16_t		fw_prot_opts = 0;
2449 	struct qla_tgt_cmd	*cmd = prm->cmd;
2450 	struct se_cmd		*se_cmd = &cmd->se_cmd;
2451 	uint32_t h;
2452 	struct atio_from_isp *atio = &prm->cmd->atio;
2453 	uint16_t t16;
2454 
2455 	ha = vha->hw;
2456 
2457 	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2458 	prm->pkt = pkt;
2459 	memset(pkt, 0, sizeof(*pkt));
2460 
2461 	ql_dbg(ql_dbg_tgt, vha, 0xe071,
2462 		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2463 		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2464 		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2465 
2466 	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2467 	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2468 		bundling = 0;
2469 
2470 	/* Compute dif len and adjust data len to incude protection */
2471 	data_bytes = cmd->bufflen;
2472 	dif_bytes  = (data_bytes / cmd->blk_sz) * 8;
2473 
2474 	switch (se_cmd->prot_op) {
2475 	case TARGET_PROT_DIN_INSERT:
2476 	case TARGET_PROT_DOUT_STRIP:
2477 		transfer_length = data_bytes;
2478 		data_bytes += dif_bytes;
2479 		break;
2480 
2481 	case TARGET_PROT_DIN_STRIP:
2482 	case TARGET_PROT_DOUT_INSERT:
2483 	case TARGET_PROT_DIN_PASS:
2484 	case TARGET_PROT_DOUT_PASS:
2485 		transfer_length = data_bytes + dif_bytes;
2486 		break;
2487 
2488 	default:
2489 		BUG();
2490 		break;
2491 	}
2492 
2493 	if (!qlt_hba_err_chk_enabled(se_cmd))
2494 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2495 	/* HBA error checking enabled */
2496 	else if (IS_PI_UNINIT_CAPABLE(ha)) {
2497 		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2498 		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2499 			fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2500 		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2501 			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2502 	}
2503 
2504 	switch (se_cmd->prot_op) {
2505 	case TARGET_PROT_DIN_INSERT:
2506 	case TARGET_PROT_DOUT_INSERT:
2507 		fw_prot_opts |= PO_MODE_DIF_INSERT;
2508 		break;
2509 	case TARGET_PROT_DIN_STRIP:
2510 	case TARGET_PROT_DOUT_STRIP:
2511 		fw_prot_opts |= PO_MODE_DIF_REMOVE;
2512 		break;
2513 	case TARGET_PROT_DIN_PASS:
2514 	case TARGET_PROT_DOUT_PASS:
2515 		fw_prot_opts |= PO_MODE_DIF_PASS;
2516 		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2517 		break;
2518 	default:/* Normal Request */
2519 		fw_prot_opts |= PO_MODE_DIF_PASS;
2520 		break;
2521 	}
2522 
2523 
2524 	/* ---- PKT ---- */
2525 	/* Update entry type to indicate Command Type CRC_2 IOCB */
2526 	pkt->entry_type  = CTIO_CRC2;
2527 	pkt->entry_count = 1;
2528 	pkt->vp_index = vha->vp_idx;
2529 
2530 	h = qlt_make_handle(vha);
2531 	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2532 		/*
2533 		 * CTIO type 7 from the firmware doesn't provide a way to
2534 		 * know the initiator's LOOP ID, hence we can't find
2535 		 * the session and, so, the command.
2536 		 */
2537 		return -EAGAIN;
2538 	} else
2539 		ha->tgt.cmds[h-1] = prm->cmd;
2540 
2541 
2542 	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
2543 	pkt->nport_handle = prm->cmd->loop_id;
2544 	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2545 	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2546 	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2547 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2548 	pkt->exchange_addr   = atio->u.isp24.exchange_addr;
2549 
2550 	/* silence compile warning */
2551 	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2552 	pkt->ox_id  = cpu_to_le16(t16);
2553 
2554 	t16 = (atio->u.isp24.attr << 9);
2555 	pkt->flags |= cpu_to_le16(t16);
2556 	pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2557 
2558 	/* Set transfer direction */
2559 	if (cmd->dma_data_direction == DMA_TO_DEVICE)
2560 		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2561 	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2562 		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2563 
2564 
2565 	pkt->dseg_count = prm->tot_dsds;
2566 	/* Fibre channel byte count */
2567 	pkt->transfer_length = cpu_to_le32(transfer_length);
2568 
2569 
2570 	/* ----- CRC context -------- */
2571 
2572 	/* Allocate CRC context from global pool */
2573 	crc_ctx_pkt = cmd->ctx =
2574 	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2575 
2576 	if (!crc_ctx_pkt)
2577 		goto crc_queuing_error;
2578 
2579 	/* Zero out CTX area. */
2580 	clr_ptr = (uint8_t *)crc_ctx_pkt;
2581 	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2582 
2583 	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2584 	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2585 
2586 	/* Set handle */
2587 	crc_ctx_pkt->handle = pkt->handle;
2588 
2589 	qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2590 
2591 	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2592 	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2593 	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2594 
2595 
2596 	if (!bundling) {
2597 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2598 	} else {
2599 		/*
2600 		 * Configure Bundling if we need to fetch interlaving
2601 		 * protection PCI accesses
2602 		 */
2603 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2604 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2605 		crc_ctx_pkt->u.bundling.dseg_count =
2606 			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2607 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2608 	}
2609 
2610 	/* Finish the common fields of CRC pkt */
2611 	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
2612 	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
2613 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2614 	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
2615 
2616 
2617 	/* Walks data segments */
2618 	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2619 
2620 	if (!bundling && prm->prot_seg_cnt) {
2621 		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2622 			prm->tot_dsds, cmd))
2623 			goto crc_queuing_error;
2624 	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2625 		(prm->tot_dsds - prm->prot_seg_cnt), cmd))
2626 		goto crc_queuing_error;
2627 
2628 	if (bundling && prm->prot_seg_cnt) {
2629 		/* Walks dif segments */
2630 		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2631 
2632 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2633 		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2634 			prm->prot_seg_cnt, cmd))
2635 			goto crc_queuing_error;
2636 	}
2637 	return QLA_SUCCESS;
2638 
2639 crc_queuing_error:
2640 	/* Cleanup will be performed by the caller */
2641 
2642 	return QLA_FUNCTION_FAILED;
2643 }
2644 
2645 
2646 /*
2647  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
2648  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2649  */
2650 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2651 	uint8_t scsi_status)
2652 {
2653 	struct scsi_qla_host *vha = cmd->vha;
2654 	struct qla_hw_data *ha = vha->hw;
2655 	struct ctio7_to_24xx *pkt;
2656 	struct qla_tgt_prm prm;
2657 	uint32_t full_req_cnt = 0;
2658 	unsigned long flags = 0;
2659 	int res;
2660 
2661 	spin_lock_irqsave(&ha->hardware_lock, flags);
2662 	if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2663 		cmd->state = QLA_TGT_STATE_PROCESSED;
2664 		if (cmd->sess->logout_completed)
2665 			/* no need to terminate. FW already freed exchange. */
2666 			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2667 		else
2668 			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
2669 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2670 		return 0;
2671 	}
2672 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2673 
2674 	memset(&prm, 0, sizeof(prm));
2675 	qlt_check_srr_debug(cmd, &xmit_type);
2676 
2677 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2678 	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2679 	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
2680 	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2681 	    &cmd->se_cmd);
2682 
2683 	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2684 	    &full_req_cnt);
2685 	if (unlikely(res != 0)) {
2686 		return res;
2687 	}
2688 
2689 	spin_lock_irqsave(&ha->hardware_lock, flags);
2690 
2691 	if (xmit_type == QLA_TGT_XMIT_STATUS)
2692 		vha->tgt_counters.core_qla_snd_status++;
2693 	else
2694 		vha->tgt_counters.core_qla_que_buf++;
2695 
2696 	if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
2697 		/*
2698 		 * Either the port is not online or this request was from
2699 		 * previous life, just abort the processing.
2700 		 */
2701 		cmd->state = QLA_TGT_STATE_PROCESSED;
2702 		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2703 		ql_dbg(ql_dbg_async, vha, 0xe101,
2704 			"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
2705 			vha->flags.online, qla2x00_reset_active(vha),
2706 			cmd->reset_count, ha->chip_reset);
2707 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2708 		return 0;
2709 	}
2710 
2711 	/* Does F/W have an IOCBs for this request */
2712 	res = qlt_check_reserve_free_req(vha, full_req_cnt);
2713 	if (unlikely(res))
2714 		goto out_unmap_unlock;
2715 
2716 	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2717 		res = qlt_build_ctio_crc2_pkt(&prm, vha);
2718 	else
2719 		res = qlt_24xx_build_ctio_pkt(&prm, vha);
2720 	if (unlikely(res != 0)) {
2721 		vha->req->cnt += full_req_cnt;
2722 		goto out_unmap_unlock;
2723 	}
2724 
2725 	pkt = (struct ctio7_to_24xx *)prm.pkt;
2726 
2727 	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2728 		pkt->u.status0.flags |=
2729 		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2730 			CTIO7_FLAGS_STATUS_MODE_0);
2731 
2732 		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2733 			qlt_load_data_segments(&prm, vha);
2734 
2735 		if (prm.add_status_pkt == 0) {
2736 			if (xmit_type & QLA_TGT_XMIT_STATUS) {
2737 				pkt->u.status0.scsi_status =
2738 				    cpu_to_le16(prm.rq_result);
2739 				pkt->u.status0.residual =
2740 				    cpu_to_le32(prm.residual);
2741 				pkt->u.status0.flags |= cpu_to_le16(
2742 				    CTIO7_FLAGS_SEND_STATUS);
2743 				if (qlt_need_explicit_conf(ha, cmd, 0)) {
2744 					pkt->u.status0.flags |=
2745 					    cpu_to_le16(
2746 						CTIO7_FLAGS_EXPLICIT_CONFORM |
2747 						CTIO7_FLAGS_CONFORM_REQ);
2748 				}
2749 			}
2750 
2751 		} else {
2752 			/*
2753 			 * We have already made sure that there is sufficient
2754 			 * amount of request entries to not drop HW lock in
2755 			 * req_pkt().
2756 			 */
2757 			struct ctio7_to_24xx *ctio =
2758 				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2759 
2760 			ql_dbg(ql_dbg_io, vha, 0x305e,
2761 			    "Building additional status packet 0x%p.\n",
2762 			    ctio);
2763 
2764 			/*
2765 			 * T10Dif: ctio_crc2_to_fw overlay ontop of
2766 			 * ctio7_to_24xx
2767 			 */
2768 			memcpy(ctio, pkt, sizeof(*ctio));
2769 			/* reset back to CTIO7 */
2770 			ctio->entry_count = 1;
2771 			ctio->entry_type = CTIO_TYPE7;
2772 			ctio->dseg_count = 0;
2773 			ctio->u.status1.flags &= ~cpu_to_le16(
2774 			    CTIO7_FLAGS_DATA_IN);
2775 
2776 			/* Real finish is ctio_m1's finish */
2777 			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2778 			pkt->u.status0.flags |= cpu_to_le16(
2779 			    CTIO7_FLAGS_DONT_RET_CTIO);
2780 
2781 			/* qlt_24xx_init_ctio_to_isp will correct
2782 			 * all neccessary fields that's part of CTIO7.
2783 			 * There should be no residual of CTIO-CRC2 data.
2784 			 */
2785 			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2786 			    &prm);
2787 			pr_debug("Status CTIO7: %p\n", ctio);
2788 		}
2789 	} else
2790 		qlt_24xx_init_ctio_to_isp(pkt, &prm);
2791 
2792 
2793 	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2794 	cmd->cmd_sent_to_fw = 1;
2795 
2796 	/* Memory Barrier */
2797 	wmb();
2798 	qla2x00_start_iocbs(vha, vha->req);
2799 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2800 
2801 	return 0;
2802 
2803 out_unmap_unlock:
2804 	qlt_unmap_sg(vha, cmd);
2805 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2806 
2807 	return res;
2808 }
2809 EXPORT_SYMBOL(qlt_xmit_response);
2810 
2811 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2812 {
2813 	struct ctio7_to_24xx *pkt;
2814 	struct scsi_qla_host *vha = cmd->vha;
2815 	struct qla_hw_data *ha = vha->hw;
2816 	struct qla_tgt *tgt = cmd->tgt;
2817 	struct qla_tgt_prm prm;
2818 	unsigned long flags;
2819 	int res = 0;
2820 
2821 	memset(&prm, 0, sizeof(prm));
2822 	prm.cmd = cmd;
2823 	prm.tgt = tgt;
2824 	prm.sg = NULL;
2825 	prm.req_cnt = 1;
2826 
2827 	/* Send marker if required */
2828 	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2829 		return -EIO;
2830 
2831 	/* Calculate number of entries and segments required */
2832 	if (qlt_pci_map_calc_cnt(&prm) != 0)
2833 		return -EAGAIN;
2834 
2835 	spin_lock_irqsave(&ha->hardware_lock, flags);
2836 
2837 	if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
2838 	    (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2839 		/*
2840 		 * Either the port is not online or this request was from
2841 		 * previous life, just abort the processing.
2842 		 */
2843 		cmd->state = QLA_TGT_STATE_NEED_DATA;
2844 		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2845 		ql_dbg(ql_dbg_async, vha, 0xe102,
2846 			"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2847 			vha->flags.online, qla2x00_reset_active(vha),
2848 			cmd->reset_count, ha->chip_reset);
2849 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2850 		return 0;
2851 	}
2852 
2853 	/* Does F/W have an IOCBs for this request */
2854 	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2855 	if (res != 0)
2856 		goto out_unlock_free_unmap;
2857 	if (cmd->se_cmd.prot_op)
2858 		res = qlt_build_ctio_crc2_pkt(&prm, vha);
2859 	else
2860 		res = qlt_24xx_build_ctio_pkt(&prm, vha);
2861 
2862 	if (unlikely(res != 0)) {
2863 		vha->req->cnt += prm.req_cnt;
2864 		goto out_unlock_free_unmap;
2865 	}
2866 
2867 	pkt = (struct ctio7_to_24xx *)prm.pkt;
2868 	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2869 	    CTIO7_FLAGS_STATUS_MODE_0);
2870 
2871 	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2872 		qlt_load_data_segments(&prm, vha);
2873 
2874 	cmd->state = QLA_TGT_STATE_NEED_DATA;
2875 	cmd->cmd_sent_to_fw = 1;
2876 
2877 	/* Memory Barrier */
2878 	wmb();
2879 	qla2x00_start_iocbs(vha, vha->req);
2880 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2881 
2882 	return res;
2883 
2884 out_unlock_free_unmap:
2885 	qlt_unmap_sg(vha, cmd);
2886 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2887 
2888 	return res;
2889 }
2890 EXPORT_SYMBOL(qlt_rdy_to_xfer);
2891 
2892 
2893 /*
2894  * Checks the guard or meta-data for the type of error
2895  * detected by the HBA.
2896  */
2897 static inline int
2898 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2899 		struct ctio_crc_from_fw *sts)
2900 {
2901 	uint8_t		*ap = &sts->actual_dif[0];
2902 	uint8_t		*ep = &sts->expected_dif[0];
2903 	uint32_t	e_ref_tag, a_ref_tag;
2904 	uint16_t	e_app_tag, a_app_tag;
2905 	uint16_t	e_guard, a_guard;
2906 	uint64_t	lba = cmd->se_cmd.t_task_lba;
2907 
2908 	a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
2909 	a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2910 	a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2911 
2912 	e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
2913 	e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2914 	e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
2915 
2916 	ql_dbg(ql_dbg_tgt, vha, 0xe075,
2917 	    "iocb(s) %p Returned STATUS.\n", sts);
2918 
2919 	ql_dbg(ql_dbg_tgt, vha, 0xf075,
2920 	    "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2921 	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2922 	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2923 
2924 	/*
2925 	 * Ignore sector if:
2926 	 * For type     3: ref & app tag is all 'f's
2927 	 * For type 0,1,2: app tag is all 'f's
2928 	 */
2929 	if ((a_app_tag == 0xffff) &&
2930 	    ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2931 	     (a_ref_tag == 0xffffffff))) {
2932 		uint32_t blocks_done;
2933 
2934 		/* 2TB boundary case covered automatically with this */
2935 		blocks_done = e_ref_tag - (uint32_t)lba + 1;
2936 		cmd->se_cmd.bad_sector = e_ref_tag;
2937 		cmd->se_cmd.pi_err = 0;
2938 		ql_dbg(ql_dbg_tgt, vha, 0xf074,
2939 			"need to return scsi good\n");
2940 
2941 		/* Update protection tag */
2942 		if (cmd->prot_sg_cnt) {
2943 			uint32_t i, k = 0, num_ent;
2944 			struct scatterlist *sg, *sgl;
2945 
2946 
2947 			sgl = cmd->prot_sg;
2948 
2949 			/* Patch the corresponding protection tags */
2950 			for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2951 				num_ent = sg_dma_len(sg) / 8;
2952 				if (k + num_ent < blocks_done) {
2953 					k += num_ent;
2954 					continue;
2955 				}
2956 				k = blocks_done;
2957 				break;
2958 			}
2959 
2960 			if (k != blocks_done) {
2961 				ql_log(ql_log_warn, vha, 0xf076,
2962 				    "unexpected tag values tag:lba=%u:%llu)\n",
2963 				    e_ref_tag, (unsigned long long)lba);
2964 				goto out;
2965 			}
2966 
2967 #if 0
2968 			struct sd_dif_tuple *spt;
2969 			/* TODO:
2970 			 * This section came from initiator. Is it valid here?
2971 			 * should ulp be override with actual val???
2972 			 */
2973 			spt = page_address(sg_page(sg)) + sg->offset;
2974 			spt += j;
2975 
2976 			spt->app_tag = 0xffff;
2977 			if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2978 				spt->ref_tag = 0xffffffff;
2979 #endif
2980 		}
2981 
2982 		return 0;
2983 	}
2984 
2985 	/* check guard */
2986 	if (e_guard != a_guard) {
2987 		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2988 		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2989 
2990 		ql_log(ql_log_warn, vha, 0xe076,
2991 		    "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2992 		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2993 		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2994 		    a_guard, e_guard, cmd);
2995 		goto out;
2996 	}
2997 
2998 	/* check ref tag */
2999 	if (e_ref_tag != a_ref_tag) {
3000 		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
3001 		cmd->se_cmd.bad_sector = e_ref_tag;
3002 
3003 		ql_log(ql_log_warn, vha, 0xe077,
3004 			"Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3005 			cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3006 			a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3007 			a_guard, e_guard, cmd);
3008 		goto out;
3009 	}
3010 
3011 	/* check appl tag */
3012 	if (e_app_tag != a_app_tag) {
3013 		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
3014 		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
3015 
3016 		ql_log(ql_log_warn, vha, 0xe078,
3017 			"App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
3018 			cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
3019 			a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
3020 			a_guard, e_guard, cmd);
3021 		goto out;
3022 	}
3023 out:
3024 	return 1;
3025 }
3026 
3027 
3028 /* If hardware_lock held on entry, might drop it, then reaquire */
3029 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3030 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3031 	struct imm_ntfy_from_isp *ntfy)
3032 {
3033 	struct nack_to_isp *nack;
3034 	struct qla_hw_data *ha = vha->hw;
3035 	request_t *pkt;
3036 	int ret = 0;
3037 
3038 	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3039 	    "Sending TERM ELS CTIO (ha=%p)\n", ha);
3040 
3041 	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3042 	if (pkt == NULL) {
3043 		ql_dbg(ql_dbg_tgt, vha, 0xe080,
3044 		    "qla_target(%d): %s failed: unable to allocate "
3045 		    "request packet\n", vha->vp_idx, __func__);
3046 		return -ENOMEM;
3047 	}
3048 
3049 	pkt->entry_type = NOTIFY_ACK_TYPE;
3050 	pkt->entry_count = 1;
3051 	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3052 
3053 	nack = (struct nack_to_isp *)pkt;
3054 	nack->ox_id = ntfy->ox_id;
3055 
3056 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3057 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3058 		nack->u.isp24.flags = ntfy->u.isp24.flags &
3059 			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3060 	}
3061 
3062 	/* terminate */
3063 	nack->u.isp24.flags |=
3064 		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3065 
3066 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3067 	nack->u.isp24.status = ntfy->u.isp24.status;
3068 	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3069 	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3070 	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3071 	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3072 	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3073 	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3074 
3075 	qla2x00_start_iocbs(vha, vha->req);
3076 	return ret;
3077 }
3078 
3079 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3080 	struct imm_ntfy_from_isp *imm, int ha_locked)
3081 {
3082 	unsigned long flags = 0;
3083 	int rc;
3084 
3085 	if (qlt_issue_marker(vha, ha_locked) < 0)
3086 		return;
3087 
3088 	if (ha_locked) {
3089 		rc = __qlt_send_term_imm_notif(vha, imm);
3090 
3091 #if 0	/* Todo  */
3092 		if (rc == -ENOMEM)
3093 			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3094 #endif
3095 		goto done;
3096 	}
3097 
3098 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3099 	rc = __qlt_send_term_imm_notif(vha, imm);
3100 
3101 #if 0	/* Todo */
3102 	if (rc == -ENOMEM)
3103 		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3104 #endif
3105 
3106 done:
3107 	if (!ha_locked)
3108 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3109 }
3110 
3111 /* If hardware_lock held on entry, might drop it, then reaquire */
3112 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3113 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
3114 	struct qla_tgt_cmd *cmd,
3115 	struct atio_from_isp *atio)
3116 {
3117 	struct ctio7_to_24xx *ctio24;
3118 	struct qla_hw_data *ha = vha->hw;
3119 	request_t *pkt;
3120 	int ret = 0;
3121 	uint16_t temp;
3122 
3123 	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3124 
3125 	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
3126 	if (pkt == NULL) {
3127 		ql_dbg(ql_dbg_tgt, vha, 0xe050,
3128 		    "qla_target(%d): %s failed: unable to allocate "
3129 		    "request packet\n", vha->vp_idx, __func__);
3130 		return -ENOMEM;
3131 	}
3132 
3133 	if (cmd != NULL) {
3134 		if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3135 			ql_dbg(ql_dbg_tgt, vha, 0xe051,
3136 			    "qla_target(%d): Terminating cmd %p with "
3137 			    "incorrect state %d\n", vha->vp_idx, cmd,
3138 			    cmd->state);
3139 		} else
3140 			ret = 1;
3141 	}
3142 
3143 	vha->tgt_counters.num_term_xchg_sent++;
3144 	pkt->entry_count = 1;
3145 	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3146 
3147 	ctio24 = (struct ctio7_to_24xx *)pkt;
3148 	ctio24->entry_type = CTIO_TYPE7;
3149 	ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
3150 	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3151 	ctio24->vp_index = vha->vp_idx;
3152 	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3153 	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3154 	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3155 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3156 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3157 	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
3158 		CTIO7_FLAGS_TERMINATE);
3159 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3160 	ctio24->u.status1.ox_id = cpu_to_le16(temp);
3161 
3162 	/* Most likely, it isn't needed */
3163 	ctio24->u.status1.residual = get_unaligned((uint32_t *)
3164 	    &atio->u.isp24.fcp_cmnd.add_cdb[
3165 	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
3166 	if (ctio24->u.status1.residual != 0)
3167 		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3168 
3169 	/* Memory Barrier */
3170 	wmb();
3171 	qla2x00_start_iocbs(vha, vha->req);
3172 	return ret;
3173 }
3174 
3175 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3176 	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3177 	int ul_abort)
3178 {
3179 	unsigned long flags = 0;
3180 	int rc;
3181 
3182 	if (qlt_issue_marker(vha, ha_locked) < 0)
3183 		return;
3184 
3185 	if (ha_locked) {
3186 		rc = __qlt_send_term_exchange(vha, cmd, atio);
3187 		if (rc == -ENOMEM)
3188 			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3189 		goto done;
3190 	}
3191 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
3192 	rc = __qlt_send_term_exchange(vha, cmd, atio);
3193 	if (rc == -ENOMEM)
3194 		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3195 
3196 done:
3197 	if (cmd && !ul_abort && !cmd->aborted) {
3198 		if (cmd->sg_mapped)
3199 			qlt_unmap_sg(vha, cmd);
3200 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
3201 	}
3202 
3203 	if (!ha_locked)
3204 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3205 
3206 	return;
3207 }
3208 
3209 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3210 {
3211 	struct list_head free_list;
3212 	struct qla_tgt_cmd *cmd, *tcmd;
3213 
3214 	vha->hw->tgt.leak_exchg_thresh_hold =
3215 	    (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3216 
3217 	cmd = tcmd = NULL;
3218 	if (!list_empty(&vha->hw->tgt.q_full_list)) {
3219 		INIT_LIST_HEAD(&free_list);
3220 		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3221 
3222 		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3223 			list_del(&cmd->cmd_list);
3224 			/* This cmd was never sent to TCM.  There is no need
3225 			 * to schedule free or call free_cmd
3226 			 */
3227 			qlt_free_cmd(cmd);
3228 			vha->hw->tgt.num_qfull_cmds_alloc--;
3229 		}
3230 	}
3231 	vha->hw->tgt.num_qfull_cmds_dropped = 0;
3232 }
3233 
3234 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3235 {
3236 	uint32_t total_leaked;
3237 
3238 	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3239 
3240 	if (vha->hw->tgt.leak_exchg_thresh_hold &&
3241 	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3242 
3243 		ql_dbg(ql_dbg_tgt, vha, 0xe079,
3244 		    "Chip reset due to exchange starvation: %d/%d.\n",
3245 		    total_leaked, vha->hw->cur_fw_xcb_count);
3246 
3247 		if (IS_P3P_TYPE(vha->hw))
3248 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3249 		else
3250 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3251 		qla2xxx_wake_dpc(vha);
3252 	}
3253 
3254 }
3255 
3256 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3257 {
3258 	struct qla_tgt *tgt = cmd->tgt;
3259 	struct scsi_qla_host *vha = tgt->vha;
3260 	struct se_cmd *se_cmd = &cmd->se_cmd;
3261 	unsigned long flags;
3262 
3263 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3264 	    "qla_target(%d): terminating exchange for aborted cmd=%p "
3265 	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3266 	    se_cmd->tag);
3267 
3268 	spin_lock_irqsave(&cmd->cmd_lock, flags);
3269 	if (cmd->aborted) {
3270 		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3271 		/*
3272 		 * It's normal to see 2 calls in this path:
3273 		 *  1) XFER Rdy completion + CMD_T_ABORT
3274 		 *  2) TCM TMR - drain_state_list
3275 		 */
3276 	        ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
3277 			"multiple abort. %p transport_state %x, t_state %x,"
3278 			" se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
3279 			cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
3280 		return EIO;
3281 	}
3282 	cmd->aborted = 1;
3283 	cmd->cmd_flags |= BIT_6;
3284 	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3285 
3286 	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
3287 	return 0;
3288 }
3289 EXPORT_SYMBOL(qlt_abort_cmd);
3290 
3291 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3292 {
3293 	struct qla_tgt_sess *sess = cmd->sess;
3294 
3295 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3296 	    "%s: se_cmd[%p] ox_id %04x\n",
3297 	    __func__, &cmd->se_cmd,
3298 	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3299 
3300 	BUG_ON(cmd->cmd_in_wq);
3301 
3302 	if (cmd->sg_mapped)
3303 		qlt_unmap_sg(cmd->vha, cmd);
3304 
3305 	if (!cmd->q_full)
3306 		qlt_decr_num_pend_cmds(cmd->vha);
3307 
3308 	BUG_ON(cmd->sg_mapped);
3309 	cmd->jiffies_at_free = get_jiffies_64();
3310 	if (unlikely(cmd->free_sg))
3311 		kfree(cmd->sg);
3312 
3313 	if (!sess || !sess->se_sess) {
3314 		WARN_ON(1);
3315 		return;
3316 	}
3317 	cmd->jiffies_at_free = get_jiffies_64();
3318 	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3319 }
3320 EXPORT_SYMBOL(qlt_free_cmd);
3321 
3322 /* ha->hardware_lock supposed to be held on entry */
3323 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
3324 	struct qla_tgt_cmd *cmd, void *ctio)
3325 {
3326 	struct qla_tgt_srr_ctio *sc;
3327 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3328 	struct qla_tgt_srr_imm *imm;
3329 
3330 	tgt->ctio_srr_id++;
3331 	cmd->cmd_flags |= BIT_15;
3332 
3333 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
3334 	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
3335 
3336 	if (!ctio) {
3337 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
3338 		    "qla_target(%d): SRR CTIO, but ctio is NULL\n",
3339 		    vha->vp_idx);
3340 		return -EINVAL;
3341 	}
3342 
3343 	sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
3344 	if (sc != NULL) {
3345 		sc->cmd = cmd;
3346 		/* IRQ is already OFF */
3347 		spin_lock(&tgt->srr_lock);
3348 		sc->srr_id = tgt->ctio_srr_id;
3349 		list_add_tail(&sc->srr_list_entry,
3350 		    &tgt->srr_ctio_list);
3351 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
3352 		    "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
3353 		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3354 			int found = 0;
3355 			list_for_each_entry(imm, &tgt->srr_imm_list,
3356 			    srr_list_entry) {
3357 				if (imm->srr_id == sc->srr_id) {
3358 					found = 1;
3359 					break;
3360 				}
3361 			}
3362 			if (found) {
3363 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
3364 				    "Scheduling srr work\n");
3365 				schedule_work(&tgt->srr_work);
3366 			} else {
3367 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
3368 				    "qla_target(%d): imm_srr_id "
3369 				    "== ctio_srr_id (%d), but there is no "
3370 				    "corresponding SRR IMM, deleting CTIO "
3371 				    "SRR %p\n", vha->vp_idx,
3372 				    tgt->ctio_srr_id, sc);
3373 				list_del(&sc->srr_list_entry);
3374 				spin_unlock(&tgt->srr_lock);
3375 
3376 				kfree(sc);
3377 				return -EINVAL;
3378 			}
3379 		}
3380 		spin_unlock(&tgt->srr_lock);
3381 	} else {
3382 		struct qla_tgt_srr_imm *ti;
3383 
3384 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
3385 		    "qla_target(%d): Unable to allocate SRR CTIO entry\n",
3386 		    vha->vp_idx);
3387 		spin_lock(&tgt->srr_lock);
3388 		list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
3389 		    srr_list_entry) {
3390 			if (imm->srr_id == tgt->ctio_srr_id) {
3391 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
3392 				    "IMM SRR %p deleted (id %d)\n",
3393 				    imm, imm->srr_id);
3394 				list_del(&imm->srr_list_entry);
3395 				qlt_reject_free_srr_imm(vha, imm, 1);
3396 			}
3397 		}
3398 		spin_unlock(&tgt->srr_lock);
3399 
3400 		return -ENOMEM;
3401 	}
3402 
3403 	return 0;
3404 }
3405 
3406 /*
3407  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3408  */
3409 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3410 	struct qla_tgt_cmd *cmd, uint32_t status)
3411 {
3412 	int term = 0;
3413 
3414 	if (ctio != NULL) {
3415 		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3416 		term = !(c->flags &
3417 		    cpu_to_le16(OF_TERM_EXCH));
3418 	} else
3419 		term = 1;
3420 
3421 	if (term)
3422 		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3423 
3424 	return term;
3425 }
3426 
3427 /* ha->hardware_lock supposed to be held on entry */
3428 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
3429 	uint32_t handle)
3430 {
3431 	struct qla_hw_data *ha = vha->hw;
3432 
3433 	handle--;
3434 	if (ha->tgt.cmds[handle] != NULL) {
3435 		struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
3436 		ha->tgt.cmds[handle] = NULL;
3437 		return cmd;
3438 	} else
3439 		return NULL;
3440 }
3441 
3442 /* ha->hardware_lock supposed to be held on entry */
3443 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3444 	uint32_t handle, void *ctio)
3445 {
3446 	struct qla_tgt_cmd *cmd = NULL;
3447 
3448 	/* Clear out internal marks */
3449 	handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
3450 	    CTIO_INTERMEDIATE_HANDLE_MARK);
3451 
3452 	if (handle != QLA_TGT_NULL_HANDLE) {
3453 		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
3454 			return NULL;
3455 
3456 		/* handle-1 is actually used */
3457 		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
3458 			ql_dbg(ql_dbg_tgt, vha, 0xe052,
3459 			    "qla_target(%d): Wrong handle %x received\n",
3460 			    vha->vp_idx, handle);
3461 			return NULL;
3462 		}
3463 		cmd = qlt_get_cmd(vha, handle);
3464 		if (unlikely(cmd == NULL)) {
3465 			ql_dbg(ql_dbg_tgt, vha, 0xe053,
3466 			    "qla_target(%d): Suspicious: unable to "
3467 			    "find the command with handle %x\n", vha->vp_idx,
3468 			    handle);
3469 			return NULL;
3470 		}
3471 	} else if (ctio != NULL) {
3472 		/* We can't get loop ID from CTIO7 */
3473 		ql_dbg(ql_dbg_tgt, vha, 0xe054,
3474 		    "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
3475 		    "support NULL handles\n", vha->vp_idx);
3476 		return NULL;
3477 	}
3478 
3479 	return cmd;
3480 }
3481 
3482 /* hardware_lock should be held by caller. */
3483 static void
3484 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3485 {
3486 	struct qla_hw_data *ha = vha->hw;
3487 	uint32_t handle;
3488 
3489 	if (cmd->sg_mapped)
3490 		qlt_unmap_sg(vha, cmd);
3491 
3492 	handle = qlt_make_handle(vha);
3493 
3494 	/* TODO: fix debug message type and ids. */
3495 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3496 		ql_dbg(ql_dbg_io, vha, 0xff00,
3497 		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3498 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3499 		cmd->write_data_transferred = 0;
3500 		cmd->state = QLA_TGT_STATE_DATA_IN;
3501 
3502 		ql_dbg(ql_dbg_io, vha, 0xff01,
3503 		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3504 
3505 		ha->tgt.tgt_ops->handle_data(cmd);
3506 		return;
3507 	} else {
3508 		ql_dbg(ql_dbg_io, vha, 0xff03,
3509 		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3510 		    cmd->state);
3511 		dump_stack();
3512 	}
3513 
3514 	cmd->cmd_flags |= BIT_17;
3515 	ha->tgt.tgt_ops->free_cmd(cmd);
3516 }
3517 
3518 void
3519 qlt_host_reset_handler(struct qla_hw_data *ha)
3520 {
3521 	struct qla_tgt_cmd *cmd;
3522 	unsigned long flags;
3523 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3524 	scsi_qla_host_t *vha = NULL;
3525 	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3526 	uint32_t i;
3527 
3528 	if (!base_vha->hw->tgt.tgt_ops)
3529 		return;
3530 
3531 	if (!tgt || qla_ini_mode_enabled(base_vha)) {
3532 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3533 			"Target mode disabled\n");
3534 		return;
3535 	}
3536 
3537 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3538 	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3539 	    base_vha->dpc_flags);
3540 
3541 	spin_lock_irqsave(&ha->hardware_lock, flags);
3542 	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3543 		cmd = qlt_get_cmd(base_vha, i);
3544 		if (!cmd)
3545 			continue;
3546 		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3547 		vha = cmd->vha;
3548 		qlt_abort_cmd_on_host_reset(vha, cmd);
3549 	}
3550 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3551 }
3552 
3553 
3554 /*
3555  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3556  */
3557 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3558 	uint32_t status, void *ctio)
3559 {
3560 	struct qla_hw_data *ha = vha->hw;
3561 	struct se_cmd *se_cmd;
3562 	struct qla_tgt_cmd *cmd;
3563 
3564 	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3565 		/* That could happen only in case of an error/reset/abort */
3566 		if (status != CTIO_SUCCESS) {
3567 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3568 			    "Intermediate CTIO received"
3569 			    " (status %x)\n", status);
3570 		}
3571 		return;
3572 	}
3573 
3574 	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3575 	if (cmd == NULL)
3576 		return;
3577 
3578 	se_cmd = &cmd->se_cmd;
3579 	cmd->cmd_sent_to_fw = 0;
3580 
3581 	qlt_unmap_sg(vha, cmd);
3582 
3583 	if (unlikely(status != CTIO_SUCCESS)) {
3584 		switch (status & 0xFFFF) {
3585 		case CTIO_LIP_RESET:
3586 		case CTIO_TARGET_RESET:
3587 		case CTIO_ABORTED:
3588 			/* driver request abort via Terminate exchange */
3589 		case CTIO_TIMEOUT:
3590 		case CTIO_INVALID_RX_ID:
3591 			/* They are OK */
3592 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3593 			    "qla_target(%d): CTIO with "
3594 			    "status %#x received, state %x, se_cmd %p, "
3595 			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3596 			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3597 			    status, cmd->state, se_cmd);
3598 			break;
3599 
3600 		case CTIO_PORT_LOGGED_OUT:
3601 		case CTIO_PORT_UNAVAILABLE:
3602 		{
3603 			int logged_out =
3604 				(status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3605 
3606 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3607 			    "qla_target(%d): CTIO with %s status %x "
3608 			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
3609 			    logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3610 			    status, cmd->state, se_cmd);
3611 
3612 			if (logged_out && cmd->sess) {
3613 				/*
3614 				 * Session is already logged out, but we need
3615 				 * to notify initiator, who's not aware of this
3616 				 */
3617 				cmd->sess->logout_on_delete = 0;
3618 				cmd->sess->send_els_logo = 1;
3619 				qlt_schedule_sess_for_deletion(cmd->sess, true);
3620 			}
3621 			break;
3622 		}
3623 		case CTIO_SRR_RECEIVED:
3624 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3625 			    "qla_target(%d): CTIO with SRR_RECEIVED"
3626 			    " status %x received (state %x, se_cmd %p)\n",
3627 			    vha->vp_idx, status, cmd->state, se_cmd);
3628 			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3629 				break;
3630 			else
3631 				return;
3632 
3633 		case CTIO_DIF_ERROR: {
3634 			struct ctio_crc_from_fw *crc =
3635 				(struct ctio_crc_from_fw *)ctio;
3636 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3637 			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3638 			    vha->vp_idx, status, cmd->state, se_cmd,
3639 			    *((u64 *)&crc->actual_dif[0]),
3640 			    *((u64 *)&crc->expected_dif[0]));
3641 
3642 			if (qlt_handle_dif_error(vha, cmd, ctio)) {
3643 				if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3644 					/* scsi Write/xfer rdy complete */
3645 					goto skip_term;
3646 				} else {
3647 					/* scsi read/xmit respond complete
3648 					 * call handle dif to send scsi status
3649 					 * rather than terminate exchange.
3650 					 */
3651 					cmd->state = QLA_TGT_STATE_PROCESSED;
3652 					ha->tgt.tgt_ops->handle_dif_err(cmd);
3653 					return;
3654 				}
3655 			} else {
3656 				/* Need to generate a SCSI good completion.
3657 				 * because FW did not send scsi status.
3658 				 */
3659 				status = 0;
3660 				goto skip_term;
3661 			}
3662 			break;
3663 		}
3664 		default:
3665 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3666 			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3667 			    vha->vp_idx, status, cmd->state, se_cmd);
3668 			break;
3669 		}
3670 
3671 
3672 		/* "cmd->aborted" means
3673 		 * cmd is already aborted/terminated, we don't
3674 		 * need to terminate again.  The exchange is already
3675 		 * cleaned up/freed at FW level.  Just cleanup at driver
3676 		 * level.
3677 		 */
3678 		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3679 		    (!cmd->aborted)) {
3680 			cmd->cmd_flags |= BIT_13;
3681 			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3682 				return;
3683 		}
3684 	}
3685 skip_term:
3686 
3687 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3688 		cmd->cmd_flags |= BIT_12;
3689 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3690 		cmd->state = QLA_TGT_STATE_DATA_IN;
3691 
3692 		if (status == CTIO_SUCCESS)
3693 			cmd->write_data_transferred = 1;
3694 
3695 		ha->tgt.tgt_ops->handle_data(cmd);
3696 		return;
3697 	} else if (cmd->aborted) {
3698 		cmd->cmd_flags |= BIT_18;
3699 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3700 		  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3701 	} else {
3702 		cmd->cmd_flags |= BIT_19;
3703 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3704 		    "qla_target(%d): A command in state (%d) should "
3705 		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3706 	}
3707 
3708 	if (unlikely(status != CTIO_SUCCESS) &&
3709 		!cmd->aborted) {
3710 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3711 		dump_stack();
3712 	}
3713 
3714 	ha->tgt.tgt_ops->free_cmd(cmd);
3715 }
3716 
3717 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3718 	uint8_t task_codes)
3719 {
3720 	int fcp_task_attr;
3721 
3722 	switch (task_codes) {
3723 	case ATIO_SIMPLE_QUEUE:
3724 		fcp_task_attr = TCM_SIMPLE_TAG;
3725 		break;
3726 	case ATIO_HEAD_OF_QUEUE:
3727 		fcp_task_attr = TCM_HEAD_TAG;
3728 		break;
3729 	case ATIO_ORDERED_QUEUE:
3730 		fcp_task_attr = TCM_ORDERED_TAG;
3731 		break;
3732 	case ATIO_ACA_QUEUE:
3733 		fcp_task_attr = TCM_ACA_TAG;
3734 		break;
3735 	case ATIO_UNTAGGED:
3736 		fcp_task_attr = TCM_SIMPLE_TAG;
3737 		break;
3738 	default:
3739 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3740 		    "qla_target: unknown task code %x, use ORDERED instead\n",
3741 		    task_codes);
3742 		fcp_task_attr = TCM_ORDERED_TAG;
3743 		break;
3744 	}
3745 
3746 	return fcp_task_attr;
3747 }
3748 
3749 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3750 					uint8_t *);
3751 /*
3752  * Process context for I/O path into tcm_qla2xxx code
3753  */
3754 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3755 {
3756 	scsi_qla_host_t *vha = cmd->vha;
3757 	struct qla_hw_data *ha = vha->hw;
3758 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3759 	struct qla_tgt_sess *sess = cmd->sess;
3760 	struct atio_from_isp *atio = &cmd->atio;
3761 	unsigned char *cdb;
3762 	unsigned long flags;
3763 	uint32_t data_length;
3764 	int ret, fcp_task_attr, data_dir, bidi = 0;
3765 
3766 	cmd->cmd_in_wq = 0;
3767 	cmd->cmd_flags |= BIT_1;
3768 	if (tgt->tgt_stop)
3769 		goto out_term;
3770 
3771 	if (cmd->aborted) {
3772 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3773 		    "cmd with tag %u is aborted\n",
3774 		    cmd->atio.u.isp24.exchange_addr);
3775 		goto out_term;
3776 	}
3777 
3778 	spin_lock_init(&cmd->cmd_lock);
3779 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3780 	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3781 	cmd->unpacked_lun = scsilun_to_int(
3782 	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3783 
3784 	if (atio->u.isp24.fcp_cmnd.rddata &&
3785 	    atio->u.isp24.fcp_cmnd.wrdata) {
3786 		bidi = 1;
3787 		data_dir = DMA_TO_DEVICE;
3788 	} else if (atio->u.isp24.fcp_cmnd.rddata)
3789 		data_dir = DMA_FROM_DEVICE;
3790 	else if (atio->u.isp24.fcp_cmnd.wrdata)
3791 		data_dir = DMA_TO_DEVICE;
3792 	else
3793 		data_dir = DMA_NONE;
3794 
3795 	fcp_task_attr = qlt_get_fcp_task_attr(vha,
3796 	    atio->u.isp24.fcp_cmnd.task_attr);
3797 	data_length = be32_to_cpu(get_unaligned((uint32_t *)
3798 	    &atio->u.isp24.fcp_cmnd.add_cdb[
3799 	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
3800 
3801 	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3802 				          fcp_task_attr, data_dir, bidi);
3803 	if (ret != 0)
3804 		goto out_term;
3805 	/*
3806 	 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
3807 	 */
3808 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3809 	ha->tgt.tgt_ops->put_sess(sess);
3810 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3811 	return;
3812 
3813 out_term:
3814 	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3815 	/*
3816 	 * cmd has not sent to target yet, so pass NULL as the second
3817 	 * argument to qlt_send_term_exchange() and free the memory here.
3818 	 */
3819 	cmd->cmd_flags |= BIT_2;
3820 	spin_lock_irqsave(&ha->hardware_lock, flags);
3821 	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
3822 
3823 	qlt_decr_num_pend_cmds(vha);
3824 	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3825 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3826 
3827 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3828 	ha->tgt.tgt_ops->put_sess(sess);
3829 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3830 }
3831 
3832 static void qlt_do_work(struct work_struct *work)
3833 {
3834 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3835 	scsi_qla_host_t *vha = cmd->vha;
3836 	unsigned long flags;
3837 
3838 	spin_lock_irqsave(&vha->cmd_list_lock, flags);
3839 	list_del(&cmd->cmd_list);
3840 	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3841 
3842 	__qlt_do_work(cmd);
3843 }
3844 
3845 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3846 				       struct qla_tgt_sess *sess,
3847 				       struct atio_from_isp *atio)
3848 {
3849 	struct se_session *se_sess = sess->se_sess;
3850 	struct qla_tgt_cmd *cmd;
3851 	int tag;
3852 
3853 	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3854 	if (tag < 0)
3855 		return NULL;
3856 
3857 	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3858 	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
3859 
3860 	memcpy(&cmd->atio, atio, sizeof(*atio));
3861 	cmd->state = QLA_TGT_STATE_NEW;
3862 	cmd->tgt = vha->vha_tgt.qla_tgt;
3863 	qlt_incr_num_pend_cmds(vha);
3864 	cmd->vha = vha;
3865 	cmd->se_cmd.map_tag = tag;
3866 	cmd->sess = sess;
3867 	cmd->loop_id = sess->loop_id;
3868 	cmd->conf_compl_supported = sess->conf_compl_supported;
3869 
3870 	cmd->cmd_flags = 0;
3871 	cmd->jiffies_at_alloc = get_jiffies_64();
3872 
3873 	cmd->reset_count = vha->hw->chip_reset;
3874 
3875 	return cmd;
3876 }
3877 
3878 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3879 			  uint16_t);
3880 
3881 static void qlt_create_sess_from_atio(struct work_struct *work)
3882 {
3883 	struct qla_tgt_sess_op *op = container_of(work,
3884 					struct qla_tgt_sess_op, work);
3885 	scsi_qla_host_t *vha = op->vha;
3886 	struct qla_hw_data *ha = vha->hw;
3887 	struct qla_tgt_sess *sess;
3888 	struct qla_tgt_cmd *cmd;
3889 	unsigned long flags;
3890 	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3891 
3892 	spin_lock_irqsave(&vha->cmd_list_lock, flags);
3893 	list_del(&op->cmd_list);
3894 	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3895 
3896 	if (op->aborted) {
3897 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3898 		    "sess_op with tag %u is aborted\n",
3899 		    op->atio.u.isp24.exchange_addr);
3900 		goto out_term;
3901 	}
3902 
3903 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3904 	    "qla_target(%d): Unable to find wwn login"
3905 	    " (s_id %x:%x:%x), trying to create it manually\n",
3906 	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3907 
3908 	if (op->atio.u.raw.entry_count > 1) {
3909 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3910 		    "Dropping multy entry atio %p\n", &op->atio);
3911 		goto out_term;
3912 	}
3913 
3914 	sess = qlt_make_local_sess(vha, s_id);
3915 	/* sess has an extra creation ref. */
3916 
3917 	if (!sess)
3918 		goto out_term;
3919 	/*
3920 	 * Now obtain a pre-allocated session tag using the original op->atio
3921 	 * packet header, and dispatch into __qlt_do_work() using the existing
3922 	 * process context.
3923 	 */
3924 	cmd = qlt_get_tag(vha, sess, &op->atio);
3925 	if (!cmd) {
3926 		spin_lock_irqsave(&ha->hardware_lock, flags);
3927 		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3928 		ha->tgt.tgt_ops->put_sess(sess);
3929 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3930 		kfree(op);
3931 		return;
3932 	}
3933 	/*
3934 	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3935 	 * the extra reference taken above by qlt_make_local_sess()
3936 	 */
3937 	__qlt_do_work(cmd);
3938 	kfree(op);
3939 	return;
3940 
3941 out_term:
3942 	spin_lock_irqsave(&ha->hardware_lock, flags);
3943 	qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
3944 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3945 	kfree(op);
3946 
3947 }
3948 
3949 /* ha->hardware_lock supposed to be held on entry */
3950 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3951 	struct atio_from_isp *atio)
3952 {
3953 	struct qla_hw_data *ha = vha->hw;
3954 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3955 	struct qla_tgt_sess *sess;
3956 	struct qla_tgt_cmd *cmd;
3957 
3958 	if (unlikely(tgt->tgt_stop)) {
3959 		ql_dbg(ql_dbg_io, vha, 0x3061,
3960 		    "New command while device %p is shutting down\n", tgt);
3961 		return -EFAULT;
3962 	}
3963 
3964 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3965 	if (unlikely(!sess)) {
3966 		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3967 						     GFP_ATOMIC);
3968 		if (!op)
3969 			return -ENOMEM;
3970 
3971 		memcpy(&op->atio, atio, sizeof(*atio));
3972 		op->vha = vha;
3973 
3974 		spin_lock(&vha->cmd_list_lock);
3975 		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3976 		spin_unlock(&vha->cmd_list_lock);
3977 
3978 		INIT_WORK(&op->work, qlt_create_sess_from_atio);
3979 		queue_work(qla_tgt_wq, &op->work);
3980 		return 0;
3981 	}
3982 
3983 	/* Another WWN used to have our s_id. Our PLOGI scheduled its
3984 	 * session deletion, but it's still in sess_del_work wq */
3985 	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3986 		ql_dbg(ql_dbg_io, vha, 0x3061,
3987 		    "New command while old session %p is being deleted\n",
3988 		    sess);
3989 		return -EFAULT;
3990 	}
3991 
3992 	/*
3993 	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3994 	 */
3995 	kref_get(&sess->se_sess->sess_kref);
3996 
3997 	cmd = qlt_get_tag(vha, sess, atio);
3998 	if (!cmd) {
3999 		ql_dbg(ql_dbg_io, vha, 0x3062,
4000 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4001 		ha->tgt.tgt_ops->put_sess(sess);
4002 		return -ENOMEM;
4003 	}
4004 
4005 	cmd->cmd_in_wq = 1;
4006 	cmd->cmd_flags |= BIT_0;
4007 	cmd->se_cmd.cpuid = ha->msix_count ?
4008 		ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
4009 
4010 	spin_lock(&vha->cmd_list_lock);
4011 	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4012 	spin_unlock(&vha->cmd_list_lock);
4013 
4014 	INIT_WORK(&cmd->work, qlt_do_work);
4015 	if (ha->msix_count) {
4016 		if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4017 			queue_work_on(smp_processor_id(), qla_tgt_wq,
4018 			    &cmd->work);
4019 		else
4020 			queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4021 			    &cmd->work);
4022 	} else {
4023 		queue_work(qla_tgt_wq, &cmd->work);
4024 	}
4025 	return 0;
4026 
4027 }
4028 
4029 /* ha->hardware_lock supposed to be held on entry */
4030 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
4031 	int fn, void *iocb, int flags)
4032 {
4033 	struct scsi_qla_host *vha = sess->vha;
4034 	struct qla_hw_data *ha = vha->hw;
4035 	struct qla_tgt_mgmt_cmd *mcmd;
4036 	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4037 	int res;
4038 	uint8_t tmr_func;
4039 
4040 	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4041 	if (!mcmd) {
4042 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4043 		    "qla_target(%d): Allocation of management "
4044 		    "command failed, some commands and their data could "
4045 		    "leak\n", vha->vp_idx);
4046 		return -ENOMEM;
4047 	}
4048 	memset(mcmd, 0, sizeof(*mcmd));
4049 	mcmd->sess = sess;
4050 
4051 	if (iocb) {
4052 		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4053 		    sizeof(mcmd->orig_iocb.imm_ntfy));
4054 	}
4055 	mcmd->tmr_func = fn;
4056 	mcmd->flags = flags;
4057 	mcmd->reset_count = vha->hw->chip_reset;
4058 
4059 	switch (fn) {
4060 	case QLA_TGT_CLEAR_ACA:
4061 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
4062 		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
4063 		tmr_func = TMR_CLEAR_ACA;
4064 		break;
4065 
4066 	case QLA_TGT_TARGET_RESET:
4067 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
4068 		    "qla_target(%d): TARGET_RESET received\n",
4069 		    sess->vha->vp_idx);
4070 		tmr_func = TMR_TARGET_WARM_RESET;
4071 		break;
4072 
4073 	case QLA_TGT_LUN_RESET:
4074 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
4075 		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
4076 		tmr_func = TMR_LUN_RESET;
4077 		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4078 		break;
4079 
4080 	case QLA_TGT_CLEAR_TS:
4081 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
4082 		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
4083 		tmr_func = TMR_CLEAR_TASK_SET;
4084 		break;
4085 
4086 	case QLA_TGT_ABORT_TS:
4087 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
4088 		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
4089 		tmr_func = TMR_ABORT_TASK_SET;
4090 		break;
4091 #if 0
4092 	case QLA_TGT_ABORT_ALL:
4093 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
4094 		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
4095 		    sess->vha->vp_idx);
4096 		tmr_func = 0;
4097 		break;
4098 
4099 	case QLA_TGT_ABORT_ALL_SESS:
4100 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
4101 		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
4102 		    sess->vha->vp_idx);
4103 		tmr_func = 0;
4104 		break;
4105 
4106 	case QLA_TGT_NEXUS_LOSS_SESS:
4107 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
4108 		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
4109 		    sess->vha->vp_idx);
4110 		tmr_func = 0;
4111 		break;
4112 
4113 	case QLA_TGT_NEXUS_LOSS:
4114 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
4115 		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
4116 		tmr_func = 0;
4117 		break;
4118 #endif
4119 	default:
4120 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
4121 		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
4122 		    sess->vha->vp_idx, fn);
4123 		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4124 		return -ENOSYS;
4125 	}
4126 
4127 	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
4128 	if (res != 0) {
4129 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
4130 		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
4131 		    sess->vha->vp_idx, res);
4132 		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4133 		return -EFAULT;
4134 	}
4135 
4136 	return 0;
4137 }
4138 
4139 /* ha->hardware_lock supposed to be held on entry */
4140 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4141 {
4142 	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4143 	struct qla_hw_data *ha = vha->hw;
4144 	struct qla_tgt *tgt;
4145 	struct qla_tgt_sess *sess;
4146 	uint32_t lun, unpacked_lun;
4147 	int fn;
4148 	unsigned long flags;
4149 
4150 	tgt = vha->vha_tgt.qla_tgt;
4151 
4152 	lun = a->u.isp24.fcp_cmnd.lun;
4153 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4154 
4155 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4156 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4157 	    a->u.isp24.fcp_hdr.s_id);
4158 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4159 
4160 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4161 
4162 	if (!sess) {
4163 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
4164 		    "qla_target(%d): task mgmt fn 0x%x for "
4165 		    "non-existant session\n", vha->vp_idx, fn);
4166 		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
4167 		    sizeof(struct atio_from_isp));
4168 	}
4169 
4170 	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
4171 		return -EFAULT;
4172 
4173 	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4174 }
4175 
4176 /* ha->hardware_lock supposed to be held on entry */
4177 static int __qlt_abort_task(struct scsi_qla_host *vha,
4178 	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
4179 {
4180 	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4181 	struct qla_hw_data *ha = vha->hw;
4182 	struct qla_tgt_mgmt_cmd *mcmd;
4183 	uint32_t lun, unpacked_lun;
4184 	int rc;
4185 
4186 	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4187 	if (mcmd == NULL) {
4188 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4189 		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4190 		    vha->vp_idx, __func__);
4191 		return -ENOMEM;
4192 	}
4193 	memset(mcmd, 0, sizeof(*mcmd));
4194 
4195 	mcmd->sess = sess;
4196 	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4197 	    sizeof(mcmd->orig_iocb.imm_ntfy));
4198 
4199 	lun = a->u.isp24.fcp_cmnd.lun;
4200 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4201 	mcmd->reset_count = vha->hw->chip_reset;
4202 
4203 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
4204 	    le16_to_cpu(iocb->u.isp2x.seq_id));
4205 	if (rc != 0) {
4206 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4207 		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4208 		    vha->vp_idx, rc);
4209 		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4210 		return -EFAULT;
4211 	}
4212 
4213 	return 0;
4214 }
4215 
4216 /* ha->hardware_lock supposed to be held on entry */
4217 static int qlt_abort_task(struct scsi_qla_host *vha,
4218 	struct imm_ntfy_from_isp *iocb)
4219 {
4220 	struct qla_hw_data *ha = vha->hw;
4221 	struct qla_tgt_sess *sess;
4222 	int loop_id;
4223 	unsigned long flags;
4224 
4225 	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4226 
4227 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4228 	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4229 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4230 
4231 	if (sess == NULL) {
4232 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4233 		    "qla_target(%d): task abort for unexisting "
4234 		    "session\n", vha->vp_idx);
4235 		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4236 		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4237 	}
4238 
4239 	return __qlt_abort_task(vha, iocb, sess);
4240 }
4241 
4242 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4243 {
4244 	if (fcport->tgt_session) {
4245 		if (rc != MBS_COMMAND_COMPLETE) {
4246 			ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4247 				"%s: se_sess %p / sess %p from"
4248 				" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4249 				" LOGO failed: %#x\n",
4250 				__func__,
4251 				fcport->tgt_session->se_sess,
4252 				fcport->tgt_session,
4253 				fcport->port_name, fcport->loop_id,
4254 				fcport->d_id.b.domain, fcport->d_id.b.area,
4255 				fcport->d_id.b.al_pa, rc);
4256 		}
4257 
4258 		fcport->tgt_session->logout_completed = 1;
4259 	}
4260 }
4261 
4262 /*
4263 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4264 *
4265 * Schedules sessions with matching port_id/loop_id but different wwn for
4266 * deletion. Returns existing session with matching wwn if present.
4267 * Null otherwise.
4268 */
4269 static struct qla_tgt_sess *
4270 qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4271     port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
4272 {
4273 	struct qla_tgt_sess *sess = NULL, *other_sess;
4274 	uint64_t other_wwn;
4275 
4276 	*conflict_sess = NULL;
4277 
4278 	list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4279 
4280 		other_wwn = wwn_to_u64(other_sess->port_name);
4281 
4282 		if (wwn == other_wwn) {
4283 			WARN_ON(sess);
4284 			sess = other_sess;
4285 			continue;
4286 		}
4287 
4288 		/* find other sess with nport_id collision */
4289 		if (port_id.b24 == other_sess->s_id.b24) {
4290 			if (loop_id != other_sess->loop_id) {
4291 				ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4292 				    "Invalidating sess %p loop_id %d wwn %llx.\n",
4293 				    other_sess, other_sess->loop_id, other_wwn);
4294 
4295 				/*
4296 				 * logout_on_delete is set by default, but another
4297 				 * session that has the same s_id/loop_id combo
4298 				 * might have cleared it when requested this session
4299 				 * deletion, so don't touch it
4300 				 */
4301 				qlt_schedule_sess_for_deletion(other_sess, true);
4302 			} else {
4303 				/*
4304 				 * Another wwn used to have our s_id/loop_id
4305 				 * kill the session, but don't free the loop_id
4306 				 */
4307 				other_sess->keep_nport_handle = 1;
4308 				*conflict_sess = other_sess;
4309 				qlt_schedule_sess_for_deletion(other_sess,
4310 				    true);
4311 			}
4312 			continue;
4313 		}
4314 
4315 		/* find other sess with nport handle collision */
4316 		if (loop_id == other_sess->loop_id) {
4317 			ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4318 			       "Invalidating sess %p loop_id %d wwn %llx.\n",
4319 			       other_sess, other_sess->loop_id, other_wwn);
4320 
4321 			/* Same loop_id but different s_id
4322 			 * Ok to kill and logout */
4323 			qlt_schedule_sess_for_deletion(other_sess, true);
4324 		}
4325 	}
4326 
4327 	return sess;
4328 }
4329 
4330 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4331 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4332 {
4333 	struct qla_tgt_sess_op *op;
4334 	struct qla_tgt_cmd *cmd;
4335 	uint32_t key;
4336 	int count = 0;
4337 
4338 	key = (((u32)s_id->b.domain << 16) |
4339 	       ((u32)s_id->b.area   <<  8) |
4340 	       ((u32)s_id->b.al_pa));
4341 
4342 	spin_lock(&vha->cmd_list_lock);
4343 	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4344 		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4345 		if (op_key == key) {
4346 			op->aborted = true;
4347 			count++;
4348 		}
4349 	}
4350 	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4351 		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4352 		if (cmd_key == key) {
4353 			cmd->aborted = 1;
4354 			count++;
4355 		}
4356 	}
4357 	spin_unlock(&vha->cmd_list_lock);
4358 
4359 	return count;
4360 }
4361 
4362 /*
4363  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4364  */
4365 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4366 	struct imm_ntfy_from_isp *iocb)
4367 {
4368 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4369 	struct qla_hw_data *ha = vha->hw;
4370 	struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
4371 	uint64_t wwn;
4372 	port_id_t port_id;
4373 	uint16_t loop_id;
4374 	uint16_t wd3_lo;
4375 	int res = 0;
4376 	qlt_plogi_ack_t *pla;
4377 	unsigned long flags;
4378 
4379 	wwn = wwn_to_u64(iocb->u.isp24.port_name);
4380 
4381 	port_id.b.domain = iocb->u.isp24.port_id[2];
4382 	port_id.b.area   = iocb->u.isp24.port_id[1];
4383 	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4384 	port_id.b.rsvd_1 = 0;
4385 
4386 	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4387 
4388 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
4389 	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
4390 	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
4391 
4392 	/* res = 1 means ack at the end of thread
4393 	 * res = 0 means ack async/later.
4394 	 */
4395 	switch (iocb->u.isp24.status_subcode) {
4396 	case ELS_PLOGI:
4397 
4398 		/* Mark all stale commands in qla_tgt_wq for deletion */
4399 		abort_cmds_for_s_id(vha, &port_id);
4400 
4401 		if (wwn) {
4402 			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4403 			sess = qlt_find_sess_invalidate_other(tgt, wwn,
4404 			    port_id, loop_id, &conflict_sess);
4405 			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4406 		}
4407 
4408 		if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
4409 			res = 1;
4410 			break;
4411 		}
4412 
4413 		pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4414 		if (!pla) {
4415 			qlt_send_term_imm_notif(vha, iocb, 1);
4416 
4417 			res = 0;
4418 			break;
4419 		}
4420 
4421 		res = 0;
4422 
4423 		if (conflict_sess)
4424 			qlt_plogi_ack_link(vha, pla, conflict_sess,
4425 			    QLT_PLOGI_LINK_CONFLICT);
4426 
4427 		if (!sess)
4428 			break;
4429 
4430 		qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4431 		 /*
4432 		  * Under normal circumstances we want to release nport handle
4433 		  * during LOGO process to avoid nport handle leaks inside FW.
4434 		  * The exception is when LOGO is done while another PLOGI with
4435 		  * the same nport handle is waiting as might be the case here.
4436 		  * Note: there is always a possibily of a race where session
4437 		  * deletion has already started for other reasons (e.g. ACL
4438 		  * removal) and now PLOGI arrives:
4439 		  * 1. if PLOGI arrived in FW after nport handle has been freed,
4440 		  *    FW must have assigned this PLOGI a new/same handle and we
4441 		  *    can proceed ACK'ing it as usual when session deletion
4442 		  *    completes.
4443 		  * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4444 		  *    bit reached it, the handle has now been released. We'll
4445 		  *    get an error when we ACK this PLOGI. Nothing will be sent
4446 		  *    back to initiator. Initiator should eventually retry
4447 		  *    PLOGI and situation will correct itself.
4448 		  */
4449 		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4450 					   (sess->s_id.b24 == port_id.b24));
4451 		qlt_schedule_sess_for_deletion(sess, true);
4452 		break;
4453 
4454 	case ELS_PRLI:
4455 		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4456 
4457 		if (wwn) {
4458 			spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
4459 			sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4460 			    loop_id, &conflict_sess);
4461 			spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
4462 		}
4463 
4464 		if (conflict_sess) {
4465 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
4466 			    "PRLI with conflicting sess %p port %8phC\n",
4467 			    conflict_sess, conflict_sess->port_name);
4468 			qlt_send_term_imm_notif(vha, iocb, 1);
4469 			res = 0;
4470 			break;
4471 		}
4472 
4473 		if (sess != NULL) {
4474 			if (sess->deleted) {
4475 				/*
4476 				 * Impatient initiator sent PRLI before last
4477 				 * PLOGI could finish. Will force him to re-try,
4478 				 * while last one finishes.
4479 				 */
4480 				ql_log(ql_log_warn, sess->vha, 0xf095,
4481 				    "sess %p PRLI received, before plogi ack.\n",
4482 				    sess);
4483 				qlt_send_term_imm_notif(vha, iocb, 1);
4484 				res = 0;
4485 				break;
4486 			}
4487 
4488 			/*
4489 			 * This shouldn't happen under normal circumstances,
4490 			 * since we have deleted the old session during PLOGI
4491 			 */
4492 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4493 			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4494 			    sess->loop_id, sess, iocb->u.isp24.nport_handle);
4495 
4496 			sess->local = 0;
4497 			sess->loop_id = loop_id;
4498 			sess->s_id = port_id;
4499 
4500 			if (wd3_lo & BIT_7)
4501 				sess->conf_compl_supported = 1;
4502 
4503 		}
4504 		res = 1; /* send notify ack */
4505 
4506 		/* Make session global (not used in fabric mode) */
4507 		if (ha->current_topology != ISP_CFG_F) {
4508 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4509 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4510 			qla2xxx_wake_dpc(vha);
4511 		} else {
4512 			/* todo: else - create sess here. */
4513 			res = 1; /* send notify ack */
4514 		}
4515 
4516 		break;
4517 
4518 	case ELS_LOGO:
4519 	case ELS_PRLO:
4520 		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4521 		break;
4522 	case ELS_PDISC:
4523 	case ELS_ADISC:
4524 	{
4525 		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4526 		if (tgt->link_reinit_iocb_pending) {
4527 			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4528 			    0, 0, 0, 0, 0, 0);
4529 			tgt->link_reinit_iocb_pending = 0;
4530 		}
4531 		res = 1; /* send notify ack */
4532 		break;
4533 	}
4534 
4535 	case ELS_FLOGI:	/* should never happen */
4536 	default:
4537 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
4538 		    "qla_target(%d): Unsupported ELS command %x "
4539 		    "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
4540 		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
4541 		break;
4542 	}
4543 
4544 	return res;
4545 }
4546 
4547 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
4548 {
4549 #if 1
4550 	/*
4551 	 * FIXME: Reject non zero SRR relative offset until we can test
4552 	 * this code properly.
4553 	 */
4554 	pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
4555 	return -1;
4556 #else
4557 	struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
4558 	size_t first_offset = 0, rem_offset = offset, tmp = 0;
4559 	int i, sg_srr_cnt, bufflen = 0;
4560 
4561 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
4562 	    "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
4563 	    "cmd->sg_cnt: %u, direction: %d\n",
4564 	    cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
4565 
4566 	if (!cmd->sg || !cmd->sg_cnt) {
4567 		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
4568 		    "Missing cmd->sg or zero cmd->sg_cnt in"
4569 		    " qla_tgt_set_data_offset\n");
4570 		return -EINVAL;
4571 	}
4572 	/*
4573 	 * Walk the current cmd->sg list until we locate the new sg_srr_start
4574 	 */
4575 	for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
4576 		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
4577 		    "sg[%d]: %p page: %p, length: %d, offset: %d\n",
4578 		    i, sg, sg_page(sg), sg->length, sg->offset);
4579 
4580 		if ((sg->length + tmp) > offset) {
4581 			first_offset = rem_offset;
4582 			sg_srr_start = sg;
4583 			ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
4584 			    "Found matching sg[%d], using %p as sg_srr_start, "
4585 			    "and using first_offset: %zu\n", i, sg,
4586 			    first_offset);
4587 			break;
4588 		}
4589 		tmp += sg->length;
4590 		rem_offset -= sg->length;
4591 	}
4592 
4593 	if (!sg_srr_start) {
4594 		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
4595 		    "Unable to locate sg_srr_start for offset: %u\n", offset);
4596 		return -EINVAL;
4597 	}
4598 	sg_srr_cnt = (cmd->sg_cnt - i);
4599 
4600 	sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
4601 	if (!sg_srr) {
4602 		ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
4603 		    "Unable to allocate sgp\n");
4604 		return -ENOMEM;
4605 	}
4606 	sg_init_table(sg_srr, sg_srr_cnt);
4607 	sgp = &sg_srr[0];
4608 	/*
4609 	 * Walk the remaining list for sg_srr_start, mapping to the newly
4610 	 * allocated sg_srr taking first_offset into account.
4611 	 */
4612 	for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
4613 		if (first_offset) {
4614 			sg_set_page(sgp, sg_page(sg),
4615 			    (sg->length - first_offset), first_offset);
4616 			first_offset = 0;
4617 		} else {
4618 			sg_set_page(sgp, sg_page(sg), sg->length, 0);
4619 		}
4620 		bufflen += sgp->length;
4621 
4622 		sgp = sg_next(sgp);
4623 		if (!sgp)
4624 			break;
4625 	}
4626 
4627 	cmd->sg = sg_srr;
4628 	cmd->sg_cnt = sg_srr_cnt;
4629 	cmd->bufflen = bufflen;
4630 	cmd->offset += offset;
4631 	cmd->free_sg = 1;
4632 
4633 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
4634 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
4635 	    cmd->sg_cnt);
4636 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
4637 	    cmd->bufflen);
4638 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
4639 	    cmd->offset);
4640 
4641 	if (cmd->sg_cnt < 0)
4642 		BUG();
4643 
4644 	if (cmd->bufflen < 0)
4645 		BUG();
4646 
4647 	return 0;
4648 #endif
4649 }
4650 
4651 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
4652 	uint32_t srr_rel_offs, int *xmit_type)
4653 {
4654 	int res = 0, rel_offs;
4655 
4656 	rel_offs = srr_rel_offs - cmd->offset;
4657 	ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
4658 	    srr_rel_offs, rel_offs);
4659 
4660 	*xmit_type = QLA_TGT_XMIT_ALL;
4661 
4662 	if (rel_offs < 0) {
4663 		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
4664 		    "qla_target(%d): SRR rel_offs (%d) < 0",
4665 		    cmd->vha->vp_idx, rel_offs);
4666 		res = -1;
4667 	} else if (rel_offs == cmd->bufflen)
4668 		*xmit_type = QLA_TGT_XMIT_STATUS;
4669 	else if (rel_offs > 0)
4670 		res = qlt_set_data_offset(cmd, rel_offs);
4671 
4672 	return res;
4673 }
4674 
4675 /* No locks, thread context */
4676 static void qlt_handle_srr(struct scsi_qla_host *vha,
4677 	struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
4678 {
4679 	struct imm_ntfy_from_isp *ntfy =
4680 	    (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
4681 	struct qla_hw_data *ha = vha->hw;
4682 	struct qla_tgt_cmd *cmd = sctio->cmd;
4683 	struct se_cmd *se_cmd = &cmd->se_cmd;
4684 	unsigned long flags;
4685 	int xmit_type = 0, resp = 0;
4686 	uint32_t offset;
4687 	uint16_t srr_ui;
4688 
4689 	offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
4690 	srr_ui = ntfy->u.isp24.srr_ui;
4691 
4692 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
4693 	    cmd, srr_ui);
4694 
4695 	switch (srr_ui) {
4696 	case SRR_IU_STATUS:
4697 		spin_lock_irqsave(&ha->hardware_lock, flags);
4698 		qlt_send_notify_ack(vha, ntfy,
4699 		    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4700 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4701 		xmit_type = QLA_TGT_XMIT_STATUS;
4702 		resp = 1;
4703 		break;
4704 	case SRR_IU_DATA_IN:
4705 		if (!cmd->sg || !cmd->sg_cnt) {
4706 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
4707 			    "Unable to process SRR_IU_DATA_IN due to"
4708 			    " missing cmd->sg, state: %d\n", cmd->state);
4709 			dump_stack();
4710 			goto out_reject;
4711 		}
4712 		if (se_cmd->scsi_status != 0) {
4713 			ql_dbg(ql_dbg_tgt, vha, 0xe02a,
4714 			    "Rejecting SRR_IU_DATA_IN with non GOOD "
4715 			    "scsi_status\n");
4716 			goto out_reject;
4717 		}
4718 		cmd->bufflen = se_cmd->data_length;
4719 
4720 		if (qlt_has_data(cmd)) {
4721 			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4722 				goto out_reject;
4723 			spin_lock_irqsave(&ha->hardware_lock, flags);
4724 			qlt_send_notify_ack(vha, ntfy,
4725 			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4726 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
4727 			resp = 1;
4728 		} else {
4729 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
4730 			       "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
4731 			       vha->vp_idx, se_cmd->tag,
4732 			    cmd->se_cmd.scsi_status);
4733 			goto out_reject;
4734 		}
4735 		break;
4736 	case SRR_IU_DATA_OUT:
4737 		if (!cmd->sg || !cmd->sg_cnt) {
4738 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
4739 			    "Unable to process SRR_IU_DATA_OUT due to"
4740 			    " missing cmd->sg\n");
4741 			dump_stack();
4742 			goto out_reject;
4743 		}
4744 		if (se_cmd->scsi_status != 0) {
4745 			ql_dbg(ql_dbg_tgt, vha, 0xe02b,
4746 			    "Rejecting SRR_IU_DATA_OUT"
4747 			    " with non GOOD scsi_status\n");
4748 			goto out_reject;
4749 		}
4750 		cmd->bufflen = se_cmd->data_length;
4751 
4752 		if (qlt_has_data(cmd)) {
4753 			if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
4754 				goto out_reject;
4755 			spin_lock_irqsave(&ha->hardware_lock, flags);
4756 			qlt_send_notify_ack(vha, ntfy,
4757 			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
4758 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
4759 			if (xmit_type & QLA_TGT_XMIT_DATA) {
4760 				cmd->cmd_flags |= BIT_8;
4761 				qlt_rdy_to_xfer(cmd);
4762 			}
4763 		} else {
4764 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
4765 			    "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
4766 			       vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
4767 			goto out_reject;
4768 		}
4769 		break;
4770 	default:
4771 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
4772 		    "qla_target(%d): Unknown srr_ui value %x",
4773 		    vha->vp_idx, srr_ui);
4774 		goto out_reject;
4775 	}
4776 
4777 	/* Transmit response in case of status and data-in cases */
4778 	if (resp) {
4779 		cmd->cmd_flags |= BIT_7;
4780 		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
4781 	}
4782 
4783 	return;
4784 
4785 out_reject:
4786 	spin_lock_irqsave(&ha->hardware_lock, flags);
4787 	qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
4788 	    NOTIFY_ACK_SRR_FLAGS_REJECT,
4789 	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4790 	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4791 	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4792 		cmd->state = QLA_TGT_STATE_DATA_IN;
4793 		dump_stack();
4794 	} else {
4795 		cmd->cmd_flags |= BIT_9;
4796 		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
4797 	}
4798 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4799 }
4800 
4801 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
4802 	struct qla_tgt_srr_imm *imm, int ha_locked)
4803 {
4804 	struct qla_hw_data *ha = vha->hw;
4805 	unsigned long flags = 0;
4806 
4807 #ifndef __CHECKER__
4808 	if (!ha_locked)
4809 		spin_lock_irqsave(&ha->hardware_lock, flags);
4810 #endif
4811 
4812 	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
4813 	    NOTIFY_ACK_SRR_FLAGS_REJECT,
4814 	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4815 	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4816 
4817 #ifndef __CHECKER__
4818 	if (!ha_locked)
4819 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
4820 #endif
4821 
4822 	kfree(imm);
4823 }
4824 
4825 static void qlt_handle_srr_work(struct work_struct *work)
4826 {
4827 	struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
4828 	struct scsi_qla_host *vha = tgt->vha;
4829 	struct qla_tgt_srr_ctio *sctio;
4830 	unsigned long flags;
4831 
4832 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
4833 	    tgt);
4834 
4835 restart:
4836 	spin_lock_irqsave(&tgt->srr_lock, flags);
4837 	list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
4838 		struct qla_tgt_srr_imm *imm, *i, *ti;
4839 		struct qla_tgt_cmd *cmd;
4840 		struct se_cmd *se_cmd;
4841 
4842 		imm = NULL;
4843 		list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
4844 						srr_list_entry) {
4845 			if (i->srr_id == sctio->srr_id) {
4846 				list_del(&i->srr_list_entry);
4847 				if (imm) {
4848 					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4849 					  "qla_target(%d): There must be "
4850 					  "only one IMM SRR per CTIO SRR "
4851 					  "(IMM SRR %p, id %d, CTIO %p\n",
4852 					  vha->vp_idx, i, i->srr_id, sctio);
4853 					qlt_reject_free_srr_imm(tgt->vha, i, 0);
4854 				} else
4855 					imm = i;
4856 			}
4857 		}
4858 
4859 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4860 		    "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4861 		    sctio->srr_id);
4862 
4863 		if (imm == NULL) {
4864 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4865 			    "Not found matching IMM for SRR CTIO (id %d)\n",
4866 			    sctio->srr_id);
4867 			continue;
4868 		} else
4869 			list_del(&sctio->srr_list_entry);
4870 
4871 		spin_unlock_irqrestore(&tgt->srr_lock, flags);
4872 
4873 		cmd = sctio->cmd;
4874 		/*
4875 		 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4876 		 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4877 		 * logic..
4878 		 */
4879 		cmd->offset = 0;
4880 		if (cmd->free_sg) {
4881 			kfree(cmd->sg);
4882 			cmd->sg = NULL;
4883 			cmd->free_sg = 0;
4884 		}
4885 		se_cmd = &cmd->se_cmd;
4886 
4887 		cmd->sg_cnt = se_cmd->t_data_nents;
4888 		cmd->sg = se_cmd->t_data_sg;
4889 
4890 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4891 		       "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
4892 		       cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
4893 		       se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
4894 
4895 		qlt_handle_srr(vha, sctio, imm);
4896 
4897 		kfree(imm);
4898 		kfree(sctio);
4899 		goto restart;
4900 	}
4901 	spin_unlock_irqrestore(&tgt->srr_lock, flags);
4902 }
4903 
4904 /* ha->hardware_lock supposed to be held on entry */
4905 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4906 	struct imm_ntfy_from_isp *iocb)
4907 {
4908 	struct qla_tgt_srr_imm *imm;
4909 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4910 	struct qla_tgt_srr_ctio *sctio;
4911 
4912 	tgt->imm_srr_id++;
4913 
4914 	ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
4915 	    vha->vp_idx);
4916 
4917 	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4918 	if (imm != NULL) {
4919 		memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4920 
4921 		/* IRQ is already OFF */
4922 		spin_lock(&tgt->srr_lock);
4923 		imm->srr_id = tgt->imm_srr_id;
4924 		list_add_tail(&imm->srr_list_entry,
4925 		    &tgt->srr_imm_list);
4926 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4927 		    "IMM NTFY SRR %p added (id %d, ui %x)\n",
4928 		    imm, imm->srr_id, iocb->u.isp24.srr_ui);
4929 		if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4930 			int found = 0;
4931 			list_for_each_entry(sctio, &tgt->srr_ctio_list,
4932 			    srr_list_entry) {
4933 				if (sctio->srr_id == imm->srr_id) {
4934 					found = 1;
4935 					break;
4936 				}
4937 			}
4938 			if (found) {
4939 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4940 				    "Scheduling srr work\n");
4941 				schedule_work(&tgt->srr_work);
4942 			} else {
4943 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4944 				    "qla_target(%d): imm_srr_id "
4945 				    "== ctio_srr_id (%d), but there is no "
4946 				    "corresponding SRR CTIO, deleting IMM "
4947 				    "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4948 				    imm);
4949 				list_del(&imm->srr_list_entry);
4950 
4951 				kfree(imm);
4952 
4953 				spin_unlock(&tgt->srr_lock);
4954 				goto out_reject;
4955 			}
4956 		}
4957 		spin_unlock(&tgt->srr_lock);
4958 	} else {
4959 		struct qla_tgt_srr_ctio *ts;
4960 
4961 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4962 		    "qla_target(%d): Unable to allocate SRR IMM "
4963 		    "entry, SRR request will be rejected\n", vha->vp_idx);
4964 
4965 		/* IRQ is already OFF */
4966 		spin_lock(&tgt->srr_lock);
4967 		list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4968 		    srr_list_entry) {
4969 			if (sctio->srr_id == tgt->imm_srr_id) {
4970 				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4971 				    "CTIO SRR %p deleted (id %d)\n",
4972 				    sctio, sctio->srr_id);
4973 				list_del(&sctio->srr_list_entry);
4974 				qlt_send_term_exchange(vha, sctio->cmd,
4975 				    &sctio->cmd->atio, 1, 0);
4976 				kfree(sctio);
4977 			}
4978 		}
4979 		spin_unlock(&tgt->srr_lock);
4980 		goto out_reject;
4981 	}
4982 
4983 	return;
4984 
4985 out_reject:
4986 	qlt_send_notify_ack(vha, iocb, 0, 0, 0,
4987 	    NOTIFY_ACK_SRR_FLAGS_REJECT,
4988 	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4989 	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4990 }
4991 
4992 /*
4993  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4994  */
4995 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4996 	struct imm_ntfy_from_isp *iocb)
4997 {
4998 	struct qla_hw_data *ha = vha->hw;
4999 	uint32_t add_flags = 0;
5000 	int send_notify_ack = 1;
5001 	uint16_t status;
5002 
5003 	status = le16_to_cpu(iocb->u.isp2x.status);
5004 	switch (status) {
5005 	case IMM_NTFY_LIP_RESET:
5006 	{
5007 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5008 		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
5009 		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5010 		    iocb->u.isp24.status_subcode);
5011 
5012 		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5013 			send_notify_ack = 0;
5014 		break;
5015 	}
5016 
5017 	case IMM_NTFY_LIP_LINK_REINIT:
5018 	{
5019 		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5020 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5021 		    "qla_target(%d): LINK REINIT (loop %#x, "
5022 		    "subcode %x)\n", vha->vp_idx,
5023 		    le16_to_cpu(iocb->u.isp24.nport_handle),
5024 		    iocb->u.isp24.status_subcode);
5025 		if (tgt->link_reinit_iocb_pending) {
5026 			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
5027 			    0, 0, 0, 0, 0, 0);
5028 		}
5029 		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
5030 		tgt->link_reinit_iocb_pending = 1;
5031 		/*
5032 		 * QLogic requires to wait after LINK REINIT for possible
5033 		 * PDISC or ADISC ELS commands
5034 		 */
5035 		send_notify_ack = 0;
5036 		break;
5037 	}
5038 
5039 	case IMM_NTFY_PORT_LOGOUT:
5040 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5041 		    "qla_target(%d): Port logout (loop "
5042 		    "%#x, subcode %x)\n", vha->vp_idx,
5043 		    le16_to_cpu(iocb->u.isp24.nport_handle),
5044 		    iocb->u.isp24.status_subcode);
5045 
5046 		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5047 			send_notify_ack = 0;
5048 		/* The sessions will be cleared in the callback, if needed */
5049 		break;
5050 
5051 	case IMM_NTFY_GLBL_TPRLO:
5052 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5053 		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5054 		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5055 			send_notify_ack = 0;
5056 		/* The sessions will be cleared in the callback, if needed */
5057 		break;
5058 
5059 	case IMM_NTFY_PORT_CONFIG:
5060 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5061 		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5062 		    status);
5063 		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5064 			send_notify_ack = 0;
5065 		/* The sessions will be cleared in the callback, if needed */
5066 		break;
5067 
5068 	case IMM_NTFY_GLBL_LOGO:
5069 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5070 		    "qla_target(%d): Link failure detected\n",
5071 		    vha->vp_idx);
5072 		/* I_T nexus loss */
5073 		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5074 			send_notify_ack = 0;
5075 		break;
5076 
5077 	case IMM_NTFY_IOCB_OVERFLOW:
5078 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5079 		    "qla_target(%d): Cannot provide requested "
5080 		    "capability (IOCB overflowed the immediate notify "
5081 		    "resource count)\n", vha->vp_idx);
5082 		break;
5083 
5084 	case IMM_NTFY_ABORT_TASK:
5085 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5086 		    "qla_target(%d): Abort Task (S %08x I %#x -> "
5087 		    "L %#x)\n", vha->vp_idx,
5088 		    le16_to_cpu(iocb->u.isp2x.seq_id),
5089 		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
5090 		    le16_to_cpu(iocb->u.isp2x.lun));
5091 		if (qlt_abort_task(vha, iocb) == 0)
5092 			send_notify_ack = 0;
5093 		break;
5094 
5095 	case IMM_NTFY_RESOURCE:
5096 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5097 		    "qla_target(%d): Out of resources, host %ld\n",
5098 		    vha->vp_idx, vha->host_no);
5099 		break;
5100 
5101 	case IMM_NTFY_MSG_RX:
5102 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5103 		    "qla_target(%d): Immediate notify task %x\n",
5104 		    vha->vp_idx, iocb->u.isp2x.task_flags);
5105 		if (qlt_handle_task_mgmt(vha, iocb) == 0)
5106 			send_notify_ack = 0;
5107 		break;
5108 
5109 	case IMM_NTFY_ELS:
5110 		if (qlt_24xx_handle_els(vha, iocb) == 0)
5111 			send_notify_ack = 0;
5112 		break;
5113 
5114 	case IMM_NTFY_SRR:
5115 		qlt_prepare_srr_imm(vha, iocb);
5116 		send_notify_ack = 0;
5117 		break;
5118 
5119 	default:
5120 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5121 		    "qla_target(%d): Received unknown immediate "
5122 		    "notify status %x\n", vha->vp_idx, status);
5123 		break;
5124 	}
5125 
5126 	if (send_notify_ack)
5127 		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
5128 }
5129 
5130 /*
5131  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5132  * This function sends busy to ISP 2xxx or 24xx.
5133  */
5134 static int __qlt_send_busy(struct scsi_qla_host *vha,
5135 	struct atio_from_isp *atio, uint16_t status)
5136 {
5137 	struct ctio7_to_24xx *ctio24;
5138 	struct qla_hw_data *ha = vha->hw;
5139 	request_t *pkt;
5140 	struct qla_tgt_sess *sess = NULL;
5141 	unsigned long flags;
5142 
5143 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5144 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5145 	    atio->u.isp24.fcp_hdr.s_id);
5146 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5147 	if (!sess) {
5148 		qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5149 		return 0;
5150 	}
5151 	/* Sending marker isn't necessary, since we called from ISR */
5152 
5153 	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
5154 	if (!pkt) {
5155 		ql_dbg(ql_dbg_io, vha, 0x3063,
5156 		    "qla_target(%d): %s failed: unable to allocate "
5157 		    "request packet", vha->vp_idx, __func__);
5158 		return -ENOMEM;
5159 	}
5160 
5161 	vha->tgt_counters.num_q_full_sent++;
5162 	pkt->entry_count = 1;
5163 	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
5164 
5165 	ctio24 = (struct ctio7_to_24xx *)pkt;
5166 	ctio24->entry_type = CTIO_TYPE7;
5167 	ctio24->nport_handle = sess->loop_id;
5168 	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
5169 	ctio24->vp_index = vha->vp_idx;
5170 	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
5171 	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
5172 	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
5173 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
5174 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
5175 	    cpu_to_le16(
5176 		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
5177 		CTIO7_FLAGS_DONT_RET_CTIO);
5178 	/*
5179 	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
5180 	 * if the explicit conformation is used.
5181 	 */
5182 	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
5183 	ctio24->u.status1.scsi_status = cpu_to_le16(status);
5184 	/* Memory Barrier */
5185 	wmb();
5186 	qla2x00_start_iocbs(vha, vha->req);
5187 	return 0;
5188 }
5189 
5190 /*
5191  * This routine is used to allocate a command for either a QFull condition
5192  * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5193  * out previously.
5194  */
5195 static void
5196 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5197 	struct atio_from_isp *atio, uint16_t status, int qfull)
5198 {
5199 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5200 	struct qla_hw_data *ha = vha->hw;
5201 	struct qla_tgt_sess *sess;
5202 	struct se_session *se_sess;
5203 	struct qla_tgt_cmd *cmd;
5204 	int tag;
5205 
5206 	if (unlikely(tgt->tgt_stop)) {
5207 		ql_dbg(ql_dbg_io, vha, 0x300a,
5208 			"New command while device %p is shutting down\n", tgt);
5209 		return;
5210 	}
5211 
5212 	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5213 		vha->hw->tgt.num_qfull_cmds_dropped++;
5214 		if (vha->hw->tgt.num_qfull_cmds_dropped >
5215 			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
5216 			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
5217 				vha->hw->tgt.num_qfull_cmds_dropped;
5218 
5219 		ql_dbg(ql_dbg_io, vha, 0x3068,
5220 			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
5221 			vha->vp_idx, __func__,
5222 			vha->hw->tgt.num_qfull_cmds_dropped);
5223 
5224 		qlt_chk_exch_leak_thresh_hold(vha);
5225 		return;
5226 	}
5227 
5228 	sess = ha->tgt.tgt_ops->find_sess_by_s_id
5229 		(vha, atio->u.isp24.fcp_hdr.s_id);
5230 	if (!sess)
5231 		return;
5232 
5233 	se_sess = sess->se_sess;
5234 
5235 	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
5236 	if (tag < 0)
5237 		return;
5238 
5239 	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
5240 	if (!cmd) {
5241 		ql_dbg(ql_dbg_io, vha, 0x3009,
5242 			"qla_target(%d): %s: Allocation of cmd failed\n",
5243 			vha->vp_idx, __func__);
5244 
5245 		vha->hw->tgt.num_qfull_cmds_dropped++;
5246 		if (vha->hw->tgt.num_qfull_cmds_dropped >
5247 			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
5248 			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
5249 				vha->hw->tgt.num_qfull_cmds_dropped;
5250 
5251 		qlt_chk_exch_leak_thresh_hold(vha);
5252 		return;
5253 	}
5254 
5255 	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
5256 
5257 	qlt_incr_num_pend_cmds(vha);
5258 	INIT_LIST_HEAD(&cmd->cmd_list);
5259 	memcpy(&cmd->atio, atio, sizeof(*atio));
5260 
5261 	cmd->tgt = vha->vha_tgt.qla_tgt;
5262 	cmd->vha = vha;
5263 	cmd->reset_count = vha->hw->chip_reset;
5264 	cmd->q_full = 1;
5265 
5266 	if (qfull) {
5267 		cmd->q_full = 1;
5268 		/* NOTE: borrowing the state field to carry the status */
5269 		cmd->state = status;
5270 	} else
5271 		cmd->term_exchg = 1;
5272 
5273 	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5274 
5275 	vha->hw->tgt.num_qfull_cmds_alloc++;
5276 	if (vha->hw->tgt.num_qfull_cmds_alloc >
5277 		vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
5278 		vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
5279 			vha->hw->tgt.num_qfull_cmds_alloc;
5280 }
5281 
5282 int
5283 qlt_free_qfull_cmds(struct scsi_qla_host *vha)
5284 {
5285 	struct qla_hw_data *ha = vha->hw;
5286 	unsigned long flags;
5287 	struct qla_tgt_cmd *cmd, *tcmd;
5288 	struct list_head free_list;
5289 	int rc = 0;
5290 
5291 	if (list_empty(&ha->tgt.q_full_list))
5292 		return 0;
5293 
5294 	INIT_LIST_HEAD(&free_list);
5295 
5296 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
5297 
5298 	if (list_empty(&ha->tgt.q_full_list)) {
5299 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5300 		return 0;
5301 	}
5302 
5303 	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
5304 		if (cmd->q_full)
5305 			/* cmd->state is a borrowed field to hold status */
5306 			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
5307 		else if (cmd->term_exchg)
5308 			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
5309 
5310 		if (rc == -ENOMEM)
5311 			break;
5312 
5313 		if (cmd->q_full)
5314 			ql_dbg(ql_dbg_io, vha, 0x3006,
5315 			    "%s: busy sent for ox_id[%04x]\n", __func__,
5316 			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5317 		else if (cmd->term_exchg)
5318 			ql_dbg(ql_dbg_io, vha, 0x3007,
5319 			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
5320 			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5321 		else
5322 			ql_dbg(ql_dbg_io, vha, 0x3008,
5323 			    "%s: Unexpected cmd in QFull list %p\n", __func__,
5324 			    cmd);
5325 
5326 		list_del(&cmd->cmd_list);
5327 		list_add_tail(&cmd->cmd_list, &free_list);
5328 
5329 		/* piggy back on hardware_lock for protection */
5330 		vha->hw->tgt.num_qfull_cmds_alloc--;
5331 	}
5332 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
5333 
5334 	cmd = NULL;
5335 
5336 	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5337 		list_del(&cmd->cmd_list);
5338 		/* This cmd was never sent to TCM.  There is no need
5339 		 * to schedule free or call free_cmd
5340 		 */
5341 		qlt_free_cmd(cmd);
5342 	}
5343 	return rc;
5344 }
5345 
5346 static void
5347 qlt_send_busy(struct scsi_qla_host *vha,
5348 	struct atio_from_isp *atio, uint16_t status)
5349 {
5350 	int rc = 0;
5351 
5352 	rc = __qlt_send_busy(vha, atio, status);
5353 	if (rc == -ENOMEM)
5354 		qlt_alloc_qfull_cmd(vha, atio, status, 1);
5355 }
5356 
5357 static int
5358 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
5359 	struct atio_from_isp *atio)
5360 {
5361 	struct qla_hw_data *ha = vha->hw;
5362 	uint16_t status;
5363 
5364 	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5365 		return 0;
5366 
5367 	status = temp_sam_status;
5368 	qlt_send_busy(vha, atio, status);
5369 	return 1;
5370 }
5371 
5372 /* ha->hardware_lock supposed to be held on entry */
5373 /* called via callback from qla2xxx */
5374 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5375 	struct atio_from_isp *atio, uint8_t ha_locked)
5376 {
5377 	struct qla_hw_data *ha = vha->hw;
5378 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5379 	int rc;
5380 	unsigned long flags;
5381 
5382 	if (unlikely(tgt == NULL)) {
5383 		ql_dbg(ql_dbg_io, vha, 0x3064,
5384 		    "ATIO pkt, but no tgt (ha %p)", ha);
5385 		return;
5386 	}
5387 	/*
5388 	 * In tgt_stop mode we also should allow all requests to pass.
5389 	 * Otherwise, some commands can stuck.
5390 	 */
5391 
5392 	tgt->atio_irq_cmd_count++;
5393 
5394 	switch (atio->u.raw.entry_type) {
5395 	case ATIO_TYPE7:
5396 		if (unlikely(atio->u.isp24.exchange_addr ==
5397 		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
5398 			ql_dbg(ql_dbg_io, vha, 0x3065,
5399 			    "qla_target(%d): ATIO_TYPE7 "
5400 			    "received with UNKNOWN exchange address, "
5401 			    "sending QUEUE_FULL\n", vha->vp_idx);
5402 			if (!ha_locked)
5403 				spin_lock_irqsave(&ha->hardware_lock, flags);
5404 			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
5405 			if (!ha_locked)
5406 				spin_unlock_irqrestore(&ha->hardware_lock, flags);
5407 			break;
5408 		}
5409 
5410 
5411 
5412 		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
5413 			rc = qlt_chk_qfull_thresh_hold(vha, atio);
5414 			if (rc != 0) {
5415 				tgt->atio_irq_cmd_count--;
5416 				return;
5417 			}
5418 			rc = qlt_handle_cmd_for_atio(vha, atio);
5419 		} else {
5420 			rc = qlt_handle_task_mgmt(vha, atio);
5421 		}
5422 		if (unlikely(rc != 0)) {
5423 			if (rc == -ESRCH) {
5424 				if (!ha_locked)
5425 					spin_lock_irqsave
5426 						(&ha->hardware_lock, flags);
5427 
5428 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5429 				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5430 #else
5431 				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5432 #endif
5433 
5434 				if (!ha_locked)
5435 					spin_unlock_irqrestore
5436 						(&ha->hardware_lock, flags);
5437 
5438 			} else {
5439 				if (tgt->tgt_stop) {
5440 					ql_dbg(ql_dbg_tgt, vha, 0xe059,
5441 					    "qla_target: Unable to send "
5442 					    "command to target for req, "
5443 					    "ignoring.\n");
5444 				} else {
5445 					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
5446 					    "qla_target(%d): Unable to send "
5447 					    "command to target, sending BUSY "
5448 					    "status.\n", vha->vp_idx);
5449 					if (!ha_locked)
5450 						spin_lock_irqsave(
5451 						    &ha->hardware_lock, flags);
5452 					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5453 					if (!ha_locked)
5454 						spin_unlock_irqrestore(
5455 						    &ha->hardware_lock, flags);
5456 				}
5457 			}
5458 		}
5459 		break;
5460 
5461 	case IMMED_NOTIFY_TYPE:
5462 	{
5463 		if (unlikely(atio->u.isp2x.entry_status != 0)) {
5464 			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5465 			    "qla_target(%d): Received ATIO packet %x "
5466 			    "with error status %x\n", vha->vp_idx,
5467 			    atio->u.raw.entry_type,
5468 			    atio->u.isp2x.entry_status);
5469 			break;
5470 		}
5471 		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5472 
5473 		if (!ha_locked)
5474 			spin_lock_irqsave(&ha->hardware_lock, flags);
5475 		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5476 		if (!ha_locked)
5477 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
5478 		break;
5479 	}
5480 
5481 	default:
5482 		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5483 		    "qla_target(%d): Received unknown ATIO atio "
5484 		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5485 		break;
5486 	}
5487 
5488 	tgt->atio_irq_cmd_count--;
5489 }
5490 
5491 /* ha->hardware_lock supposed to be held on entry */
5492 /* called via callback from qla2xxx */
5493 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5494 {
5495 	struct qla_hw_data *ha = vha->hw;
5496 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5497 
5498 	if (unlikely(tgt == NULL)) {
5499 		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5500 		    "qla_target(%d): Response pkt %x received, but no "
5501 		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
5502 		return;
5503 	}
5504 
5505 	/*
5506 	 * In tgt_stop mode we also should allow all requests to pass.
5507 	 * Otherwise, some commands can stuck.
5508 	 */
5509 
5510 	tgt->irq_cmd_count++;
5511 
5512 	switch (pkt->entry_type) {
5513 	case CTIO_CRC2:
5514 	case CTIO_TYPE7:
5515 	{
5516 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
5517 		qlt_do_ctio_completion(vha, entry->handle,
5518 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5519 		    entry);
5520 		break;
5521 	}
5522 
5523 	case ACCEPT_TGT_IO_TYPE:
5524 	{
5525 		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
5526 		int rc;
5527 		if (atio->u.isp2x.status !=
5528 		    cpu_to_le16(ATIO_CDB_VALID)) {
5529 			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5530 			    "qla_target(%d): ATIO with error "
5531 			    "status %x received\n", vha->vp_idx,
5532 			    le16_to_cpu(atio->u.isp2x.status));
5533 			break;
5534 		}
5535 
5536 		rc = qlt_chk_qfull_thresh_hold(vha, atio);
5537 		if (rc != 0) {
5538 			tgt->irq_cmd_count--;
5539 			return;
5540 		}
5541 
5542 		rc = qlt_handle_cmd_for_atio(vha, atio);
5543 		if (unlikely(rc != 0)) {
5544 			if (rc == -ESRCH) {
5545 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5546 				qlt_send_busy(vha, atio, 0);
5547 #else
5548 				qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5549 #endif
5550 			} else {
5551 				if (tgt->tgt_stop) {
5552 					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5553 					    "qla_target: Unable to send "
5554 					    "command to target, sending TERM "
5555 					    "EXCHANGE for rsp\n");
5556 					qlt_send_term_exchange(vha, NULL,
5557 					    atio, 1, 0);
5558 				} else {
5559 					ql_dbg(ql_dbg_tgt, vha, 0xe060,
5560 					    "qla_target(%d): Unable to send "
5561 					    "command to target, sending BUSY "
5562 					    "status\n", vha->vp_idx);
5563 					qlt_send_busy(vha, atio, 0);
5564 				}
5565 			}
5566 		}
5567 	}
5568 	break;
5569 
5570 	case CONTINUE_TGT_IO_TYPE:
5571 	{
5572 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5573 		qlt_do_ctio_completion(vha, entry->handle,
5574 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5575 		    entry);
5576 		break;
5577 	}
5578 
5579 	case CTIO_A64_TYPE:
5580 	{
5581 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
5582 		qlt_do_ctio_completion(vha, entry->handle,
5583 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
5584 		    entry);
5585 		break;
5586 	}
5587 
5588 	case IMMED_NOTIFY_TYPE:
5589 		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5590 		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5591 		break;
5592 
5593 	case NOTIFY_ACK_TYPE:
5594 		if (tgt->notify_ack_expected > 0) {
5595 			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
5596 			ql_dbg(ql_dbg_tgt, vha, 0xe036,
5597 			    "NOTIFY_ACK seq %08x status %x\n",
5598 			    le16_to_cpu(entry->u.isp2x.seq_id),
5599 			    le16_to_cpu(entry->u.isp2x.status));
5600 			tgt->notify_ack_expected--;
5601 			if (entry->u.isp2x.status !=
5602 			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
5603 				ql_dbg(ql_dbg_tgt, vha, 0xe061,
5604 				    "qla_target(%d): NOTIFY_ACK "
5605 				    "failed %x\n", vha->vp_idx,
5606 				    le16_to_cpu(entry->u.isp2x.status));
5607 			}
5608 		} else {
5609 			ql_dbg(ql_dbg_tgt, vha, 0xe062,
5610 			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
5611 			    vha->vp_idx);
5612 		}
5613 		break;
5614 
5615 	case ABTS_RECV_24XX:
5616 		ql_dbg(ql_dbg_tgt, vha, 0xe037,
5617 		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5618 		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5619 		break;
5620 
5621 	case ABTS_RESP_24XX:
5622 		if (tgt->abts_resp_expected > 0) {
5623 			struct abts_resp_from_24xx_fw *entry =
5624 				(struct abts_resp_from_24xx_fw *)pkt;
5625 			ql_dbg(ql_dbg_tgt, vha, 0xe038,
5626 			    "ABTS_RESP_24XX: compl_status %x\n",
5627 			    entry->compl_status);
5628 			tgt->abts_resp_expected--;
5629 			if (le16_to_cpu(entry->compl_status) !=
5630 			    ABTS_RESP_COMPL_SUCCESS) {
5631 				if ((entry->error_subcode1 == 0x1E) &&
5632 				    (entry->error_subcode2 == 0)) {
5633 					/*
5634 					 * We've got a race here: aborted
5635 					 * exchange not terminated, i.e.
5636 					 * response for the aborted command was
5637 					 * sent between the abort request was
5638 					 * received and processed.
5639 					 * Unfortunately, the firmware has a
5640 					 * silly requirement that all aborted
5641 					 * exchanges must be explicitely
5642 					 * terminated, otherwise it refuses to
5643 					 * send responses for the abort
5644 					 * requests. So, we have to
5645 					 * (re)terminate the exchange and retry
5646 					 * the abort response.
5647 					 */
5648 					qlt_24xx_retry_term_exchange(vha,
5649 					    entry);
5650 				} else
5651 					ql_dbg(ql_dbg_tgt, vha, 0xe063,
5652 					    "qla_target(%d): ABTS_RESP_24XX "
5653 					    "failed %x (subcode %x:%x)",
5654 					    vha->vp_idx, entry->compl_status,
5655 					    entry->error_subcode1,
5656 					    entry->error_subcode2);
5657 			}
5658 		} else {
5659 			ql_dbg(ql_dbg_tgt, vha, 0xe064,
5660 			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
5661 			    "received\n", vha->vp_idx);
5662 		}
5663 		break;
5664 
5665 	default:
5666 		ql_dbg(ql_dbg_tgt, vha, 0xe065,
5667 		    "qla_target(%d): Received unknown response pkt "
5668 		    "type %x\n", vha->vp_idx, pkt->entry_type);
5669 		break;
5670 	}
5671 
5672 	tgt->irq_cmd_count--;
5673 }
5674 
5675 /*
5676  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
5677  */
5678 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
5679 	uint16_t *mailbox)
5680 {
5681 	struct qla_hw_data *ha = vha->hw;
5682 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5683 	int login_code;
5684 
5685 	if (!ha->tgt.tgt_ops)
5686 		return;
5687 
5688 	if (unlikely(tgt == NULL)) {
5689 		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
5690 		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
5691 		return;
5692 	}
5693 
5694 	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
5695 	    IS_QLA2100(ha))
5696 		return;
5697 	/*
5698 	 * In tgt_stop mode we also should allow all requests to pass.
5699 	 * Otherwise, some commands can stuck.
5700 	 */
5701 
5702 	tgt->irq_cmd_count++;
5703 
5704 	switch (code) {
5705 	case MBA_RESET:			/* Reset */
5706 	case MBA_SYSTEM_ERR:		/* System Error */
5707 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
5708 	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
5709 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
5710 		    "qla_target(%d): System error async event %#x "
5711 		    "occurred", vha->vp_idx, code);
5712 		break;
5713 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
5714 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5715 		break;
5716 
5717 	case MBA_LOOP_UP:
5718 	{
5719 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
5720 		    "qla_target(%d): Async LOOP_UP occurred "
5721 		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
5722 		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5723 		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5724 		if (tgt->link_reinit_iocb_pending) {
5725 			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
5726 			    0, 0, 0, 0, 0, 0);
5727 			tgt->link_reinit_iocb_pending = 0;
5728 		}
5729 		break;
5730 	}
5731 
5732 	case MBA_LIP_OCCURRED:
5733 	case MBA_LOOP_DOWN:
5734 	case MBA_LIP_RESET:
5735 	case MBA_RSCN_UPDATE:
5736 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
5737 		    "qla_target(%d): Async event %#x occurred "
5738 		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5739 		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5740 		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5741 		break;
5742 
5743 	case MBA_PORT_UPDATE:
5744 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
5745 		    "qla_target(%d): Port update async event %#x "
5746 		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
5747 		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
5748 		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
5749 		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
5750 
5751 		login_code = le16_to_cpu(mailbox[2]);
5752 		if (login_code == 0x4)
5753 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
5754 			    "Async MB 2: Got PLOGI Complete\n");
5755 		else if (login_code == 0x7)
5756 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
5757 			    "Async MB 2: Port Logged Out\n");
5758 		break;
5759 
5760 	default:
5761 		break;
5762 	}
5763 
5764 	tgt->irq_cmd_count--;
5765 }
5766 
5767 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
5768 	uint16_t loop_id)
5769 {
5770 	fc_port_t *fcport;
5771 	int rc;
5772 
5773 	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
5774 	if (!fcport) {
5775 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
5776 		    "qla_target(%d): Allocation of tmp FC port failed",
5777 		    vha->vp_idx);
5778 		return NULL;
5779 	}
5780 
5781 	fcport->loop_id = loop_id;
5782 
5783 	rc = qla2x00_get_port_database(vha, fcport, 0);
5784 	if (rc != QLA_SUCCESS) {
5785 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
5786 		    "qla_target(%d): Failed to retrieve fcport "
5787 		    "information -- get_port_database() returned %x "
5788 		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
5789 		kfree(fcport);
5790 		return NULL;
5791 	}
5792 
5793 	return fcport;
5794 }
5795 
5796 /* Must be called under tgt_mutex */
5797 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
5798 	uint8_t *s_id)
5799 {
5800 	struct qla_tgt_sess *sess = NULL;
5801 	fc_port_t *fcport = NULL;
5802 	int rc, global_resets;
5803 	uint16_t loop_id = 0;
5804 
5805 	mutex_lock(&vha->vha_tgt.tgt_mutex);
5806 
5807 retry:
5808 	global_resets =
5809 	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
5810 
5811 	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
5812 	if (rc != 0) {
5813 		mutex_unlock(&vha->vha_tgt.tgt_mutex);
5814 
5815 		if ((s_id[0] == 0xFF) &&
5816 		    (s_id[1] == 0xFC)) {
5817 			/*
5818 			 * This is Domain Controller, so it should be
5819 			 * OK to drop SCSI commands from it.
5820 			 */
5821 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
5822 			    "Unable to find initiator with S_ID %x:%x:%x",
5823 			    s_id[0], s_id[1], s_id[2]);
5824 		} else
5825 			ql_log(ql_log_info, vha, 0xf071,
5826 			    "qla_target(%d): Unable to find "
5827 			    "initiator with S_ID %x:%x:%x",
5828 			    vha->vp_idx, s_id[0], s_id[1],
5829 			    s_id[2]);
5830 
5831 		if (rc == -ENOENT) {
5832 			qlt_port_logo_t logo;
5833 			sid_to_portid(s_id, &logo.id);
5834 			logo.cmd_count = 1;
5835 			qlt_send_first_logo(vha, &logo);
5836 		}
5837 
5838 		return NULL;
5839 	}
5840 
5841 	fcport = qlt_get_port_database(vha, loop_id);
5842 	if (!fcport) {
5843 		mutex_unlock(&vha->vha_tgt.tgt_mutex);
5844 		return NULL;
5845 	}
5846 
5847 	if (global_resets !=
5848 	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
5849 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
5850 		    "qla_target(%d): global reset during session discovery "
5851 		    "(counter was %d, new %d), retrying", vha->vp_idx,
5852 		    global_resets,
5853 		    atomic_read(&vha->vha_tgt.
5854 			qla_tgt->tgt_global_resets_count));
5855 		goto retry;
5856 	}
5857 
5858 	sess = qlt_create_sess(vha, fcport, true);
5859 
5860 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
5861 
5862 	kfree(fcport);
5863 	return sess;
5864 }
5865 
5866 static void qlt_abort_work(struct qla_tgt *tgt,
5867 	struct qla_tgt_sess_work_param *prm)
5868 {
5869 	struct scsi_qla_host *vha = tgt->vha;
5870 	struct qla_hw_data *ha = vha->hw;
5871 	struct qla_tgt_sess *sess = NULL;
5872 	unsigned long flags = 0, flags2 = 0;
5873 	uint32_t be_s_id;
5874 	uint8_t s_id[3];
5875 	int rc;
5876 
5877 	spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5878 
5879 	if (tgt->tgt_stop)
5880 		goto out_term2;
5881 
5882 	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
5883 	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
5884 	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
5885 
5886 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
5887 	    (unsigned char *)&be_s_id);
5888 	if (!sess) {
5889 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5890 
5891 		sess = qlt_make_local_sess(vha, s_id);
5892 		/* sess has got an extra creation ref */
5893 
5894 		spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
5895 		if (!sess)
5896 			goto out_term2;
5897 	} else {
5898 		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5899 			sess = NULL;
5900 			goto out_term2;
5901 		}
5902 
5903 		kref_get(&sess->se_sess->sess_kref);
5904 	}
5905 
5906 	spin_lock_irqsave(&ha->hardware_lock, flags);
5907 
5908 	if (tgt->tgt_stop)
5909 		goto out_term;
5910 
5911 	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
5912 	if (rc != 0)
5913 		goto out_term;
5914 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
5915 
5916 	ha->tgt.tgt_ops->put_sess(sess);
5917 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5918 	return;
5919 
5920 out_term2:
5921 	spin_lock_irqsave(&ha->hardware_lock, flags);
5922 
5923 out_term:
5924 	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
5925 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
5926 
5927 	if (sess)
5928 		ha->tgt.tgt_ops->put_sess(sess);
5929 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
5930 }
5931 
5932 static void qlt_tmr_work(struct qla_tgt *tgt,
5933 	struct qla_tgt_sess_work_param *prm)
5934 {
5935 	struct atio_from_isp *a = &prm->tm_iocb2;
5936 	struct scsi_qla_host *vha = tgt->vha;
5937 	struct qla_hw_data *ha = vha->hw;
5938 	struct qla_tgt_sess *sess = NULL;
5939 	unsigned long flags;
5940 	uint8_t *s_id = NULL; /* to hide compiler warnings */
5941 	int rc;
5942 	uint32_t lun, unpacked_lun;
5943 	int fn;
5944 	void *iocb;
5945 
5946 	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5947 
5948 	if (tgt->tgt_stop)
5949 		goto out_term;
5950 
5951 	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
5952 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5953 	if (!sess) {
5954 		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5955 
5956 		sess = qlt_make_local_sess(vha, s_id);
5957 		/* sess has got an extra creation ref */
5958 
5959 		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5960 		if (!sess)
5961 			goto out_term;
5962 	} else {
5963 		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5964 			sess = NULL;
5965 			goto out_term;
5966 		}
5967 
5968 		kref_get(&sess->se_sess->sess_kref);
5969 	}
5970 
5971 	iocb = a;
5972 	lun = a->u.isp24.fcp_cmnd.lun;
5973 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
5974 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
5975 
5976 	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
5977 	if (rc != 0)
5978 		goto out_term;
5979 
5980 	ha->tgt.tgt_ops->put_sess(sess);
5981 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5982 	return;
5983 
5984 out_term:
5985 	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5986 	if (sess)
5987 		ha->tgt.tgt_ops->put_sess(sess);
5988 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5989 }
5990 
5991 static void qlt_sess_work_fn(struct work_struct *work)
5992 {
5993 	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
5994 	struct scsi_qla_host *vha = tgt->vha;
5995 	unsigned long flags;
5996 
5997 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
5998 
5999 	spin_lock_irqsave(&tgt->sess_work_lock, flags);
6000 	while (!list_empty(&tgt->sess_works_list)) {
6001 		struct qla_tgt_sess_work_param *prm = list_entry(
6002 		    tgt->sess_works_list.next, typeof(*prm),
6003 		    sess_works_list_entry);
6004 
6005 		/*
6006 		 * This work can be scheduled on several CPUs at time, so we
6007 		 * must delete the entry to eliminate double processing
6008 		 */
6009 		list_del(&prm->sess_works_list_entry);
6010 
6011 		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6012 
6013 		switch (prm->type) {
6014 		case QLA_TGT_SESS_WORK_ABORT:
6015 			qlt_abort_work(tgt, prm);
6016 			break;
6017 		case QLA_TGT_SESS_WORK_TM:
6018 			qlt_tmr_work(tgt, prm);
6019 			break;
6020 		default:
6021 			BUG_ON(1);
6022 			break;
6023 		}
6024 
6025 		spin_lock_irqsave(&tgt->sess_work_lock, flags);
6026 
6027 		kfree(prm);
6028 	}
6029 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6030 }
6031 
6032 /* Must be called under tgt_host_action_mutex */
6033 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
6034 {
6035 	struct qla_tgt *tgt;
6036 
6037 	if (!QLA_TGT_MODE_ENABLED())
6038 		return 0;
6039 
6040 	if (!IS_TGT_MODE_CAPABLE(ha)) {
6041 		ql_log(ql_log_warn, base_vha, 0xe070,
6042 		    "This adapter does not support target mode.\n");
6043 		return 0;
6044 	}
6045 
6046 	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
6047 	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
6048 
6049 	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
6050 
6051 	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
6052 	if (!tgt) {
6053 		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
6054 		    "Unable to allocate struct qla_tgt\n");
6055 		return -ENOMEM;
6056 	}
6057 
6058 	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
6059 		base_vha->host->hostt->supported_mode |= MODE_TARGET;
6060 
6061 	tgt->ha = ha;
6062 	tgt->vha = base_vha;
6063 	init_waitqueue_head(&tgt->waitQ);
6064 	INIT_LIST_HEAD(&tgt->sess_list);
6065 	INIT_LIST_HEAD(&tgt->del_sess_list);
6066 	INIT_DELAYED_WORK(&tgt->sess_del_work,
6067 		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
6068 	spin_lock_init(&tgt->sess_work_lock);
6069 	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
6070 	INIT_LIST_HEAD(&tgt->sess_works_list);
6071 	spin_lock_init(&tgt->srr_lock);
6072 	INIT_LIST_HEAD(&tgt->srr_ctio_list);
6073 	INIT_LIST_HEAD(&tgt->srr_imm_list);
6074 	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
6075 	atomic_set(&tgt->tgt_global_resets_count, 0);
6076 
6077 	base_vha->vha_tgt.qla_tgt = tgt;
6078 
6079 	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
6080 		"qla_target(%d): using 64 Bit PCI addressing",
6081 		base_vha->vp_idx);
6082 	tgt->tgt_enable_64bit_addr = 1;
6083 	/* 3 is reserved */
6084 	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
6085 	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
6086 	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
6087 
6088 	if (base_vha->fc_vport)
6089 		return 0;
6090 
6091 	mutex_lock(&qla_tgt_mutex);
6092 	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
6093 	mutex_unlock(&qla_tgt_mutex);
6094 
6095 	return 0;
6096 }
6097 
6098 /* Must be called under tgt_host_action_mutex */
6099 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6100 {
6101 	if (!vha->vha_tgt.qla_tgt)
6102 		return 0;
6103 
6104 	if (vha->fc_vport) {
6105 		qlt_release(vha->vha_tgt.qla_tgt);
6106 		return 0;
6107 	}
6108 
6109 	/* free left over qfull cmds */
6110 	qlt_init_term_exchange(vha);
6111 
6112 	mutex_lock(&qla_tgt_mutex);
6113 	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
6114 	mutex_unlock(&qla_tgt_mutex);
6115 
6116 	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6117 	    vha->host_no, ha);
6118 	qlt_release(vha->vha_tgt.qla_tgt);
6119 
6120 	return 0;
6121 }
6122 
6123 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6124 	unsigned char *b)
6125 {
6126 	int i;
6127 
6128 	pr_debug("qla2xxx HW vha->node_name: ");
6129 	for (i = 0; i < WWN_SIZE; i++)
6130 		pr_debug("%02x ", vha->node_name[i]);
6131 	pr_debug("\n");
6132 	pr_debug("qla2xxx HW vha->port_name: ");
6133 	for (i = 0; i < WWN_SIZE; i++)
6134 		pr_debug("%02x ", vha->port_name[i]);
6135 	pr_debug("\n");
6136 
6137 	pr_debug("qla2xxx passed configfs WWPN: ");
6138 	put_unaligned_be64(wwpn, b);
6139 	for (i = 0; i < WWN_SIZE; i++)
6140 		pr_debug("%02x ", b[i]);
6141 	pr_debug("\n");
6142 }
6143 
6144 /**
6145  * qla_tgt_lport_register - register lport with external module
6146  *
6147  * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
6148  * @wwpn: Passwd FC target WWPN
6149  * @callback:  lport initialization callback for tcm_qla2xxx code
6150  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6151  */
6152 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6153 		       u64 npiv_wwpn, u64 npiv_wwnn,
6154 		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6155 {
6156 	struct qla_tgt *tgt;
6157 	struct scsi_qla_host *vha;
6158 	struct qla_hw_data *ha;
6159 	struct Scsi_Host *host;
6160 	unsigned long flags;
6161 	int rc;
6162 	u8 b[WWN_SIZE];
6163 
6164 	mutex_lock(&qla_tgt_mutex);
6165 	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6166 		vha = tgt->vha;
6167 		ha = vha->hw;
6168 
6169 		host = vha->host;
6170 		if (!host)
6171 			continue;
6172 
6173 		if (!(host->hostt->supported_mode & MODE_TARGET))
6174 			continue;
6175 
6176 		spin_lock_irqsave(&ha->hardware_lock, flags);
6177 		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6178 			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6179 			    host->host_no);
6180 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
6181 			continue;
6182 		}
6183 		if (tgt->tgt_stop) {
6184 			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6185 				 host->host_no);
6186 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
6187 			continue;
6188 		}
6189 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
6190 
6191 		if (!scsi_host_get(host)) {
6192 			ql_dbg(ql_dbg_tgt, vha, 0xe068,
6193 			    "Unable to scsi_host_get() for"
6194 			    " qla2xxx scsi_host\n");
6195 			continue;
6196 		}
6197 		qlt_lport_dump(vha, phys_wwpn, b);
6198 
6199 		if (memcmp(vha->port_name, b, WWN_SIZE)) {
6200 			scsi_host_put(host);
6201 			continue;
6202 		}
6203 		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6204 		if (rc != 0)
6205 			scsi_host_put(host);
6206 
6207 		mutex_unlock(&qla_tgt_mutex);
6208 		return rc;
6209 	}
6210 	mutex_unlock(&qla_tgt_mutex);
6211 
6212 	return -ENODEV;
6213 }
6214 EXPORT_SYMBOL(qlt_lport_register);
6215 
6216 /**
6217  * qla_tgt_lport_deregister - Degister lport
6218  *
6219  * @vha:  Registered scsi_qla_host pointer
6220  */
6221 void qlt_lport_deregister(struct scsi_qla_host *vha)
6222 {
6223 	struct qla_hw_data *ha = vha->hw;
6224 	struct Scsi_Host *sh = vha->host;
6225 	/*
6226 	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6227 	 */
6228 	vha->vha_tgt.target_lport_ptr = NULL;
6229 	ha->tgt.tgt_ops = NULL;
6230 	/*
6231 	 * Release the Scsi_Host reference for the underlying qla2xxx host
6232 	 */
6233 	scsi_host_put(sh);
6234 }
6235 EXPORT_SYMBOL(qlt_lport_deregister);
6236 
6237 /* Must be called under HW lock */
6238 static void qlt_set_mode(struct scsi_qla_host *vha)
6239 {
6240 	struct qla_hw_data *ha = vha->hw;
6241 
6242 	switch (ql2x_ini_mode) {
6243 	case QLA2XXX_INI_MODE_DISABLED:
6244 	case QLA2XXX_INI_MODE_EXCLUSIVE:
6245 		vha->host->active_mode = MODE_TARGET;
6246 		break;
6247 	case QLA2XXX_INI_MODE_ENABLED:
6248 		vha->host->active_mode |= MODE_TARGET;
6249 		break;
6250 	default:
6251 		break;
6252 	}
6253 
6254 	if (ha->tgt.ini_mode_force_reverse)
6255 		qla_reverse_ini_mode(vha);
6256 }
6257 
6258 /* Must be called under HW lock */
6259 static void qlt_clear_mode(struct scsi_qla_host *vha)
6260 {
6261 	struct qla_hw_data *ha = vha->hw;
6262 
6263 	switch (ql2x_ini_mode) {
6264 	case QLA2XXX_INI_MODE_DISABLED:
6265 		vha->host->active_mode = MODE_UNKNOWN;
6266 		break;
6267 	case QLA2XXX_INI_MODE_EXCLUSIVE:
6268 		vha->host->active_mode = MODE_INITIATOR;
6269 		break;
6270 	case QLA2XXX_INI_MODE_ENABLED:
6271 		vha->host->active_mode &= ~MODE_TARGET;
6272 		break;
6273 	default:
6274 		break;
6275 	}
6276 
6277 	if (ha->tgt.ini_mode_force_reverse)
6278 		qla_reverse_ini_mode(vha);
6279 }
6280 
6281 /*
6282  * qla_tgt_enable_vha - NO LOCK HELD
6283  *
6284  * host_reset, bring up w/ Target Mode Enabled
6285  */
6286 void
6287 qlt_enable_vha(struct scsi_qla_host *vha)
6288 {
6289 	struct qla_hw_data *ha = vha->hw;
6290 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6291 	unsigned long flags;
6292 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6293 	int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
6294 
6295 	if (!tgt) {
6296 		ql_dbg(ql_dbg_tgt, vha, 0xe069,
6297 		    "Unable to locate qla_tgt pointer from"
6298 		    " struct qla_hw_data\n");
6299 		dump_stack();
6300 		return;
6301 	}
6302 
6303 	spin_lock_irqsave(&ha->hardware_lock, flags);
6304 	tgt->tgt_stopped = 0;
6305 	qlt_set_mode(vha);
6306 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6307 
6308 	if (vha->vp_idx) {
6309 		qla24xx_disable_vp(vha);
6310 		qla24xx_enable_vp(vha);
6311 	} else {
6312 		if (ha->msix_entries) {
6313 			ql_dbg(ql_dbg_tgt, vha, 0xffff,
6314 			    "%s: host%ld : vector %d cpu %d\n",
6315 			    __func__, vha->host_no,
6316 			    ha->msix_entries[rspq_ent].vector,
6317 			    ha->msix_entries[rspq_ent].cpuid);
6318 
6319 			ha->tgt.rspq_vector_cpuid =
6320 			    ha->msix_entries[rspq_ent].cpuid;
6321 		}
6322 
6323 		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6324 		qla2xxx_wake_dpc(base_vha);
6325 		qla2x00_wait_for_hba_online(base_vha);
6326 	}
6327 }
6328 EXPORT_SYMBOL(qlt_enable_vha);
6329 
6330 /*
6331  * qla_tgt_disable_vha - NO LOCK HELD
6332  *
6333  * Disable Target Mode and reset the adapter
6334  */
6335 static void qlt_disable_vha(struct scsi_qla_host *vha)
6336 {
6337 	struct qla_hw_data *ha = vha->hw;
6338 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6339 	unsigned long flags;
6340 
6341 	if (!tgt) {
6342 		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6343 		    "Unable to locate qla_tgt pointer from"
6344 		    " struct qla_hw_data\n");
6345 		dump_stack();
6346 		return;
6347 	}
6348 
6349 	spin_lock_irqsave(&ha->hardware_lock, flags);
6350 	qlt_clear_mode(vha);
6351 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6352 
6353 	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6354 	qla2xxx_wake_dpc(vha);
6355 	qla2x00_wait_for_hba_online(vha);
6356 }
6357 
6358 /*
6359  * Called from qla_init.c:qla24xx_vport_create() contex to setup
6360  * the target mode specific struct scsi_qla_host and struct qla_hw_data
6361  * members.
6362  */
6363 void
6364 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6365 {
6366 	if (!qla_tgt_mode_enabled(vha))
6367 		return;
6368 
6369 	vha->vha_tgt.qla_tgt = NULL;
6370 
6371 	mutex_init(&vha->vha_tgt.tgt_mutex);
6372 	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6373 
6374 	qlt_clear_mode(vha);
6375 
6376 	/*
6377 	 * NOTE: Currently the value is kept the same for <24xx and
6378 	 * >=24xx ISPs. If it is necessary to change it,
6379 	 * the check should be added for specific ISPs,
6380 	 * assigning the value appropriately.
6381 	 */
6382 	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6383 
6384 	qlt_add_target(ha, vha);
6385 }
6386 
6387 void
6388 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
6389 {
6390 	/*
6391 	 * FC-4 Feature bit 0 indicates target functionality to the name server.
6392 	 */
6393 	if (qla_tgt_mode_enabled(vha)) {
6394 		if (qla_ini_mode_enabled(vha))
6395 			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
6396 		else
6397 			ct_req->req.rff_id.fc4_feature = BIT_0;
6398 	} else if (qla_ini_mode_enabled(vha)) {
6399 		ct_req->req.rff_id.fc4_feature = BIT_1;
6400 	}
6401 }
6402 
6403 /*
6404  * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6405  * @ha: HA context
6406  *
6407  * Beginning of ATIO ring has initialization control block already built
6408  * by nvram config routine.
6409  *
6410  * Returns 0 on success.
6411  */
6412 void
6413 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6414 {
6415 	struct qla_hw_data *ha = vha->hw;
6416 	uint16_t cnt;
6417 	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6418 
6419 	if (!qla_tgt_mode_enabled(vha))
6420 		return;
6421 
6422 	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6423 		pkt->u.raw.signature = ATIO_PROCESSED;
6424 		pkt++;
6425 	}
6426 
6427 }
6428 
6429 /*
6430  * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6431  * @ha: SCSI driver HA context
6432  */
6433 void
6434 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6435 {
6436 	struct qla_hw_data *ha = vha->hw;
6437 	struct atio_from_isp *pkt;
6438 	int cnt, i;
6439 
6440 	if (!vha->flags.online)
6441 		return;
6442 
6443 	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
6444 		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6445 		cnt = pkt->u.raw.entry_count;
6446 
6447 		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
6448 		    ha_locked);
6449 
6450 		for (i = 0; i < cnt; i++) {
6451 			ha->tgt.atio_ring_index++;
6452 			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6453 				ha->tgt.atio_ring_index = 0;
6454 				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6455 			} else
6456 				ha->tgt.atio_ring_ptr++;
6457 
6458 			pkt->u.raw.signature = ATIO_PROCESSED;
6459 			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6460 		}
6461 		wmb();
6462 	}
6463 
6464 	/* Adjust ring index */
6465 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6466 	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
6467 }
6468 
6469 void
6470 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6471 {
6472 	struct qla_hw_data *ha = vha->hw;
6473 	if (!QLA_TGT_MODE_ENABLED())
6474 		return;
6475 
6476 	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
6477 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
6478 	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
6479 
6480 	if (IS_ATIO_MSIX_CAPABLE(ha)) {
6481 		struct qla_msix_entry *msix = &ha->msix_entries[2];
6482 		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6483 
6484 		icb->msix_atio = cpu_to_le16(msix->entry);
6485 		ql_dbg(ql_dbg_init, vha, 0xf072,
6486 		    "Registering ICB vector 0x%x for atio que.\n",
6487 		    msix->entry);
6488 	}
6489 }
6490 
6491 void
6492 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6493 {
6494 	struct qla_hw_data *ha = vha->hw;
6495 
6496 	if (qla_tgt_mode_enabled(vha)) {
6497 		if (!ha->tgt.saved_set) {
6498 			/* We save only once */
6499 			ha->tgt.saved_exchange_count = nv->exchange_count;
6500 			ha->tgt.saved_firmware_options_1 =
6501 			    nv->firmware_options_1;
6502 			ha->tgt.saved_firmware_options_2 =
6503 			    nv->firmware_options_2;
6504 			ha->tgt.saved_firmware_options_3 =
6505 			    nv->firmware_options_3;
6506 			ha->tgt.saved_set = 1;
6507 		}
6508 
6509 		nv->exchange_count = cpu_to_le16(0xFFFF);
6510 
6511 		/* Enable target mode */
6512 		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6513 
6514 		/* Disable ini mode, if requested */
6515 		if (!qla_ini_mode_enabled(vha))
6516 			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6517 
6518 		/* Disable Full Login after LIP */
6519 		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6520 		/* Enable initial LIP */
6521 		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6522 		if (ql2xtgt_tape_enable)
6523 			/* Enable FC Tape support */
6524 			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6525 		else
6526 			/* Disable FC Tape support */
6527 			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6528 
6529 		/* Disable Full Login after LIP */
6530 		nv->host_p &= cpu_to_le32(~BIT_10);
6531 		/* Enable target PRLI control */
6532 		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6533 	} else {
6534 		if (ha->tgt.saved_set) {
6535 			nv->exchange_count = ha->tgt.saved_exchange_count;
6536 			nv->firmware_options_1 =
6537 			    ha->tgt.saved_firmware_options_1;
6538 			nv->firmware_options_2 =
6539 			    ha->tgt.saved_firmware_options_2;
6540 			nv->firmware_options_3 =
6541 			    ha->tgt.saved_firmware_options_3;
6542 		}
6543 		return;
6544 	}
6545 
6546 	/* out-of-order frames reassembly */
6547 	nv->firmware_options_3 |= BIT_6|BIT_9;
6548 
6549 	if (ha->tgt.enable_class_2) {
6550 		if (vha->flags.init_done)
6551 			fc_host_supported_classes(vha->host) =
6552 				FC_COS_CLASS2 | FC_COS_CLASS3;
6553 
6554 		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6555 	} else {
6556 		if (vha->flags.init_done)
6557 			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6558 
6559 		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6560 	}
6561 }
6562 
6563 void
6564 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6565 	struct init_cb_24xx *icb)
6566 {
6567 	struct qla_hw_data *ha = vha->hw;
6568 
6569 	if (!QLA_TGT_MODE_ENABLED())
6570 		return;
6571 
6572 	if (ha->tgt.node_name_set) {
6573 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6574 		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6575 	}
6576 
6577 	/* disable ZIO at start time. */
6578 	if (!vha->flags.init_done) {
6579 		uint32_t tmp;
6580 		tmp = le32_to_cpu(icb->firmware_options_2);
6581 		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6582 		icb->firmware_options_2 = cpu_to_le32(tmp);
6583 	}
6584 }
6585 
6586 void
6587 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6588 {
6589 	struct qla_hw_data *ha = vha->hw;
6590 
6591 	if (!QLA_TGT_MODE_ENABLED())
6592 		return;
6593 
6594 	if (qla_tgt_mode_enabled(vha)) {
6595 		if (!ha->tgt.saved_set) {
6596 			/* We save only once */
6597 			ha->tgt.saved_exchange_count = nv->exchange_count;
6598 			ha->tgt.saved_firmware_options_1 =
6599 			    nv->firmware_options_1;
6600 			ha->tgt.saved_firmware_options_2 =
6601 			    nv->firmware_options_2;
6602 			ha->tgt.saved_firmware_options_3 =
6603 			    nv->firmware_options_3;
6604 			ha->tgt.saved_set = 1;
6605 		}
6606 
6607 		nv->exchange_count = cpu_to_le16(0xFFFF);
6608 
6609 		/* Enable target mode */
6610 		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
6611 
6612 		/* Disable ini mode, if requested */
6613 		if (!qla_ini_mode_enabled(vha))
6614 			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6615 
6616 		/* Disable Full Login after LIP */
6617 		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6618 		/* Enable initial LIP */
6619 		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6620 		if (ql2xtgt_tape_enable)
6621 			/* Enable FC tape support */
6622 			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6623 		else
6624 			/* Disable FC tape support */
6625 			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
6626 
6627 		/* Disable Full Login after LIP */
6628 		nv->host_p &= cpu_to_le32(~BIT_10);
6629 		/* Enable target PRLI control */
6630 		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6631 	} else {
6632 		if (ha->tgt.saved_set) {
6633 			nv->exchange_count = ha->tgt.saved_exchange_count;
6634 			nv->firmware_options_1 =
6635 			    ha->tgt.saved_firmware_options_1;
6636 			nv->firmware_options_2 =
6637 			    ha->tgt.saved_firmware_options_2;
6638 			nv->firmware_options_3 =
6639 			    ha->tgt.saved_firmware_options_3;
6640 		}
6641 		return;
6642 	}
6643 
6644 	/* out-of-order frames reassembly */
6645 	nv->firmware_options_3 |= BIT_6|BIT_9;
6646 
6647 	if (ha->tgt.enable_class_2) {
6648 		if (vha->flags.init_done)
6649 			fc_host_supported_classes(vha->host) =
6650 				FC_COS_CLASS2 | FC_COS_CLASS3;
6651 
6652 		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
6653 	} else {
6654 		if (vha->flags.init_done)
6655 			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6656 
6657 		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
6658 	}
6659 }
6660 
6661 void
6662 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
6663 	struct init_cb_81xx *icb)
6664 {
6665 	struct qla_hw_data *ha = vha->hw;
6666 
6667 	if (!QLA_TGT_MODE_ENABLED())
6668 		return;
6669 
6670 	if (ha->tgt.node_name_set) {
6671 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
6672 		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
6673 	}
6674 
6675 	/* disable ZIO at start time. */
6676 	if (!vha->flags.init_done) {
6677 		uint32_t tmp;
6678 		tmp = le32_to_cpu(icb->firmware_options_2);
6679 		tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
6680 		icb->firmware_options_2 = cpu_to_le32(tmp);
6681 	}
6682 
6683 }
6684 
6685 void
6686 qlt_83xx_iospace_config(struct qla_hw_data *ha)
6687 {
6688 	if (!QLA_TGT_MODE_ENABLED())
6689 		return;
6690 
6691 	ha->msix_count += 1; /* For ATIO Q */
6692 }
6693 
6694 int
6695 qlt_24xx_process_response_error(struct scsi_qla_host *vha,
6696 	struct sts_entry_24xx *pkt)
6697 {
6698 	switch (pkt->entry_type) {
6699 	case ABTS_RECV_24XX:
6700 	case ABTS_RESP_24XX:
6701 	case CTIO_TYPE7:
6702 	case NOTIFY_ACK_TYPE:
6703 	case CTIO_CRC2:
6704 		return 1;
6705 	default:
6706 		return 0;
6707 	}
6708 }
6709 
6710 void
6711 qlt_modify_vp_config(struct scsi_qla_host *vha,
6712 	struct vp_config_entry_24xx *vpmod)
6713 {
6714 	if (qla_tgt_mode_enabled(vha))
6715 		vpmod->options_idx1 &= ~BIT_5;
6716 	/* Disable ini mode, if requested */
6717 	if (!qla_ini_mode_enabled(vha))
6718 		vpmod->options_idx1 &= ~BIT_4;
6719 }
6720 
6721 void
6722 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
6723 {
6724 	if (!QLA_TGT_MODE_ENABLED())
6725 		return;
6726 
6727 	if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
6728 		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
6729 		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
6730 	} else {
6731 		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
6732 		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
6733 	}
6734 
6735 	mutex_init(&base_vha->vha_tgt.tgt_mutex);
6736 	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
6737 	qlt_clear_mode(base_vha);
6738 }
6739 
6740 irqreturn_t
6741 qla83xx_msix_atio_q(int irq, void *dev_id)
6742 {
6743 	struct rsp_que *rsp;
6744 	scsi_qla_host_t	*vha;
6745 	struct qla_hw_data *ha;
6746 	unsigned long flags;
6747 
6748 	rsp = (struct rsp_que *) dev_id;
6749 	ha = rsp->hw;
6750 	vha = pci_get_drvdata(ha->pdev);
6751 
6752 	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6753 
6754 	qlt_24xx_process_atio_queue(vha, 0);
6755 
6756 	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6757 
6758 	return IRQ_HANDLED;
6759 }
6760 
6761 static void
6762 qlt_handle_abts_recv_work(struct work_struct *work)
6763 {
6764 	struct qla_tgt_sess_op *op = container_of(work,
6765 		struct qla_tgt_sess_op, work);
6766 	scsi_qla_host_t *vha = op->vha;
6767 	struct qla_hw_data *ha = vha->hw;
6768 	unsigned long flags;
6769 
6770 	if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
6771 		return;
6772 
6773 	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
6774 	qlt_24xx_process_atio_queue(vha, 0);
6775 	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
6776 
6777 	spin_lock_irqsave(&ha->hardware_lock, flags);
6778 	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
6779 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
6780 }
6781 
6782 void
6783 qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
6784 {
6785 	struct qla_tgt_sess_op *op;
6786 
6787 	op = kzalloc(sizeof(*op), GFP_ATOMIC);
6788 
6789 	if (!op) {
6790 		/* do not reach for ATIO queue here.  This is best effort err
6791 		 * recovery at this point.
6792 		 */
6793 		qlt_response_pkt_all_vps(vha, pkt);
6794 		return;
6795 	}
6796 
6797 	memcpy(&op->atio, pkt, sizeof(*pkt));
6798 	op->vha = vha;
6799 	op->chip_reset = vha->hw->chip_reset;
6800 	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
6801 	queue_work(qla_tgt_wq, &op->work);
6802 	return;
6803 }
6804 
6805 int
6806 qlt_mem_alloc(struct qla_hw_data *ha)
6807 {
6808 	if (!QLA_TGT_MODE_ENABLED())
6809 		return 0;
6810 
6811 	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
6812 	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
6813 	if (!ha->tgt.tgt_vp_map)
6814 		return -ENOMEM;
6815 
6816 	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
6817 	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
6818 	    &ha->tgt.atio_dma, GFP_KERNEL);
6819 	if (!ha->tgt.atio_ring) {
6820 		kfree(ha->tgt.tgt_vp_map);
6821 		return -ENOMEM;
6822 	}
6823 	return 0;
6824 }
6825 
6826 void
6827 qlt_mem_free(struct qla_hw_data *ha)
6828 {
6829 	if (!QLA_TGT_MODE_ENABLED())
6830 		return;
6831 
6832 	if (ha->tgt.atio_ring) {
6833 		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
6834 		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
6835 		    ha->tgt.atio_dma);
6836 	}
6837 	kfree(ha->tgt.tgt_vp_map);
6838 }
6839 
6840 /* vport_slock to be held by the caller */
6841 void
6842 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
6843 {
6844 	if (!QLA_TGT_MODE_ENABLED())
6845 		return;
6846 
6847 	switch (cmd) {
6848 	case SET_VP_IDX:
6849 		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
6850 		break;
6851 	case SET_AL_PA:
6852 		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
6853 		break;
6854 	case RESET_VP_IDX:
6855 		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
6856 		break;
6857 	case RESET_AL_PA:
6858 		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
6859 		break;
6860 	}
6861 }
6862 
6863 static int __init qlt_parse_ini_mode(void)
6864 {
6865 	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
6866 		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
6867 	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
6868 		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
6869 	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
6870 		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
6871 	else
6872 		return false;
6873 
6874 	return true;
6875 }
6876 
6877 int __init qlt_init(void)
6878 {
6879 	int ret;
6880 
6881 	if (!qlt_parse_ini_mode()) {
6882 		ql_log(ql_log_fatal, NULL, 0xe06b,
6883 		    "qlt_parse_ini_mode() failed\n");
6884 		return -EINVAL;
6885 	}
6886 
6887 	if (!QLA_TGT_MODE_ENABLED())
6888 		return 0;
6889 
6890 	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
6891 	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
6892 	    qla_tgt_mgmt_cmd), 0, NULL);
6893 	if (!qla_tgt_mgmt_cmd_cachep) {
6894 		ql_log(ql_log_fatal, NULL, 0xe06d,
6895 		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
6896 		return -ENOMEM;
6897 	}
6898 
6899 	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
6900 						 sizeof(qlt_plogi_ack_t),
6901 						 __alignof__(qlt_plogi_ack_t),
6902 						 0, NULL);
6903 
6904 	if (!qla_tgt_plogi_cachep) {
6905 		ql_log(ql_log_fatal, NULL, 0xe06d,
6906 		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
6907 		ret = -ENOMEM;
6908 		goto out_mgmt_cmd_cachep;
6909 	}
6910 
6911 	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
6912 	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
6913 	if (!qla_tgt_mgmt_cmd_mempool) {
6914 		ql_log(ql_log_fatal, NULL, 0xe06e,
6915 		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
6916 		ret = -ENOMEM;
6917 		goto out_plogi_cachep;
6918 	}
6919 
6920 	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
6921 	if (!qla_tgt_wq) {
6922 		ql_log(ql_log_fatal, NULL, 0xe06f,
6923 		    "alloc_workqueue for qla_tgt_wq failed\n");
6924 		ret = -ENOMEM;
6925 		goto out_cmd_mempool;
6926 	}
6927 	/*
6928 	 * Return 1 to signal that initiator-mode is being disabled
6929 	 */
6930 	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
6931 
6932 out_cmd_mempool:
6933 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6934 out_plogi_cachep:
6935 	kmem_cache_destroy(qla_tgt_plogi_cachep);
6936 out_mgmt_cmd_cachep:
6937 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
6938 	return ret;
6939 }
6940 
6941 void qlt_exit(void)
6942 {
6943 	if (!QLA_TGT_MODE_ENABLED())
6944 		return;
6945 
6946 	destroy_workqueue(qla_tgt_wq);
6947 	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
6948 	kmem_cache_destroy(qla_tgt_plogi_cachep);
6949 	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
6950 }
6951