/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "snic_io.h"
#include "snic.h"

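/*
 * The block layer assigns each request a tag; the driver uses it to
 * index per-IO state and to match firmware completions back to the
 * originating SCSI command.
 */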
#define snic_cmd_tag(sc)	(scsi_cmd_to_rq(sc)->tag)

const char *snic_state_str[] = {
	[SNIC_INIT]	= "SNIC_INIT",
	[SNIC_ERROR]	= "SNIC_ERROR",
	[SNIC_ONLINE]	= "SNIC_ONLINE",
	[SNIC_OFFLINE]	= "SNIC_OFFLINE",
	[SNIC_FWRESET]	= "SNIC_FWRESET",
};

static const char * const snic_req_state_str[] = {
	[SNIC_IOREQ_NOT_INITED]	= "SNIC_IOREQ_NOT_INITED",
	[SNIC_IOREQ_PENDING]	= "SNIC_IOREQ_PENDING",
	[SNIC_IOREQ_ABTS_PENDING] = "SNIC_IOREQ_ABTS_PENDING",
	[SNIC_IOREQ_ABTS_COMPLETE] = "SNIC_IOREQ_ABTS_COMPLETE",
	[SNIC_IOREQ_LR_PENDING]	= "SNIC_IOREQ_LR_PENDING",
	[SNIC_IOREQ_LR_COMPLETE] = "SNIC_IOREQ_LR_COMPLETE",
	[SNIC_IOREQ_COMPLETE]	= "SNIC_IOREQ_CMD_COMPLETE",
};

/* snic cmd status strings */
static const char * const snic_io_status_str[] = {
	[SNIC_STAT_IO_SUCCESS]	= "SNIC_STAT_IO_SUCCESS", /* 0x0 */
	[SNIC_STAT_INVALID_HDR] = "SNIC_STAT_INVALID_HDR",
	[SNIC_STAT_OUT_OF_RES]	= "SNIC_STAT_OUT_OF_RES",
	[SNIC_STAT_INVALID_PARM] = "SNIC_STAT_INVALID_PARM",
	[SNIC_STAT_REQ_NOT_SUP]	= "SNIC_STAT_REQ_NOT_SUP",
	[SNIC_STAT_IO_NOT_FOUND] = "SNIC_STAT_IO_NOT_FOUND",
	[SNIC_STAT_ABORTED]	= "SNIC_STAT_ABORTED",
	[SNIC_STAT_TIMEOUT]	= "SNIC_STAT_TIMEOUT",
	[SNIC_STAT_SGL_INVALID] = "SNIC_STAT_SGL_INVALID",
	[SNIC_STAT_DATA_CNT_MISMATCH] = "SNIC_STAT_DATA_CNT_MISMATCH",
	[SNIC_STAT_FW_ERR]	= "SNIC_STAT_FW_ERR",
	[SNIC_STAT_ITMF_REJECT] = "SNIC_STAT_ITMF_REJECT",
	[SNIC_STAT_ITMF_FAIL]	= "SNIC_STAT_ITMF_FAIL",
	[SNIC_STAT_ITMF_INCORRECT_LUN] = "SNIC_STAT_ITMF_INCORRECT_LUN",
	[SNIC_STAT_CMND_REJECT] = "SNIC_STAT_CMND_REJECT",
	[SNIC_STAT_DEV_OFFLINE] = "SNIC_STAT_DEV_OFFLINE",
	[SNIC_STAT_NO_BOOTLUN]	= "SNIC_STAT_NO_BOOTLUN",
	[SNIC_STAT_SCSI_ERR]	= "SNIC_STAT_SCSI_ERR",
	[SNIC_STAT_NOT_READY]	= "SNIC_STAT_NOT_READY",
	[SNIC_STAT_FATAL_ERROR]	= "SNIC_STAT_FATAL_ERROR",
};

static void snic_scsi_cleanup(struct snic *, int);

const char *
snic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_state_str) || !snic_state_str[state])
		return "Unknown";

	return snic_state_str[state];
}

static const char *
snic_io_status_to_str(unsigned int state)
{
	if ((state >= ARRAY_SIZE(snic_io_status_str)) ||
	     (!snic_io_status_str[state]))
		return "Unknown";

	return snic_io_status_str[state];
}

static const char *
snic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(snic_req_state_str) ||
			!snic_req_state_str[state])
		return "Unknown";

	return snic_req_state_str[state];
}

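/*
 * Per-IO state is protected by a small pool of spinlocks rather than a
 * single host-wide lock. A command's lock is picked by hashing its tag,
 * so all operations on the same tag serialize on the same lock.
 * SNIC_IO_LOCKS is assumed to be a power of two, which makes the
 * "& (SNIC_IO_LOCKS - 1)" mask equivalent to a modulo.
 */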
static inline spinlock_t *
snic_io_lock_hash(struct snic *snic, struct scsi_cmnd *sc)
{
	u32 hash = snic_cmd_tag(sc) & (SNIC_IO_LOCKS - 1);

	return &snic->io_req_lock[hash];
}

static inline spinlock_t *
snic_io_lock_tag(struct snic *snic, int tag)
{
	return &snic->io_req_lock[tag & (SNIC_IO_LOCKS - 1)];
}

/* snic_release_req_buf : Releases snic_req_info */
static void
snic_release_req_buf(struct snic *snic,
		   struct snic_req_info *rqi,
		   struct scsi_cmnd *sc)
{
	struct snic_host_req *req = rqi_to_req(rqi);

	/* Freeing cmd without marking completion, not okay */
	SNIC_BUG_ON(!((CMD_STATE(sc) == SNIC_IOREQ_COMPLETE) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_ABTS_COMPLETE) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_NOTSUP) ||
		      (CMD_FLAGS(sc) & SNIC_IO_INTERNAL_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_DEV_RST_TERM_ISSUED) ||
		      (CMD_FLAGS(sc) & SNIC_SCSI_CLEANUP) ||
		      (CMD_STATE(sc) == SNIC_IOREQ_LR_COMPLETE)));

	SNIC_SCSI_DBG(snic->shost,
		      "Rel_req:sc %p:tag %x:rqi %p:ioreq %p:abt %p:dr %p: state %s:flags 0x%llx\n",
		      sc, snic_cmd_tag(sc), rqi, rqi->req, rqi->abort_req,
		      rqi->dr_req, snic_ioreq_state_to_str(CMD_STATE(sc)),
		      CMD_FLAGS(sc));

	if (req->u.icmnd.sense_addr)
		dma_unmap_single(&snic->pdev->dev,
				 le64_to_cpu(req->u.icmnd.sense_addr),
				 SCSI_SENSE_BUFFERSIZE,
				 DMA_FROM_DEVICE);

	scsi_dma_unmap(sc);

	snic_req_free(snic, rqi);
} /* end of snic_release_req_buf */

/*
 * snic_queue_icmnd_req : Queues snic_icmnd request
 */
static int
snic_queue_icmnd_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int sg_cnt)
{
	struct scatterlist *sg;
	struct snic_sg_desc *sgd;
	dma_addr_t pa = 0;
	struct scsi_lun lun;
	u16 flags = 0;
	int ret = 0;
	unsigned int i;

	if (sg_cnt) {
		flags = SNIC_ICMND_ESGL;
		sgd = (struct snic_sg_desc *) req_to_sgl(rqi->req);

		for_each_sg(scsi_sglist(sc), sg, sg_cnt, i) {
			sgd->addr = cpu_to_le64(sg_dma_address(sg));
			sgd->len = cpu_to_le32(sg_dma_len(sg));
			sgd->_resvd = 0;
			sgd++;
		}
	}

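	/*
	 * Map the sense buffer so the device can DMA sense data into it
	 * on a check condition; it is unmapped in snic_release_req_buf().
	 */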
	pa = dma_map_single(&snic->pdev->dev,
			    sc->sense_buffer,
			    SCSI_SENSE_BUFFERSIZE,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
			      sc->sense_buffer, snic_cmd_tag(sc));
		ret = -ENOMEM;

		return ret;
	}

	int_to_scsilun(sc->device->lun, &lun);
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags |= SNIC_ICMND_RD;
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags |= SNIC_ICMND_WR;

	/* Initialize icmnd */
	snic_icmnd_init(rqi->req,
			snic_cmd_tag(sc),
			snic->config.hid, /* hid */
			(ulong) rqi,
			flags, /* command flags */
			rqi->tgt_id,
			lun.scsi_lun,
			sc->cmnd,
			sc->cmd_len,
			scsi_bufflen(sc),
			sg_cnt,
			(ulong) req_to_sgl(rqi->req),
			pa, /* sense buffer pa */
			SCSI_SENSE_BUFFERSIZE);

	atomic64_inc(&snic->s_stats.io.active);
	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		atomic64_dec(&snic->s_stats.io.active);
		SNIC_HOST_ERR(snic->shost,
			      "QIcmnd: Queuing Icmnd Failed. ret = %d\n",
			      ret);
	} else {
		snic_stats_update_active_ios(&snic->s_stats);
	}

	return ret;
} /* end of snic_queue_icmnd_req */

/*
 * snic_issue_scsi_req : Prepares IO request and Issues to FW.
 */
static int
snic_issue_scsi_req(struct snic *snic,
		      struct snic_tgt *tgt,
		      struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	int sg_cnt = 0;
	int ret = 0;
	u32 tag = snic_cmd_tag(sc);
	u64 cmd_trc = 0, cmd_st_flags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;

	CMD_STATE(sc) = SNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = SNIC_NO_FLAGS;
	sg_cnt = scsi_dma_map(sc);
	if (sg_cnt < 0) {
		SNIC_TRC((u16)snic->shost->host_no, tag, (ulong) sc, 0,
			 sc->cmnd[0], sg_cnt, CMD_STATE(sc));

		SNIC_HOST_ERR(snic->shost, "issue_sc:Failed to map SG List.\n");
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi = snic_req_init(snic, sg_cnt);
	if (!rqi) {
		scsi_dma_unmap(sc);
		ret = -ENOMEM;

		goto issue_sc_end;
	}

	rqi->tgt_id = tgt->id;
	rqi->sc = sc;

	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
	CMD_SP(sc) = (char *) rqi;
	cmd_trc = SNIC_TRC_CMD(sc);
	CMD_FLAGS(sc) |= (SNIC_IO_INITIALIZED | SNIC_IO_ISSUED);
	cmd_st_flags = SNIC_TRC_CMD_STATE_FLAGS(sc);
	io_lock = snic_io_lock_hash(snic, sc);

	/* create wq desc and enqueue it */
	ret = snic_queue_icmnd_req(snic, rqi, sc, sg_cnt);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issue_sc: icmnd qing Failed for sc %p, err %d\n",
			      sc, ret);

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;
		CMD_FLAGS(sc) &= ~SNIC_IO_ISSUED; /* turn off the flag */
		spin_unlock_irqrestore(io_lock, flags);

		if (rqi)
			snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, 0, 0, 0,
			 SNIC_TRC_CMD_STATE_FLAGS(sc));
	} else {
		u32 io_sz = scsi_bufflen(sc) >> 9;
		u32 qtime = jiffies - rqi->start_time;
		struct snic_io_stats *iostats = &snic->s_stats.io;

		if (io_sz > atomic64_read(&iostats->max_io_sz))
			atomic64_set(&iostats->max_io_sz, io_sz);

		if (qtime > atomic64_read(&iostats->max_qtime))
			atomic64_set(&iostats->max_qtime, qtime);

		SNIC_SCSI_DBG(snic->shost,
			      "issue_sc:sc %p, tag %d queued to WQ.\n",
			      sc, tag);

		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc, (ulong) rqi,
			 sg_cnt, cmd_trc, cmd_st_flags);
	}

issue_sc_end:

	return ret;
} /* end of snic_issue_scsi_req */


/*
 * snic_queuecommand
 * Routine to send a scsi cdb to LLD
 * Called with host_lock held and interrupts disabled
 */
int
snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct snic_tgt *tgt = NULL;
	struct snic *snic = shost_priv(shost);
	int ret;

	tgt = starget_to_tgt(scsi_target(sc->device));
	ret = snic_tgt_chkready(tgt);
	if (ret) {
		SNIC_HOST_ERR(shost, "Tgt %p id %d Not Ready.\n", tgt, tgt->id);
		atomic64_inc(&snic->s_stats.misc.tgt_not_rdy);
		sc->result = ret;
		scsi_done(sc);

		return 0;
	}

	if (snic_get_state(snic) != SNIC_ONLINE) {
		SNIC_HOST_ERR(shost, "snic state is %s\n",
			      snic_state_str[snic_get_state(snic)]);

		return SCSI_MLQUEUE_HOST_BUSY;
	}
	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(shost, "sc %p Tag %d (cmd 0x%x) lun %lld in snic_qcmd\n",
		      sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
	ret = snic_issue_scsi_req(snic, tgt, sc);
	if (ret) {
		SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
} /* end of snic_queuecommand */

/*
 * snic_proc_tmreq_pending_state:
 * caller should hold IO lock
 */
static void
snic_proc_tmreq_pending_state(struct snic *snic,
			      struct scsi_cmnd *sc,
			      u8 cmpl_status)
{
	int state = CMD_STATE(sc);

	if (state == SNIC_IOREQ_ABTS_PENDING)
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_PENDING;
	else if (state == SNIC_IOREQ_LR_PENDING)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_PENDING;
	else
		SNIC_BUG_ON(1);

	switch (cmpl_status) {
	case SNIC_STAT_IO_SUCCESS:
		CMD_FLAGS(sc) |= SNIC_IO_DONE;
		break;

	case SNIC_STAT_ABORTED:
		CMD_FLAGS(sc) |= SNIC_IO_ABORTED;
		break;

	default:
		SNIC_BUG_ON(1);
	}
}

/*
 * snic_process_io_failed_state:
 * Processes IO's error states
 */
static void
snic_process_io_failed_state(struct snic *snic,
			     struct snic_icmnd_cmpl *icmnd_cmpl,
			     struct scsi_cmnd *sc,
			     u8 cmpl_stat)
{
	int res = 0;

	switch (cmpl_stat) {
	case SNIC_STAT_TIMEOUT:		/* Req was timedout */
		atomic64_inc(&snic->s_stats.misc.io_tmo);
		res = DID_TIME_OUT;
		break;

	case SNIC_STAT_ABORTED:		/* Req was aborted */
		atomic64_inc(&snic->s_stats.misc.io_aborted);
		res = DID_ABORT;
		break;

	case SNIC_STAT_DATA_CNT_MISMATCH:/* Recv/Sent more/less data than exp */
		atomic64_inc(&snic->s_stats.misc.data_cnt_mismat);
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));
		res = DID_ERROR;
		break;

	case SNIC_STAT_OUT_OF_RES: /* Out of resources to complete request */
		atomic64_inc(&snic->s_stats.fw.out_of_res);
		res = DID_REQUEUE;
		break;

	case SNIC_STAT_IO_NOT_FOUND:	/* Requested I/O was not found */
		atomic64_inc(&snic->s_stats.io.io_not_found);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SGL_INVALID:	/* Req was aborted due to sgl error */
		atomic64_inc(&snic->s_stats.misc.sgl_inval);
		res = DID_ERROR;
		break;

	case SNIC_STAT_FW_ERR:		/* Req terminated due to FW Error */
		atomic64_inc(&snic->s_stats.fw.io_errs);
		res = DID_ERROR;
		break;

	case SNIC_STAT_SCSI_ERR:	/* FW hits SCSI Error */
		atomic64_inc(&snic->s_stats.fw.scsi_errs);
		break;

	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
	case SNIC_STAT_DEV_OFFLINE:	/* Device offline */
		res = DID_NO_CONNECT;
		break;

	case SNIC_STAT_INVALID_HDR:	/* Hdr contains invalid data */
	case SNIC_STAT_INVALID_PARM:	/* Some param in req is invalid */
	case SNIC_STAT_REQ_NOT_SUP:	/* Req type is not supported */
	case SNIC_STAT_CMND_REJECT:	/* Req rejected */
	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
	default:
		SNIC_SCSI_DBG(snic->shost,
			      "Invalid Hdr/Param, Req Not Supported, Cmnd Rejected, Device Offline, or Unknown\n");
		res = DID_ERROR;
		break;
	}

	SNIC_HOST_ERR(snic->shost, "fw returns failed status %s flags 0x%llx\n",
		      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));

	/* Set sc->result */
	sc->result = (res << 16) | icmnd_cmpl->scsi_status;
} /* end of snic_process_io_failed_state */

/*
 * snic_tmreq_pending : is task management in progress.
 */
static int
snic_tmreq_pending(struct scsi_cmnd *sc)
{
	int state = CMD_STATE(sc);

	return ((state == SNIC_IOREQ_ABTS_PENDING) ||
			(state == SNIC_IOREQ_LR_PENDING));
}

/*
 * snic_process_icmnd_cmpl_status:
 * Caller should hold io_lock
 */
static int
snic_process_icmnd_cmpl_status(struct snic *snic,
			       struct snic_icmnd_cmpl *icmnd_cmpl,
			       u8 cmpl_stat,
			       struct scsi_cmnd *sc)
{
	u8 scsi_stat = icmnd_cmpl->scsi_status;
	u64 xfer_len = 0;
	int ret = 0;

	/* Mark the IO as complete */
	CMD_STATE(sc) = SNIC_IOREQ_COMPLETE;

	if (likely(cmpl_stat == SNIC_STAT_IO_SUCCESS)) {
		sc->result = (DID_OK << 16) | scsi_stat;

		xfer_len = scsi_bufflen(sc);

		/* Update SCSI Cmd with resid value */
		scsi_set_resid(sc, le32_to_cpu(icmnd_cmpl->resid));

		if (icmnd_cmpl->flags & SNIC_ICMND_CMPL_UNDR_RUN) {
			xfer_len -= le32_to_cpu(icmnd_cmpl->resid);
			atomic64_inc(&snic->s_stats.misc.io_under_run);
		}

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&snic->s_stats.misc.qfull);

		ret = 0;
	} else {
		snic_process_io_failed_state(snic, icmnd_cmpl, sc, cmpl_stat);
		atomic64_inc(&snic->s_stats.io.fail);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl: IO Failed : Hdr Status %s flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), CMD_FLAGS(sc));
		ret = 1;
	}

	return ret;
} /* end of snic_process_icmnd_cmpl_status */


/*
 * snic_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void
snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct scsi_cmnd *sc = NULL;
	struct snic_icmnd_cmpl *icmnd_cmpl = NULL;
	struct snic_host_req *req = NULL;
	struct snic_req_info *rqi = NULL;
	unsigned long flags, start_time;
	spinlock_t *io_lock;
	u8 sc_stat = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	icmnd_cmpl = &fwreq->u.icmnd_cmpl;
	sc_stat = icmnd_cmpl->scsi_status;

	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Tag Error:Out of Range Tag %d, hdr status = %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
	WARN_ON_ONCE(!sc);

	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl: Scsi Cmnd Not found, sc = NULL Hdr Status = %s tag = 0x%x fwreq = 0x%p\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id,
			      fwreq);

		SNIC_TRC(snic->shost->host_no, cmnd_id, 0,
			 ((u64)hdr_stat << 16 |
			  (u64)sc_stat << 8 | (u64)icmnd_cmpl->flags),
			 (ulong) fwreq, le32_to_cpu(icmnd_cmpl->resid), ctx);

		return;
	}

	io_lock = snic_io_lock_hash(snic, sc);

	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	SNIC_SCSI_DBG(snic->shost,
		      "Icmnd_cmpl:lun %lld sc %p cmd %x tag %d flags 0x%llx rqi %p\n",
		      sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
		      CMD_FLAGS(sc), rqi);

	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return;
	}

	SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
	WARN_ON_ONCE(req);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "Icmnd_cmpl:Host Req Not Found(null), Hdr Status %s, Tag 0x%x, sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat),
			      cmnd_id, sc, CMD_FLAGS(sc));
		return;
	}

	rqi = (struct snic_req_info *) ctx;
	start_time = rqi->start_time;

	/* firmware completed the io */
	rqi->io_cmpl = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (unlikely(snic_tmreq_pending(sc))) {
		snic_proc_tmreq_pending_state(snic, sc, hdr_stat);
		spin_unlock_irqrestore(io_lock, flags);

		snic_stats_update_io_cmpl(&snic->s_stats);

		/* Expected value is SNIC_STAT_ABORTED */
		if (likely(hdr_stat == SNIC_STAT_ABORTED))
			return;

		SNIC_SCSI_DBG(snic->shost,
			      "icmnd_cmpl:TM Req Pending(%s), Hdr Status %s sc 0x%p scsi status %x resid %d flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      snic_io_status_to_str(hdr_stat),
			      sc, sc_stat, le32_to_cpu(icmnd_cmpl->resid),
			      CMD_FLAGS(sc));

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
			 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

		return;
	}

	if (snic_process_icmnd_cmpl_status(snic, icmnd_cmpl, hdr_stat, sc)) {
		scsi_print_command(sc);
		SNIC_HOST_ERR(snic->shost,
			      "icmnd_cmpl:IO Failed, sc 0x%p Tag %d Cmd %x Hdr Status %s flags 0x%llx\n",
			      sc, cmnd_id, sc->cmnd[0],
			      snic_io_status_to_str(hdr_stat), CMD_FLAGS(sc));
	}

	/* Break link with the SCSI Command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= SNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	/* For now, consider only successful IO. */
	snic_calc_io_process_time(snic, rqi);

	snic_release_req_buf(snic, rqi, sc);

	SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), (ulong) fwreq,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));


	scsi_done(sc);

	snic_stats_update_io_cmpl(&snic->s_stats);
} /* end of snic_icmnd_cmpl_handler */

static void
snic_proc_dr_cmpl_locked(struct snic *snic,
			 struct snic_fw_req *fwreq,
			 u8 cmpl_stat,
			 u32 cmnd_id,
			 struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = (struct snic_req_info *) CMD_SP(sc);
	u32 start_time = rqi->start_time;

	CMD_LR_STATUS(sc) = cmpl_stat;

	SNIC_SCSI_DBG(snic->shost, "itmf_cmpl: Cmd State = %s\n",
		      snic_ioreq_state_to_str(CMD_STATE(sc)));

	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		CMD_FLAGS(sc) |= SNIC_DEV_RST_ABTS_PENDING;

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Terminate Pending Dev Reset Cmpl Recvd.id %x, status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}


	if (CMD_FLAGS(sc) & SNIC_DEV_RST_TIMEDOUT) {
		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, 0, SNIC_TRC_CMD_STATE_FLAGS(sc));

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Dev Reset Completion Received after timeout. id %d cmpl status %s flags 0x%llx\n",
			      (int)(cmnd_id & SNIC_TAG_MASK),
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		return;
	}

	CMD_STATE(sc) = SNIC_IOREQ_LR_COMPLETE;
	CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

	SNIC_SCSI_DBG(snic->shost,
		      "itmf_cmpl:Dev Reset Cmpl Recvd id %d cmpl status %s flags 0x%llx\n",
		      (int)(cmnd_id & SNIC_TAG_MASK),
		      snic_io_status_to_str(cmpl_stat),
		      CMD_FLAGS(sc));

	if (rqi->dr_done)
		complete(rqi->dr_done);
} /* end of snic_proc_dr_cmpl_locked */

/*
 * snic_update_abort_stats : Updates abort stats based on completion status.
 */
static void
snic_update_abort_stats(struct snic *snic, u8 cmpl_stat)
{
	struct snic_abort_stats *abt_stats = &snic->s_stats.abts;

	SNIC_SCSI_DBG(snic->shost, "Updating Abort stats.\n");

	switch (cmpl_stat) {
	case SNIC_STAT_IO_SUCCESS:
		break;

	case SNIC_STAT_TIMEOUT:
		atomic64_inc(&abt_stats->fw_tmo);
		break;

	case SNIC_STAT_IO_NOT_FOUND:
		atomic64_inc(&abt_stats->io_not_found);
		break;

	default:
		atomic64_inc(&abt_stats->fail);
		break;
	}
}

static int
snic_process_itmf_cmpl(struct snic *snic,
		       struct snic_fw_req *fwreq,
		       u32 cmnd_id,
		       u8 cmpl_stat,
		       struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	u32 tm_tags = 0;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	u32 start_time = 0;
	int ret = 0;

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
		spin_unlock_irqrestore(io_lock, flags);

		return ret;
	}
	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: rqi is null,Hdr stat = %s Tag = 0x%x sc = 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(cmpl_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		return ret;
	}

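	/*
	 * The firmware echoes back the tag the driver issued: the low
	 * SNIC_TAG_MASK bits carry the SCSI command tag, while the bits
	 * above it encode the TM type (SNIC_TAG_ABORT, SNIC_TAG_DEV_RST,
	 * SNIC_TAG_IOCTL_DEV_RST), as set up in snic_queue_itmf_req().
	 */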
	/* Extract task management flags */
	tm_tags = cmnd_id & ~(SNIC_TAG_MASK);

	start_time = rqi->start_time;
	cmnd_id &= (SNIC_TAG_MASK);

	switch (tm_tags) {
	case SNIC_TAG_ABORT:
		/* Abort only issued on cmd */
		snic_update_abort_stats(snic, cmpl_stat);

		if (CMD_STATE(sc) != SNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it. */
			ret = -1;
			spin_unlock_irqrestore(io_lock, flags);
			break;
		}

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:Abort Cmpl Recvd.Tag 0x%x Status %s flags 0x%llx\n",
			      cmnd_id,
			      snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		/*
		 * If scsi_eh thread is blocked waiting for abts complete,
		 * signal completion to it. IO will be cleaned in the thread,
		 * else clean it in this context.
		 */
		if (rqi->abts_done) {
			complete(rqi->abts_done);
			spin_unlock_irqrestore(io_lock, flags);

			break; /* jump out */
		}

		CMD_SP(sc) = NULL;
		sc->result = (DID_ERROR << 16);
		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl: Completing IO. sc %p flags 0x%llx\n",
			      sc, CMD_FLAGS(sc));

		spin_unlock_irqrestore(io_lock, flags);

		snic_release_req_buf(snic, rqi, sc);

		SNIC_TRC(snic->shost->host_no, cmnd_id, (ulong) sc,
			 jiffies_to_msecs(jiffies - start_time),
			 (ulong) fwreq, SNIC_TRC_CMD(sc),
			 SNIC_TRC_CMD_STATE_FLAGS(sc));

		scsi_done(sc);

		break;

	case SNIC_TAG_DEV_RST:
	case SNIC_TAG_DEV_RST | SNIC_TAG_IOCTL_DEV_RST:
		snic_proc_dr_cmpl_locked(snic, fwreq, cmpl_stat, cmnd_id, sc);
		spin_unlock_irqrestore(io_lock, flags);
		ret = 0;

		break;

	case SNIC_TAG_ABORT | SNIC_TAG_DEV_RST:
		/* Abort and terminate completion of device reset req */

		CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = cmpl_stat;
		CMD_FLAGS(sc) |= SNIC_DEV_RST_DONE;

		SNIC_SCSI_DBG(snic->shost,
			      "itmf_cmpl:dev reset abts cmpl recvd. id %d status %s flags 0x%llx\n",
			      cmnd_id, snic_io_status_to_str(cmpl_stat),
			      CMD_FLAGS(sc));

		if (rqi->abts_done)
			complete(rqi->abts_done);

		spin_unlock_irqrestore(io_lock, flags);

		break;

	default:
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl: Unknown TM tag bit 0x%x\n", tm_tags);

		SNIC_HOST_ERR(snic->shost,
			      "itmf_cmpl:Unexpected itmf io stat %s Tag = 0x%x flags 0x%llx\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)),
			      cmnd_id,
			      CMD_FLAGS(sc));
		ret = -1;
		SNIC_BUG_ON(1);

		break;
	}

	return ret;
} /* end of snic_process_itmf_cmpl */

/*
 * snic_itmf_cmpl_handler.
 * Routine to handle itmf completions.
 */
static void
snic_itmf_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	struct snic_itmf_cmpl *itmf_cmpl = NULL;
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: %s: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      __func__, typ, hdr_stat, cmnd_id, hid, ctx);

	itmf_cmpl = &fwreq->u.itmf_cmpl;
	SNIC_SCSI_DBG(snic->shost,
		      "Itmf_cmpl: nterm %u , flags 0x%x\n",
		      le32_to_cpu(itmf_cmpl->nterminated), itmf_cmpl->flags);

	/* spl case, dev reset issued through ioctl */
	if (cmnd_id & SNIC_TAG_IOCTL_DEV_RST) {
		rqi = (struct snic_req_info *) ctx;
		sc = rqi->sc;

		goto ioctl_dev_rst;
	}

	if ((cmnd_id & SNIC_TAG_MASK) >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id & SNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);

ioctl_dev_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "Itmf_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);

		return;
	}

	snic_process_itmf_cmpl(snic, fwreq, cmnd_id, hdr_stat, sc);
} /* end of snic_itmf_cmpl_handler */



static void
snic_hba_reset_scsi_cleanup(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_stats *st = &snic->s_stats;
	long act_ios = 0, act_fwreqs = 0;

	SNIC_SCSI_DBG(snic->shost, "HBA Reset scsi cleanup.\n");
	snic_scsi_cleanup(snic, snic_cmd_tag(sc));

	/* Update stats on pending IOs */
	act_ios = atomic64_read(&st->io.active);
	atomic64_add(act_ios, &st->io.compl);
	atomic64_sub(act_ios, &st->io.active);

	act_fwreqs = atomic64_read(&st->fw.actv_reqs);
	atomic64_sub(act_fwreqs, &st->fw.actv_reqs);
}

/*
 * snic_hba_reset_cmpl_handler :
 *
 * Notes :
 * 1. Cleanup all the scsi cmds, release all snic specific cmds
 * 2. Issue Report Targets in case of SAN targets
 */
static int
snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	ulong ctx;
	u32 cmnd_id;
	u32 hid;
	u8 typ;
	u8 hdr_stat;
	struct scsi_cmnd *sc = NULL;
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags, gflags;
	int ret = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n",
		       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));

	SNIC_SCSI_DBG(snic->shost,
		      "reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	/* spl case, host reset issued through ioctl */
	if (cmnd_id == SCSI_NO_TAG) {
		rqi = (struct snic_req_info *) ctx;
		SNIC_HOST_INFO(snic->shost,
			       "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n",
			       cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
		sc = rqi->sc;

		goto ioctl_hba_rst;
	}

	if (cmnd_id >= snic->max_tag_id) {
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: Tag 0x%x out of Range,HdrStat %s\n",
			      cmnd_id, snic_io_status_to_str(hdr_stat));
		SNIC_BUG_ON(1);

		return 1;
	}

	sc = scsi_host_find_tag(snic->shost, cmnd_id);
ioctl_hba_rst:
	if (!sc) {
		atomic64_inc(&snic->s_stats.io.sc_null);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: sc is NULL - Hdr Stat %s Tag 0x%x\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id);
		ret = 1;

		return ret;
	}

	SNIC_HOST_INFO(snic->shost,
		       "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n",
		       sc, rqi, cmnd_id, CMD_FLAGS(sc));

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);

	if (!snic->remove_wait) {
		spin_unlock_irqrestore(io_lock, flags);
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl:host reset completed after timeout\n");
		ret = 1;

		return ret;
	}

	rqi = (struct snic_req_info *) CMD_SP(sc);
	WARN_ON_ONCE(!rqi);

	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
		SNIC_HOST_ERR(snic->shost,
			      "reset_cmpl: rqi is null,Hdr stat %s Tag 0x%x sc 0x%p flags 0x%llx\n",
			      snic_io_status_to_str(hdr_stat), cmnd_id, sc,
			      CMD_FLAGS(sc));

		ret = 1;

		return ret;
	}
	/* stats */
	spin_unlock_irqrestore(io_lock, flags);

	/* scsi cleanup */
	snic_hba_reset_scsi_cleanup(snic, sc);

	SNIC_BUG_ON(snic_get_state(snic) != SNIC_OFFLINE &&
		    snic_get_state(snic) != SNIC_FWRESET);

	/* Careful locking between snic_lock and io lock */
	spin_lock_irqsave(io_lock, flags);
	spin_lock_irqsave(&snic->snic_lock, gflags);
	if (snic_get_state(snic) == SNIC_FWRESET)
		snic_set_state(snic, SNIC_ONLINE);
	spin_unlock_irqrestore(&snic->snic_lock, gflags);

	if (snic->remove_wait)
		complete(snic->remove_wait);

	spin_unlock_irqrestore(io_lock, flags);
	atomic64_inc(&snic->s_stats.reset.hba_reset_cmpl);

	ret = 0;
	/* Rediscovery is for SAN */
	if (snic->config.xpt_type == SNIC_DAS)
		return ret;

	SNIC_SCSI_DBG(snic->shost, "reset_cmpl: Queuing discovery work.\n");
	queue_work(snic_glob->event_q, &snic->disc_work);

	return ret;
}

static void
snic_msg_ack_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	SNIC_HOST_INFO(snic->shost, "Message Ack Received.\n");

	SNIC_ASSERT_NOT_IMPL(1);
}

static void
snic_aen_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, hdr_stat;
	u32 cmnd_id, hid;
	ulong ctx;
	struct snic_async_evnotify *aen = &fwreq->u.async_ev;
	u32 event_id = 0;

	snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
	SNIC_SCSI_DBG(snic->shost,
		      "aen: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
		      typ, hdr_stat, cmnd_id, hid, ctx);

	event_id = le32_to_cpu(aen->ev_id);

	switch (event_id) {
	case SNIC_EV_TGT_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_OFFLINE Event Recvd.\n");
		break;

	case SNIC_EV_TGT_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ONLINE Event Recvd.\n");
		break;

	case SNIC_EV_LUN_OFFLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_OFFLINE Event Recvd.\n");
		break;

	case SNIC_EV_LUN_ONLINE:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ONLINE Event Recvd.\n");
		break;

	case SNIC_EV_CONF_CHG:
		SNIC_HOST_INFO(snic->shost, "aen:Config Change Event Recvd.\n");
		break;

	case SNIC_EV_TGT_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_ADD Event Recvd.\n");
		break;

	case SNIC_EV_TGT_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:TGT_DEL Event Recvd.\n");
		break;

	case SNIC_EV_LUN_ADDED:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_ADD Event Recvd.\n");
		break;

	case SNIC_EV_LUN_DELTD:
		SNIC_HOST_INFO(snic->shost, "aen:LUN_DEL Event Recvd.\n");
		break;

	case SNIC_EV_DISC_CMPL:
		SNIC_HOST_INFO(snic->shost, "aen:DISC_CMPL Event Recvd.\n");
		break;

	default:
		SNIC_HOST_INFO(snic->shost, "aen:Unknown Event Recvd.\n");
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_ASSERT_NOT_IMPL(1);
} /* end of snic_aen_handler */

/*
 * snic_io_cmpl_handler
 * Routine to process CQ entries(IO Completions) posted by fw.
 */
static int
snic_io_cmpl_handler(struct vnic_dev *vdev,
		     unsigned int cq_idx,
		     struct snic_fw_req *fwreq)
{
	struct snic *snic = svnic_dev_priv(vdev);
	u64 start = jiffies, cmpl_time;

	snic_print_desc(__func__, (char *)fwreq, sizeof(*fwreq));

	/* Update FW Stats */
	if ((fwreq->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL) &&
		(fwreq->hdr.type <= SNIC_RSP_BOOT_LUNS_CMPL))
		atomic64_dec(&snic->s_stats.fw.actv_reqs);

	SNIC_BUG_ON((fwreq->hdr.type > SNIC_RSP_BOOT_LUNS_CMPL) &&
		    (fwreq->hdr.type < SNIC_MSG_ASYNC_EVNOTIFY));

	/* Check for snic subsys errors */
	switch (fwreq->hdr.status) {
	case SNIC_STAT_NOT_READY:	/* XPT yet to initialize */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem is NOT Ready.\n");
		break;

	case SNIC_STAT_FATAL_ERROR:	/* XPT Error */
		SNIC_HOST_ERR(snic->shost,
			      "sNIC SubSystem in Unrecoverable State.\n");
		break;
	}

	switch (fwreq->hdr.type) {
	case SNIC_RSP_EXCH_VER_CMPL:
		snic_io_exch_ver_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		snic_report_tgt_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ICMND_CMPL:
		snic_icmnd_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_ITMF_CMPL:
		snic_itmf_cmpl_handler(snic, fwreq);
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		snic_hba_reset_cmpl_handler(snic, fwreq);
		break;

	case SNIC_MSG_ACK:
		snic_msg_ack_handler(snic, fwreq);
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		snic_aen_handler(snic, fwreq);
		break;

	default:
		SNIC_BUG_ON(1);
		SNIC_SCSI_DBG(snic->shost,
			      "Unknown Firmware completion request type %d\n",
			      fwreq->hdr.type);
		break;
	}

	/* Update Stats */
	cmpl_time = jiffies - start;
	if (cmpl_time > atomic64_read(&snic->s_stats.io.max_cmpl_time))
		atomic64_set(&snic->s_stats.io.max_cmpl_time, cmpl_time);

	return 0;
} /* end of snic_io_cmpl_handler */

/*
 * snic_fwcq_cmpl_handler
 * Routine to process fwCQ
 * This CQ is independent, and not associated with wq/rq/wq_copy queues
 */
int
snic_fwcq_cmpl_handler(struct snic *snic, int io_cmpl_work)
{
	unsigned int num_ent = 0;	/* number cq entries processed */
	unsigned int cq_idx;
	unsigned int nent_per_cq;
	struct snic_misc_stats *misc_stats = &snic->s_stats.misc;

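	/*
	 * CQ indices [0, wq_count) are paired with the send queues; the
	 * firmware posts completions on the remaining CQs, so scanning
	 * starts at wq_count.
	 */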
	for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {
		nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
						 snic_io_cmpl_handler,
						 io_cmpl_work);
		num_ent += nent_per_cq;

		if (nent_per_cq > atomic64_read(&misc_stats->max_cq_ents))
			atomic64_set(&misc_stats->max_cq_ents, nent_per_cq);
	}

	return num_ent;
} /* end of snic_fwcq_cmpl_handler */

/*
 * snic_queue_itmf_req: Common API to queue Task Management requests.
 * Use rqi->tm_tag for passing special tags.
 * @req_id : aborted request's tag, -1 for lun reset.
 */
static int
snic_queue_itmf_req(struct snic *snic,
		    struct snic_host_req *tmreq,
		    struct scsi_cmnd *sc,
		    u32 tmf,
		    u32 req_id)
{
	struct snic_req_info *rqi = req_to_rqi(tmreq);
	struct scsi_lun lun;
	int tm_tag = snic_cmd_tag(sc) | rqi->tm_tag;
	int ret = 0;

	SNIC_BUG_ON(!rqi);
	SNIC_BUG_ON(!rqi->tm_tag);

	/* fill in lun info */
	int_to_scsilun(sc->device->lun, &lun);

	/* Initialize snic_host_req: itmf */
	snic_itmf_init(tmreq,
		       tm_tag,
		       snic->config.hid,
		       (ulong) rqi,
		       0 /* flags */,
		       req_id, /* Command to be aborted. */
		       rqi->tgt_id,
		       lun.scsi_lun,
		       tmf);

	/*
	 * In case of multiple aborts on the same cmd, use
	 * try_wait_for_completion() and completion_done() to check whether
	 * an abort is queued even after a prior abort has completed,
	 * e.g. SNIC_BUG_ON(completion_done(&rqi->done));
	 */

	ret = snic_queue_wq_desc(snic, tmreq, sizeof(*tmreq));
	if (ret)
		SNIC_HOST_ERR(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d tag %d Failed, ret = %d\n",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc), ret);
	else
		SNIC_SCSI_DBG(snic->shost,
			      "qitmf:Queuing ITMF(%d) Req sc %p, rqi %p, req_id %d, tag %d (req_id)- Success.",
			      tmf, sc, rqi, req_id, snic_cmd_tag(sc));

	return ret;
} /* end of snic_queue_itmf_req */

static int
snic_issue_tm_req(struct snic *snic,
		    struct snic_req_info *rqi,
		    struct scsi_cmnd *sc,
		    int tmf)
{
	struct snic_host_req *tmreq = NULL;
	int req_id = 0, tag = snic_cmd_tag(sc);
	int ret = 0;

	if (snic_get_state(snic) == SNIC_FWRESET)
		return -EBUSY;

	atomic_inc(&snic->ios_inflight);

	SNIC_SCSI_DBG(snic->shost,
		      "issu_tmreq: Task mgmt req %d. rqi %p w/ tag %x\n",
		      tmf, rqi, tag);


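	/*
	 * A LUN reset targets the whole device rather than one command,
	 * so there is no victim tag; SCSI_NO_TAG is passed as req_id.
	 */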
	if (tmf == SNIC_ITMF_LUN_RESET) {
		tmreq = snic_dr_req_init(snic, rqi);
		req_id = SCSI_NO_TAG;
	} else {
		tmreq = snic_abort_req_init(snic, rqi);
		req_id = tag;
	}

	if (!tmreq) {
		ret = -ENOMEM;

		goto tmreq_err;
	}

	ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);

tmreq_err:
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
			      tmf, sc, rqi, req_id, tag, ret);
	} else {
		SNIC_SCSI_DBG(snic->shost,
			      "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
			      tmf, sc, rqi, req_id, tag);
	}

	atomic_dec(&snic->ios_inflight);

	return ret;
}

/*
 * snic_queue_abort_req : Queues abort req to WQ
 */
static int
snic_queue_abort_req(struct snic *snic,
		     struct snic_req_info *rqi,
		     struct scsi_cmnd *sc,
		     int tmf)
{
	SNIC_SCSI_DBG(snic->shost, "q_abtreq: sc %p, rqi %p, tag %x, tmf %d\n",
		      sc, rqi, snic_cmd_tag(sc), tmf);

	/* Add special tag for abort */
	rqi->tm_tag |= SNIC_TAG_ABORT;

	return snic_issue_tm_req(snic, rqi, sc, tmf);
}

/*
 * snic_abort_finish : called by snic_abort_cmd on queuing abort successfully.
 */
static int
snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	spinlock_t *io_lock = NULL;
	unsigned long flags;
	int ret = 0, tag = snic_cmd_tag(sc);

	io_lock = snic_io_lock_hash(snic, sc);
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.req_null);
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;

		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:req info is null tag 0x%x, sc 0x%p flags 0x%llx\n",
			      tag, sc, CMD_FLAGS(sc));
		ret = FAILED;

		goto abort_fail;
	}

	rqi->abts_done = NULL;

	ret = FAILED;

	/* Check the abort status. */
	switch (CMD_ABTS_STATUS(sc)) {
	case SNIC_INVALID_CODE:
		/* Firmware didn't complete abort req, timedout */
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TIMEDOUT;
		atomic64_inc(&snic->s_stats.abts.drv_tmo);
		SNIC_SCSI_DBG(snic->shost,
			      "abt_fini:sc %p Tag %x Driver Timeout.flags 0x%llx\n",
			      sc, snic_cmd_tag(sc), CMD_FLAGS(sc));
		/* do not release snic request in timedout case */
		rqi = NULL;

		goto abort_fail;

	case SNIC_STAT_IO_SUCCESS:
	case SNIC_STAT_IO_NOT_FOUND:
		ret = SUCCESS;
		/*
		 * If abort path doesn't call scsi_done(),
		 * the # IO timeouts == 2, will cause the LUN offline.
		 * Call scsi_done to complete the IO.
		 */
		sc->result = (DID_ERROR << 16);
		scsi_done(sc);
		break;

	default:
		/* Firmware completed abort with error */
		ret = FAILED;
		rqi = NULL;
		break;
	}

	CMD_SP(sc) = NULL;
	SNIC_HOST_INFO(snic->shost,
		       "abt_fini: Tag %x, Cmpl Status %s flags 0x%llx\n",
		       tag, snic_io_status_to_str(CMD_ABTS_STATUS(sc)),
		       CMD_FLAGS(sc));

abort_fail:
	spin_unlock_irqrestore(io_lock, flags);
	if (rqi)
		snic_release_req_buf(snic, rqi, sc);

	return ret;
} /* end of snic_abort_finish */

/*
 * snic_send_abort_and_wait : Issues Abort, and Waits
 */
static int
snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
{
	struct snic_req_info *rqi = NULL;
	enum snic_ioreq_state sv_state;
	struct snic_tgt *tgt = NULL;
	spinlock_t *io_lock = NULL;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0, tmf = 0, tag = snic_cmd_tag(sc);

	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* stats */

	io_lock = snic_io_lock_hash(snic, sc);

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw_cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for the command
	 * has happened, the completion won't actually complete the
	 * command and it will be considered as an aborted command.
	 *
	 * The CMD_SP will not be cleared except while holding io_lock.
	 */
	spin_lock_irqsave(io_lock, flags);
	rqi = (struct snic_req_info *) CMD_SP(sc);
	if (!rqi) {
		spin_unlock_irqrestore(io_lock, flags);

		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: rqi is null. Tag %d flags 0x%llx\n",
			      tag, CMD_FLAGS(sc));

		ret = SUCCESS;

		goto send_abts_end;
	}

	rqi->abts_done = &tm_done;
	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);

		ret = 0;
		goto abts_pending;
	}
	SNIC_BUG_ON(!rqi->abts_done);

	/* Save Command State; it is restored if queuing the abort fails. */
	sv_state = CMD_STATE(sc);

	/*
	 * Command is still pending, need to abort it.
	 * If the fw completes the command after this point,
	 * the completion won't reach the mid-layer, since the abort
	 * has already started.
	 */
	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;

	SNIC_SCSI_DBG(snic->shost, "send_abt_cmd: TAG 0x%x\n", tag);

	spin_unlock_irqrestore(io_lock, flags);

	/* Now Queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		atomic64_inc(&snic->s_stats.abts.q_fail);
		SNIC_HOST_ERR(snic->shost,
			      "send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
			      tag, ret, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		/* Restore Command's previous state */
		CMD_STATE(sc) = sv_state;
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;

		goto send_abts_end;
	}

	spin_lock_irqsave(io_lock, flags);
	if (tmf == SNIC_ITMF_ABTS_TASK) {
		CMD_FLAGS(sc) |= SNIC_IO_ABTS_ISSUED;
		atomic64_inc(&snic->s_stats.abts.num);
	} else {
		/* term stats */
		CMD_FLAGS(sc) |= SNIC_IO_TERM_ISSUED;
	}
	spin_unlock_irqrestore(io_lock, flags);

	SNIC_SCSI_DBG(snic->shost,
		      "send_abt_cmd: sc %p Tag %x flags 0x%llx\n",
		      sc, tag, CMD_FLAGS(sc));


	ret = 0;

abts_pending:
	/*
	 * Queued an abort IO, wait for its completion.
	 * Once the fw completes the abort command, it will
	 * wakeup this thread.
	 */
	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

send_abts_end:
	return ret;
} /* end of snic_send_abort_and_wait */

/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a snic_ioreq in the driver.
 * The snic_ioreq is linked to the SCSI Cmd, thus linking it with the
 * ULP's IO.
 */
int
snic_abort_cmd(struct scsi_cmnd *sc)
{
	struct snic *snic = shost_priv(sc->device->host);
	int ret = SUCCESS, tag = snic_cmd_tag(sc);
	u32 start_time = jiffies;

	SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n",
		       sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag);

	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
		SNIC_HOST_ERR(snic->shost,
			      "abt_cmd: tag %x Parent Devs are not rdy\n",
			      tag);
		ret = FAST_IO_FAIL;

		goto abort_end;
	}


	ret = snic_send_abort_and_wait(snic, sc);
	if (ret)
		goto abort_end;

	ret = snic_abort_finish(snic, sc);

abort_end:
	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
		 jiffies_to_msecs(jiffies - start_time), 0,
		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));

	SNIC_SCSI_DBG(snic->shost,
		      "abts: Abort Req Status = %s\n",
		      (ret == SUCCESS) ? "SUCCESS" :
		       ((ret == FAST_IO_FAIL) ? "FAST_IO_FAIL" : "FAILED"));

	return ret;
}



static int
snic_is_abts_pending(struct snic *snic, struct scsi_cmnd *lr_sc)
{
	struct snic_req_info *rqi = NULL;
	struct scsi_cmnd *sc = NULL;
	struct scsi_device *lr_sdev = NULL;
	spinlock_t *io_lock = NULL;
	u32 tag;
	unsigned long flags;

	if (lr_sc)
		lr_sdev = lr_sc->device;

	/* walk through the tag map, and check if IOs are still pending in fw */
	for (tag = 0; tag < snic->max_tag_id; tag++) {
		io_lock = snic_io_lock_tag(snic, tag);

		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(snic->shost, tag);

		if (!sc || (lr_sc && (sc->device != lr_sdev || sc == lr_sc))) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (!rqi) {
			spin_unlock_irqrestore(io_lock, flags);

			continue;
		}

		/*
		 * Found IO that is still pending w/ firmware and belongs to
		 * the LUN that is under reset, if lr_sc != NULL
		 */
		SNIC_SCSI_DBG(snic->shost, "Found IO in %s on LUN\n",
			      snic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);

			return 1;
		}

		spin_unlock_irqrestore(io_lock, flags);
	}

	return 0;
} /* end of snic_is_abts_pending */

static int
snic_dr_clean_single_req(struct snic *snic,
			 u32 tag,
			 struct scsi_device *lr_sdev)
{
	struct snic_req_info *rqi = NULL;
	struct snic_tgt *tgt = NULL;
	struct scsi_cmnd *sc = NULL;
	spinlock_t *io_lock = NULL;
	u32 sv_state = 0, tmf = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	unsigned long flags;
	int ret = 0;

	io_lock = snic_io_lock_tag(snic, tag);
	spin_lock_irqsave(io_lock, flags);
	sc = scsi_host_find_tag(snic->shost, tag);

	/* Ignore cmds that don't belong to the LUN reset device */
	if (!sc || sc->device != lr_sdev)
		goto skip_clean;

	rqi = (struct snic_req_info *) CMD_SP(sc);

	if (!rqi)
		goto skip_clean;


	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
		goto skip_clean;


	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req: devrst is not pending sc 0x%p\n",
			      sc);

		goto skip_clean;
	}

	SNIC_SCSI_DBG(snic->shost,
		"clean_single_req: Found IO in %s on lun\n",
		snic_ioreq_state_to_str(CMD_STATE(sc)));

	/* Save Command State */
	sv_state = CMD_STATE(sc);

	/*
	 * Any pending IO issued prior to the reset is expected to be
	 * in the abts pending state; if not, set SNIC_IOREQ_ABTS_PENDING
	 * to indicate the IO is abort pending.
	 * When the IO is completed, it will be handed over and handled
	 * in this function.
	 */

	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
	SNIC_BUG_ON(rqi->abts_done);

	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
		rqi->tm_tag = SNIC_TAG_DEV_RST;

		SNIC_SCSI_DBG(snic->shost,
			      "clean_single_req:devrst sc 0x%p\n", sc);
	}

	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
	rqi->abts_done = &tm_done;
	spin_unlock_irqrestore(io_lock, flags);

	tgt = starget_to_tgt(scsi_target(sc->device));
	if ((snic_tgt_chkready(tgt) != 0) && (tgt->tdata.typ == SNIC_TGT_SAN))
		tmf = SNIC_ITMF_ABTS_TASK_TERM;
	else
		tmf = SNIC_ITMF_ABTS_TASK;

	/* Now queue the abort command to firmware */
	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "clean_single_req_err:sc %p, tag %d abt failed. tm_tag %d flags 0x%llx\n",
			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));

		spin_lock_irqsave(io_lock, flags);
		rqi = (struct snic_req_info *) CMD_SP(sc);
		if (rqi)
			rqi->abts_done = NULL;

		/* Restore Command State */
		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
			CMD_STATE(sc) = sv_state;

		ret = 1;
		goto skip_clean;
	}

	spin_lock_irqsave(io_lock, flags);
	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;

	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	wait_for_completion_timeout(&tm_done, SNIC_ABTS_TIMEOUT);

	/* Recheck cmd state to check if it is now aborted. */
1828 	spin_lock_irqsave(io_lock, flags);
1829 	rqi = (struct snic_req_info *) CMD_SP(sc);
1830 	if (!rqi) {
1831 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_REQ_NULL;
1832 		goto skip_clean;
1833 	}
1834 	rqi->abts_done = NULL;
1835 
1836 	/* if abort is still pending w/ fw, fail */
1837 	if (CMD_ABTS_STATUS(sc) == SNIC_INVALID_CODE) {
1838 		SNIC_HOST_ERR(snic->shost,
1839 			      "clean_single_req_err:sc %p tag %d abt still pending w/ fw, tm_tag %d flags 0x%llx\n",
1840 			      sc, tag, rqi->tm_tag, CMD_FLAGS(sc));
1841 
1842 		CMD_FLAGS(sc) |= SNIC_IO_ABTS_TERM_DONE;
1843 		ret = 1;
1844 
1845 		goto skip_clean;
1846 	}
1847 
1848 	CMD_STATE(sc) = SNIC_IOREQ_ABTS_COMPLETE;
1849 	CMD_SP(sc) = NULL;
1850 	spin_unlock_irqrestore(io_lock, flags);
1851 
1852 	snic_release_req_buf(snic, rqi, sc);
1853 
1854 	sc->result = (DID_ERROR << 16);
1855 	scsi_done(sc);
1856 
1857 	ret = 0;
1858 
1859 	return ret;
1860 
1861 skip_clean:
1862 	spin_unlock_irqrestore(io_lock, flags);
1863 
1864 	return ret;
1865 } /* end of snic_dr_clean_single_req */
1866 
1867 static int
1868 snic_dr_clean_pending_req(struct snic *snic, struct scsi_cmnd *lr_sc)
1869 {
1870 	struct scsi_device *lr_sdev = lr_sc->device;
1871 	u32 tag = 0;
1872 	int ret = FAILED;
1873 
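	/*
	 * Walk the entire tag space and abort every outstanding IO on
	 * this LUN, skipping the device-reset command itself.
	 */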
1874 	for (tag = 0; tag < snic->max_tag_id; tag++) {
1875 		if (tag == snic_cmd_tag(lr_sc))
1876 			continue;
1877 
1878 		ret = snic_dr_clean_single_req(snic, tag, lr_sdev);
1879 		if (ret) {
1880 			SNIC_HOST_ERR(snic->shost, "clean_err:tag = %d\n", tag);
1881 
1882 			goto clean_err;
1883 		}
1884 	}
1885 
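	/*
	 * Give the terminated IOs a short grace period to complete.
	 * schedule_timeout() needs the task state set first, so use the
	 * _uninterruptible variant to actually sleep for 100 ms.
	 */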
1886 	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
1887 
1888 	/* Walk through all the cmds and check abts status. */
1889 	if (snic_is_abts_pending(snic, lr_sc)) {
1890 		ret = FAILED;
1891 
1892 		goto clean_err;
1893 	}
1894 
1895 	ret = 0;
1896 	SNIC_SCSI_DBG(snic->shost, "clean_pending_req: Success.\n");
1897 
1898 	return ret;
1899 
1900 clean_err:
1901 	ret = FAILED;
1902 	SNIC_HOST_ERR(snic->shost,
1903 		      "Failed to Clean Pending IOs on %s device.\n",
1904 		      dev_name(&lr_sdev->sdev_gendev));
1905 
1906 	return ret;
1908 } /* end of snic_dr_clean_pending_req */
1909 
1910 /*
1911  * snic_dr_finish : Called by snic_device_reset
1912  */
1913 static int
1914 snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
1915 {
1916 	struct snic_req_info *rqi = NULL;
1917 	spinlock_t *io_lock = NULL;
1918 	unsigned long flags;
1919 	int lr_res = 0;
1920 	int ret = FAILED;
1921 
1922 	io_lock = snic_io_lock_hash(snic, sc);
1923 	spin_lock_irqsave(io_lock, flags);
1924 	rqi = (struct snic_req_info *) CMD_SP(sc);
1925 	if (!rqi) {
1926 		spin_unlock_irqrestore(io_lock, flags);
1927 		SNIC_SCSI_DBG(snic->shost,
1928 			      "dr_fini: rqi is null tag 0x%x sc 0x%p flags 0x%llx\n",
1929 			      snic_cmd_tag(sc), sc, CMD_FLAGS(sc));
1930 
1931 		ret = FAILED;
1932 		goto dr_fini_end;
1933 	}
1934 
1935 	rqi->dr_done = NULL;
1936 
1937 	lr_res = CMD_LR_STATUS(sc);
1938 
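	/*
	 * CMD_LR_STATUS is initialized to SNIC_INVALID_CODE when the
	 * reset is issued (see snic_send_dr_and_wait()), so seeing it
	 * unchanged here means the firmware never posted a completion,
	 * i.e. the reset timed out.
	 */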
1939 	switch (lr_res) {
1940 	case SNIC_INVALID_CODE:
1941 		/* stats */
1942 		SNIC_SCSI_DBG(snic->shost,
1943 			      "dr_fini: Tag %x Dev Reset timed out. flags 0x%llx\n",
1944 			      snic_cmd_tag(sc), CMD_FLAGS(sc));
1945 
1946 		CMD_FLAGS(sc) |= SNIC_DEV_RST_TIMEDOUT;
1947 		ret = FAILED;
1948 
1949 		goto dr_failed;
1950 
1951 	case SNIC_STAT_IO_SUCCESS:
1952 		SNIC_SCSI_DBG(snic->shost,
1953 			      "dr_fini: Tag %x Dev Reset cmpl\n",
1954 			      snic_cmd_tag(sc));
1955 		ret = 0;
1956 		break;
1957 
1958 	default:
1959 		SNIC_HOST_ERR(snic->shost,
1960 			      "dr_fini: Device Reset completed & failed. Tag = %x lr_status %s flags 0x%llx\n",
1961 			      snic_cmd_tag(sc),
1962 			      snic_io_status_to_str(lr_res), CMD_FLAGS(sc));
1963 		ret = FAILED;
1964 		goto dr_failed;
1965 	}
1966 	spin_unlock_irqrestore(io_lock, flags);
1967 
1968 	/*
1969 	 * Clean up any IOs on this LUN that have not yet completed.
1970 	 * If any of them fails to clean, the LUN reset fails.
1971 	 * Cleanup covers all commands on this LUN except the LUN reset
1972 	 * command itself; if every one of them cleans up, the LUN reset
1973 	 * succeeds.
1974 	 */
1975 
1976 	ret = snic_dr_clean_pending_req(snic, sc);
1977 	if (ret) {
1978 		spin_lock_irqsave(io_lock, flags);
1979 		SNIC_SCSI_DBG(snic->shost,
1980 			      "dr_fini: Device Reset Failed since could not abort all IOs. Tag = %x.\n",
1981 			      snic_cmd_tag(sc));
1982 		rqi = (struct snic_req_info *) CMD_SP(sc);
1983 
1984 		goto dr_failed;
1985 	} else {
1986 		/* Cleanup LUN Reset Command */
1987 		spin_lock_irqsave(io_lock, flags);
1988 		rqi = (struct snic_req_info *) CMD_SP(sc);
1989 		if (rqi)
1990 			ret = SUCCESS; /* Completed Successfully */
1991 		else
1992 			ret = FAILED;
1993 	}
1994 
1995 dr_failed:
1996 	lockdep_assert_held(io_lock);
1997 	if (rqi)
1998 		CMD_SP(sc) = NULL;
1999 	spin_unlock_irqrestore(io_lock, flags);
2000 
2001 	if (rqi)
2002 		snic_release_req_buf(snic, rqi, sc);
2003 
2004 dr_fini_end:
2005 	return ret;
2006 } /* end of snic_dr_finish */
2007 
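/*
 * snic_queue_dr_req : Tags the request as a device reset and issues a
 * LUN RESET task-management request to the firmware.
 */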
2008 static int
2009 snic_queue_dr_req(struct snic *snic,
2010 		  struct snic_req_info *rqi,
2011 		  struct scsi_cmnd *sc)
2012 {
2013 	/* Add special tag for device reset */
2014 	rqi->tm_tag |= SNIC_TAG_DEV_RST;
2015 
2016 	return snic_issue_tm_req(snic, rqi, sc, SNIC_ITMF_LUN_RESET);
2017 }
2018 
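/*
 * snic_send_dr_and_wait : Marks the command for device reset, queues a
 * LUN RESET request, and waits up to SNIC_LUN_RESET_TIMEOUT for the
 * firmware to complete it.
 */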
2019 static int
2020 snic_send_dr_and_wait(struct snic *snic, struct scsi_cmnd *sc)
2021 {
2022 	struct snic_req_info *rqi = NULL;
2023 	enum snic_ioreq_state sv_state;
2024 	spinlock_t *io_lock = NULL;
2025 	unsigned long flags;
2026 	DECLARE_COMPLETION_ONSTACK(tm_done);
2027 	int ret = FAILED, tag = snic_cmd_tag(sc);
2028 
2029 	io_lock = snic_io_lock_hash(snic, sc);
2030 	spin_lock_irqsave(io_lock, flags);
2031 	CMD_FLAGS(sc) |= SNIC_DEVICE_RESET;
2032 	rqi = (struct snic_req_info *) CMD_SP(sc);
2033 	if (!rqi) {
2034 		SNIC_HOST_ERR(snic->shost,
2035 			      "send_dr: rqi is null, Tag 0x%x flags 0x%llx\n",
2036 			      tag, CMD_FLAGS(sc));
2037 		spin_unlock_irqrestore(io_lock, flags);
2038 
2039 		ret = FAILED;
2040 		goto send_dr_end;
2041 	}
2042 
2043 	/* Save Command state to restore in case Queuing failed. */
2044 	sv_state = CMD_STATE(sc);
2045 
2046 	CMD_STATE(sc) = SNIC_IOREQ_LR_PENDING;
2047 	CMD_LR_STATUS(sc) = SNIC_INVALID_CODE;
2048 
2049 	SNIC_SCSI_DBG(snic->shost, "dr: TAG = %x\n", tag);
2050 
2051 	rqi->dr_done = &tm_done;
2052 	SNIC_BUG_ON(!rqi->dr_done);
2053 
2054 	spin_unlock_irqrestore(io_lock, flags);
2055 	/*
2056 	 * The command state is now SNIC_IOREQ_LR_PENDING. If the
2057 	 * original command completes in the meantime, the icmnd
2058 	 * completion handler will mark it completed; the LUN reset
2059 	 * is still issued regardless.
2060 	 */
2061 
2062 	ret = snic_queue_dr_req(snic, rqi, sc);
2063 	if (ret) {
2064 		SNIC_HOST_ERR(snic->shost,
2065 			      "send_dr: IO w/ Tag 0x%x Failed err = %d. flags 0x%llx\n",
2066 			      tag, ret, CMD_FLAGS(sc));
2067 
2068 		spin_lock_irqsave(io_lock, flags);
2069 		/* Restore State */
2070 		CMD_STATE(sc) = sv_state;
2071 		rqi = (struct snic_req_info *) CMD_SP(sc);
2072 		if (rqi)
2073 			rqi->dr_done = NULL;
2074 		/* rqi is freed in caller. */
2075 		spin_unlock_irqrestore(io_lock, flags);
2076 		ret = FAILED;
2077 
2078 		goto send_dr_end;
2079 	}
2080 
2081 	spin_lock_irqsave(io_lock, flags);
2082 	CMD_FLAGS(sc) |= SNIC_DEV_RST_ISSUED;
2083 	spin_unlock_irqrestore(io_lock, flags);
2084 
2085 	ret = 0;
2086 
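	/*
	 * Wait for the firmware to post the LUN reset completion. On a
	 * timeout, CMD_LR_STATUS stays SNIC_INVALID_CODE and
	 * snic_dr_finish() reports the reset as timed out.
	 */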
2087 	wait_for_completion_timeout(&tm_done, SNIC_LUN_RESET_TIMEOUT);
2088 
2089 send_dr_end:
2090 	return ret;
2091 }
2092 
2093 /*
2094  * Auxiliary function to check whether the LUN reset operation is
2095  * supported; returns 0 if it is not.
2096  */
2097 static int
2098 snic_dev_reset_supported(struct scsi_device *sdev)
2099 {
2100 	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
2101 
2102 	if (tgt->tdata.typ == SNIC_TGT_DAS)
2103 		return 0;
2104 
2105 	return 1;
2106 }
2107 
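/*
 * snic_unlink_and_release_req : Detaches the request from the command,
 * records the given flag on the command, and releases the request buffer.
 */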
2108 static void
2109 snic_unlink_and_release_req(struct snic *snic, struct scsi_cmnd *sc, int flag)
2110 {
2111 	struct snic_req_info *rqi = NULL;
2112 	spinlock_t *io_lock = NULL;
2113 	unsigned long flags;
2114 	u32 start_time = jiffies;
2115 
2116 	io_lock = snic_io_lock_hash(snic, sc);
2117 	spin_lock_irqsave(io_lock, flags);
2118 	rqi = (struct snic_req_info *) CMD_SP(sc);
2119 	if (rqi) {
2120 		start_time = rqi->start_time;
2121 		CMD_SP(sc) = NULL;
2122 	}
2123 
2124 	CMD_FLAGS(sc) |= flag;
2125 	spin_unlock_irqrestore(io_lock, flags);
2126 
2127 	if (rqi)
2128 		snic_release_req_buf(snic, rqi, sc);
2129 
2130 	SNIC_TRC(snic->shost->host_no, snic_cmd_tag(sc), (ulong) sc,
2131 		 jiffies_to_msecs(jiffies - start_time), (ulong) rqi,
2132 		 SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
2133 }
2134 
2135 /*
2136  * The SCSI EH thread issues a LUN reset when one or more commands on a
2137  * LUN fail to get aborted. It calls the driver's eh_device_reset handler
2138  * with a SCSI command on that LUN.
2139  */
2140 int
2141 snic_device_reset(struct scsi_cmnd *sc)
2142 {
2143 	struct Scsi_Host *shost = sc->device->host;
2144 	struct snic *snic = shost_priv(shost);
2145 	struct snic_req_info *rqi = NULL;
2146 	int tag = snic_cmd_tag(sc);
2147 	u32 start_time = jiffies;
2148 	int ret = FAILED;
2149 	int dr_supp = 0;
2150 
2151 	SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n",
2152 		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
2153 		      snic_cmd_tag(sc));
2154 	dr_supp = snic_dev_reset_supported(sc->device);
2155 	if (!dr_supp) {
2156 		/* device reset op is not supported */
2157 		SNIC_HOST_INFO(shost, "LUN Reset Op not supported.\n");
2158 		snic_unlink_and_release_req(snic, sc, SNIC_DEV_RST_NOTSUP);
2159 
2160 		goto dev_rst_end;
2161 	}
2162 
2163 	if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) {
2164 		snic_unlink_and_release_req(snic, sc, 0);
2165 		SNIC_HOST_ERR(shost, "Devrst: Parent Devs are not online.\n");
2166 
2167 		goto dev_rst_end;
2168 	}
2169 
2170 	/* There is no tag when the LUN reset is issued through an ioctl. */
2171 	if (unlikely(tag <= SNIC_NO_TAG)) {
2172 		SNIC_HOST_INFO(snic->shost,
2173 			       "Devrst: LUN Reset Recvd thru IOCTL.\n");
2174 
2175 		rqi = snic_req_init(snic, 0);
2176 		if (!rqi)
2177 			goto dev_rst_end;
2178 
2179 		memset(scsi_cmd_priv(sc), 0,
2180 			sizeof(struct snic_internal_io_state));
2181 		CMD_SP(sc) = (char *)rqi;
2182 		CMD_FLAGS(sc) = SNIC_NO_FLAGS;
2183 
2184 		/* Add special tag for a device reset coming from user space */
2185 		rqi->tm_tag = SNIC_TAG_IOCTL_DEV_RST;
2186 		rqi->sc = sc;
2187 	}
2188 
2189 	ret = snic_send_dr_and_wait(snic, sc);
2190 	if (ret) {
2191 		SNIC_HOST_ERR(snic->shost,
2192 			      "Devrst: IO w/ Tag %x Failed w/ err = %d\n",
2193 			      tag, ret);
2194 
2195 		snic_unlink_and_release_req(snic, sc, 0);
2196 
2197 		goto dev_rst_end;
2198 	}
2199 
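	/*
	 * The reset request completed or timed out; snic_dr_finish()
	 * evaluates its status and cleans up remaining IOs on the LUN.
	 */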
2200 	ret = snic_dr_finish(snic, sc);
2201 
2202 dev_rst_end:
2203 	SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
2204 		 jiffies_to_msecs(jiffies - start_time),
2205 		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
2206 
2207 	SNIC_SCSI_DBG(snic->shost,
2208 		      "Devrst: Returning from Device Reset : %s\n",
2209 		      (ret == SUCCESS) ? "SUCCESS" : "FAILED");
2210 
2211 	return ret;
2212 } /* end of snic_device_reset */
2213 
2221 /*
2222  * snic_issue_hba_reset : Queues FW Reset Request.
2223  */
2224 static int
2225 snic_issue_hba_reset(struct snic *snic, struct scsi_cmnd *sc)
2226 {
2227 	struct snic_req_info *rqi = NULL;
2228 	struct snic_host_req *req = NULL;
2229 	spinlock_t *io_lock = NULL;
2230 	DECLARE_COMPLETION_ONSTACK(wait);
2231 	unsigned long flags;
2232 	int ret = -ENOMEM;
2233 
2234 	rqi = snic_req_init(snic, 0);
2235 	if (!rqi) {
2236 		ret = -ENOMEM;
2237 
2238 		goto hba_rst_end;
2239 	}
2240 
2241 	if (snic_cmd_tag(sc) == SCSI_NO_TAG) {
2242 		memset(scsi_cmd_priv(sc), 0,
2243 			sizeof(struct snic_internal_io_state));
2244 		SNIC_HOST_INFO(snic->shost, "issu_hr:Host reset thru ioctl.\n");
2245 		rqi->sc = sc;
2246 	}
2247 
2248 	req = rqi_to_req(rqi);
2249 
2250 	io_lock = snic_io_lock_hash(snic, sc);
2251 	spin_lock_irqsave(io_lock, flags);
2252 	SNIC_BUG_ON(CMD_SP(sc) != NULL);
2253 	CMD_STATE(sc) = SNIC_IOREQ_PENDING;
2254 	CMD_SP(sc) = (char *) rqi;
2255 	CMD_FLAGS(sc) |= SNIC_IO_INITIALIZED;
2256 	snic->remove_wait = &wait;
2257 	spin_unlock_irqrestore(io_lock, flags);
2258 
2259 	/* Initialize Request */
2260 	snic_io_hdr_enc(&req->hdr, SNIC_REQ_HBA_RESET, 0, snic_cmd_tag(sc),
2261 			snic->config.hid, 0, (ulong) rqi);
2262 
2263 	req->u.reset.flags = 0;
2264 
2265 	ret = snic_queue_wq_desc(snic, req, sizeof(*req));
2266 	if (ret) {
2267 		SNIC_HOST_ERR(snic->shost,
2268 			      "issu_hr: Queuing HBA Reset Failed w/ err %d\n",
2269 			      ret);
2270 
2271 		goto hba_rst_err;
2272 	}
2273 
2274 	spin_lock_irqsave(io_lock, flags);
2275 	CMD_FLAGS(sc) |= SNIC_HOST_RESET_ISSUED;
2276 	spin_unlock_irqrestore(io_lock, flags);
2277 	atomic64_inc(&snic->s_stats.reset.hba_resets);
2278 	SNIC_HOST_INFO(snic->shost, "Queued HBA Reset Successfully.\n");
2279 
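	/*
	 * Wait for the reset to complete. The completion path is
	 * expected to move the snic state out of SNIC_FWRESET, so still
	 * being in SNIC_FWRESET below means the reset timed out.
	 */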
2280 	wait_for_completion_timeout(snic->remove_wait,
2281 				    SNIC_HOST_RESET_TIMEOUT);
2282 
2283 	if (snic_get_state(snic) == SNIC_FWRESET) {
2284 		SNIC_HOST_ERR(snic->shost, "reset_cmpl: Reset timed out.\n");
2285 		ret = -ETIMEDOUT;
2286 
2287 		goto hba_rst_err;
2288 	}
2289 
2290 	spin_lock_irqsave(io_lock, flags);
2291 	snic->remove_wait = NULL;
2292 	rqi = (struct snic_req_info *) CMD_SP(sc);
2293 	CMD_SP(sc) = NULL;
2294 	spin_unlock_irqrestore(io_lock, flags);
2295 
2296 	if (rqi)
2297 		snic_req_free(snic, rqi);
2298 
2299 	ret = 0;
2300 
2301 	return ret;
2302 
2303 hba_rst_err:
2304 	spin_lock_irqsave(io_lock, flags);
2305 	snic->remove_wait = NULL;
2306 	rqi = (struct snic_req_info *) CMD_SP(sc);
2307 	CMD_SP(sc) = NULL;
2308 	spin_unlock_irqrestore(io_lock, flags);
2309 
2310 	if (rqi)
2311 		snic_req_free(snic, rqi);
2312 
2313 hba_rst_end:
2314 	SNIC_HOST_ERR(snic->shost,
2315 		      "reset:HBA Reset Failed w/ err = %d.\n",
2316 		      ret);
2317 
2318 	return ret;
2319 } /* end of snic_issue_hba_reset */
2320 
2321 int
2322 snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
2323 {
2324 	struct snic *snic = shost_priv(shost);
2325 	enum snic_state sv_state;
2326 	unsigned long flags;
2327 	int ret = FAILED;
2328 
2329 	/* Set snic state to SNIC_FWRESET */
2330 	sv_state = snic_get_state(snic);
2331 
2332 	spin_lock_irqsave(&snic->snic_lock, flags);
2333 	if (snic_get_state(snic) == SNIC_FWRESET) {
2334 		spin_unlock_irqrestore(&snic->snic_lock, flags);
2335 		SNIC_HOST_INFO(shost, "reset:prev reset is in progress\n");
2336 
2337 		msleep(SNIC_HOST_RESET_TIMEOUT);
2338 		ret = SUCCESS;
2339 
2340 		goto reset_end;
2341 	}
2342 
2343 	snic_set_state(snic, SNIC_FWRESET);
2344 	spin_unlock_irqrestore(&snic->snic_lock, flags);
2345 
2347 	/* Wait for all IOs that have entered snic_queuecommand() to be issued */
2348 	while (atomic_read(&snic->ios_inflight))
2349 		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
2350 
2351 	ret = snic_issue_hba_reset(snic, sc);
2352 	if (ret) {
2353 		SNIC_HOST_ERR(shost,
2354 			      "reset:Host Reset Failed w/ err %d.\n",
2355 			      ret);
2356 		spin_lock_irqsave(&snic->snic_lock, flags);
2357 		snic_set_state(snic, sv_state);
2358 		spin_unlock_irqrestore(&snic->snic_lock, flags);
2359 		atomic64_inc(&snic->s_stats.reset.hba_reset_fail);
2360 		ret = FAILED;
2361 
2362 		goto reset_end;
2363 	}
2364 
2365 	ret = SUCCESS;
2366 
2367 reset_end:
2368 	return ret;
2369 } /* end of snic_reset */
2370 
2371 /*
2372  * SCSI error handling calls the driver's eh_host_reset handler if all
2373  * prior error handling levels return FAILED.
2374  *
2375  * Host reset is the highest level of error recovery. If this fails, the
2376  * host is offlined by SCSI.
2377  */
2378 int
2379 snic_host_reset(struct scsi_cmnd *sc)
2380 {
2381 	struct Scsi_Host *shost = sc->device->host;
2382 	u32 start_time  = jiffies;
2383 	int ret;
2384 
2385 	SNIC_SCSI_DBG(shost,
2386 		      "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n",
2387 		      sc, sc->cmnd[0], scsi_cmd_to_rq(sc),
2388 		      snic_cmd_tag(sc), CMD_FLAGS(sc));
2389 
2390 	ret = snic_reset(shost, sc);
2391 
2392 	SNIC_TRC(shost->host_no, snic_cmd_tag(sc), (ulong) sc,
2393 		 jiffies_to_msecs(jiffies - start_time),
2394 		 0, SNIC_TRC_CMD(sc), SNIC_TRC_CMD_STATE_FLAGS(sc));
2395 
2396 	return ret;
2397 } /* end of snic_host_reset */
2398 
2399 /*
2400  * snic_cmpl_pending_tmreq : Caller should hold io_lock
2401  */
2402 static void
2403 snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
2404 {
2405 	struct snic_req_info *rqi = NULL;
2406 
2407 	SNIC_SCSI_DBG(snic->shost,
2408 		      "Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
2409 		      sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));
2410 
2411 	/*
2412 	 * Case: FW did not post the ITMF completion due to PCIe errors.
2413 	 * Mark the abort status as success so that SCSI completion is
2414 	 * invoked in snic_abort_finish().
2415 	 */
2416 	CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;
2417 
2418 	rqi = (struct snic_req_info *) CMD_SP(sc);
2419 	if (!rqi)
2420 		return;
2421 
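	/*
	 * Wake whichever waiter is pending; a device-reset waiter takes
	 * precedence over an abort waiter.
	 */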
2422 	if (rqi->dr_done)
2423 		complete(rqi->dr_done);
2424 	else if (rqi->abts_done)
2425 		complete(rqi->abts_done);
2426 }
2427 
2428 /*
2429  * snic_scsi_cleanup: Walks through tag map and releases the reqs
2430  */
2431 static void
2432 snic_scsi_cleanup(struct snic *snic, int ex_tag)
2433 {
2434 	struct snic_req_info *rqi = NULL;
2435 	struct scsi_cmnd *sc = NULL;
2436 	spinlock_t *io_lock = NULL;
2437 	unsigned long flags;
2438 	int tag;
2439 	u64 st_time = 0;
2440 
2441 	SNIC_SCSI_DBG(snic->shost, "sc_clean: scsi cleanup.\n");
2442 
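	/*
	 * Walk the whole tag space; each IO is examined and released
	 * under the io_lock for its tag.
	 */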
2443 	for (tag = 0; tag < snic->max_tag_id; tag++) {
2444 		/* Skip ex_tag */
2445 		if (tag == ex_tag)
2446 			continue;
2447 
2448 		io_lock = snic_io_lock_tag(snic, tag);
2449 		spin_lock_irqsave(io_lock, flags);
2450 		sc = scsi_host_find_tag(snic->shost, tag);
2451 		if (!sc) {
2452 			spin_unlock_irqrestore(io_lock, flags);
2453 
2454 			continue;
2455 		}
2456 
2457 		if (unlikely(snic_tmreq_pending(sc))) {
2458 			/*
2459 			 * The FW completed the reset without posting
2460 			 * completions for the outstanding IOs.
2461 			 */
2462 			snic_cmpl_pending_tmreq(snic, sc);
2463 			spin_unlock_irqrestore(io_lock, flags);
2464 
2465 			continue;
2466 		}
2467 
2468 		rqi = (struct snic_req_info *) CMD_SP(sc);
2469 		if (!rqi) {
2470 			spin_unlock_irqrestore(io_lock, flags);
2471 
2472 			goto cleanup;
2473 		}
2474 
2475 		SNIC_SCSI_DBG(snic->shost,
2476 			      "sc_clean: sc %p, rqi %p, tag %d flags 0x%llx\n",
2477 			      sc, rqi, tag, CMD_FLAGS(sc));
2478 
2479 		CMD_SP(sc) = NULL;
2480 		CMD_FLAGS(sc) |= SNIC_SCSI_CLEANUP;
2481 		spin_unlock_irqrestore(io_lock, flags);
2482 		st_time = rqi->start_time;
2483 
2484 		SNIC_HOST_INFO(snic->shost,
2485 			       "sc_clean: Releasing rqi %p : flags 0x%llx\n",
2486 			       rqi, CMD_FLAGS(sc));
2487 
2488 		snic_release_req_buf(snic, rqi, sc);
2489 
2490 cleanup:
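		/*
		 * Complete the command back to the mid-layer;
		 * DID_TRANSPORT_DISRUPTED allows it to be retried once
		 * the transport recovers.
		 */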
2491 		sc->result = DID_TRANSPORT_DISRUPTED << 16;
2492 		SNIC_HOST_INFO(snic->shost,
2493 			       "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
2494 			       sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi,
2495 			       jiffies_to_msecs(jiffies - st_time));
2496 
2497 		/* Update IO stats */
2498 		snic_stats_update_io_cmpl(&snic->s_stats);
2499 
2500 		SNIC_TRC(snic->shost->host_no, tag, (ulong) sc,
2501 			 jiffies_to_msecs(jiffies - st_time), 0,
2502 			 SNIC_TRC_CMD(sc),
2503 			 SNIC_TRC_CMD_STATE_FLAGS(sc));
2504 
2505 		scsi_done(sc);
2506 	}
2507 } /* end of snic_scsi_cleanup */
2508 
2509 void
2510 snic_shutdown_scsi_cleanup(struct snic *snic)
2511 {
2512 	SNIC_HOST_INFO(snic->shost, "Shutdown time SCSI Cleanup.\n");
2513 
2514 	snic_scsi_cleanup(snic, SCSI_NO_TAG);
2515 } /* end of snic_shutdown_scsi_cleanup */
2516 
2517 /*
2518  * snic_internal_abort_io
2519  * called by : snic_tgt_scsi_abort_io
2520  */
2521 static int
2522 snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
2523 {
2524 	struct snic_req_info *rqi = NULL;
2525 	spinlock_t *io_lock = NULL;
2526 	unsigned long flags;
2527 	u32 sv_state = 0;
2528 	int ret = 0;
2529 
2530 	io_lock = snic_io_lock_hash(snic, sc);
2531 	spin_lock_irqsave(io_lock, flags);
2532 	rqi = (struct snic_req_info *) CMD_SP(sc);
2533 	if (!rqi)
2534 		goto skip_internal_abts;
2535 
2536 	if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
2537 		goto skip_internal_abts;
2538 
2539 	if ((CMD_FLAGS(sc) & SNIC_DEVICE_RESET) &&
2540 		(!(CMD_FLAGS(sc) & SNIC_DEV_RST_ISSUED))) {
2542 		SNIC_SCSI_DBG(snic->shost,
2543 			      "internal_abts: dev rst not pending sc 0x%p\n",
2544 			      sc);
2545 
2546 		goto skip_internal_abts;
2547 	}
2548 
2550 	if (!(CMD_FLAGS(sc) & SNIC_IO_ISSUED)) {
2551 		SNIC_SCSI_DBG(snic->shost,
2552 			"internal_abts: IO not yet issued sc 0x%p tag 0x%x flags 0x%llx state %d\n",
2553 			sc, snic_cmd_tag(sc), CMD_FLAGS(sc), CMD_STATE(sc));
2554 
2555 		goto skip_internal_abts;
2556 	}
2557 
2558 	sv_state = CMD_STATE(sc);
2559 	CMD_STATE(sc) = SNIC_IOREQ_ABTS_PENDING;
2560 	CMD_ABTS_STATUS(sc) = SNIC_INVALID_CODE;
2561 	CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_PENDING;
2562 
2563 	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET) {
2564 		/* stats */
2565 		rqi->tm_tag = SNIC_TAG_DEV_RST;
2566 		SNIC_SCSI_DBG(snic->shost, "internal_abts:dev rst sc %p\n", sc);
2567 	}
2568 
2569 	SNIC_SCSI_DBG(snic->shost, "internal_abts: Issuing abts tag %x\n",
2570 		      snic_cmd_tag(sc));
2571 	SNIC_BUG_ON(rqi->abts_done);
2572 	spin_unlock_irqrestore(io_lock, flags);
2573 
2574 	ret = snic_queue_abort_req(snic, rqi, sc, tmf);
2575 	if (ret) {
2576 		SNIC_HOST_ERR(snic->shost,
2577 			      "internal_abts: Tag = %x , Failed w/ err = %d\n",
2578 			      snic_cmd_tag(sc), ret);
2579 
2580 		spin_lock_irqsave(io_lock, flags);
2581 
2582 		if (CMD_STATE(sc) == SNIC_IOREQ_ABTS_PENDING)
2583 			CMD_STATE(sc) = sv_state;
2584 
2585 		goto skip_internal_abts;
2586 	}
2587 
2588 	spin_lock_irqsave(io_lock, flags);
2589 	if (CMD_FLAGS(sc) & SNIC_DEVICE_RESET)
2590 		CMD_FLAGS(sc) |= SNIC_DEV_RST_TERM_ISSUED;
2591 	else
2592 		CMD_FLAGS(sc) |= SNIC_IO_INTERNAL_TERM_ISSUED;
2593 
2594 	ret = SUCCESS;
2595 
2596 skip_internal_abts:
2597 	lockdep_assert_held(io_lock);
2598 	spin_unlock_irqrestore(io_lock, flags);
2599 
2600 	return ret;
2601 } /* end of snic_internal_abort_io */
2602 
2603 /*
2604  * snic_tgt_scsi_abort_io : called by snic_tgt_del
2605  */
2606 int
2607 snic_tgt_scsi_abort_io(struct snic_tgt *tgt)
2608 {
2609 	struct snic *snic = NULL;
2610 	struct scsi_cmnd *sc = NULL;
2611 	struct snic_tgt *sc_tgt = NULL;
2612 	spinlock_t *io_lock = NULL;
2613 	unsigned long flags;
2614 	int ret = 0, tag, abt_cnt = 0, tmf = 0;
2615 
2616 	if (!tgt)
2617 		return -1;
2618 
2619 	snic = shost_priv(snic_tgt_to_shost(tgt));
2620 	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: Cleaning Pending IOs.\n");
2621 
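	/*
	 * DAS targets get a plain task abort; all other target types
	 * get a task terminate.
	 */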
2622 	if (tgt->tdata.typ == SNIC_TGT_DAS)
2623 		tmf = SNIC_ITMF_ABTS_TASK;
2624 	else
2625 		tmf = SNIC_ITMF_ABTS_TASK_TERM;
2626 
2627 	for (tag = 0; tag < snic->max_tag_id; tag++) {
2628 		io_lock = snic_io_lock_tag(snic, tag);
2629 
2630 		spin_lock_irqsave(io_lock, flags);
2631 		sc = scsi_host_find_tag(snic->shost, tag);
2632 		if (!sc) {
2633 			spin_unlock_irqrestore(io_lock, flags);
2634 
2635 			continue;
2636 		}
2637 
2638 		sc_tgt = starget_to_tgt(scsi_target(sc->device));
2639 		if (sc_tgt != tgt) {
2640 			spin_unlock_irqrestore(io_lock, flags);
2641 
2642 			continue;
2643 		}
2644 		spin_unlock_irqrestore(io_lock, flags);
2645 
2646 		ret = snic_internal_abort_io(snic, sc, tmf);
2647 		if (ret < 0) {
2648 			SNIC_HOST_ERR(snic->shost,
2649 				      "tgt_abt_io: Tag %x, Failed w err = %d\n",
2650 				      tag, ret);
2651 
2652 			continue;
2653 		}
2654 
2655 		if (ret == SUCCESS)
2656 			abt_cnt++;
2657 	}
2658 
2659 	SNIC_SCSI_DBG(snic->shost, "tgt_abt_io: abt_cnt = %d\n", abt_cnt);
2660 
2661 	return 0;
2662 } /* end of snic_tgt_scsi_abort_io */
2663