xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_scsi.c (revision d2999e1b)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <asm/unaligned.h>
27 #include <linux/crc-t10dif.h>
28 #include <net/checksum.h>
29 
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/scsi_transport_fc.h>
36 
37 #include "lpfc_version.h"
38 #include "lpfc_hw4.h"
39 #include "lpfc_hw.h"
40 #include "lpfc_sli.h"
41 #include "lpfc_sli4.h"
42 #include "lpfc_nl.h"
43 #include "lpfc_disc.h"
44 #include "lpfc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc_logmsg.h"
47 #include "lpfc_crtn.h"
48 #include "lpfc_vport.h"
49 
50 #define LPFC_RESET_WAIT  2
51 #define LPFC_ABORT_WAIT  2
52 
53 int _dump_buf_done = 1;
54 
55 static char *dif_op_str[] = {
56 	"PROT_NORMAL",
57 	"PROT_READ_INSERT",
58 	"PROT_WRITE_STRIP",
59 	"PROT_READ_STRIP",
60 	"PROT_WRITE_INSERT",
61 	"PROT_READ_PASS",
62 	"PROT_WRITE_PASS",
63 };
64 
65 struct scsi_dif_tuple {
66 	__be16 guard_tag;       /* Checksum */
67 	__be16 app_tag;         /* Opaque storage */
68 	__be32 ref_tag;         /* Target LBA or indirect LBA */
69 };
70 
71 static struct lpfc_rport_data *
72 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
73 {
74 	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
75 
76 	if (vport->phba->cfg_fof)
77 		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
78 	else
79 		return (struct lpfc_rport_data *)sdev->hostdata;
80 }
81 
82 static void
83 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
84 static void
85 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
86 static int
87 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
88 
89 static void
90 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
91 {
92 	void *src, *dst;
93 	struct scatterlist *sgde = scsi_sglist(cmnd);
94 
95 	if (!_dump_buf_data) {
96 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
97 			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
98 				__func__);
99 		return;
100 	}
101 
102 
103 	if (!sgde) {
104 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
105 			"9051 BLKGRD: ERROR: data scatterlist is null\n");
106 		return;
107 	}
108 
109 	dst = (void *) _dump_buf_data;
110 	while (sgde) {
111 		src = sg_virt(sgde);
112 		memcpy(dst, src, sgde->length);
113 		dst += sgde->length;
114 		sgde = sg_next(sgde);
115 	}
116 }
117 
118 static void
119 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
120 {
121 	void *src, *dst;
122 	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
123 
124 	if (!_dump_buf_dif) {
125 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
126 			"9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
127 				__func__);
128 		return;
129 	}
130 
131 	if (!sgde) {
132 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
133 			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
134 		return;
135 	}
136 
137 	dst = _dump_buf_dif;
138 	while (sgde) {
139 		src = sg_virt(sgde);
140 		memcpy(dst, src, sgde->length);
141 		dst += sgde->length;
142 		sgde = sg_next(sgde);
143 	}
144 }
145 
146 static inline unsigned
147 lpfc_cmd_blksize(struct scsi_cmnd *sc)
148 {
149 	return sc->device->sector_size;
150 }
151 
152 #define LPFC_CHECK_PROTECT_GUARD	1
153 #define LPFC_CHECK_PROTECT_REF		2
154 static inline unsigned
155 lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
156 {
157 	return 1;
158 }
159 
160 static inline unsigned
161 lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
162 {
163 	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
164 		return 0;
165 	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
166 		return 1;
167 	return 0;
168 }
169 
170 /**
171  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
172  * @phba: Pointer to HBA object.
173  * @lpfc_cmd: lpfc scsi command object pointer.
174  *
175  * This function is called from the lpfc_prep_task_mgmt_cmd function to
176  * set the last bit in the response sge entry.
177  **/
178 static void
179 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
180 				struct lpfc_scsi_buf *lpfc_cmd)
181 {
182 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
183 	if (sgl) {
184 		sgl += 1;
185 		sgl->word2 = le32_to_cpu(sgl->word2);
186 		bf_set(lpfc_sli4_sge_last, sgl, 1);
187 		sgl->word2 = cpu_to_le32(sgl->word2);
188 	}
189 }
190 
191 /**
192  * lpfc_update_stats - Update statistical data for the command completion
193  * @phba: Pointer to HBA object.
194  * @lpfc_cmd: lpfc scsi command object pointer.
195  *
196  * This function is called when there is a command completion and this
197  * function updates the statistical data for the command completion.
198  **/
199 static void
200 lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
201 {
202 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
203 	struct lpfc_nodelist *pnode = rdata->pnode;
204 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
205 	unsigned long flags;
206 	struct Scsi_Host  *shost = cmd->device->host;
207 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
208 	unsigned long latency;
209 	int i;
210 
211 	if (cmd->result)
212 		return;
213 
214 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
215 
216 	spin_lock_irqsave(shost->host_lock, flags);
217 	if (!vport->stat_data_enabled ||
218 		vport->stat_data_blocked ||
219 		!pnode ||
220 		!pnode->lat_data ||
221 		(phba->bucket_type == LPFC_NO_BUCKET)) {
222 		spin_unlock_irqrestore(shost->host_lock, flags);
223 		return;
224 	}
225 
226 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
227 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
228 			phba->bucket_step;
229 		/* check array subscript bounds */
230 		if (i < 0)
231 			i = 0;
232 		else if (i >= LPFC_MAX_BUCKET_COUNT)
233 			i = LPFC_MAX_BUCKET_COUNT - 1;
234 	} else {
235 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
236 			if (latency <= (phba->bucket_base +
237 				((1<<i)*phba->bucket_step)))
238 				break;
239 	}
240 
241 	pnode->lat_data[i].cmd_count++;
242 	spin_unlock_irqrestore(shost->host_lock, flags);
243 }
244 
245 /**
246  * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
247  * @phba: Pointer to HBA context object.
248  * @vport: Pointer to vport object.
249  * @ndlp: Pointer to FC node associated with the target.
250  * @lun: Lun number of the scsi device.
251  * @old_val: Old value of the queue depth.
252  * @new_val: New value of the queue depth.
253  *
254  * This function sends an event to the mgmt application indicating
255  * there is a change in the scsi device queue depth.
256  **/
257 static void
258 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
259 		struct lpfc_vport  *vport,
260 		struct lpfc_nodelist *ndlp,
261 		uint32_t lun,
262 		uint32_t old_val,
263 		uint32_t new_val)
264 {
265 	struct lpfc_fast_path_event *fast_path_evt;
266 	unsigned long flags;
267 
268 	fast_path_evt = lpfc_alloc_fast_evt(phba);
269 	if (!fast_path_evt)
270 		return;
271 
272 	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
273 		FC_REG_SCSI_EVENT;
274 	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
275 		LPFC_EVENT_VARQUEDEPTH;
276 
277 	/* Report all luns with change in queue depth */
278 	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
279 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
280 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
281 			&ndlp->nlp_portname, sizeof(struct lpfc_name));
282 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
283 			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
284 	}
285 
286 	fast_path_evt->un.queue_depth_evt.oldval = old_val;
287 	fast_path_evt->un.queue_depth_evt.newval = new_val;
288 	fast_path_evt->vport = vport;
289 
290 	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
291 	spin_lock_irqsave(&phba->hbalock, flags);
292 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
293 	spin_unlock_irqrestore(&phba->hbalock, flags);
294 	lpfc_worker_wake_up(phba);
295 
296 	return;
297 }
298 
299 /**
300  * lpfc_change_queue_depth - Alter scsi device queue depth
301  * @sdev: Pointer the scsi device on which to change the queue depth.
302  * @qdepth: New queue depth to set the sdev to.
303  * @reason: The reason for the queue depth change.
304  *
305  * This function is called by the midlayer and the LLD to alter the queue
306  * depth for a scsi device. This function sets the queue depth to the new
307  * value and sends an event out to log the queue depth change.
308  **/
309 int
310 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
311 {
312 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
313 	struct lpfc_hba   *phba = vport->phba;
314 	struct lpfc_rport_data *rdata;
315 	unsigned long new_queue_depth, old_queue_depth;
316 
317 	old_queue_depth = sdev->queue_depth;
318 
319 	switch (reason) {
320 	case SCSI_QDEPTH_DEFAULT:
321 		/* change request from sysfs, fall through */
322 	case SCSI_QDEPTH_RAMP_UP:
323 		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
324 		break;
325 	case SCSI_QDEPTH_QFULL:
326 		if (scsi_track_queue_full(sdev, qdepth) == 0)
327 			return sdev->queue_depth;
328 
329 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
330 				 "0711 detected queue full - lun queue "
331 				 "depth adjusted to %d.\n", sdev->queue_depth);
332 		break;
333 	default:
334 		return -EOPNOTSUPP;
335 	}
336 
337 	new_queue_depth = sdev->queue_depth;
338 	rdata = lpfc_rport_data_from_scsi_device(sdev);
339 	if (rdata)
340 		lpfc_send_sdev_queuedepth_change_event(phba, vport,
341 						       rdata->pnode, sdev->lun,
342 						       old_queue_depth,
343 						       new_queue_depth);
344 	return sdev->queue_depth;
345 }
346 
347 /**
348  * lpfc_change_queue_type() - Change a device's scsi tag queuing type
349  * @sdev: Pointer the scsi device whose queue depth is to change
350  * @tag_type: Identifier for queue tag type
351  */
352 static int
353 lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
354 {
355 	if (sdev->tagged_supported) {
356 		scsi_set_tag_type(sdev, tag_type);
357 		if (tag_type)
358 			scsi_activate_tcq(sdev, sdev->queue_depth);
359 		else
360 			scsi_deactivate_tcq(sdev, sdev->queue_depth);
361 	} else
362 		tag_type = 0;
363 
364 	return tag_type;
365 }
366 
367 /**
368  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
369  * @phba: The Hba for which this call is being executed.
370  *
371  * This routine is called when there is resource error in driver or firmware.
372  * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
373  * posts at most 1 event each second. This routine wakes up worker thread of
374  * @phba to process WORKER_RAM_DOWN_EVENT event.
375  *
376  * This routine should be called with no lock held.
377  **/
378 void
379 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
380 {
381 	unsigned long flags;
382 	uint32_t evt_posted;
383 
384 	spin_lock_irqsave(&phba->hbalock, flags);
385 	atomic_inc(&phba->num_rsrc_err);
386 	phba->last_rsrc_error_time = jiffies;
387 
388 	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
389 		spin_unlock_irqrestore(&phba->hbalock, flags);
390 		return;
391 	}
392 
393 	phba->last_ramp_down_time = jiffies;
394 
395 	spin_unlock_irqrestore(&phba->hbalock, flags);
396 
397 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
398 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
399 	if (!evt_posted)
400 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
401 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
402 
403 	if (!evt_posted)
404 		lpfc_worker_wake_up(phba);
405 	return;
406 }
407 
408 /**
409  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
410  * @phba: The Hba for which this call is being executed.
411  *
412  * This routine is called to  process WORKER_RAMP_DOWN_QUEUE event for worker
413  * thread.This routine reduces queue depth for all scsi device on each vport
414  * associated with @phba.
415  **/
416 void
417 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
418 {
419 	struct lpfc_vport **vports;
420 	struct Scsi_Host  *shost;
421 	struct scsi_device *sdev;
422 	unsigned long new_queue_depth;
423 	unsigned long num_rsrc_err, num_cmd_success;
424 	int i;
425 
426 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
427 	num_cmd_success = atomic_read(&phba->num_cmd_success);
428 
429 	/*
430 	 * The error and success command counters are global per
431 	 * driver instance.  If another handler has already
432 	 * operated on this error event, just exit.
433 	 */
434 	if (num_rsrc_err == 0)
435 		return;
436 
437 	vports = lpfc_create_vport_work_array(phba);
438 	if (vports != NULL)
439 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
440 			shost = lpfc_shost_from_vport(vports[i]);
441 			shost_for_each_device(sdev, shost) {
442 				new_queue_depth =
443 					sdev->queue_depth * num_rsrc_err /
444 					(num_rsrc_err + num_cmd_success);
445 				if (!new_queue_depth)
446 					new_queue_depth = sdev->queue_depth - 1;
447 				else
448 					new_queue_depth = sdev->queue_depth -
449 								new_queue_depth;
450 				lpfc_change_queue_depth(sdev, new_queue_depth,
451 							SCSI_QDEPTH_DEFAULT);
452 			}
453 		}
454 	lpfc_destroy_vport_work_array(phba, vports);
455 	atomic_set(&phba->num_rsrc_err, 0);
456 	atomic_set(&phba->num_cmd_success, 0);
457 }
458 
459 /**
460  * lpfc_scsi_dev_block - set all scsi hosts to block state
461  * @phba: Pointer to HBA context object.
462  *
463  * This function walks vport list and set each SCSI host to block state
464  * by invoking fc_remote_port_delete() routine. This function is invoked
465  * with EEH when device's PCI slot has been permanently disabled.
466  **/
467 void
468 lpfc_scsi_dev_block(struct lpfc_hba *phba)
469 {
470 	struct lpfc_vport **vports;
471 	struct Scsi_Host  *shost;
472 	struct scsi_device *sdev;
473 	struct fc_rport *rport;
474 	int i;
475 
476 	vports = lpfc_create_vport_work_array(phba);
477 	if (vports != NULL)
478 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
479 			shost = lpfc_shost_from_vport(vports[i]);
480 			shost_for_each_device(sdev, shost) {
481 				rport = starget_to_rport(scsi_target(sdev));
482 				fc_remote_port_delete(rport);
483 			}
484 		}
485 	lpfc_destroy_vport_work_array(phba, vports);
486 }
487 
488 /**
489  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
490  * @vport: The virtual port for which this call being executed.
491  * @num_to_allocate: The requested number of buffers to allocate.
492  *
493  * This routine allocates a scsi buffer for device with SLI-3 interface spec,
494  * the scsi buffer contains all the necessary information needed to initiate
495  * a SCSI I/O. The non-DMAable buffer region contains information to build
496  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
497  * and the initial BPL. In addition to allocating memory, the FCP CMND and
498  * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
499  *
500  * Return codes:
501  *   int - number of scsi buffers that were allocated.
502  *   0 = failure, less than num_to_alloc is a partial failure.
503  **/
504 static int
505 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
506 {
507 	struct lpfc_hba *phba = vport->phba;
508 	struct lpfc_scsi_buf *psb;
509 	struct ulp_bde64 *bpl;
510 	IOCB_t *iocb;
511 	dma_addr_t pdma_phys_fcp_cmd;
512 	dma_addr_t pdma_phys_fcp_rsp;
513 	dma_addr_t pdma_phys_bpl;
514 	uint16_t iotag;
515 	int bcnt, bpl_size;
516 
517 	bpl_size = phba->cfg_sg_dma_buf_size -
518 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
519 
520 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
521 			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
522 			 num_to_alloc, phba->cfg_sg_dma_buf_size,
523 			 (int)sizeof(struct fcp_cmnd),
524 			 (int)sizeof(struct fcp_rsp), bpl_size);
525 
526 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
527 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
528 		if (!psb)
529 			break;
530 
531 		/*
532 		 * Get memory from the pci pool to map the virt space to pci
533 		 * bus space for an I/O.  The DMA buffer includes space for the
534 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
535 		 * necessary to support the sg_tablesize.
536 		 */
537 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
538 					GFP_KERNEL, &psb->dma_handle);
539 		if (!psb->data) {
540 			kfree(psb);
541 			break;
542 		}
543 
544 		/* Initialize virtual ptrs to dma_buf region. */
545 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
546 
547 		/* Allocate iotag for psb->cur_iocbq. */
548 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
549 		if (iotag == 0) {
550 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
551 					psb->data, psb->dma_handle);
552 			kfree(psb);
553 			break;
554 		}
555 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
556 
557 		psb->fcp_cmnd = psb->data;
558 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
559 		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
560 			sizeof(struct fcp_rsp);
561 
562 		/* Initialize local short-hand pointers. */
563 		bpl = psb->fcp_bpl;
564 		pdma_phys_fcp_cmd = psb->dma_handle;
565 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
566 		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
567 			sizeof(struct fcp_rsp);
568 
569 		/*
570 		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
571 		 * are sg list bdes.  Initialize the first two and leave the
572 		 * rest for queuecommand.
573 		 */
574 		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
575 		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
576 		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
577 		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
578 		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
579 
580 		/* Setup the physical region for the FCP RSP */
581 		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
582 		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
583 		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
584 		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
585 		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
586 
587 		/*
588 		 * Since the IOCB for the FCP I/O is built into this
589 		 * lpfc_scsi_buf, initialize it with all known data now.
590 		 */
591 		iocb = &psb->cur_iocbq.iocb;
592 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
593 		if ((phba->sli_rev == 3) &&
594 				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
595 			/* fill in immediate fcp command BDE */
596 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
597 			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
598 			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
599 					unsli3.fcp_ext.icd);
600 			iocb->un.fcpi64.bdl.addrHigh = 0;
601 			iocb->ulpBdeCount = 0;
602 			iocb->ulpLe = 0;
603 			/* fill in response BDE */
604 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
605 							BUFF_TYPE_BDE_64;
606 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
607 				sizeof(struct fcp_rsp);
608 			iocb->unsli3.fcp_ext.rbde.addrLow =
609 				putPaddrLow(pdma_phys_fcp_rsp);
610 			iocb->unsli3.fcp_ext.rbde.addrHigh =
611 				putPaddrHigh(pdma_phys_fcp_rsp);
612 		} else {
613 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
614 			iocb->un.fcpi64.bdl.bdeSize =
615 					(2 * sizeof(struct ulp_bde64));
616 			iocb->un.fcpi64.bdl.addrLow =
617 					putPaddrLow(pdma_phys_bpl);
618 			iocb->un.fcpi64.bdl.addrHigh =
619 					putPaddrHigh(pdma_phys_bpl);
620 			iocb->ulpBdeCount = 1;
621 			iocb->ulpLe = 1;
622 		}
623 		iocb->ulpClass = CLASS3;
624 		psb->status = IOSTAT_SUCCESS;
625 		/* Put it back into the SCSI buffer list */
626 		psb->cur_iocbq.context1  = psb;
627 		lpfc_release_scsi_buf_s3(phba, psb);
628 
629 	}
630 
631 	return bcnt;
632 }
633 
634 /**
635  * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
636  * @vport: pointer to lpfc vport data structure.
637  *
638  * This routine is invoked by the vport cleanup for deletions and the cleanup
639  * for an ndlp on removal.
640  **/
641 void
642 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
643 {
644 	struct lpfc_hba *phba = vport->phba;
645 	struct lpfc_scsi_buf *psb, *next_psb;
646 	unsigned long iflag = 0;
647 
648 	spin_lock_irqsave(&phba->hbalock, iflag);
649 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
650 	list_for_each_entry_safe(psb, next_psb,
651 				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
652 		if (psb->rdata && psb->rdata->pnode
653 			&& psb->rdata->pnode->vport == vport)
654 			psb->rdata = NULL;
655 	}
656 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
657 	spin_unlock_irqrestore(&phba->hbalock, iflag);
658 }
659 
660 /**
661  * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
662  * @phba: pointer to lpfc hba data structure.
663  * @axri: pointer to the fcp xri abort wcqe structure.
664  *
665  * This routine is invoked by the worker thread to process a SLI4 fast-path
666  * FCP aborted xri.
667  **/
668 void
669 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
670 			  struct sli4_wcqe_xri_aborted *axri)
671 {
672 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
673 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
674 	struct lpfc_scsi_buf *psb, *next_psb;
675 	unsigned long iflag = 0;
676 	struct lpfc_iocbq *iocbq;
677 	int i;
678 	struct lpfc_nodelist *ndlp;
679 	int rrq_empty = 0;
680 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
681 
682 	spin_lock_irqsave(&phba->hbalock, iflag);
683 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
684 	list_for_each_entry_safe(psb, next_psb,
685 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
686 		if (psb->cur_iocbq.sli4_xritag == xri) {
687 			list_del(&psb->list);
688 			psb->exch_busy = 0;
689 			psb->status = IOSTAT_SUCCESS;
690 			spin_unlock(
691 				&phba->sli4_hba.abts_scsi_buf_list_lock);
692 			if (psb->rdata && psb->rdata->pnode)
693 				ndlp = psb->rdata->pnode;
694 			else
695 				ndlp = NULL;
696 
697 			rrq_empty = list_empty(&phba->active_rrq_list);
698 			spin_unlock_irqrestore(&phba->hbalock, iflag);
699 			if (ndlp) {
700 				lpfc_set_rrq_active(phba, ndlp,
701 					psb->cur_iocbq.sli4_lxritag, rxid, 1);
702 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
703 			}
704 			lpfc_release_scsi_buf_s4(phba, psb);
705 			if (rrq_empty)
706 				lpfc_worker_wake_up(phba);
707 			return;
708 		}
709 	}
710 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
711 	for (i = 1; i <= phba->sli.last_iotag; i++) {
712 		iocbq = phba->sli.iocbq_lookup[i];
713 
714 		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
715 			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
716 			continue;
717 		if (iocbq->sli4_xritag != xri)
718 			continue;
719 		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
720 		psb->exch_busy = 0;
721 		spin_unlock_irqrestore(&phba->hbalock, iflag);
722 		if (!list_empty(&pring->txq))
723 			lpfc_worker_wake_up(phba);
724 		return;
725 
726 	}
727 	spin_unlock_irqrestore(&phba->hbalock, iflag);
728 }
729 
730 /**
731  * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
732  * @phba: pointer to lpfc hba data structure.
733  * @post_sblist: pointer to the scsi buffer list.
734  *
735  * This routine walks a list of scsi buffers that was passed in. It attempts
736  * to construct blocks of scsi buffer sgls which contains contiguous xris and
737  * uses the non-embedded SGL block post mailbox commands to post to the port.
738  * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
739  * embedded SGL post mailbox command for posting. The @post_sblist passed in
740  * must be local list, thus no lock is needed when manipulate the list.
741  *
742  * Returns: 0 = failure, non-zero number of successfully posted buffers.
743  **/
744 int
745 lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
746 			     struct list_head *post_sblist, int sb_count)
747 {
748 	struct lpfc_scsi_buf *psb, *psb_next;
749 	int status, sgl_size;
750 	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
751 	dma_addr_t pdma_phys_bpl1;
752 	int last_xritag = NO_XRI;
753 	LIST_HEAD(prep_sblist);
754 	LIST_HEAD(blck_sblist);
755 	LIST_HEAD(scsi_sblist);
756 
757 	/* sanity check */
758 	if (sb_count <= 0)
759 		return -EINVAL;
760 
761 	sgl_size = phba->cfg_sg_dma_buf_size -
762 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
763 
764 	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
765 		list_del_init(&psb->list);
766 		block_cnt++;
767 		if ((last_xritag != NO_XRI) &&
768 		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
769 			/* a hole in xri block, form a sgl posting block */
770 			list_splice_init(&prep_sblist, &blck_sblist);
771 			post_cnt = block_cnt - 1;
772 			/* prepare list for next posting block */
773 			list_add_tail(&psb->list, &prep_sblist);
774 			block_cnt = 1;
775 		} else {
776 			/* prepare list for next posting block */
777 			list_add_tail(&psb->list, &prep_sblist);
778 			/* enough sgls for non-embed sgl mbox command */
779 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
780 				list_splice_init(&prep_sblist, &blck_sblist);
781 				post_cnt = block_cnt;
782 				block_cnt = 0;
783 			}
784 		}
785 		num_posting++;
786 		last_xritag = psb->cur_iocbq.sli4_xritag;
787 
788 		/* end of repost sgl list condition for SCSI buffers */
789 		if (num_posting == sb_count) {
790 			if (post_cnt == 0) {
791 				/* last sgl posting block */
792 				list_splice_init(&prep_sblist, &blck_sblist);
793 				post_cnt = block_cnt;
794 			} else if (block_cnt == 1) {
795 				/* last single sgl with non-contiguous xri */
796 				if (sgl_size > SGL_PAGE_SIZE)
797 					pdma_phys_bpl1 = psb->dma_phys_bpl +
798 								SGL_PAGE_SIZE;
799 				else
800 					pdma_phys_bpl1 = 0;
801 				status = lpfc_sli4_post_sgl(phba,
802 						psb->dma_phys_bpl,
803 						pdma_phys_bpl1,
804 						psb->cur_iocbq.sli4_xritag);
805 				if (status) {
806 					/* failure, put on abort scsi list */
807 					psb->exch_busy = 1;
808 				} else {
809 					/* success, put on SCSI buffer list */
810 					psb->exch_busy = 0;
811 					psb->status = IOSTAT_SUCCESS;
812 					num_posted++;
813 				}
814 				/* success, put on SCSI buffer sgl list */
815 				list_add_tail(&psb->list, &scsi_sblist);
816 			}
817 		}
818 
819 		/* continue until a nembed page worth of sgls */
820 		if (post_cnt == 0)
821 			continue;
822 
823 		/* post block of SCSI buffer list sgls */
824 		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
825 						       post_cnt);
826 
827 		/* don't reset xirtag due to hole in xri block */
828 		if (block_cnt == 0)
829 			last_xritag = NO_XRI;
830 
831 		/* reset SCSI buffer post count for next round of posting */
832 		post_cnt = 0;
833 
834 		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
835 		while (!list_empty(&blck_sblist)) {
836 			list_remove_head(&blck_sblist, psb,
837 					 struct lpfc_scsi_buf, list);
838 			if (status) {
839 				/* failure, put on abort scsi list */
840 				psb->exch_busy = 1;
841 			} else {
842 				/* success, put on SCSI buffer list */
843 				psb->exch_busy = 0;
844 				psb->status = IOSTAT_SUCCESS;
845 				num_posted++;
846 			}
847 			list_add_tail(&psb->list, &scsi_sblist);
848 		}
849 	}
850 	/* Push SCSI buffers with sgl posted to the availble list */
851 	while (!list_empty(&scsi_sblist)) {
852 		list_remove_head(&scsi_sblist, psb,
853 				 struct lpfc_scsi_buf, list);
854 		lpfc_release_scsi_buf_s4(phba, psb);
855 	}
856 	return num_posted;
857 }
858 
859 /**
860  * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
861  * @phba: pointer to lpfc hba data structure.
862  *
863  * This routine walks the list of scsi buffers that have been allocated and
864  * repost them to the port by using SGL block post. This is needed after a
865  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
866  * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
867  * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
868  *
869  * Returns: 0 = success, non-zero failure.
870  **/
871 int
872 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
873 {
874 	LIST_HEAD(post_sblist);
875 	int num_posted, rc = 0;
876 
877 	/* get all SCSI buffers need to repost to a local list */
878 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
879 	spin_lock(&phba->scsi_buf_list_put_lock);
880 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
881 	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
882 	spin_unlock(&phba->scsi_buf_list_put_lock);
883 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
884 
885 	/* post the list of scsi buffer sgls to port if available */
886 	if (!list_empty(&post_sblist)) {
887 		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
888 						phba->sli4_hba.scsi_xri_cnt);
889 		/* failed to post any scsi buffer, return error */
890 		if (num_posted == 0)
891 			rc = -EIO;
892 	}
893 	return rc;
894 }
895 
896 /**
897  * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
898  * @vport: The virtual port for which this call being executed.
899  * @num_to_allocate: The requested number of buffers to allocate.
900  *
901  * This routine allocates scsi buffers for device with SLI-4 interface spec,
902  * the scsi buffer contains all the necessary information needed to initiate
903  * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
904  * them on a list, it post them to the port by using SGL block post.
905  *
906  * Return codes:
907  *   int - number of scsi buffers that were allocated and posted.
908  *   0 = failure, less than num_to_alloc is a partial failure.
909  **/
910 static int
911 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
912 {
913 	struct lpfc_hba *phba = vport->phba;
914 	struct lpfc_scsi_buf *psb;
915 	struct sli4_sge *sgl;
916 	IOCB_t *iocb;
917 	dma_addr_t pdma_phys_fcp_cmd;
918 	dma_addr_t pdma_phys_fcp_rsp;
919 	dma_addr_t pdma_phys_bpl;
920 	uint16_t iotag, lxri = 0;
921 	int bcnt, num_posted, sgl_size;
922 	LIST_HEAD(prep_sblist);
923 	LIST_HEAD(post_sblist);
924 	LIST_HEAD(scsi_sblist);
925 
926 	sgl_size = phba->cfg_sg_dma_buf_size -
927 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
928 
929 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
930 			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
931 			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
932 			 (int)sizeof(struct fcp_cmnd),
933 			 (int)sizeof(struct fcp_rsp));
934 
935 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
936 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
937 		if (!psb)
938 			break;
939 		/*
940 		 * Get memory from the pci pool to map the virt space to
941 		 * pci bus space for an I/O. The DMA buffer includes space
942 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
943 		 * of bde's necessary to support the sg_tablesize.
944 		 */
945 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
946 						GFP_KERNEL, &psb->dma_handle);
947 		if (!psb->data) {
948 			kfree(psb);
949 			break;
950 		}
951 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
952 
953 		/*
954 		 * 4K Page alignment is CRITICAL to BlockGuard, double check
955 		 * to be sure.
956 		 */
957 		if (phba->cfg_enable_bg  && (((unsigned long)(psb->data) &
958 		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
959 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
960 				      psb->data, psb->dma_handle);
961 			kfree(psb);
962 			break;
963 		}
964 
965 
966 		lxri = lpfc_sli4_next_xritag(phba);
967 		if (lxri == NO_XRI) {
968 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
969 			      psb->data, psb->dma_handle);
970 			kfree(psb);
971 			break;
972 		}
973 
974 		/* Allocate iotag for psb->cur_iocbq. */
975 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
976 		if (iotag == 0) {
977 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
978 				psb->data, psb->dma_handle);
979 			kfree(psb);
980 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
981 					"3368 Failed to allocated IOTAG for"
982 					" XRI:0x%x\n", lxri);
983 			lpfc_sli4_free_xri(phba, lxri);
984 			break;
985 		}
986 		psb->cur_iocbq.sli4_lxritag = lxri;
987 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
988 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
989 		psb->fcp_bpl = psb->data;
990 		psb->fcp_cmnd = (psb->data + sgl_size);
991 		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
992 					sizeof(struct fcp_cmnd));
993 
994 		/* Initialize local short-hand pointers. */
995 		sgl = (struct sli4_sge *)psb->fcp_bpl;
996 		pdma_phys_bpl = psb->dma_handle;
997 		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
998 		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
999 
1000 		/*
1001 		 * The first two bdes are the FCP_CMD and FCP_RSP.
1002 		 * The balance are sg list bdes. Initialize the
1003 		 * first two and leave the rest for queuecommand.
1004 		 */
1005 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
1006 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
1007 		sgl->word2 = le32_to_cpu(sgl->word2);
1008 		bf_set(lpfc_sli4_sge_last, sgl, 0);
1009 		sgl->word2 = cpu_to_le32(sgl->word2);
1010 		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
1011 		sgl++;
1012 
1013 		/* Setup the physical region for the FCP RSP */
1014 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
1015 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
1016 		sgl->word2 = le32_to_cpu(sgl->word2);
1017 		bf_set(lpfc_sli4_sge_last, sgl, 1);
1018 		sgl->word2 = cpu_to_le32(sgl->word2);
1019 		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
1020 
1021 		/*
1022 		 * Since the IOCB for the FCP I/O is built into this
1023 		 * lpfc_scsi_buf, initialize it with all known data now.
1024 		 */
1025 		iocb = &psb->cur_iocbq.iocb;
1026 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
1027 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
1028 		/* setting the BLP size to 2 * sizeof BDE may not be correct.
1029 		 * We are setting the bpl to point to out sgl. An sgl's
1030 		 * entries are 16 bytes, a bpl entries are 12 bytes.
1031 		 */
1032 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
1033 		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
1034 		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
1035 		iocb->ulpBdeCount = 1;
1036 		iocb->ulpLe = 1;
1037 		iocb->ulpClass = CLASS3;
1038 		psb->cur_iocbq.context1 = psb;
1039 		psb->dma_phys_bpl = pdma_phys_bpl;
1040 
1041 		/* add the scsi buffer to a post list */
1042 		list_add_tail(&psb->list, &post_sblist);
1043 		spin_lock_irq(&phba->scsi_buf_list_get_lock);
1044 		phba->sli4_hba.scsi_xri_cnt++;
1045 		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
1046 	}
1047 	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1048 			"3021 Allocate %d out of %d requested new SCSI "
1049 			"buffers\n", bcnt, num_to_alloc);
1050 
1051 	/* post the list of scsi buffer sgls to port if available */
1052 	if (!list_empty(&post_sblist))
1053 		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
1054 							  &post_sblist, bcnt);
1055 	else
1056 		num_posted = 0;
1057 
1058 	return num_posted;
1059 }
1060 
1061 /**
1062  * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
1063  * @vport: The virtual port for which this call being executed.
1064  * @num_to_allocate: The requested number of buffers to allocate.
1065  *
1066  * This routine wraps the actual SCSI buffer allocator function pointer from
1067  * the lpfc_hba struct.
1068  *
1069  * Return codes:
1070  *   int - number of scsi buffers that were allocated.
1071  *   0 = failure, less than num_to_alloc is a partial failure.
1072  **/
1073 static inline int
1074 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
1075 {
1076 	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
1077 }
1078 
1079 /**
1080  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1081  * @phba: The HBA for which this call is being executed.
1082  *
1083  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1084  * and returns to caller.
1085  *
1086  * Return codes:
1087  *   NULL - Error
1088  *   Pointer to lpfc_scsi_buf - Success
1089  **/
1090 static struct lpfc_scsi_buf*
1091 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1092 {
1093 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
1094 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
1095 	unsigned long iflag = 0;
1096 
1097 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
1098 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
1099 			 list);
1100 	if (!lpfc_cmd) {
1101 		spin_lock(&phba->scsi_buf_list_put_lock);
1102 		list_splice(&phba->lpfc_scsi_buf_list_put,
1103 			    &phba->lpfc_scsi_buf_list_get);
1104 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1105 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
1106 				 struct lpfc_scsi_buf, list);
1107 		spin_unlock(&phba->scsi_buf_list_put_lock);
1108 	}
1109 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1110 	return  lpfc_cmd;
1111 }
1112 /**
1113  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1114  * @phba: The HBA for which this call is being executed.
1115  *
1116  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1117  * and returns to caller.
1118  *
1119  * Return codes:
1120  *   NULL - Error
1121  *   Pointer to lpfc_scsi_buf - Success
1122  **/
1123 static struct lpfc_scsi_buf*
1124 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1125 {
1126 	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
1127 	unsigned long iflag = 0;
1128 	int found = 0;
1129 
1130 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
1131 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1132 				 &phba->lpfc_scsi_buf_list_get, list) {
1133 		if (lpfc_test_rrq_active(phba, ndlp,
1134 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
1135 			continue;
1136 		list_del(&lpfc_cmd->list);
1137 		found = 1;
1138 		break;
1139 	}
1140 	if (!found) {
1141 		spin_lock(&phba->scsi_buf_list_put_lock);
1142 		list_splice(&phba->lpfc_scsi_buf_list_put,
1143 			    &phba->lpfc_scsi_buf_list_get);
1144 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1145 		spin_unlock(&phba->scsi_buf_list_put_lock);
1146 		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1147 					 &phba->lpfc_scsi_buf_list_get, list) {
1148 			if (lpfc_test_rrq_active(
1149 				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1150 				continue;
1151 			list_del(&lpfc_cmd->list);
1152 			found = 1;
1153 			break;
1154 		}
1155 	}
1156 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1157 	if (!found)
1158 		return NULL;
1159 	return  lpfc_cmd;
1160 }
1161 /**
1162  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1163  * @phba: The HBA for which this call is being executed.
1164  *
1165  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1166  * and returns to caller.
1167  *
1168  * Return codes:
1169  *   NULL - Error
1170  *   Pointer to lpfc_scsi_buf - Success
1171  **/
1172 static struct lpfc_scsi_buf*
1173 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1174 {
1175 	return  phba->lpfc_get_scsi_buf(phba, ndlp);
1176 }
1177 
1178 /**
1179  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
1180  * @phba: The Hba for which this call is being executed.
1181  * @psb: The scsi buffer which is being released.
1182  *
1183  * This routine releases @psb scsi buffer by adding it to tail of @phba
1184  * lpfc_scsi_buf_list list.
1185  **/
1186 static void
1187 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1188 {
1189 	unsigned long iflag = 0;
1190 
1191 	psb->seg_cnt = 0;
1192 	psb->nonsg_phys = 0;
1193 	psb->prot_seg_cnt = 0;
1194 
1195 	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1196 	psb->pCmd = NULL;
1197 	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1198 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1199 	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1200 }
1201 
1202 /**
1203  * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
1204  * @phba: The Hba for which this call is being executed.
1205  * @psb: The scsi buffer which is being released.
1206  *
1207  * This routine releases @psb scsi buffer by adding it to tail of @phba
1208  * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
1209  * and cannot be reused for at least RA_TOV amount of time if it was
1210  * aborted.
1211  **/
1212 static void
1213 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1214 {
1215 	unsigned long iflag = 0;
1216 
1217 	psb->seg_cnt = 0;
1218 	psb->nonsg_phys = 0;
1219 	psb->prot_seg_cnt = 0;
1220 
1221 	if (psb->exch_busy) {
1222 		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1223 					iflag);
1224 		psb->pCmd = NULL;
1225 		list_add_tail(&psb->list,
1226 			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
1227 		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1228 					iflag);
1229 	} else {
1230 		psb->pCmd = NULL;
1231 		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1232 		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1233 		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1234 		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1235 	}
1236 }
1237 
1238 /**
1239  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
1240  * @phba: The Hba for which this call is being executed.
1241  * @psb: The scsi buffer which is being released.
1242  *
1243  * This routine releases @psb scsi buffer by adding it to tail of @phba
1244  * lpfc_scsi_buf_list list.
1245  **/
1246 static void
1247 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1248 {
1249 
1250 	phba->lpfc_release_scsi_buf(phba, psb);
1251 }
1252 
1253 /**
1254  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
1255  * @phba: The Hba for which this call is being executed.
1256  * @lpfc_cmd: The scsi buffer which is going to be mapped.
1257  *
1258  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1259  * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
1260  * through sg elements and format the bdea. This routine also initializes all
1261  * IOCB fields which are dependent on scsi command request buffer.
1262  *
1263  * Return codes:
1264  *   1 - Error
1265  *   0 - Success
1266  **/
1267 static int
1268 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1269 {
1270 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1271 	struct scatterlist *sgel = NULL;
1272 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1273 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1274 	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1275 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1276 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1277 	dma_addr_t physaddr;
1278 	uint32_t num_bde = 0;
1279 	int nseg, datadir = scsi_cmnd->sc_data_direction;
1280 
1281 	/*
1282 	 * There are three possibilities here - use scatter-gather segment, use
1283 	 * the single mapping, or neither.  Start the lpfc command prep by
1284 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1285 	 * data bde entry.
1286 	 */
1287 	bpl += 2;
1288 	if (scsi_sg_count(scsi_cmnd)) {
1289 		/*
1290 		 * The driver stores the segment count returned from pci_map_sg
1291 		 * because this a count of dma-mappings used to map the use_sg
1292 		 * pages.  They are not guaranteed to be the same for those
1293 		 * architectures that implement an IOMMU.
1294 		 */
1295 
1296 		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1297 				  scsi_sg_count(scsi_cmnd), datadir);
1298 		if (unlikely(!nseg))
1299 			return 1;
1300 
1301 		lpfc_cmd->seg_cnt = nseg;
1302 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1303 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1304 				"9064 BLKGRD: %s: Too many sg segments from "
1305 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
1306 			       __func__, phba->cfg_sg_seg_cnt,
1307 			       lpfc_cmd->seg_cnt);
1308 			lpfc_cmd->seg_cnt = 0;
1309 			scsi_dma_unmap(scsi_cmnd);
1310 			return 1;
1311 		}
1312 
1313 		/*
1314 		 * The driver established a maximum scatter-gather segment count
1315 		 * during probe that limits the number of sg elements in any
1316 		 * single scsi command.  Just run through the seg_cnt and format
1317 		 * the bde's.
1318 		 * When using SLI-3 the driver will try to fit all the BDEs into
1319 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
1320 		 * does for SLI-2 mode.
1321 		 */
1322 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1323 			physaddr = sg_dma_address(sgel);
1324 			if (phba->sli_rev == 3 &&
1325 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1326 			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1327 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1328 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1329 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1330 				data_bde->addrLow = putPaddrLow(physaddr);
1331 				data_bde->addrHigh = putPaddrHigh(physaddr);
1332 				data_bde++;
1333 			} else {
1334 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1335 				bpl->tus.f.bdeSize = sg_dma_len(sgel);
1336 				bpl->tus.w = le32_to_cpu(bpl->tus.w);
1337 				bpl->addrLow =
1338 					le32_to_cpu(putPaddrLow(physaddr));
1339 				bpl->addrHigh =
1340 					le32_to_cpu(putPaddrHigh(physaddr));
1341 				bpl++;
1342 			}
1343 		}
1344 	}
1345 
1346 	/*
1347 	 * Finish initializing those IOCB fields that are dependent on the
1348 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
1349 	 * explicitly reinitialized and for SLI-3 the extended bde count is
1350 	 * explicitly reinitialized since all iocb memory resources are reused.
1351 	 */
1352 	if (phba->sli_rev == 3 &&
1353 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1354 	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1355 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1356 			/*
1357 			 * The extended IOCB format can only fit 3 BDE or a BPL.
1358 			 * This I/O has more than 3 BDE so the 1st data bde will
1359 			 * be a BPL that is filled in here.
1360 			 */
1361 			physaddr = lpfc_cmd->dma_handle;
1362 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1363 			data_bde->tus.f.bdeSize = (num_bde *
1364 						   sizeof(struct ulp_bde64));
1365 			physaddr += (sizeof(struct fcp_cmnd) +
1366 				     sizeof(struct fcp_rsp) +
1367 				     (2 * sizeof(struct ulp_bde64)));
1368 			data_bde->addrHigh = putPaddrHigh(physaddr);
1369 			data_bde->addrLow = putPaddrLow(physaddr);
1370 			/* ebde count includes the response bde and data bpl */
1371 			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1372 		} else {
1373 			/* ebde count includes the response bde and data bdes */
1374 			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1375 		}
1376 	} else {
1377 		iocb_cmd->un.fcpi64.bdl.bdeSize =
1378 			((num_bde + 2) * sizeof(struct ulp_bde64));
1379 		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1380 	}
1381 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1382 
1383 	/*
1384 	 * Due to difference in data length between DIF/non-DIF paths,
1385 	 * we need to set word 4 of IOCB here
1386 	 */
1387 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1388 	return 0;
1389 }
1390 
1391 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1392 
1393 /* Return if if error injection is detected by Initiator */
1394 #define BG_ERR_INIT	0x1
1395 /* Return if if error injection is detected by Target */
1396 #define BG_ERR_TGT	0x2
1397 /* Return if if swapping CSUM<-->CRC is required for error injection */
1398 #define BG_ERR_SWAP	0x10
1399 /* Return if disabling Guard/Ref/App checking is required for error injection */
1400 #define BG_ERR_CHECK	0x20
1401 
1402 /**
1403  * lpfc_bg_err_inject - Determine if we should inject an error
1404  * @phba: The Hba for which this call is being executed.
1405  * @sc: The SCSI command to examine
1406  * @reftag: (out) BlockGuard reference tag for transmitted data
1407  * @apptag: (out) BlockGuard application tag for transmitted data
1408  * @new_guard (in) Value to replace CRC with if needed
1409  *
1410  * Returns BG_ERR_* bit mask or 0 if request ignored
1411  **/
1412 static int
1413 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1414 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1415 {
1416 	struct scatterlist *sgpe; /* s/g prot entry */
1417 	struct scatterlist *sgde; /* s/g data entry */
1418 	struct lpfc_scsi_buf *lpfc_cmd = NULL;
1419 	struct scsi_dif_tuple *src = NULL;
1420 	struct lpfc_nodelist *ndlp;
1421 	struct lpfc_rport_data *rdata;
1422 	uint32_t op = scsi_get_prot_op(sc);
1423 	uint32_t blksize;
1424 	uint32_t numblks;
1425 	sector_t lba;
1426 	int rc = 0;
1427 	int blockoff = 0;
1428 
1429 	if (op == SCSI_PROT_NORMAL)
1430 		return 0;
1431 
1432 	sgpe = scsi_prot_sglist(sc);
1433 	sgde = scsi_sglist(sc);
1434 	lba = scsi_get_lba(sc);
1435 
1436 	/* First check if we need to match the LBA */
1437 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1438 		blksize = lpfc_cmd_blksize(sc);
1439 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1440 
1441 		/* Make sure we have the right LBA if one is specified */
1442 		if ((phba->lpfc_injerr_lba < lba) ||
1443 			(phba->lpfc_injerr_lba >= (lba + numblks)))
1444 			return 0;
1445 		if (sgpe) {
1446 			blockoff = phba->lpfc_injerr_lba - lba;
1447 			numblks = sg_dma_len(sgpe) /
1448 				sizeof(struct scsi_dif_tuple);
1449 			if (numblks < blockoff)
1450 				blockoff = numblks;
1451 		}
1452 	}
1453 
1454 	/* Next check if we need to match the remote NPortID or WWPN */
1455 	rdata = lpfc_rport_data_from_scsi_device(sc->device);
1456 	if (rdata && rdata->pnode) {
1457 		ndlp = rdata->pnode;
1458 
1459 		/* Make sure we have the right NPortID if one is specified */
1460 		if (phba->lpfc_injerr_nportid  &&
1461 			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1462 			return 0;
1463 
1464 		/*
1465 		 * Make sure we have the right WWPN if one is specified.
1466 		 * wwn[0] should be a non-zero NAA in a good WWPN.
1467 		 */
1468 		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1469 			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1470 				sizeof(struct lpfc_name)) != 0))
1471 			return 0;
1472 	}
1473 
1474 	/* Setup a ptr to the protection data if the SCSI host provides it */
1475 	if (sgpe) {
1476 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1477 		src += blockoff;
1478 		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1479 	}
1480 
1481 	/* Should we change the Reference Tag */
1482 	if (reftag) {
1483 		if (phba->lpfc_injerr_wref_cnt) {
1484 			switch (op) {
1485 			case SCSI_PROT_WRITE_PASS:
1486 				if (src) {
1487 					/*
1488 					 * For WRITE_PASS, force the error
1489 					 * to be sent on the wire. It should
1490 					 * be detected by the Target.
1491 					 * If blockoff != 0 error will be
1492 					 * inserted in middle of the IO.
1493 					 */
1494 
1495 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1496 					"9076 BLKGRD: Injecting reftag error: "
1497 					"write lba x%lx + x%x oldrefTag x%x\n",
1498 					(unsigned long)lba, blockoff,
1499 					be32_to_cpu(src->ref_tag));
1500 
1501 					/*
1502 					 * Save the old ref_tag so we can
1503 					 * restore it on completion.
1504 					 */
1505 					if (lpfc_cmd) {
1506 						lpfc_cmd->prot_data_type =
1507 							LPFC_INJERR_REFTAG;
1508 						lpfc_cmd->prot_data_segment =
1509 							src;
1510 						lpfc_cmd->prot_data =
1511 							src->ref_tag;
1512 					}
1513 					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1514 					phba->lpfc_injerr_wref_cnt--;
1515 					if (phba->lpfc_injerr_wref_cnt == 0) {
1516 						phba->lpfc_injerr_nportid = 0;
1517 						phba->lpfc_injerr_lba =
1518 							LPFC_INJERR_LBA_OFF;
1519 						memset(&phba->lpfc_injerr_wwpn,
1520 						  0, sizeof(struct lpfc_name));
1521 					}
1522 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1523 
1524 					break;
1525 				}
1526 				/* Drop thru */
1527 			case SCSI_PROT_WRITE_INSERT:
1528 				/*
1529 				 * For WRITE_INSERT, force the error
1530 				 * to be sent on the wire. It should be
1531 				 * detected by the Target.
1532 				 */
1533 				/* DEADBEEF will be the reftag on the wire */
1534 				*reftag = 0xDEADBEEF;
1535 				phba->lpfc_injerr_wref_cnt--;
1536 				if (phba->lpfc_injerr_wref_cnt == 0) {
1537 					phba->lpfc_injerr_nportid = 0;
1538 					phba->lpfc_injerr_lba =
1539 					LPFC_INJERR_LBA_OFF;
1540 					memset(&phba->lpfc_injerr_wwpn,
1541 						0, sizeof(struct lpfc_name));
1542 				}
1543 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1544 
1545 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1546 					"9078 BLKGRD: Injecting reftag error: "
1547 					"write lba x%lx\n", (unsigned long)lba);
1548 				break;
1549 			case SCSI_PROT_WRITE_STRIP:
1550 				/*
1551 				 * For WRITE_STRIP and WRITE_PASS,
1552 				 * force the error on data
1553 				 * being copied from SLI-Host to SLI-Port.
1554 				 */
1555 				*reftag = 0xDEADBEEF;
1556 				phba->lpfc_injerr_wref_cnt--;
1557 				if (phba->lpfc_injerr_wref_cnt == 0) {
1558 					phba->lpfc_injerr_nportid = 0;
1559 					phba->lpfc_injerr_lba =
1560 						LPFC_INJERR_LBA_OFF;
1561 					memset(&phba->lpfc_injerr_wwpn,
1562 						0, sizeof(struct lpfc_name));
1563 				}
1564 				rc = BG_ERR_INIT;
1565 
1566 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1567 					"9077 BLKGRD: Injecting reftag error: "
1568 					"write lba x%lx\n", (unsigned long)lba);
1569 				break;
1570 			}
1571 		}
1572 		if (phba->lpfc_injerr_rref_cnt) {
1573 			switch (op) {
1574 			case SCSI_PROT_READ_INSERT:
1575 			case SCSI_PROT_READ_STRIP:
1576 			case SCSI_PROT_READ_PASS:
1577 				/*
1578 				 * For READ_STRIP and READ_PASS, force the
1579 				 * error on data being read off the wire. It
1580 				 * should force an IO error to the driver.
1581 				 */
1582 				*reftag = 0xDEADBEEF;
1583 				phba->lpfc_injerr_rref_cnt--;
1584 				if (phba->lpfc_injerr_rref_cnt == 0) {
1585 					phba->lpfc_injerr_nportid = 0;
1586 					phba->lpfc_injerr_lba =
1587 						LPFC_INJERR_LBA_OFF;
1588 					memset(&phba->lpfc_injerr_wwpn,
1589 						0, sizeof(struct lpfc_name));
1590 				}
1591 				rc = BG_ERR_INIT;
1592 
1593 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1594 					"9079 BLKGRD: Injecting reftag error: "
1595 					"read lba x%lx\n", (unsigned long)lba);
1596 				break;
1597 			}
1598 		}
1599 	}
1600 
1601 	/* Should we change the Application Tag */
1602 	if (apptag) {
1603 		if (phba->lpfc_injerr_wapp_cnt) {
1604 			switch (op) {
1605 			case SCSI_PROT_WRITE_PASS:
1606 				if (src) {
1607 					/*
1608 					 * For WRITE_PASS, force the error
1609 					 * to be sent on the wire. It should
1610 					 * be detected by the Target.
1611 					 * If blockoff != 0 error will be
1612 					 * inserted in middle of the IO.
1613 					 */
1614 
1615 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1616 					"9080 BLKGRD: Injecting apptag error: "
1617 					"write lba x%lx + x%x oldappTag x%x\n",
1618 					(unsigned long)lba, blockoff,
1619 					be16_to_cpu(src->app_tag));
1620 
1621 					/*
1622 					 * Save the old app_tag so we can
1623 					 * restore it on completion.
1624 					 */
1625 					if (lpfc_cmd) {
1626 						lpfc_cmd->prot_data_type =
1627 							LPFC_INJERR_APPTAG;
1628 						lpfc_cmd->prot_data_segment =
1629 							src;
1630 						lpfc_cmd->prot_data =
1631 							src->app_tag;
1632 					}
1633 					src->app_tag = cpu_to_be16(0xDEAD);
1634 					phba->lpfc_injerr_wapp_cnt--;
1635 					if (phba->lpfc_injerr_wapp_cnt == 0) {
1636 						phba->lpfc_injerr_nportid = 0;
1637 						phba->lpfc_injerr_lba =
1638 							LPFC_INJERR_LBA_OFF;
1639 						memset(&phba->lpfc_injerr_wwpn,
1640 						  0, sizeof(struct lpfc_name));
1641 					}
1642 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1643 					break;
1644 				}
1645 				/* Drop thru */
1646 			case SCSI_PROT_WRITE_INSERT:
1647 				/*
1648 				 * For WRITE_INSERT, force the
1649 				 * error to be sent on the wire. It should be
1650 				 * detected by the Target.
1651 				 */
1652 				/* DEAD will be the apptag on the wire */
1653 				*apptag = 0xDEAD;
1654 				phba->lpfc_injerr_wapp_cnt--;
1655 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1656 					phba->lpfc_injerr_nportid = 0;
1657 					phba->lpfc_injerr_lba =
1658 						LPFC_INJERR_LBA_OFF;
1659 					memset(&phba->lpfc_injerr_wwpn,
1660 						0, sizeof(struct lpfc_name));
1661 				}
1662 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1663 
1664 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1665 					"0813 BLKGRD: Injecting apptag error: "
1666 					"write lba x%lx\n", (unsigned long)lba);
1667 				break;
1668 			case SCSI_PROT_WRITE_STRIP:
1669 				/*
1670 				 * For WRITE_STRIP and WRITE_PASS,
1671 				 * force the error on data
1672 				 * being copied from SLI-Host to SLI-Port.
1673 				 */
1674 				*apptag = 0xDEAD;
1675 				phba->lpfc_injerr_wapp_cnt--;
1676 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1677 					phba->lpfc_injerr_nportid = 0;
1678 					phba->lpfc_injerr_lba =
1679 						LPFC_INJERR_LBA_OFF;
1680 					memset(&phba->lpfc_injerr_wwpn,
1681 						0, sizeof(struct lpfc_name));
1682 				}
1683 				rc = BG_ERR_INIT;
1684 
1685 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1686 					"0812 BLKGRD: Injecting apptag error: "
1687 					"write lba x%lx\n", (unsigned long)lba);
1688 				break;
1689 			}
1690 		}
1691 		if (phba->lpfc_injerr_rapp_cnt) {
1692 			switch (op) {
1693 			case SCSI_PROT_READ_INSERT:
1694 			case SCSI_PROT_READ_STRIP:
1695 			case SCSI_PROT_READ_PASS:
1696 				/*
1697 				 * For READ_STRIP and READ_PASS, force the
1698 				 * error on data being read off the wire. It
1699 				 * should force an IO error to the driver.
1700 				 */
1701 				*apptag = 0xDEAD;
1702 				phba->lpfc_injerr_rapp_cnt--;
1703 				if (phba->lpfc_injerr_rapp_cnt == 0) {
1704 					phba->lpfc_injerr_nportid = 0;
1705 					phba->lpfc_injerr_lba =
1706 						LPFC_INJERR_LBA_OFF;
1707 					memset(&phba->lpfc_injerr_wwpn,
1708 						0, sizeof(struct lpfc_name));
1709 				}
1710 				rc = BG_ERR_INIT;
1711 
1712 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1713 					"0814 BLKGRD: Injecting apptag error: "
1714 					"read lba x%lx\n", (unsigned long)lba);
1715 				break;
1716 			}
1717 		}
1718 	}
1719 
1720 
1721 	/* Should we change the Guard Tag */
1722 	if (new_guard) {
1723 		if (phba->lpfc_injerr_wgrd_cnt) {
1724 			switch (op) {
1725 			case SCSI_PROT_WRITE_PASS:
1726 				rc = BG_ERR_CHECK;
1727 				/* Drop thru */
1728 
1729 			case SCSI_PROT_WRITE_INSERT:
1730 				/*
1731 				 * For WRITE_INSERT, force the
1732 				 * error to be sent on the wire. It should be
1733 				 * detected by the Target.
1734 				 */
1735 				phba->lpfc_injerr_wgrd_cnt--;
1736 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1737 					phba->lpfc_injerr_nportid = 0;
1738 					phba->lpfc_injerr_lba =
1739 						LPFC_INJERR_LBA_OFF;
1740 					memset(&phba->lpfc_injerr_wwpn,
1741 						0, sizeof(struct lpfc_name));
1742 				}
1743 
1744 				rc |= BG_ERR_TGT | BG_ERR_SWAP;
1745 				/* Signals the caller to swap CRC->CSUM */
1746 
1747 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1748 					"0817 BLKGRD: Injecting guard error: "
1749 					"write lba x%lx\n", (unsigned long)lba);
1750 				break;
1751 			case SCSI_PROT_WRITE_STRIP:
1752 				/*
1753 				 * For WRITE_STRIP and WRITE_PASS,
1754 				 * force the error on data
1755 				 * being copied from SLI-Host to SLI-Port.
1756 				 */
1757 				phba->lpfc_injerr_wgrd_cnt--;
1758 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1759 					phba->lpfc_injerr_nportid = 0;
1760 					phba->lpfc_injerr_lba =
1761 						LPFC_INJERR_LBA_OFF;
1762 					memset(&phba->lpfc_injerr_wwpn,
1763 						0, sizeof(struct lpfc_name));
1764 				}
1765 
1766 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1767 				/* Signals the caller to swap CRC->CSUM */
1768 
1769 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1770 					"0816 BLKGRD: Injecting guard error: "
1771 					"write lba x%lx\n", (unsigned long)lba);
1772 				break;
1773 			}
1774 		}
1775 		if (phba->lpfc_injerr_rgrd_cnt) {
1776 			switch (op) {
1777 			case SCSI_PROT_READ_INSERT:
1778 			case SCSI_PROT_READ_STRIP:
1779 			case SCSI_PROT_READ_PASS:
1780 				/*
1781 				 * For READ_STRIP and READ_PASS, force the
1782 				 * error on data being read off the wire. It
1783 				 * should force an IO error to the driver.
1784 				 */
1785 				phba->lpfc_injerr_rgrd_cnt--;
1786 				if (phba->lpfc_injerr_rgrd_cnt == 0) {
1787 					phba->lpfc_injerr_nportid = 0;
1788 					phba->lpfc_injerr_lba =
1789 						LPFC_INJERR_LBA_OFF;
1790 					memset(&phba->lpfc_injerr_wwpn,
1791 						0, sizeof(struct lpfc_name));
1792 				}
1793 
1794 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1795 				/* Signals the caller to swap CRC->CSUM */
1796 
1797 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1798 					"0818 BLKGRD: Injecting guard error: "
1799 					"read lba x%lx\n", (unsigned long)lba);
1800 			}
1801 		}
1802 	}
1803 
1804 	return rc;
1805 }
1806 #endif
1807 
1808 /**
1809  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1810  * the specified SCSI command.
1811  * @phba: The Hba for which this call is being executed.
1812  * @sc: The SCSI command to examine
1813  * @txopt: (out) BlockGuard operation for transmitted data
1814  * @rxopt: (out) BlockGuard operation for received data
1815  *
1816  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1817  *
1818  **/
1819 static int
1820 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1821 		uint8_t *txop, uint8_t *rxop)
1822 {
1823 	uint8_t ret = 0;
1824 
1825 	if (lpfc_cmd_guard_csum(sc)) {
1826 		switch (scsi_get_prot_op(sc)) {
1827 		case SCSI_PROT_READ_INSERT:
1828 		case SCSI_PROT_WRITE_STRIP:
1829 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1830 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1831 			break;
1832 
1833 		case SCSI_PROT_READ_STRIP:
1834 		case SCSI_PROT_WRITE_INSERT:
1835 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1836 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1837 			break;
1838 
1839 		case SCSI_PROT_READ_PASS:
1840 		case SCSI_PROT_WRITE_PASS:
1841 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
1842 			*txop = BG_OP_IN_CSUM_OUT_CRC;
1843 			break;
1844 
1845 		case SCSI_PROT_NORMAL:
1846 		default:
1847 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1848 				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1849 					scsi_get_prot_op(sc));
1850 			ret = 1;
1851 			break;
1852 
1853 		}
1854 	} else {
1855 		switch (scsi_get_prot_op(sc)) {
1856 		case SCSI_PROT_READ_STRIP:
1857 		case SCSI_PROT_WRITE_INSERT:
1858 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1859 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1860 			break;
1861 
1862 		case SCSI_PROT_READ_PASS:
1863 		case SCSI_PROT_WRITE_PASS:
1864 			*rxop = BG_OP_IN_CRC_OUT_CRC;
1865 			*txop = BG_OP_IN_CRC_OUT_CRC;
1866 			break;
1867 
1868 		case SCSI_PROT_READ_INSERT:
1869 		case SCSI_PROT_WRITE_STRIP:
1870 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1871 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1872 			break;
1873 
1874 		case SCSI_PROT_NORMAL:
1875 		default:
1876 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1877 				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1878 					scsi_get_prot_op(sc));
1879 			ret = 1;
1880 			break;
1881 		}
1882 	}
1883 
1884 	return ret;
1885 }
1886 
1887 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1888 /**
1889  * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1890  * the specified SCSI command in order to force a guard tag error.
1891  * @phba: The Hba for which this call is being executed.
1892  * @sc: The SCSI command to examine
1893  * @txopt: (out) BlockGuard operation for transmitted data
1894  * @rxopt: (out) BlockGuard operation for received data
1895  *
1896  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1897  *
1898  **/
1899 static int
1900 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1901 		uint8_t *txop, uint8_t *rxop)
1902 {
1903 	uint8_t ret = 0;
1904 
1905 	if (lpfc_cmd_guard_csum(sc)) {
1906 		switch (scsi_get_prot_op(sc)) {
1907 		case SCSI_PROT_READ_INSERT:
1908 		case SCSI_PROT_WRITE_STRIP:
1909 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1910 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1911 			break;
1912 
1913 		case SCSI_PROT_READ_STRIP:
1914 		case SCSI_PROT_WRITE_INSERT:
1915 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1916 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1917 			break;
1918 
1919 		case SCSI_PROT_READ_PASS:
1920 		case SCSI_PROT_WRITE_PASS:
1921 			*rxop = BG_OP_IN_CSUM_OUT_CRC;
1922 			*txop = BG_OP_IN_CRC_OUT_CSUM;
1923 			break;
1924 
1925 		case SCSI_PROT_NORMAL:
1926 		default:
1927 			break;
1928 
1929 		}
1930 	} else {
1931 		switch (scsi_get_prot_op(sc)) {
1932 		case SCSI_PROT_READ_STRIP:
1933 		case SCSI_PROT_WRITE_INSERT:
1934 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1935 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1936 			break;
1937 
1938 		case SCSI_PROT_READ_PASS:
1939 		case SCSI_PROT_WRITE_PASS:
1940 			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
1941 			*txop = BG_OP_IN_CSUM_OUT_CSUM;
1942 			break;
1943 
1944 		case SCSI_PROT_READ_INSERT:
1945 		case SCSI_PROT_WRITE_STRIP:
1946 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1947 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1948 			break;
1949 
1950 		case SCSI_PROT_NORMAL:
1951 		default:
1952 			break;
1953 		}
1954 	}
1955 
1956 	return ret;
1957 }
1958 #endif
1959 
1960 /**
1961  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1962  * @phba: The Hba for which this call is being executed.
1963  * @sc: pointer to scsi command we're working on
1964  * @bpl: pointer to buffer list for protection groups
1965  * @datacnt: number of segments of data that have been dma mapped
1966  *
1967  * This function sets up BPL buffer list for protection groups of
1968  * type LPFC_PG_TYPE_NO_DIF
1969  *
1970  * This is usually used when the HBA is instructed to generate
1971  * DIFs and insert them into data stream (or strip DIF from
1972  * incoming data stream)
1973  *
1974  * The buffer list consists of just one protection group described
1975  * below:
1976  *                                +-------------------------+
1977  *   start of prot group  -->     |          PDE_5          |
1978  *                                +-------------------------+
1979  *                                |          PDE_6          |
1980  *                                +-------------------------+
1981  *                                |         Data BDE        |
1982  *                                +-------------------------+
1983  *                                |more Data BDE's ... (opt)|
1984  *                                +-------------------------+
1985  *
1986  *
1987  * Note: Data s/g buffers have been dma mapped
1988  *
1989  * Returns the number of BDEs added to the BPL.
1990  **/
1991 static int
1992 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1993 		struct ulp_bde64 *bpl, int datasegcnt)
1994 {
1995 	struct scatterlist *sgde = NULL; /* s/g data entry */
1996 	struct lpfc_pde5 *pde5 = NULL;
1997 	struct lpfc_pde6 *pde6 = NULL;
1998 	dma_addr_t physaddr;
1999 	int i = 0, num_bde = 0, status;
2000 	int datadir = sc->sc_data_direction;
2001 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2002 	uint32_t rc;
2003 #endif
2004 	uint32_t checking = 1;
2005 	uint32_t reftag;
2006 	unsigned blksize;
2007 	uint8_t txop, rxop;
2008 
2009 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2010 	if (status)
2011 		goto out;
2012 
2013 	/* extract some info from the scsi command for pde*/
2014 	blksize = lpfc_cmd_blksize(sc);
2015 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2016 
2017 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2018 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2019 	if (rc) {
2020 		if (rc & BG_ERR_SWAP)
2021 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2022 		if (rc & BG_ERR_CHECK)
2023 			checking = 0;
2024 	}
2025 #endif
2026 
2027 	/* setup PDE5 with what we have */
2028 	pde5 = (struct lpfc_pde5 *) bpl;
2029 	memset(pde5, 0, sizeof(struct lpfc_pde5));
2030 	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2031 
2032 	/* Endianness conversion if necessary for PDE5 */
2033 	pde5->word0 = cpu_to_le32(pde5->word0);
2034 	pde5->reftag = cpu_to_le32(reftag);
2035 
2036 	/* advance bpl and increment bde count */
2037 	num_bde++;
2038 	bpl++;
2039 	pde6 = (struct lpfc_pde6 *) bpl;
2040 
2041 	/* setup PDE6 with the rest of the info */
2042 	memset(pde6, 0, sizeof(struct lpfc_pde6));
2043 	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2044 	bf_set(pde6_optx, pde6, txop);
2045 	bf_set(pde6_oprx, pde6, rxop);
2046 
2047 	/*
2048 	 * We only need to check the data on READs, for WRITEs
2049 	 * protection data is automatically generated, not checked.
2050 	 */
2051 	if (datadir == DMA_FROM_DEVICE) {
2052 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2053 			bf_set(pde6_ce, pde6, checking);
2054 		else
2055 			bf_set(pde6_ce, pde6, 0);
2056 
2057 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2058 			bf_set(pde6_re, pde6, checking);
2059 		else
2060 			bf_set(pde6_re, pde6, 0);
2061 	}
2062 	bf_set(pde6_ai, pde6, 1);
2063 	bf_set(pde6_ae, pde6, 0);
2064 	bf_set(pde6_apptagval, pde6, 0);
2065 
2066 	/* Endianness conversion if necessary for PDE6 */
2067 	pde6->word0 = cpu_to_le32(pde6->word0);
2068 	pde6->word1 = cpu_to_le32(pde6->word1);
2069 	pde6->word2 = cpu_to_le32(pde6->word2);
2070 
2071 	/* advance bpl and increment bde count */
2072 	num_bde++;
2073 	bpl++;
2074 
2075 	/* assumption: caller has already run dma_map_sg on command data */
2076 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2077 		physaddr = sg_dma_address(sgde);
2078 		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2079 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2080 		bpl->tus.f.bdeSize = sg_dma_len(sgde);
2081 		if (datadir == DMA_TO_DEVICE)
2082 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2083 		else
2084 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2085 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2086 		bpl++;
2087 		num_bde++;
2088 	}
2089 
2090 out:
2091 	return num_bde;
2092 }
2093 
2094 /**
2095  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2096  * @phba: The Hba for which this call is being executed.
2097  * @sc: pointer to scsi command we're working on
2098  * @bpl: pointer to buffer list for protection groups
2099  * @datacnt: number of segments of data that have been dma mapped
2100  * @protcnt: number of segment of protection data that have been dma mapped
2101  *
2102  * This function sets up BPL buffer list for protection groups of
2103  * type LPFC_PG_TYPE_DIF
2104  *
2105  * This is usually used when DIFs are in their own buffers,
2106  * separate from the data. The HBA can then by instructed
2107  * to place the DIFs in the outgoing stream.  For read operations,
2108  * The HBA could extract the DIFs and place it in DIF buffers.
2109  *
2110  * The buffer list for this type consists of one or more of the
2111  * protection groups described below:
2112  *                                    +-------------------------+
2113  *   start of first prot group  -->   |          PDE_5          |
2114  *                                    +-------------------------+
2115  *                                    |          PDE_6          |
2116  *                                    +-------------------------+
2117  *                                    |      PDE_7 (Prot BDE)   |
2118  *                                    +-------------------------+
2119  *                                    |        Data BDE         |
2120  *                                    +-------------------------+
2121  *                                    |more Data BDE's ... (opt)|
2122  *                                    +-------------------------+
2123  *   start of new  prot group  -->    |          PDE_5          |
2124  *                                    +-------------------------+
2125  *                                    |          ...            |
2126  *                                    +-------------------------+
2127  *
2128  * Note: It is assumed that both data and protection s/g buffers have been
2129  *       mapped for DMA
2130  *
2131  * Returns the number of BDEs added to the BPL.
2132  **/
2133 static int
2134 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2135 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
2136 {
2137 	struct scatterlist *sgde = NULL; /* s/g data entry */
2138 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2139 	struct lpfc_pde5 *pde5 = NULL;
2140 	struct lpfc_pde6 *pde6 = NULL;
2141 	struct lpfc_pde7 *pde7 = NULL;
2142 	dma_addr_t dataphysaddr, protphysaddr;
2143 	unsigned short curr_data = 0, curr_prot = 0;
2144 	unsigned int split_offset;
2145 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2146 	unsigned int protgrp_blks, protgrp_bytes;
2147 	unsigned int remainder, subtotal;
2148 	int status;
2149 	int datadir = sc->sc_data_direction;
2150 	unsigned char pgdone = 0, alldone = 0;
2151 	unsigned blksize;
2152 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2153 	uint32_t rc;
2154 #endif
2155 	uint32_t checking = 1;
2156 	uint32_t reftag;
2157 	uint8_t txop, rxop;
2158 	int num_bde = 0;
2159 
2160 	sgpe = scsi_prot_sglist(sc);
2161 	sgde = scsi_sglist(sc);
2162 
2163 	if (!sgpe || !sgde) {
2164 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2165 				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2166 				sgpe, sgde);
2167 		return 0;
2168 	}
2169 
2170 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2171 	if (status)
2172 		goto out;
2173 
2174 	/* extract some info from the scsi command */
2175 	blksize = lpfc_cmd_blksize(sc);
2176 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2177 
2178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2179 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2180 	if (rc) {
2181 		if (rc & BG_ERR_SWAP)
2182 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2183 		if (rc & BG_ERR_CHECK)
2184 			checking = 0;
2185 	}
2186 #endif
2187 
2188 	split_offset = 0;
2189 	do {
2190 		/* Check to see if we ran out of space */
2191 		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2192 			return num_bde + 3;
2193 
2194 		/* setup PDE5 with what we have */
2195 		pde5 = (struct lpfc_pde5 *) bpl;
2196 		memset(pde5, 0, sizeof(struct lpfc_pde5));
2197 		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2198 
2199 		/* Endianness conversion if necessary for PDE5 */
2200 		pde5->word0 = cpu_to_le32(pde5->word0);
2201 		pde5->reftag = cpu_to_le32(reftag);
2202 
2203 		/* advance bpl and increment bde count */
2204 		num_bde++;
2205 		bpl++;
2206 		pde6 = (struct lpfc_pde6 *) bpl;
2207 
2208 		/* setup PDE6 with the rest of the info */
2209 		memset(pde6, 0, sizeof(struct lpfc_pde6));
2210 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2211 		bf_set(pde6_optx, pde6, txop);
2212 		bf_set(pde6_oprx, pde6, rxop);
2213 
2214 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2215 			bf_set(pde6_ce, pde6, checking);
2216 		else
2217 			bf_set(pde6_ce, pde6, 0);
2218 
2219 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2220 			bf_set(pde6_re, pde6, checking);
2221 		else
2222 			bf_set(pde6_re, pde6, 0);
2223 
2224 		bf_set(pde6_ai, pde6, 1);
2225 		bf_set(pde6_ae, pde6, 0);
2226 		bf_set(pde6_apptagval, pde6, 0);
2227 
2228 		/* Endianness conversion if necessary for PDE6 */
2229 		pde6->word0 = cpu_to_le32(pde6->word0);
2230 		pde6->word1 = cpu_to_le32(pde6->word1);
2231 		pde6->word2 = cpu_to_le32(pde6->word2);
2232 
2233 		/* advance bpl and increment bde count */
2234 		num_bde++;
2235 		bpl++;
2236 
2237 		/* setup the first BDE that points to protection buffer */
2238 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2239 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2240 
2241 		/* must be integer multiple of the DIF block length */
2242 		BUG_ON(protgroup_len % 8);
2243 
2244 		pde7 = (struct lpfc_pde7 *) bpl;
2245 		memset(pde7, 0, sizeof(struct lpfc_pde7));
2246 		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2247 
2248 		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2249 		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2250 
2251 		protgrp_blks = protgroup_len / 8;
2252 		protgrp_bytes = protgrp_blks * blksize;
2253 
2254 		/* check if this pde is crossing the 4K boundary; if so split */
2255 		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2256 			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2257 			protgroup_offset += protgroup_remainder;
2258 			protgrp_blks = protgroup_remainder / 8;
2259 			protgrp_bytes = protgrp_blks * blksize;
2260 		} else {
2261 			protgroup_offset = 0;
2262 			curr_prot++;
2263 		}
2264 
2265 		num_bde++;
2266 
2267 		/* setup BDE's for data blocks associated with DIF data */
2268 		pgdone = 0;
2269 		subtotal = 0; /* total bytes processed for current prot grp */
2270 		while (!pgdone) {
2271 			/* Check to see if we ran out of space */
2272 			if (num_bde >= phba->cfg_total_seg_cnt)
2273 				return num_bde + 1;
2274 
2275 			if (!sgde) {
2276 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2277 					"9065 BLKGRD:%s Invalid data segment\n",
2278 						__func__);
2279 				return 0;
2280 			}
2281 			bpl++;
2282 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2283 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2284 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2285 
2286 			remainder = sg_dma_len(sgde) - split_offset;
2287 
2288 			if ((subtotal + remainder) <= protgrp_bytes) {
2289 				/* we can use this whole buffer */
2290 				bpl->tus.f.bdeSize = remainder;
2291 				split_offset = 0;
2292 
2293 				if ((subtotal + remainder) == protgrp_bytes)
2294 					pgdone = 1;
2295 			} else {
2296 				/* must split this buffer with next prot grp */
2297 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2298 				split_offset += bpl->tus.f.bdeSize;
2299 			}
2300 
2301 			subtotal += bpl->tus.f.bdeSize;
2302 
2303 			if (datadir == DMA_TO_DEVICE)
2304 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2305 			else
2306 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2307 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2308 
2309 			num_bde++;
2310 			curr_data++;
2311 
2312 			if (split_offset)
2313 				break;
2314 
2315 			/* Move to the next s/g segment if possible */
2316 			sgde = sg_next(sgde);
2317 
2318 		}
2319 
2320 		if (protgroup_offset) {
2321 			/* update the reference tag */
2322 			reftag += protgrp_blks;
2323 			bpl++;
2324 			continue;
2325 		}
2326 
2327 		/* are we done ? */
2328 		if (curr_prot == protcnt) {
2329 			alldone = 1;
2330 		} else if (curr_prot < protcnt) {
2331 			/* advance to next prot buffer */
2332 			sgpe = sg_next(sgpe);
2333 			bpl++;
2334 
2335 			/* update the reference tag */
2336 			reftag += protgrp_blks;
2337 		} else {
2338 			/* if we're here, we have a bug */
2339 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2340 				"9054 BLKGRD: bug in %s\n", __func__);
2341 		}
2342 
2343 	} while (!alldone);
2344 out:
2345 
2346 	return num_bde;
2347 }
2348 
2349 /**
2350  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2351  * @phba: The Hba for which this call is being executed.
2352  * @sc: pointer to scsi command we're working on
2353  * @sgl: pointer to buffer list for protection groups
2354  * @datacnt: number of segments of data that have been dma mapped
2355  *
2356  * This function sets up SGL buffer list for protection groups of
2357  * type LPFC_PG_TYPE_NO_DIF
2358  *
2359  * This is usually used when the HBA is instructed to generate
2360  * DIFs and insert them into data stream (or strip DIF from
2361  * incoming data stream)
2362  *
2363  * The buffer list consists of just one protection group described
2364  * below:
2365  *                                +-------------------------+
2366  *   start of prot group  -->     |         DI_SEED         |
2367  *                                +-------------------------+
2368  *                                |         Data SGE        |
2369  *                                +-------------------------+
2370  *                                |more Data SGE's ... (opt)|
2371  *                                +-------------------------+
2372  *
2373  *
2374  * Note: Data s/g buffers have been dma mapped
2375  *
2376  * Returns the number of SGEs added to the SGL.
2377  **/
2378 static int
2379 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2380 		struct sli4_sge *sgl, int datasegcnt)
2381 {
2382 	struct scatterlist *sgde = NULL; /* s/g data entry */
2383 	struct sli4_sge_diseed *diseed = NULL;
2384 	dma_addr_t physaddr;
2385 	int i = 0, num_sge = 0, status;
2386 	uint32_t reftag;
2387 	unsigned blksize;
2388 	uint8_t txop, rxop;
2389 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2390 	uint32_t rc;
2391 #endif
2392 	uint32_t checking = 1;
2393 	uint32_t dma_len;
2394 	uint32_t dma_offset = 0;
2395 
2396 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2397 	if (status)
2398 		goto out;
2399 
2400 	/* extract some info from the scsi command for pde*/
2401 	blksize = lpfc_cmd_blksize(sc);
2402 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2403 
2404 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2405 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2406 	if (rc) {
2407 		if (rc & BG_ERR_SWAP)
2408 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2409 		if (rc & BG_ERR_CHECK)
2410 			checking = 0;
2411 	}
2412 #endif
2413 
2414 	/* setup DISEED with what we have */
2415 	diseed = (struct sli4_sge_diseed *) sgl;
2416 	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2417 	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2418 
2419 	/* Endianness conversion if necessary */
2420 	diseed->ref_tag = cpu_to_le32(reftag);
2421 	diseed->ref_tag_tran = diseed->ref_tag;
2422 
2423 	/*
2424 	 * We only need to check the data on READs, for WRITEs
2425 	 * protection data is automatically generated, not checked.
2426 	 */
2427 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2428 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2429 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2430 		else
2431 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2432 
2433 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2434 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2435 		else
2436 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2437 	}
2438 
2439 	/* setup DISEED with the rest of the info */
2440 	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2441 	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2442 
2443 	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2444 	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2445 
2446 	/* Endianness conversion if necessary for DISEED */
2447 	diseed->word2 = cpu_to_le32(diseed->word2);
2448 	diseed->word3 = cpu_to_le32(diseed->word3);
2449 
2450 	/* advance bpl and increment sge count */
2451 	num_sge++;
2452 	sgl++;
2453 
2454 	/* assumption: caller has already run dma_map_sg on command data */
2455 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2456 		physaddr = sg_dma_address(sgde);
2457 		dma_len = sg_dma_len(sgde);
2458 		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2459 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2460 		if ((i + 1) == datasegcnt)
2461 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2462 		else
2463 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2464 		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2465 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2466 
2467 		sgl->sge_len = cpu_to_le32(dma_len);
2468 		dma_offset += dma_len;
2469 
2470 		sgl++;
2471 		num_sge++;
2472 	}
2473 
2474 out:
2475 	return num_sge;
2476 }
2477 
2478 /**
2479  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2480  * @phba: The Hba for which this call is being executed.
2481  * @sc: pointer to scsi command we're working on
2482  * @sgl: pointer to buffer list for protection groups
2483  * @datacnt: number of segments of data that have been dma mapped
2484  * @protcnt: number of segment of protection data that have been dma mapped
2485  *
2486  * This function sets up SGL buffer list for protection groups of
2487  * type LPFC_PG_TYPE_DIF
2488  *
2489  * This is usually used when DIFs are in their own buffers,
2490  * separate from the data. The HBA can then by instructed
2491  * to place the DIFs in the outgoing stream.  For read operations,
2492  * The HBA could extract the DIFs and place it in DIF buffers.
2493  *
2494  * The buffer list for this type consists of one or more of the
2495  * protection groups described below:
2496  *                                    +-------------------------+
2497  *   start of first prot group  -->   |         DISEED          |
2498  *                                    +-------------------------+
2499  *                                    |      DIF (Prot SGE)     |
2500  *                                    +-------------------------+
2501  *                                    |        Data SGE         |
2502  *                                    +-------------------------+
2503  *                                    |more Data SGE's ... (opt)|
2504  *                                    +-------------------------+
2505  *   start of new  prot group  -->    |         DISEED          |
2506  *                                    +-------------------------+
2507  *                                    |          ...            |
2508  *                                    +-------------------------+
2509  *
2510  * Note: It is assumed that both data and protection s/g buffers have been
2511  *       mapped for DMA
2512  *
2513  * Returns the number of SGEs added to the SGL.
2514  **/
2515 static int
2516 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2517 		struct sli4_sge *sgl, int datacnt, int protcnt)
2518 {
2519 	struct scatterlist *sgde = NULL; /* s/g data entry */
2520 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2521 	struct sli4_sge_diseed *diseed = NULL;
2522 	dma_addr_t dataphysaddr, protphysaddr;
2523 	unsigned short curr_data = 0, curr_prot = 0;
2524 	unsigned int split_offset;
2525 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2526 	unsigned int protgrp_blks, protgrp_bytes;
2527 	unsigned int remainder, subtotal;
2528 	int status;
2529 	unsigned char pgdone = 0, alldone = 0;
2530 	unsigned blksize;
2531 	uint32_t reftag;
2532 	uint8_t txop, rxop;
2533 	uint32_t dma_len;
2534 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2535 	uint32_t rc;
2536 #endif
2537 	uint32_t checking = 1;
2538 	uint32_t dma_offset = 0;
2539 	int num_sge = 0;
2540 
2541 	sgpe = scsi_prot_sglist(sc);
2542 	sgde = scsi_sglist(sc);
2543 
2544 	if (!sgpe || !sgde) {
2545 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2546 				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2547 				sgpe, sgde);
2548 		return 0;
2549 	}
2550 
2551 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2552 	if (status)
2553 		goto out;
2554 
2555 	/* extract some info from the scsi command */
2556 	blksize = lpfc_cmd_blksize(sc);
2557 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2558 
2559 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2560 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2561 	if (rc) {
2562 		if (rc & BG_ERR_SWAP)
2563 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2564 		if (rc & BG_ERR_CHECK)
2565 			checking = 0;
2566 	}
2567 #endif
2568 
2569 	split_offset = 0;
2570 	do {
2571 		/* Check to see if we ran out of space */
2572 		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2573 			return num_sge + 3;
2574 
2575 		/* setup DISEED with what we have */
2576 		diseed = (struct sli4_sge_diseed *) sgl;
2577 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2578 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2579 
2580 		/* Endianness conversion if necessary */
2581 		diseed->ref_tag = cpu_to_le32(reftag);
2582 		diseed->ref_tag_tran = diseed->ref_tag;
2583 
2584 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2585 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2586 
2587 		} else {
2588 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2589 			/*
2590 			 * When in this mode, the hardware will replace
2591 			 * the guard tag from the host with a
2592 			 * newly generated good CRC for the wire.
2593 			 * Switch to raw mode here to avoid this
2594 			 * behavior. What the host sends gets put on the wire.
2595 			 */
2596 			if (txop == BG_OP_IN_CRC_OUT_CRC) {
2597 				txop = BG_OP_RAW_MODE;
2598 				rxop = BG_OP_RAW_MODE;
2599 			}
2600 		}
2601 
2602 
2603 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2604 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2605 		else
2606 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2607 
2608 		/* setup DISEED with the rest of the info */
2609 		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2610 		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2611 
2612 		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2613 		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2614 
2615 		/* Endianness conversion if necessary for DISEED */
2616 		diseed->word2 = cpu_to_le32(diseed->word2);
2617 		diseed->word3 = cpu_to_le32(diseed->word3);
2618 
2619 		/* advance sgl and increment bde count */
2620 		num_sge++;
2621 		sgl++;
2622 
2623 		/* setup the first BDE that points to protection buffer */
2624 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2625 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2626 
2627 		/* must be integer multiple of the DIF block length */
2628 		BUG_ON(protgroup_len % 8);
2629 
2630 		/* Now setup DIF SGE */
2631 		sgl->word2 = 0;
2632 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2633 		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2634 		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2635 		sgl->word2 = cpu_to_le32(sgl->word2);
2636 
2637 		protgrp_blks = protgroup_len / 8;
2638 		protgrp_bytes = protgrp_blks * blksize;
2639 
2640 		/* check if DIF SGE is crossing the 4K boundary; if so split */
2641 		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2642 			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2643 			protgroup_offset += protgroup_remainder;
2644 			protgrp_blks = protgroup_remainder / 8;
2645 			protgrp_bytes = protgrp_blks * blksize;
2646 		} else {
2647 			protgroup_offset = 0;
2648 			curr_prot++;
2649 		}
2650 
2651 		num_sge++;
2652 
2653 		/* setup SGE's for data blocks associated with DIF data */
2654 		pgdone = 0;
2655 		subtotal = 0; /* total bytes processed for current prot grp */
2656 		while (!pgdone) {
2657 			/* Check to see if we ran out of space */
2658 			if (num_sge >= phba->cfg_total_seg_cnt)
2659 				return num_sge + 1;
2660 
2661 			if (!sgde) {
2662 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2663 					"9086 BLKGRD:%s Invalid data segment\n",
2664 						__func__);
2665 				return 0;
2666 			}
2667 			sgl++;
2668 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2669 
2670 			remainder = sg_dma_len(sgde) - split_offset;
2671 
2672 			if ((subtotal + remainder) <= protgrp_bytes) {
2673 				/* we can use this whole buffer */
2674 				dma_len = remainder;
2675 				split_offset = 0;
2676 
2677 				if ((subtotal + remainder) == protgrp_bytes)
2678 					pgdone = 1;
2679 			} else {
2680 				/* must split this buffer with next prot grp */
2681 				dma_len = protgrp_bytes - subtotal;
2682 				split_offset += dma_len;
2683 			}
2684 
2685 			subtotal += dma_len;
2686 
2687 			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2688 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2689 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2690 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2691 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2692 
2693 			sgl->sge_len = cpu_to_le32(dma_len);
2694 			dma_offset += dma_len;
2695 
2696 			num_sge++;
2697 			curr_data++;
2698 
2699 			if (split_offset)
2700 				break;
2701 
2702 			/* Move to the next s/g segment if possible */
2703 			sgde = sg_next(sgde);
2704 		}
2705 
2706 		if (protgroup_offset) {
2707 			/* update the reference tag */
2708 			reftag += protgrp_blks;
2709 			sgl++;
2710 			continue;
2711 		}
2712 
2713 		/* are we done ? */
2714 		if (curr_prot == protcnt) {
2715 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2716 			alldone = 1;
2717 		} else if (curr_prot < protcnt) {
2718 			/* advance to next prot buffer */
2719 			sgpe = sg_next(sgpe);
2720 			sgl++;
2721 
2722 			/* update the reference tag */
2723 			reftag += protgrp_blks;
2724 		} else {
2725 			/* if we're here, we have a bug */
2726 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2727 				"9085 BLKGRD: bug in %s\n", __func__);
2728 		}
2729 
2730 	} while (!alldone);
2731 
2732 out:
2733 
2734 	return num_sge;
2735 }
2736 
2737 /**
2738  * lpfc_prot_group_type - Get prtotection group type of SCSI command
2739  * @phba: The Hba for which this call is being executed.
2740  * @sc: pointer to scsi command we're working on
2741  *
2742  * Given a SCSI command that supports DIF, determine composition of protection
2743  * groups involved in setting up buffer lists
2744  *
2745  * Returns: Protection group type (with or without DIF)
2746  *
2747  **/
2748 static int
2749 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2750 {
2751 	int ret = LPFC_PG_TYPE_INVALID;
2752 	unsigned char op = scsi_get_prot_op(sc);
2753 
2754 	switch (op) {
2755 	case SCSI_PROT_READ_STRIP:
2756 	case SCSI_PROT_WRITE_INSERT:
2757 		ret = LPFC_PG_TYPE_NO_DIF;
2758 		break;
2759 	case SCSI_PROT_READ_INSERT:
2760 	case SCSI_PROT_WRITE_STRIP:
2761 	case SCSI_PROT_READ_PASS:
2762 	case SCSI_PROT_WRITE_PASS:
2763 		ret = LPFC_PG_TYPE_DIF_BUF;
2764 		break;
2765 	default:
2766 		if (phba)
2767 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2768 					"9021 Unsupported protection op:%d\n",
2769 					op);
2770 		break;
2771 	}
2772 	return ret;
2773 }
2774 
2775 /**
2776  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2777  * @phba: The Hba for which this call is being executed.
2778  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2779  *
2780  * Adjust the data length to account for how much data
2781  * is actually on the wire.
2782  *
2783  * returns the adjusted data length
2784  **/
2785 static int
2786 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2787 		       struct lpfc_scsi_buf *lpfc_cmd)
2788 {
2789 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2790 	int fcpdl;
2791 
2792 	fcpdl = scsi_bufflen(sc);
2793 
2794 	/* Check if there is protection data on the wire */
2795 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2796 		/* Read check for protection data */
2797 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2798 			return fcpdl;
2799 
2800 	} else {
2801 		/* Write check for protection data */
2802 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2803 			return fcpdl;
2804 	}
2805 
2806 	/*
2807 	 * If we are in DIF Type 1 mode every data block has a 8 byte
2808 	 * DIF (trailer) attached to it. Must ajust FCP data length
2809 	 * to account for the protection data.
2810 	 */
2811 	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2812 
2813 	return fcpdl;
2814 }
2815 
2816 /**
2817  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2818  * @phba: The Hba for which this call is being executed.
2819  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2820  *
2821  * This is the protection/DIF aware version of
2822  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2823  * two functions eventually, but for now, it's here
2824  **/
2825 static int
2826 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2827 		struct lpfc_scsi_buf *lpfc_cmd)
2828 {
2829 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2830 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2831 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2832 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2833 	uint32_t num_bde = 0;
2834 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2835 	int prot_group_type = 0;
2836 	int fcpdl;
2837 
2838 	/*
2839 	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2840 	 *  fcp_rsp regions to the first data bde entry
2841 	 */
2842 	bpl += 2;
2843 	if (scsi_sg_count(scsi_cmnd)) {
2844 		/*
2845 		 * The driver stores the segment count returned from pci_map_sg
2846 		 * because this a count of dma-mappings used to map the use_sg
2847 		 * pages.  They are not guaranteed to be the same for those
2848 		 * architectures that implement an IOMMU.
2849 		 */
2850 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
2851 					scsi_sglist(scsi_cmnd),
2852 					scsi_sg_count(scsi_cmnd), datadir);
2853 		if (unlikely(!datasegcnt))
2854 			return 1;
2855 
2856 		lpfc_cmd->seg_cnt = datasegcnt;
2857 
2858 		/* First check if data segment count from SCSI Layer is good */
2859 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2860 			goto err;
2861 
2862 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2863 
2864 		switch (prot_group_type) {
2865 		case LPFC_PG_TYPE_NO_DIF:
2866 
2867 			/* Here we need to add a PDE5 and PDE6 to the count */
2868 			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2869 				goto err;
2870 
2871 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2872 					datasegcnt);
2873 			/* we should have 2 or more entries in buffer list */
2874 			if (num_bde < 2)
2875 				goto err;
2876 			break;
2877 
2878 		case LPFC_PG_TYPE_DIF_BUF:
2879 			/*
2880 			 * This type indicates that protection buffers are
2881 			 * passed to the driver, so that needs to be prepared
2882 			 * for DMA
2883 			 */
2884 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
2885 					scsi_prot_sglist(scsi_cmnd),
2886 					scsi_prot_sg_count(scsi_cmnd), datadir);
2887 			if (unlikely(!protsegcnt)) {
2888 				scsi_dma_unmap(scsi_cmnd);
2889 				return 1;
2890 			}
2891 
2892 			lpfc_cmd->prot_seg_cnt = protsegcnt;
2893 
2894 			/*
2895 			 * There is a minimun of 4 BPLs used for every
2896 			 * protection data segment.
2897 			 */
2898 			if ((lpfc_cmd->prot_seg_cnt * 4) >
2899 			    (phba->cfg_total_seg_cnt - 2))
2900 				goto err;
2901 
2902 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2903 					datasegcnt, protsegcnt);
2904 			/* we should have 3 or more entries in buffer list */
2905 			if ((num_bde < 3) ||
2906 			    (num_bde > phba->cfg_total_seg_cnt))
2907 				goto err;
2908 			break;
2909 
2910 		case LPFC_PG_TYPE_INVALID:
2911 		default:
2912 			scsi_dma_unmap(scsi_cmnd);
2913 			lpfc_cmd->seg_cnt = 0;
2914 
2915 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2916 					"9022 Unexpected protection group %i\n",
2917 					prot_group_type);
2918 			return 1;
2919 		}
2920 	}
2921 
2922 	/*
2923 	 * Finish initializing those IOCB fields that are dependent on the
2924 	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2925 	 * reinitialized since all iocb memory resources are used many times
2926 	 * for transmit, receive, and continuation bpl's.
2927 	 */
2928 	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2929 	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2930 	iocb_cmd->ulpBdeCount = 1;
2931 	iocb_cmd->ulpLe = 1;
2932 
2933 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2934 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2935 
2936 	/*
2937 	 * Due to difference in data length between DIF/non-DIF paths,
2938 	 * we need to set word 4 of IOCB here
2939 	 */
2940 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2941 
2942 	return 0;
2943 err:
2944 	if (lpfc_cmd->seg_cnt)
2945 		scsi_dma_unmap(scsi_cmnd);
2946 	if (lpfc_cmd->prot_seg_cnt)
2947 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2948 			     scsi_prot_sg_count(scsi_cmnd),
2949 			     scsi_cmnd->sc_data_direction);
2950 
2951 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2952 			"9023 Cannot setup S/G List for HBA"
2953 			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2954 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2955 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2956 			prot_group_type, num_bde);
2957 
2958 	lpfc_cmd->seg_cnt = 0;
2959 	lpfc_cmd->prot_seg_cnt = 0;
2960 	return 1;
2961 }
2962 
2963 /*
2964  * This function calcuates the T10 DIF guard tag
2965  * on the specified data using a CRC algorithmn
2966  * using crc_t10dif.
2967  */
2968 uint16_t
2969 lpfc_bg_crc(uint8_t *data, int count)
2970 {
2971 	uint16_t crc = 0;
2972 	uint16_t x;
2973 
2974 	crc = crc_t10dif(data, count);
2975 	x = cpu_to_be16(crc);
2976 	return x;
2977 }
2978 
2979 /*
2980  * This function calcuates the T10 DIF guard tag
2981  * on the specified data using a CSUM algorithmn
2982  * using ip_compute_csum.
2983  */
2984 uint16_t
2985 lpfc_bg_csum(uint8_t *data, int count)
2986 {
2987 	uint16_t ret;
2988 
2989 	ret = ip_compute_csum(data, count);
2990 	return ret;
2991 }
2992 
2993 /*
2994  * This function examines the protection data to try to determine
2995  * what type of T10-DIF error occurred.
2996  */
2997 void
2998 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2999 {
3000 	struct scatterlist *sgpe; /* s/g prot entry */
3001 	struct scatterlist *sgde; /* s/g data entry */
3002 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3003 	struct scsi_dif_tuple *src = NULL;
3004 	uint8_t *data_src = NULL;
3005 	uint16_t guard_tag, guard_type;
3006 	uint16_t start_app_tag, app_tag;
3007 	uint32_t start_ref_tag, ref_tag;
3008 	int prot, protsegcnt;
3009 	int err_type, len, data_len;
3010 	int chk_ref, chk_app, chk_guard;
3011 	uint16_t sum;
3012 	unsigned blksize;
3013 
3014 	err_type = BGS_GUARD_ERR_MASK;
3015 	sum = 0;
3016 	guard_tag = 0;
3017 
3018 	/* First check to see if there is protection data to examine */
3019 	prot = scsi_get_prot_op(cmd);
3020 	if ((prot == SCSI_PROT_READ_STRIP) ||
3021 	    (prot == SCSI_PROT_WRITE_INSERT) ||
3022 	    (prot == SCSI_PROT_NORMAL))
3023 		goto out;
3024 
3025 	/* Currently the driver just supports ref_tag and guard_tag checking */
3026 	chk_ref = 1;
3027 	chk_app = 0;
3028 	chk_guard = 0;
3029 
3030 	/* Setup a ptr to the protection data provided by the SCSI host */
3031 	sgpe = scsi_prot_sglist(cmd);
3032 	protsegcnt = lpfc_cmd->prot_seg_cnt;
3033 
3034 	if (sgpe && protsegcnt) {
3035 
3036 		/*
3037 		 * We will only try to verify guard tag if the segment
3038 		 * data length is a multiple of the blksize.
3039 		 */
3040 		sgde = scsi_sglist(cmd);
3041 		blksize = lpfc_cmd_blksize(cmd);
3042 		data_src = (uint8_t *)sg_virt(sgde);
3043 		data_len = sgde->length;
3044 		if ((data_len & (blksize - 1)) == 0)
3045 			chk_guard = 1;
3046 		guard_type = scsi_host_get_guard(cmd->device->host);
3047 
3048 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3049 		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
3050 		start_app_tag = src->app_tag;
3051 		len = sgpe->length;
3052 		while (src && protsegcnt) {
3053 			while (len) {
3054 
3055 				/*
3056 				 * First check to see if a protection data
3057 				 * check is valid
3058 				 */
3059 				if ((src->ref_tag == 0xffffffff) ||
3060 				    (src->app_tag == 0xffff)) {
3061 					start_ref_tag++;
3062 					goto skipit;
3063 				}
3064 
3065 				/* First Guard Tag checking */
3066 				if (chk_guard) {
3067 					guard_tag = src->guard_tag;
3068 					if (lpfc_cmd_guard_csum(cmd))
3069 						sum = lpfc_bg_csum(data_src,
3070 								   blksize);
3071 					else
3072 						sum = lpfc_bg_crc(data_src,
3073 								  blksize);
3074 					if ((guard_tag != sum)) {
3075 						err_type = BGS_GUARD_ERR_MASK;
3076 						goto out;
3077 					}
3078 				}
3079 
3080 				/* Reference Tag checking */
3081 				ref_tag = be32_to_cpu(src->ref_tag);
3082 				if (chk_ref && (ref_tag != start_ref_tag)) {
3083 					err_type = BGS_REFTAG_ERR_MASK;
3084 					goto out;
3085 				}
3086 				start_ref_tag++;
3087 
3088 				/* App Tag checking */
3089 				app_tag = src->app_tag;
3090 				if (chk_app && (app_tag != start_app_tag)) {
3091 					err_type = BGS_APPTAG_ERR_MASK;
3092 					goto out;
3093 				}
3094 skipit:
3095 				len -= sizeof(struct scsi_dif_tuple);
3096 				if (len < 0)
3097 					len = 0;
3098 				src++;
3099 
3100 				data_src += blksize;
3101 				data_len -= blksize;
3102 
3103 				/*
3104 				 * Are we at the end of the Data segment?
3105 				 * The data segment is only used for Guard
3106 				 * tag checking.
3107 				 */
3108 				if (chk_guard && (data_len == 0)) {
3109 					chk_guard = 0;
3110 					sgde = sg_next(sgde);
3111 					if (!sgde)
3112 						goto out;
3113 
3114 					data_src = (uint8_t *)sg_virt(sgde);
3115 					data_len = sgde->length;
3116 					if ((data_len & (blksize - 1)) == 0)
3117 						chk_guard = 1;
3118 				}
3119 			}
3120 
3121 			/* Goto the next Protection data segment */
3122 			sgpe = sg_next(sgpe);
3123 			if (sgpe) {
3124 				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3125 				len = sgpe->length;
3126 			} else {
3127 				src = NULL;
3128 			}
3129 			protsegcnt--;
3130 		}
3131 	}
3132 out:
3133 	if (err_type == BGS_GUARD_ERR_MASK) {
3134 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3135 					0x10, 0x1);
3136 		cmd->result = DRIVER_SENSE << 24
3137 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3138 		phba->bg_guard_err_cnt++;
3139 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3140 				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3141 				(unsigned long)scsi_get_lba(cmd),
3142 				sum, guard_tag);
3143 
3144 	} else if (err_type == BGS_REFTAG_ERR_MASK) {
3145 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3146 					0x10, 0x3);
3147 		cmd->result = DRIVER_SENSE << 24
3148 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3149 
3150 		phba->bg_reftag_err_cnt++;
3151 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3152 				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3153 				(unsigned long)scsi_get_lba(cmd),
3154 				ref_tag, start_ref_tag);
3155 
3156 	} else if (err_type == BGS_APPTAG_ERR_MASK) {
3157 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3158 					0x10, 0x2);
3159 		cmd->result = DRIVER_SENSE << 24
3160 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3161 
3162 		phba->bg_apptag_err_cnt++;
3163 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3164 				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3165 				(unsigned long)scsi_get_lba(cmd),
3166 				app_tag, start_app_tag);
3167 	}
3168 }
3169 
3170 
3171 /*
3172  * This function checks for BlockGuard errors detected by
3173  * the HBA.  In case of errors, the ASC/ASCQ fields in the
3174  * sense buffer will be set accordingly, paired with
3175  * ILLEGAL_REQUEST to signal to the kernel that the HBA
3176  * detected corruption.
3177  *
3178  * Returns:
3179  *  0 - No error found
3180  *  1 - BlockGuard error found
3181  * -1 - Internal error (bad profile, ...etc)
3182  */
3183 static int
3184 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
3185 			struct lpfc_iocbq *pIocbOut)
3186 {
3187 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3188 	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3189 	int ret = 0;
3190 	uint32_t bghm = bgf->bghm;
3191 	uint32_t bgstat = bgf->bgstat;
3192 	uint64_t failing_sector = 0;
3193 
3194 	spin_lock(&_dump_buf_lock);
3195 	if (!_dump_buf_done) {
3196 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
3197 			" Data for %u blocks to debugfs\n",
3198 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3199 		lpfc_debug_save_data(phba, cmd);
3200 
3201 		/* If we have a prot sgl, save the DIF buffer */
3202 		if (lpfc_prot_group_type(phba, cmd) ==
3203 				LPFC_PG_TYPE_DIF_BUF) {
3204 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
3205 				"Saving DIF for %u blocks to debugfs\n",
3206 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3207 			lpfc_debug_save_dif(phba, cmd);
3208 		}
3209 
3210 		_dump_buf_done = 1;
3211 	}
3212 	spin_unlock(&_dump_buf_lock);
3213 
3214 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
3215 		cmd->result = ScsiResult(DID_ERROR, 0);
3216 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3217 				"9072 BLKGRD: Invalid BG Profile in cmd"
3218 				" 0x%x lba 0x%llx blk cnt 0x%x "
3219 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3220 				(unsigned long long)scsi_get_lba(cmd),
3221 				blk_rq_sectors(cmd->request), bgstat, bghm);
3222 		ret = (-1);
3223 		goto out;
3224 	}
3225 
3226 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3227 		cmd->result = ScsiResult(DID_ERROR, 0);
3228 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3229 				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
3230 				" 0x%x lba 0x%llx blk cnt 0x%x "
3231 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3232 				(unsigned long long)scsi_get_lba(cmd),
3233 				blk_rq_sectors(cmd->request), bgstat, bghm);
3234 		ret = (-1);
3235 		goto out;
3236 	}
3237 
3238 	if (lpfc_bgs_get_guard_err(bgstat)) {
3239 		ret = 1;
3240 
3241 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3242 				0x10, 0x1);
3243 		cmd->result = DRIVER_SENSE << 24
3244 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3245 		phba->bg_guard_err_cnt++;
3246 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3247 				"9055 BLKGRD: Guard Tag error in cmd"
3248 				" 0x%x lba 0x%llx blk cnt 0x%x "
3249 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3250 				(unsigned long long)scsi_get_lba(cmd),
3251 				blk_rq_sectors(cmd->request), bgstat, bghm);
3252 	}
3253 
3254 	if (lpfc_bgs_get_reftag_err(bgstat)) {
3255 		ret = 1;
3256 
3257 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3258 				0x10, 0x3);
3259 		cmd->result = DRIVER_SENSE << 24
3260 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3261 
3262 		phba->bg_reftag_err_cnt++;
3263 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3264 				"9056 BLKGRD: Ref Tag error in cmd"
3265 				" 0x%x lba 0x%llx blk cnt 0x%x "
3266 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3267 				(unsigned long long)scsi_get_lba(cmd),
3268 				blk_rq_sectors(cmd->request), bgstat, bghm);
3269 	}
3270 
3271 	if (lpfc_bgs_get_apptag_err(bgstat)) {
3272 		ret = 1;
3273 
3274 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3275 				0x10, 0x2);
3276 		cmd->result = DRIVER_SENSE << 24
3277 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3278 
3279 		phba->bg_apptag_err_cnt++;
3280 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3281 				"9061 BLKGRD: App Tag error in cmd"
3282 				" 0x%x lba 0x%llx blk cnt 0x%x "
3283 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3284 				(unsigned long long)scsi_get_lba(cmd),
3285 				blk_rq_sectors(cmd->request), bgstat, bghm);
3286 	}
3287 
3288 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3289 		/*
3290 		 * setup sense data descriptor 0 per SPC-4 as an information
3291 		 * field, and put the failing LBA in it.
3292 		 * This code assumes there was also a guard/app/ref tag error
3293 		 * indication.
3294 		 */
3295 		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3296 		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3297 		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3298 		cmd->sense_buffer[10] = 0x80; /* Validity bit */
3299 
3300 		/* bghm is a "on the wire" FC frame based count */
3301 		switch (scsi_get_prot_op(cmd)) {
3302 		case SCSI_PROT_READ_INSERT:
3303 		case SCSI_PROT_WRITE_STRIP:
3304 			bghm /= cmd->device->sector_size;
3305 			break;
3306 		case SCSI_PROT_READ_STRIP:
3307 		case SCSI_PROT_WRITE_INSERT:
3308 		case SCSI_PROT_READ_PASS:
3309 		case SCSI_PROT_WRITE_PASS:
3310 			bghm /= (cmd->device->sector_size +
3311 				sizeof(struct scsi_dif_tuple));
3312 			break;
3313 		}
3314 
3315 		failing_sector = scsi_get_lba(cmd);
3316 		failing_sector += bghm;
3317 
3318 		/* Descriptor Information */
3319 		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3320 	}
3321 
3322 	if (!ret) {
3323 		/* No error was reported - problem in FW? */
3324 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3325 				"9057 BLKGRD: Unknown error in cmd"
3326 				" 0x%x lba 0x%llx blk cnt 0x%x "
3327 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3328 				(unsigned long long)scsi_get_lba(cmd),
3329 				blk_rq_sectors(cmd->request), bgstat, bghm);
3330 
3331 		/* Calcuate what type of error it was */
3332 		lpfc_calc_bg_err(phba, lpfc_cmd);
3333 	}
3334 out:
3335 	return ret;
3336 }
3337 
3338 /**
3339  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3340  * @phba: The Hba for which this call is being executed.
3341  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3342  *
3343  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3344  * field of @lpfc_cmd for device with SLI-4 interface spec.
3345  *
3346  * Return codes:
3347  *	1 - Error
3348  *	0 - Success
3349  **/
3350 static int
3351 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3352 {
3353 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3354 	struct scatterlist *sgel = NULL;
3355 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3356 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
3357 	struct sli4_sge *first_data_sgl;
3358 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3359 	dma_addr_t physaddr;
3360 	uint32_t num_bde = 0;
3361 	uint32_t dma_len;
3362 	uint32_t dma_offset = 0;
3363 	int nseg;
3364 	struct ulp_bde64 *bde;
3365 
3366 	/*
3367 	 * There are three possibilities here - use scatter-gather segment, use
3368 	 * the single mapping, or neither.  Start the lpfc command prep by
3369 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3370 	 * data bde entry.
3371 	 */
3372 	if (scsi_sg_count(scsi_cmnd)) {
3373 		/*
3374 		 * The driver stores the segment count returned from pci_map_sg
3375 		 * because this a count of dma-mappings used to map the use_sg
3376 		 * pages.  They are not guaranteed to be the same for those
3377 		 * architectures that implement an IOMMU.
3378 		 */
3379 
3380 		nseg = scsi_dma_map(scsi_cmnd);
3381 		if (unlikely(!nseg))
3382 			return 1;
3383 		sgl += 1;
3384 		/* clear the last flag in the fcp_rsp map entry */
3385 		sgl->word2 = le32_to_cpu(sgl->word2);
3386 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3387 		sgl->word2 = cpu_to_le32(sgl->word2);
3388 		sgl += 1;
3389 		first_data_sgl = sgl;
3390 		lpfc_cmd->seg_cnt = nseg;
3391 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3392 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3393 				" %s: Too many sg segments from "
3394 				"dma_map_sg.  Config %d, seg_cnt %d\n",
3395 				__func__, phba->cfg_sg_seg_cnt,
3396 			       lpfc_cmd->seg_cnt);
3397 			lpfc_cmd->seg_cnt = 0;
3398 			scsi_dma_unmap(scsi_cmnd);
3399 			return 1;
3400 		}
3401 
3402 		/*
3403 		 * The driver established a maximum scatter-gather segment count
3404 		 * during probe that limits the number of sg elements in any
3405 		 * single scsi command.  Just run through the seg_cnt and format
3406 		 * the sge's.
3407 		 * When using SLI-3 the driver will try to fit all the BDEs into
3408 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
3409 		 * does for SLI-2 mode.
3410 		 */
3411 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3412 			physaddr = sg_dma_address(sgel);
3413 			dma_len = sg_dma_len(sgel);
3414 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3415 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3416 			sgl->word2 = le32_to_cpu(sgl->word2);
3417 			if ((num_bde + 1) == nseg)
3418 				bf_set(lpfc_sli4_sge_last, sgl, 1);
3419 			else
3420 				bf_set(lpfc_sli4_sge_last, sgl, 0);
3421 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3422 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3423 			sgl->word2 = cpu_to_le32(sgl->word2);
3424 			sgl->sge_len = cpu_to_le32(dma_len);
3425 			dma_offset += dma_len;
3426 			sgl++;
3427 		}
3428 		/* setup the performance hint (first data BDE) if enabled */
3429 		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3430 			bde = (struct ulp_bde64 *)
3431 					&(iocb_cmd->unsli3.sli3Words[5]);
3432 			bde->addrLow = first_data_sgl->addr_lo;
3433 			bde->addrHigh = first_data_sgl->addr_hi;
3434 			bde->tus.f.bdeSize =
3435 					le32_to_cpu(first_data_sgl->sge_len);
3436 			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3437 			bde->tus.w = cpu_to_le32(bde->tus.w);
3438 		}
3439 	} else {
3440 		sgl += 1;
3441 		/* clear the last flag in the fcp_rsp map entry */
3442 		sgl->word2 = le32_to_cpu(sgl->word2);
3443 		bf_set(lpfc_sli4_sge_last, sgl, 1);
3444 		sgl->word2 = cpu_to_le32(sgl->word2);
3445 	}
3446 
3447 	/*
3448 	 * Finish initializing those IOCB fields that are dependent on the
3449 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3450 	 * explicitly reinitialized.
3451 	 * all iocb memory resources are reused.
3452 	 */
3453 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3454 
3455 	/*
3456 	 * Due to difference in data length between DIF/non-DIF paths,
3457 	 * we need to set word 4 of IOCB here
3458 	 */
3459 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3460 
3461 	/*
3462 	 * If the OAS driver feature is enabled and the lun is enabled for
3463 	 * OAS, set the oas iocb related flags.
3464 	 */
3465 	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3466 		scsi_cmnd->device->hostdata)->oas_enabled)
3467 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
3468 	return 0;
3469 }
3470 
3471 /**
3472  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3473  * @phba: The Hba for which this call is being executed.
3474  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3475  *
3476  * This is the protection/DIF aware version of
3477  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3478  * two functions eventually, but for now, it's here
3479  **/
3480 static int
3481 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3482 		struct lpfc_scsi_buf *lpfc_cmd)
3483 {
3484 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3485 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3486 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3487 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3488 	uint32_t num_sge = 0;
3489 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3490 	int prot_group_type = 0;
3491 	int fcpdl;
3492 
3493 	/*
3494 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3495 	 *  fcp_rsp regions to the first data sge entry
3496 	 */
3497 	if (scsi_sg_count(scsi_cmnd)) {
3498 		/*
3499 		 * The driver stores the segment count returned from pci_map_sg
3500 		 * because this a count of dma-mappings used to map the use_sg
3501 		 * pages.  They are not guaranteed to be the same for those
3502 		 * architectures that implement an IOMMU.
3503 		 */
3504 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3505 					scsi_sglist(scsi_cmnd),
3506 					scsi_sg_count(scsi_cmnd), datadir);
3507 		if (unlikely(!datasegcnt))
3508 			return 1;
3509 
3510 		sgl += 1;
3511 		/* clear the last flag in the fcp_rsp map entry */
3512 		sgl->word2 = le32_to_cpu(sgl->word2);
3513 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3514 		sgl->word2 = cpu_to_le32(sgl->word2);
3515 
3516 		sgl += 1;
3517 		lpfc_cmd->seg_cnt = datasegcnt;
3518 
3519 		/* First check if data segment count from SCSI Layer is good */
3520 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3521 			goto err;
3522 
3523 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3524 
3525 		switch (prot_group_type) {
3526 		case LPFC_PG_TYPE_NO_DIF:
3527 			/* Here we need to add a DISEED to the count */
3528 			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3529 				goto err;
3530 
3531 			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3532 					datasegcnt);
3533 
3534 			/* we should have 2 or more entries in buffer list */
3535 			if (num_sge < 2)
3536 				goto err;
3537 			break;
3538 
3539 		case LPFC_PG_TYPE_DIF_BUF:
3540 			/*
3541 			 * This type indicates that protection buffers are
3542 			 * passed to the driver, so that needs to be prepared
3543 			 * for DMA
3544 			 */
3545 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3546 					scsi_prot_sglist(scsi_cmnd),
3547 					scsi_prot_sg_count(scsi_cmnd), datadir);
3548 			if (unlikely(!protsegcnt)) {
3549 				scsi_dma_unmap(scsi_cmnd);
3550 				return 1;
3551 			}
3552 
3553 			lpfc_cmd->prot_seg_cnt = protsegcnt;
3554 			/*
3555 			 * There is a minimun of 3 SGEs used for every
3556 			 * protection data segment.
3557 			 */
3558 			if ((lpfc_cmd->prot_seg_cnt * 3) >
3559 			    (phba->cfg_total_seg_cnt - 2))
3560 				goto err;
3561 
3562 			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3563 					datasegcnt, protsegcnt);
3564 
3565 			/* we should have 3 or more entries in buffer list */
3566 			if ((num_sge < 3) ||
3567 			    (num_sge > phba->cfg_total_seg_cnt))
3568 				goto err;
3569 			break;
3570 
3571 		case LPFC_PG_TYPE_INVALID:
3572 		default:
3573 			scsi_dma_unmap(scsi_cmnd);
3574 			lpfc_cmd->seg_cnt = 0;
3575 
3576 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3577 					"9083 Unexpected protection group %i\n",
3578 					prot_group_type);
3579 			return 1;
3580 		}
3581 	}
3582 
3583 	switch (scsi_get_prot_op(scsi_cmnd)) {
3584 	case SCSI_PROT_WRITE_STRIP:
3585 	case SCSI_PROT_READ_STRIP:
3586 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3587 		break;
3588 	case SCSI_PROT_WRITE_INSERT:
3589 	case SCSI_PROT_READ_INSERT:
3590 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3591 		break;
3592 	case SCSI_PROT_WRITE_PASS:
3593 	case SCSI_PROT_READ_PASS:
3594 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3595 		break;
3596 	}
3597 
3598 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3599 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3600 
3601 	/*
3602 	 * Due to difference in data length between DIF/non-DIF paths,
3603 	 * we need to set word 4 of IOCB here
3604 	 */
3605 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3606 
3607 	return 0;
3608 err:
3609 	if (lpfc_cmd->seg_cnt)
3610 		scsi_dma_unmap(scsi_cmnd);
3611 	if (lpfc_cmd->prot_seg_cnt)
3612 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3613 			     scsi_prot_sg_count(scsi_cmnd),
3614 			     scsi_cmnd->sc_data_direction);
3615 
3616 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3617 			"9084 Cannot setup S/G List for HBA"
3618 			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3619 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3620 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3621 			prot_group_type, num_sge);
3622 
3623 	lpfc_cmd->seg_cnt = 0;
3624 	lpfc_cmd->prot_seg_cnt = 0;
3625 	return 1;
3626 }
3627 
3628 /**
3629  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3630  * @phba: The Hba for which this call is being executed.
3631  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3632  *
3633  * This routine wraps the actual DMA mapping function pointer from the
3634  * lpfc_hba struct.
3635  *
3636  * Return codes:
3637  *	1 - Error
3638  *	0 - Success
3639  **/
3640 static inline int
3641 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3642 {
3643 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3644 }
3645 
3646 /**
3647  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3648  * using BlockGuard.
3649  * @phba: The Hba for which this call is being executed.
3650  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3651  *
3652  * This routine wraps the actual DMA mapping function pointer from the
3653  * lpfc_hba struct.
3654  *
3655  * Return codes:
3656  *	1 - Error
3657  *	0 - Success
3658  **/
3659 static inline int
3660 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3661 {
3662 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3663 }
3664 
3665 /**
3666  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3667  * @phba: Pointer to hba context object.
3668  * @vport: Pointer to vport object.
3669  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3670  * @rsp_iocb: Pointer to response iocb object which reported error.
3671  *
3672  * This function posts an event when there is a SCSI command reporting
3673  * error from the scsi device.
3674  **/
3675 static void
3676 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3677 		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3678 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3679 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3680 	uint32_t resp_info = fcprsp->rspStatus2;
3681 	uint32_t scsi_status = fcprsp->rspStatus3;
3682 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3683 	struct lpfc_fast_path_event *fast_path_evt = NULL;
3684 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3685 	unsigned long flags;
3686 
3687 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3688 		return;
3689 
3690 	/* If there is queuefull or busy condition send a scsi event */
3691 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3692 		(cmnd->result == SAM_STAT_BUSY)) {
3693 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3694 		if (!fast_path_evt)
3695 			return;
3696 		fast_path_evt->un.scsi_evt.event_type =
3697 			FC_REG_SCSI_EVENT;
3698 		fast_path_evt->un.scsi_evt.subcategory =
3699 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3700 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3701 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3702 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3703 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3704 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3705 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3706 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3707 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3708 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3709 		if (!fast_path_evt)
3710 			return;
3711 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3712 			FC_REG_SCSI_EVENT;
3713 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3714 			LPFC_EVENT_CHECK_COND;
3715 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
3716 			cmnd->device->lun;
3717 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3718 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3719 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3720 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3721 		fast_path_evt->un.check_cond_evt.sense_key =
3722 			cmnd->sense_buffer[2] & 0xf;
3723 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3724 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3725 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3726 		     fcpi_parm &&
3727 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3728 			((scsi_status == SAM_STAT_GOOD) &&
3729 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
3730 		/*
3731 		 * If status is good or resid does not match with fcp_param and
3732 		 * there is valid fcpi_parm, then there is a read_check error
3733 		 */
3734 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3735 		if (!fast_path_evt)
3736 			return;
3737 		fast_path_evt->un.read_check_error.header.event_type =
3738 			FC_REG_FABRIC_EVENT;
3739 		fast_path_evt->un.read_check_error.header.subcategory =
3740 			LPFC_EVENT_FCPRDCHKERR;
3741 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3742 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3743 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3744 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3745 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3746 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3747 		fast_path_evt->un.read_check_error.fcpiparam =
3748 			fcpi_parm;
3749 	} else
3750 		return;
3751 
3752 	fast_path_evt->vport = vport;
3753 	spin_lock_irqsave(&phba->hbalock, flags);
3754 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3755 	spin_unlock_irqrestore(&phba->hbalock, flags);
3756 	lpfc_worker_wake_up(phba);
3757 	return;
3758 }
3759 
3760 /**
3761  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3762  * @phba: The HBA for which this call is being executed.
3763  * @psb: The scsi buffer which is going to be un-mapped.
3764  *
3765  * This routine does DMA un-mapping of scatter gather list of scsi command
3766  * field of @lpfc_cmd for device with SLI-3 interface spec.
3767  **/
3768 static void
3769 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3770 {
3771 	/*
3772 	 * There are only two special cases to consider.  (1) the scsi command
3773 	 * requested scatter-gather usage or (2) the scsi command allocated
3774 	 * a request buffer, but did not request use_sg.  There is a third
3775 	 * case, but it does not require resource deallocation.
3776 	 */
3777 	if (psb->seg_cnt > 0)
3778 		scsi_dma_unmap(psb->pCmd);
3779 	if (psb->prot_seg_cnt > 0)
3780 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3781 				scsi_prot_sg_count(psb->pCmd),
3782 				psb->pCmd->sc_data_direction);
3783 }
3784 
3785 /**
3786  * lpfc_handler_fcp_err - FCP response handler
3787  * @vport: The virtual port for which this call is being executed.
3788  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3789  * @rsp_iocb: The response IOCB which contains FCP error.
3790  *
3791  * This routine is called to process response IOCB with status field
3792  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3793  * based upon SCSI and FCP error.
3794  **/
3795 static void
3796 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3797 		    struct lpfc_iocbq *rsp_iocb)
3798 {
3799 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3800 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3801 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3802 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3803 	uint32_t resp_info = fcprsp->rspStatus2;
3804 	uint32_t scsi_status = fcprsp->rspStatus3;
3805 	uint32_t *lp;
3806 	uint32_t host_status = DID_OK;
3807 	uint32_t rsplen = 0;
3808 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3809 
3810 
3811 	/*
3812 	 *  If this is a task management command, there is no
3813 	 *  scsi packet associated with this lpfc_cmd.  The driver
3814 	 *  consumes it.
3815 	 */
3816 	if (fcpcmd->fcpCntl2) {
3817 		scsi_status = 0;
3818 		goto out;
3819 	}
3820 
3821 	if (resp_info & RSP_LEN_VALID) {
3822 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3823 		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3824 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3825 				 "2719 Invalid response length: "
3826 				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3827 				 cmnd->device->id,
3828 				 cmnd->device->lun, cmnd->cmnd[0],
3829 				 rsplen);
3830 			host_status = DID_ERROR;
3831 			goto out;
3832 		}
3833 		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3834 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3835 				 "2757 Protocol failure detected during "
3836 				 "processing of FCP I/O op: "
3837 				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3838 				 cmnd->device->id,
3839 				 cmnd->device->lun, cmnd->cmnd[0],
3840 				 fcprsp->rspInfo3);
3841 			host_status = DID_ERROR;
3842 			goto out;
3843 		}
3844 	}
3845 
3846 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3847 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3848 		if (snslen > SCSI_SENSE_BUFFERSIZE)
3849 			snslen = SCSI_SENSE_BUFFERSIZE;
3850 
3851 		if (resp_info & RSP_LEN_VALID)
3852 		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
3853 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3854 	}
3855 	lp = (uint32_t *)cmnd->sense_buffer;
3856 
3857 	/* special handling for under run conditions */
3858 	if (!scsi_status && (resp_info & RESID_UNDER)) {
3859 		/* don't log under runs if fcp set... */
3860 		if (vport->cfg_log_verbose & LOG_FCP)
3861 			logit = LOG_FCP_ERROR;
3862 		/* unless operator says so */
3863 		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3864 			logit = LOG_FCP_UNDER;
3865 	}
3866 
3867 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3868 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3869 			 "Data: x%x x%x x%x x%x x%x\n",
3870 			 cmnd->cmnd[0], scsi_status,
3871 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3872 			 be32_to_cpu(fcprsp->rspResId),
3873 			 be32_to_cpu(fcprsp->rspSnsLen),
3874 			 be32_to_cpu(fcprsp->rspRspLen),
3875 			 fcprsp->rspInfo3);
3876 
3877 	scsi_set_resid(cmnd, 0);
3878 	if (resp_info & RESID_UNDER) {
3879 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3880 
3881 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3882 				 "9025 FCP Read Underrun, expected %d, "
3883 				 "residual %d Data: x%x x%x x%x\n",
3884 				 be32_to_cpu(fcpcmd->fcpDl),
3885 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3886 				 cmnd->underflow);
3887 
3888 		/*
3889 		 * If there is an under run check if under run reported by
3890 		 * storage array is same as the under run reported by HBA.
3891 		 * If this is not same, there is a dropped frame.
3892 		 */
3893 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3894 			fcpi_parm &&
3895 			(scsi_get_resid(cmnd) != fcpi_parm)) {
3896 			lpfc_printf_vlog(vport, KERN_WARNING,
3897 					 LOG_FCP | LOG_FCP_ERROR,
3898 					 "9026 FCP Read Check Error "
3899 					 "and Underrun Data: x%x x%x x%x x%x\n",
3900 					 be32_to_cpu(fcpcmd->fcpDl),
3901 					 scsi_get_resid(cmnd), fcpi_parm,
3902 					 cmnd->cmnd[0]);
3903 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3904 			host_status = DID_ERROR;
3905 		}
3906 		/*
3907 		 * The cmnd->underflow is the minimum number of bytes that must
3908 		 * be transferred for this command.  Provided a sense condition
3909 		 * is not present, make sure the actual amount transferred is at
3910 		 * least the underflow value or fail.
3911 		 */
3912 		if (!(resp_info & SNS_LEN_VALID) &&
3913 		    (scsi_status == SAM_STAT_GOOD) &&
3914 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3915 		     < cmnd->underflow)) {
3916 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3917 					 "9027 FCP command x%x residual "
3918 					 "underrun converted to error "
3919 					 "Data: x%x x%x x%x\n",
3920 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
3921 					 scsi_get_resid(cmnd), cmnd->underflow);
3922 			host_status = DID_ERROR;
3923 		}
3924 	} else if (resp_info & RESID_OVER) {
3925 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3926 				 "9028 FCP command x%x residual overrun error. "
3927 				 "Data: x%x x%x\n", cmnd->cmnd[0],
3928 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3929 		host_status = DID_ERROR;
3930 
3931 	/*
3932 	 * Check SLI validation that all the transfer was actually done
3933 	 * (fcpi_parm should be zero). Apply check only to reads.
3934 	 */
3935 	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
3936 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3937 				 "9029 FCP Read Check Error Data: "
3938 				 "x%x x%x x%x x%x x%x\n",
3939 				 be32_to_cpu(fcpcmd->fcpDl),
3940 				 be32_to_cpu(fcprsp->rspResId),
3941 				 fcpi_parm, cmnd->cmnd[0], scsi_status);
3942 		switch (scsi_status) {
3943 		case SAM_STAT_GOOD:
3944 		case SAM_STAT_CHECK_CONDITION:
3945 			/* Fabric dropped a data frame. Fail any successful
3946 			 * command in which we detected dropped frames.
3947 			 * A status of good or some check conditions could
3948 			 * be considered a successful command.
3949 			 */
3950 			host_status = DID_ERROR;
3951 			break;
3952 		}
3953 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3954 	}
3955 
3956  out:
3957 	cmnd->result = ScsiResult(host_status, scsi_status);
3958 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3959 }
3960 
3961 /**
3962  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3963  * @phba: The Hba for which this call is being executed.
3964  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3965  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3966  *
3967  * This routine assigns scsi command result by looking into response IOCB
3968  * status field appropriately. This routine handles QUEUE FULL condition as
3969  * well by ramping down device queue depth.
3970  **/
3971 static void
3972 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3973 			struct lpfc_iocbq *pIocbOut)
3974 {
3975 	struct lpfc_scsi_buf *lpfc_cmd =
3976 		(struct lpfc_scsi_buf *) pIocbIn->context1;
3977 	struct lpfc_vport      *vport = pIocbIn->vport;
3978 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3979 	struct lpfc_nodelist *pnode = rdata->pnode;
3980 	struct scsi_cmnd *cmd;
3981 	int result;
3982 	int depth;
3983 	unsigned long flags;
3984 	struct lpfc_fast_path_event *fast_path_evt;
3985 	struct Scsi_Host *shost;
3986 	uint32_t queue_depth, scsi_id;
3987 	uint32_t logit = LOG_FCP;
3988 
3989 	/* Sanity check on return of outstanding command */
3990 	if (!(lpfc_cmd->pCmd))
3991 		return;
3992 	cmd = lpfc_cmd->pCmd;
3993 	shost = cmd->device->host;
3994 
3995 	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3996 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3997 	/* pick up SLI4 exhange busy status from HBA */
3998 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3999 
4000 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4001 	if (lpfc_cmd->prot_data_type) {
4002 		struct scsi_dif_tuple *src = NULL;
4003 
4004 		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4005 		/*
4006 		 * Used to restore any changes to protection
4007 		 * data for error injection.
4008 		 */
4009 		switch (lpfc_cmd->prot_data_type) {
4010 		case LPFC_INJERR_REFTAG:
4011 			src->ref_tag =
4012 				lpfc_cmd->prot_data;
4013 			break;
4014 		case LPFC_INJERR_APPTAG:
4015 			src->app_tag =
4016 				(uint16_t)lpfc_cmd->prot_data;
4017 			break;
4018 		case LPFC_INJERR_GUARD:
4019 			src->guard_tag =
4020 				(uint16_t)lpfc_cmd->prot_data;
4021 			break;
4022 		default:
4023 			break;
4024 		}
4025 
4026 		lpfc_cmd->prot_data = 0;
4027 		lpfc_cmd->prot_data_type = 0;
4028 		lpfc_cmd->prot_data_segment = NULL;
4029 	}
4030 #endif
4031 	if (pnode && NLP_CHK_NODE_ACT(pnode))
4032 		atomic_dec(&pnode->cmd_pending);
4033 
4034 	if (lpfc_cmd->status) {
4035 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4036 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
4037 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4038 		else if (lpfc_cmd->status >= IOSTAT_CNT)
4039 			lpfc_cmd->status = IOSTAT_DEFAULT;
4040 		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4041 		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4042 		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4043 		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4044 			logit = 0;
4045 		else
4046 			logit = LOG_FCP | LOG_FCP_UNDER;
4047 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4048 			 "9030 FCP cmd x%x failed <%d/%d> "
4049 			 "status: x%x result: x%x "
4050 			 "sid: x%x did: x%x oxid: x%x "
4051 			 "Data: x%x x%x\n",
4052 			 cmd->cmnd[0],
4053 			 cmd->device ? cmd->device->id : 0xffff,
4054 			 cmd->device ? cmd->device->lun : 0xffff,
4055 			 lpfc_cmd->status, lpfc_cmd->result,
4056 			 vport->fc_myDID,
4057 			 (pnode) ? pnode->nlp_DID : 0,
4058 			 phba->sli_rev == LPFC_SLI_REV4 ?
4059 			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4060 			 pIocbOut->iocb.ulpContext,
4061 			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4062 
4063 		switch (lpfc_cmd->status) {
4064 		case IOSTAT_FCP_RSP_ERROR:
4065 			/* Call FCP RSP handler to determine result */
4066 			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
4067 			break;
4068 		case IOSTAT_NPORT_BSY:
4069 		case IOSTAT_FABRIC_BSY:
4070 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
4071 			fast_path_evt = lpfc_alloc_fast_evt(phba);
4072 			if (!fast_path_evt)
4073 				break;
4074 			fast_path_evt->un.fabric_evt.event_type =
4075 				FC_REG_FABRIC_EVENT;
4076 			fast_path_evt->un.fabric_evt.subcategory =
4077 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4078 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4079 			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4080 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4081 					&pnode->nlp_portname,
4082 					sizeof(struct lpfc_name));
4083 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4084 					&pnode->nlp_nodename,
4085 					sizeof(struct lpfc_name));
4086 			}
4087 			fast_path_evt->vport = vport;
4088 			fast_path_evt->work_evt.evt =
4089 				LPFC_EVT_FASTPATH_MGMT_EVT;
4090 			spin_lock_irqsave(&phba->hbalock, flags);
4091 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
4092 				&phba->work_list);
4093 			spin_unlock_irqrestore(&phba->hbalock, flags);
4094 			lpfc_worker_wake_up(phba);
4095 			break;
4096 		case IOSTAT_LOCAL_REJECT:
4097 		case IOSTAT_REMOTE_STOP:
4098 			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4099 			    lpfc_cmd->result ==
4100 					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4101 			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4102 			    lpfc_cmd->result ==
4103 					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4104 				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
4105 				break;
4106 			}
4107 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4108 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4109 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4110 			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4111 				cmd->result = ScsiResult(DID_REQUEUE, 0);
4112 				break;
4113 			}
4114 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4115 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4116 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4117 				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4118 					/*
4119 					 * This is a response for a BG enabled
4120 					 * cmd. Parse BG error
4121 					 */
4122 					lpfc_parse_bg_err(phba, lpfc_cmd,
4123 							pIocbOut);
4124 					break;
4125 				} else {
4126 					lpfc_printf_vlog(vport, KERN_WARNING,
4127 							LOG_BG,
4128 							"9031 non-zero BGSTAT "
4129 							"on unprotected cmd\n");
4130 				}
4131 			}
4132 			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4133 				&& (phba->sli_rev == LPFC_SLI_REV4)
4134 				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
4135 				/* This IO was aborted by the target, we don't
4136 				 * know the rxid and because we did not send the
4137 				 * ABTS we cannot generate and RRQ.
4138 				 */
4139 				lpfc_set_rrq_active(phba, pnode,
4140 					lpfc_cmd->cur_iocbq.sli4_lxritag,
4141 					0, 0);
4142 			}
4143 		/* else: fall through */
4144 		default:
4145 			cmd->result = ScsiResult(DID_ERROR, 0);
4146 			break;
4147 		}
4148 
4149 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
4150 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4151 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
4152 						 SAM_STAT_BUSY);
4153 	} else
4154 		cmd->result = ScsiResult(DID_OK, 0);
4155 
4156 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4157 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4158 
4159 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4160 				 "0710 Iodone <%d/%d> cmd %p, error "
4161 				 "x%x SNS x%x x%x Data: x%x x%x\n",
4162 				 cmd->device->id, cmd->device->lun, cmd,
4163 				 cmd->result, *lp, *(lp + 3), cmd->retries,
4164 				 scsi_get_resid(cmd));
4165 	}
4166 
4167 	lpfc_update_stats(phba, lpfc_cmd);
4168 	result = cmd->result;
4169 	if (vport->cfg_max_scsicmpl_time &&
4170 	   time_after(jiffies, lpfc_cmd->start_time +
4171 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4172 		spin_lock_irqsave(shost->host_lock, flags);
4173 		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4174 			if (pnode->cmd_qdepth >
4175 				atomic_read(&pnode->cmd_pending) &&
4176 				(atomic_read(&pnode->cmd_pending) >
4177 				LPFC_MIN_TGT_QDEPTH) &&
4178 				((cmd->cmnd[0] == READ_10) ||
4179 				(cmd->cmnd[0] == WRITE_10)))
4180 				pnode->cmd_qdepth =
4181 					atomic_read(&pnode->cmd_pending);
4182 
4183 			pnode->last_change_time = jiffies;
4184 		}
4185 		spin_unlock_irqrestore(shost->host_lock, flags);
4186 	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4187 		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
4188 		   time_after(jiffies, pnode->last_change_time +
4189 			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
4190 			spin_lock_irqsave(shost->host_lock, flags);
4191 			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
4192 				/ 100;
4193 			depth = depth ? depth : 1;
4194 			pnode->cmd_qdepth += depth;
4195 			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
4196 				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
4197 			pnode->last_change_time = jiffies;
4198 			spin_unlock_irqrestore(shost->host_lock, flags);
4199 		}
4200 	}
4201 
4202 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4203 
4204 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4205 	queue_depth = cmd->device->queue_depth;
4206 	scsi_id = cmd->device->id;
4207 	cmd->scsi_done(cmd);
4208 
4209 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4210 		spin_lock_irqsave(&phba->hbalock, flags);
4211 		lpfc_cmd->pCmd = NULL;
4212 		spin_unlock_irqrestore(&phba->hbalock, flags);
4213 
4214 		/*
4215 		 * If there is a thread waiting for command completion
4216 		 * wake up the thread.
4217 		 */
4218 		spin_lock_irqsave(shost->host_lock, flags);
4219 		if (lpfc_cmd->waitq)
4220 			wake_up(lpfc_cmd->waitq);
4221 		spin_unlock_irqrestore(shost->host_lock, flags);
4222 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4223 		return;
4224 	}
4225 
4226 	spin_lock_irqsave(&phba->hbalock, flags);
4227 	lpfc_cmd->pCmd = NULL;
4228 	spin_unlock_irqrestore(&phba->hbalock, flags);
4229 
4230 	/*
4231 	 * If there is a thread waiting for command completion
4232 	 * wake up the thread.
4233 	 */
4234 	spin_lock_irqsave(shost->host_lock, flags);
4235 	if (lpfc_cmd->waitq)
4236 		wake_up(lpfc_cmd->waitq);
4237 	spin_unlock_irqrestore(shost->host_lock, flags);
4238 
4239 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4240 }
4241 
4242 /**
4243  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4244  * @data: A pointer to the immediate command data portion of the IOCB.
4245  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4246  *
4247  * The routine copies the entire FCP command from @fcp_cmnd to @data while
4248  * byte swapping the data to big endian format for transmission on the wire.
4249  **/
4250 static void
4251 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4252 {
4253 	int i, j;
4254 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4255 	     i += sizeof(uint32_t), j++) {
4256 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4257 	}
4258 }
4259 
4260 /**
4261  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4262  * @vport: The virtual port for which this call is being executed.
4263  * @lpfc_cmd: The scsi command which needs to send.
4264  * @pnode: Pointer to lpfc_nodelist.
4265  *
4266  * This routine initializes fcp_cmnd and iocb data structure from scsi command
4267  * to transfer for device with SLI3 interface spec.
4268  **/
4269 static void
4270 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4271 		    struct lpfc_nodelist *pnode)
4272 {
4273 	struct lpfc_hba *phba = vport->phba;
4274 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4275 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4276 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4277 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4278 	int datadir = scsi_cmnd->sc_data_direction;
4279 	char tag[2];
4280 	uint8_t *ptr;
4281 	bool sli4;
4282 	uint32_t fcpdl;
4283 
4284 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4285 		return;
4286 
4287 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4288 	/* clear task management bits */
4289 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4290 
4291 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4292 			&lpfc_cmd->fcp_cmnd->fcp_lun);
4293 
4294 	ptr = &fcp_cmnd->fcpCdb[0];
4295 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4296 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4297 		ptr += scsi_cmnd->cmd_len;
4298 		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4299 	}
4300 
4301 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
4302 		switch (tag[0]) {
4303 		case HEAD_OF_QUEUE_TAG:
4304 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
4305 			break;
4306 		case ORDERED_QUEUE_TAG:
4307 			fcp_cmnd->fcpCntl1 = ORDERED_Q;
4308 			break;
4309 		default:
4310 			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4311 			break;
4312 		}
4313 	} else
4314 		fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4315 
4316 	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4317 	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4318 
4319 	/*
4320 	 * There are three possibilities here - use scatter-gather segment, use
4321 	 * the single mapping, or neither.  Start the lpfc command prep by
4322 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4323 	 * data bde entry.
4324 	 */
4325 	if (scsi_sg_count(scsi_cmnd)) {
4326 		if (datadir == DMA_TO_DEVICE) {
4327 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4328 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4329 			if (vport->cfg_first_burst_size &&
4330 			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
4331 				fcpdl = scsi_bufflen(scsi_cmnd);
4332 				if (fcpdl < vport->cfg_first_burst_size)
4333 					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4334 				else
4335 					piocbq->iocb.un.fcpi.fcpi_XRdy =
4336 						vport->cfg_first_burst_size;
4337 			}
4338 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4339 			phba->fc4OutputRequests++;
4340 		} else {
4341 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4342 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4343 			fcp_cmnd->fcpCntl3 = READ_DATA;
4344 			phba->fc4InputRequests++;
4345 		}
4346 	} else {
4347 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4348 		iocb_cmd->un.fcpi.fcpi_parm = 0;
4349 		iocb_cmd->ulpPU = 0;
4350 		fcp_cmnd->fcpCntl3 = 0;
4351 		phba->fc4ControlRequests++;
4352 	}
4353 	if (phba->sli_rev == 3 &&
4354 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4355 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4356 	/*
4357 	 * Finish initializing those IOCB fields that are independent
4358 	 * of the scsi_cmnd request_buffer
4359 	 */
4360 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
4361 	if (sli4)
4362 		piocbq->iocb.ulpContext =
4363 		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4364 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4365 		piocbq->iocb.ulpFCP2Rcvy = 1;
4366 	else
4367 		piocbq->iocb.ulpFCP2Rcvy = 0;
4368 
4369 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4370 	piocbq->context1  = lpfc_cmd;
4371 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4372 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4373 	piocbq->vport = vport;
4374 }
4375 
4376 /**
4377  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4378  * @vport: The virtual port for which this call is being executed.
4379  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4380  * @lun: Logical unit number.
4381  * @task_mgmt_cmd: SCSI task management command.
4382  *
4383  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4384  * for device with SLI-3 interface spec.
4385  *
4386  * Return codes:
4387  *   0 - Error
4388  *   1 - Success
4389  **/
4390 static int
4391 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4392 			     struct lpfc_scsi_buf *lpfc_cmd,
4393 			     unsigned int lun,
4394 			     uint8_t task_mgmt_cmd)
4395 {
4396 	struct lpfc_iocbq *piocbq;
4397 	IOCB_t *piocb;
4398 	struct fcp_cmnd *fcp_cmnd;
4399 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4400 	struct lpfc_nodelist *ndlp = rdata->pnode;
4401 
4402 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4403 	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4404 		return 0;
4405 
4406 	piocbq = &(lpfc_cmd->cur_iocbq);
4407 	piocbq->vport = vport;
4408 
4409 	piocb = &piocbq->iocb;
4410 
4411 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4412 	/* Clear out any old data in the FCP command area */
4413 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4414 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4415 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4416 	if (vport->phba->sli_rev == 3 &&
4417 	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4418 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4419 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4420 	piocb->ulpContext = ndlp->nlp_rpi;
4421 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4422 		piocb->ulpContext =
4423 		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4424 	}
4425 	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4426 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4427 	piocb->ulpPU = 0;
4428 	piocb->un.fcpi.fcpi_parm = 0;
4429 
4430 	/* ulpTimeout is only one byte */
4431 	if (lpfc_cmd->timeout > 0xff) {
4432 		/*
4433 		 * Do not timeout the command at the firmware level.
4434 		 * The driver will provide the timeout mechanism.
4435 		 */
4436 		piocb->ulpTimeout = 0;
4437 	} else
4438 		piocb->ulpTimeout = lpfc_cmd->timeout;
4439 
4440 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
4441 		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4442 
4443 	return 1;
4444 }
4445 
4446 /**
4447  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4448  * @phba: The hba struct for which this call is being executed.
4449  * @dev_grp: The HBA PCI-Device group number.
4450  *
4451  * This routine sets up the SCSI interface API function jump table in @phba
4452  * struct.
4453  * Returns: 0 - success, -ENODEV - failure.
4454  **/
4455 int
4456 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4457 {
4458 
4459 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4460 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4461 
4462 	switch (dev_grp) {
4463 	case LPFC_PCI_DEV_LP:
4464 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4465 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4466 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4467 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4468 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4469 		break;
4470 	case LPFC_PCI_DEV_OC:
4471 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4472 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4473 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4474 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4475 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4476 		break;
4477 	default:
4478 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4479 				"1418 Invalid HBA PCI-device group: 0x%x\n",
4480 				dev_grp);
4481 		return -ENODEV;
4482 		break;
4483 	}
4484 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4485 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4486 	return 0;
4487 }
4488 
4489 /**
4490  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
4491  * @phba: The Hba for which this call is being executed.
4492  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4493  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4494  *
4495  * This routine is IOCB completion routine for device reset and target reset
4496  * routine. This routine release scsi buffer associated with lpfc_cmd.
4497  **/
4498 static void
4499 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4500 			struct lpfc_iocbq *cmdiocbq,
4501 			struct lpfc_iocbq *rspiocbq)
4502 {
4503 	struct lpfc_scsi_buf *lpfc_cmd =
4504 		(struct lpfc_scsi_buf *) cmdiocbq->context1;
4505 	if (lpfc_cmd)
4506 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4507 	return;
4508 }
4509 
4510 /**
4511  * lpfc_info - Info entry point of scsi_host_template data structure
4512  * @host: The scsi host for which this call is being executed.
4513  *
4514  * This routine provides module information about hba.
4515  *
4516  * Reutrn code:
4517  *   Pointer to char - Success.
4518  **/
4519 const char *
4520 lpfc_info(struct Scsi_Host *host)
4521 {
4522 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4523 	struct lpfc_hba   *phba = vport->phba;
4524 	int len, link_speed = 0;
4525 	static char  lpfcinfobuf[384];
4526 
4527 	memset(lpfcinfobuf,0,384);
4528 	if (phba && phba->pcidev){
4529 		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4530 		len = strlen(lpfcinfobuf);
4531 		snprintf(lpfcinfobuf + len,
4532 			384-len,
4533 			" on PCI bus %02x device %02x irq %d",
4534 			phba->pcidev->bus->number,
4535 			phba->pcidev->devfn,
4536 			phba->pcidev->irq);
4537 		len = strlen(lpfcinfobuf);
4538 		if (phba->Port[0]) {
4539 			snprintf(lpfcinfobuf + len,
4540 				 384-len,
4541 				 " port %s",
4542 				 phba->Port);
4543 		}
4544 		len = strlen(lpfcinfobuf);
4545 		if (phba->sli_rev <= LPFC_SLI_REV3) {
4546 			link_speed = lpfc_sli_port_speed_get(phba);
4547 		} else {
4548 			if (phba->sli4_hba.link_state.logical_speed)
4549 				link_speed =
4550 				      phba->sli4_hba.link_state.logical_speed;
4551 			else
4552 				link_speed = phba->sli4_hba.link_state.speed;
4553 		}
4554 		if (link_speed != 0)
4555 			snprintf(lpfcinfobuf + len, 384-len,
4556 				 " Logical Link Speed: %d Mbps", link_speed);
4557 	}
4558 	return lpfcinfobuf;
4559 }
4560 
4561 /**
4562  * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
4563  * @phba: The Hba for which this call is being executed.
4564  *
4565  * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
4566  * The default value of cfg_poll_tmo is 10 milliseconds.
4567  **/
4568 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4569 {
4570 	unsigned long  poll_tmo_expires =
4571 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4572 
4573 	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
4574 		mod_timer(&phba->fcp_poll_timer,
4575 			  poll_tmo_expires);
4576 }
4577 
4578 /**
4579  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4580  * @phba: The Hba for which this call is being executed.
4581  *
4582  * This routine starts the fcp_poll_timer of @phba.
4583  **/
4584 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4585 {
4586 	lpfc_poll_rearm_timer(phba);
4587 }
4588 
4589 /**
4590  * lpfc_poll_timeout - Restart polling timer
4591  * @ptr: Map to lpfc_hba data structure pointer.
4592  *
4593  * This routine restarts fcp_poll timer, when FCP ring  polling is enable
4594  * and FCP Ring interrupt is disable.
4595  **/
4596 
4597 void lpfc_poll_timeout(unsigned long ptr)
4598 {
4599 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4600 
4601 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4602 		lpfc_sli_handle_fast_ring_event(phba,
4603 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4604 
4605 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4606 			lpfc_poll_rearm_timer(phba);
4607 	}
4608 }
4609 
4610 /**
4611  * lpfc_queuecommand - scsi_host_template queuecommand entry point
4612  * @cmnd: Pointer to scsi_cmnd data structure.
4613  * @done: Pointer to done routine.
4614  *
4615  * Driver registers this routine to scsi midlayer to submit a @cmd to process.
4616  * This routine prepares an IOCB from scsi command and provides to firmware.
4617  * The @done callback is invoked after driver finished processing the command.
4618  *
4619  * Return value :
4620  *   0 - Success
4621  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4622  **/
4623 static int
4624 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4625 {
4626 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4627 	struct lpfc_hba   *phba = vport->phba;
4628 	struct lpfc_rport_data *rdata;
4629 	struct lpfc_nodelist *ndlp;
4630 	struct lpfc_scsi_buf *lpfc_cmd;
4631 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4632 	int err;
4633 
4634 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4635 	err = fc_remote_port_chkready(rport);
4636 	if (err) {
4637 		cmnd->result = err;
4638 		goto out_fail_command;
4639 	}
4640 	ndlp = rdata->pnode;
4641 
4642 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4643 		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4644 
4645 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4646 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4647 				" op:%02x str=%s without registering for"
4648 				" BlockGuard - Rejecting command\n",
4649 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4650 				dif_op_str[scsi_get_prot_op(cmnd)]);
4651 		goto out_fail_command;
4652 	}
4653 
4654 	/*
4655 	 * Catch race where our node has transitioned, but the
4656 	 * transport is still transitioning.
4657 	 */
4658 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4659 		goto out_tgt_busy;
4660 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4661 		goto out_tgt_busy;
4662 
4663 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4664 	if (lpfc_cmd == NULL) {
4665 		lpfc_rampdown_queue_depth(phba);
4666 
4667 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4668 				 "0707 driver's buffer pool is empty, "
4669 				 "IO busied\n");
4670 		goto out_host_busy;
4671 	}
4672 
4673 	/*
4674 	 * Store the midlayer's command structure for the completion phase
4675 	 * and complete the command initialization.
4676 	 */
4677 	lpfc_cmd->pCmd  = cmnd;
4678 	lpfc_cmd->rdata = rdata;
4679 	lpfc_cmd->timeout = 0;
4680 	lpfc_cmd->start_time = jiffies;
4681 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4682 
4683 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4684 		if (vport->phba->cfg_enable_bg) {
4685 			lpfc_printf_vlog(vport,
4686 					 KERN_INFO, LOG_SCSI_CMD,
4687 					 "9033 BLKGRD: rcvd %s cmd:x%x "
4688 					 "sector x%llx cnt %u pt %x\n",
4689 					 dif_op_str[scsi_get_prot_op(cmnd)],
4690 					 cmnd->cmnd[0],
4691 					 (unsigned long long)scsi_get_lba(cmnd),
4692 					 blk_rq_sectors(cmnd->request),
4693 					 (cmnd->cmnd[1]>>5));
4694 		}
4695 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4696 	} else {
4697 		if (vport->phba->cfg_enable_bg) {
4698 			lpfc_printf_vlog(vport,
4699 					 KERN_INFO, LOG_SCSI_CMD,
4700 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4701 					 "x%x sector x%llx cnt %u pt %x\n",
4702 					 cmnd->cmnd[0],
4703 					 (unsigned long long)scsi_get_lba(cmnd),
4704 					 blk_rq_sectors(cmnd->request),
4705 					 (cmnd->cmnd[1]>>5));
4706 		}
4707 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4708 	}
4709 
4710 	if (err)
4711 		goto out_host_busy_free_buf;
4712 
4713 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4714 
4715 	atomic_inc(&ndlp->cmd_pending);
4716 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4717 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4718 	if (err) {
4719 		atomic_dec(&ndlp->cmd_pending);
4720 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4721 				 "3376 FCP could not issue IOCB err %x"
4722 				 "FCP cmd x%x <%d/%d> "
4723 				 "sid: x%x did: x%x oxid: x%x "
4724 				 "Data: x%x x%x x%x x%x\n",
4725 				 err, cmnd->cmnd[0],
4726 				 cmnd->device ? cmnd->device->id : 0xffff,
4727 				 cmnd->device ? cmnd->device->lun : 0xffff,
4728 				 vport->fc_myDID, ndlp->nlp_DID,
4729 				 phba->sli_rev == LPFC_SLI_REV4 ?
4730 				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4731 				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
4732 				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4733 				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4734 				 (uint32_t)
4735 				 (cmnd->request->timeout / 1000));
4736 
4737 
4738 		goto out_host_busy_free_buf;
4739 	}
4740 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4741 		lpfc_sli_handle_fast_ring_event(phba,
4742 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4743 
4744 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4745 			lpfc_poll_rearm_timer(phba);
4746 	}
4747 
4748 	return 0;
4749 
4750  out_host_busy_free_buf:
4751 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4752 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4753  out_host_busy:
4754 	return SCSI_MLQUEUE_HOST_BUSY;
4755 
4756  out_tgt_busy:
4757 	return SCSI_MLQUEUE_TARGET_BUSY;
4758 
4759  out_fail_command:
4760 	cmnd->scsi_done(cmnd);
4761 	return 0;
4762 }
4763 
4764 
4765 /**
4766  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4767  * @cmnd: Pointer to scsi_cmnd data structure.
4768  *
4769  * This routine aborts @cmnd pending in base driver.
4770  *
4771  * Return code :
4772  *   0x2003 - Error
4773  *   0x2002 - Success
4774  **/
4775 static int
4776 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4777 {
4778 	struct Scsi_Host  *shost = cmnd->device->host;
4779 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4780 	struct lpfc_hba   *phba = vport->phba;
4781 	struct lpfc_iocbq *iocb;
4782 	struct lpfc_iocbq *abtsiocb;
4783 	struct lpfc_scsi_buf *lpfc_cmd;
4784 	IOCB_t *cmd, *icmd;
4785 	int ret = SUCCESS, status = 0;
4786 	struct lpfc_sli_ring *pring_s4;
4787 	int ring_number, ret_val;
4788 	unsigned long flags, iflags;
4789 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4790 
4791 	status = fc_block_scsi_eh(cmnd);
4792 	if (status != 0 && status != SUCCESS)
4793 		return status;
4794 
4795 	spin_lock_irqsave(&phba->hbalock, flags);
4796 	/* driver queued commands are in process of being flushed */
4797 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4798 		spin_unlock_irqrestore(&phba->hbalock, flags);
4799 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4800 			"3168 SCSI Layer abort requested I/O has been "
4801 			"flushed by LLD.\n");
4802 		return FAILED;
4803 	}
4804 
4805 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4806 	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4807 		spin_unlock_irqrestore(&phba->hbalock, flags);
4808 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4809 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4810 			 "x%x ID %d LUN %d\n",
4811 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
4812 		return SUCCESS;
4813 	}
4814 
4815 	iocb = &lpfc_cmd->cur_iocbq;
4816 	/* the command is in process of being cancelled */
4817 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4818 		spin_unlock_irqrestore(&phba->hbalock, flags);
4819 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4820 			"3169 SCSI Layer abort requested I/O has been "
4821 			"cancelled by LLD.\n");
4822 		return FAILED;
4823 	}
4824 	/*
4825 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
4826 	 * points to a different SCSI command, then the driver has
4827 	 * already completed this command, but the midlayer did not
4828 	 * see the completion before the eh fired. Just return SUCCESS.
4829 	 */
4830 	if (lpfc_cmd->pCmd != cmnd) {
4831 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4832 			"3170 SCSI Layer abort requested I/O has been "
4833 			"completed by LLD.\n");
4834 		goto out_unlock;
4835 	}
4836 
4837 	BUG_ON(iocb->context1 != lpfc_cmd);
4838 
4839 	/* abort issued in recovery is still in progress */
4840 	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4841 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4842 			 "3389 SCSI Layer I/O Abort Request is pending\n");
4843 		spin_unlock_irqrestore(&phba->hbalock, flags);
4844 		goto wait_for_cmpl;
4845 	}
4846 
4847 	abtsiocb = __lpfc_sli_get_iocbq(phba);
4848 	if (abtsiocb == NULL) {
4849 		ret = FAILED;
4850 		goto out_unlock;
4851 	}
4852 
4853 	/* Indicate the IO is being aborted by the driver. */
4854 	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4855 
4856 	/*
4857 	 * The scsi command can not be in txq and it is in flight because the
4858 	 * pCmd is still pointig at the SCSI command we have to abort. There
4859 	 * is no need to search the txcmplq. Just send an abort to the FW.
4860 	 */
4861 
4862 	cmd = &iocb->iocb;
4863 	icmd = &abtsiocb->iocb;
4864 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4865 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
4866 	if (phba->sli_rev == LPFC_SLI_REV4)
4867 		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4868 	else
4869 		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4870 
4871 	icmd->ulpLe = 1;
4872 	icmd->ulpClass = cmd->ulpClass;
4873 
4874 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
4875 	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4876 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4877 
4878 	if (lpfc_is_link_up(phba))
4879 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
4880 	else
4881 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4882 
4883 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4884 	abtsiocb->vport = vport;
4885 	if (phba->sli_rev == LPFC_SLI_REV4) {
4886 		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
4887 		pring_s4 = &phba->sli.ring[ring_number];
4888 		/* Note: both hbalock and ring_lock must be set here */
4889 		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
4890 		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4891 						abtsiocb, 0);
4892 		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
4893 	} else {
4894 		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4895 						abtsiocb, 0);
4896 	}
4897 	/* no longer need the lock after this point */
4898 	spin_unlock_irqrestore(&phba->hbalock, flags);
4899 
4900 
4901 	if (ret_val == IOCB_ERROR) {
4902 		lpfc_sli_release_iocbq(phba, abtsiocb);
4903 		ret = FAILED;
4904 		goto out;
4905 	}
4906 
4907 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4908 		lpfc_sli_handle_fast_ring_event(phba,
4909 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4910 
4911 wait_for_cmpl:
4912 	lpfc_cmd->waitq = &waitq;
4913 	/* Wait for abort to complete */
4914 	wait_event_timeout(waitq,
4915 			  (lpfc_cmd->pCmd != cmnd),
4916 			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4917 
4918 	spin_lock_irqsave(shost->host_lock, flags);
4919 	lpfc_cmd->waitq = NULL;
4920 	spin_unlock_irqrestore(shost->host_lock, flags);
4921 
4922 	if (lpfc_cmd->pCmd == cmnd) {
4923 		ret = FAILED;
4924 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4925 				 "0748 abort handler timed out waiting "
4926 				 "for abortng I/O (xri:x%x) to complete: "
4927 				 "ret %#x, ID %d, LUN %d\n",
4928 				 iocb->sli4_xritag, ret,
4929 				 cmnd->device->id, cmnd->device->lun);
4930 	}
4931 	goto out;
4932 
4933 out_unlock:
4934 	spin_unlock_irqrestore(&phba->hbalock, flags);
4935 out:
4936 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4937 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4938 			 "LUN %d\n", ret, cmnd->device->id,
4939 			 cmnd->device->lun);
4940 	return ret;
4941 }
4942 
4943 static char *
4944 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4945 {
4946 	switch (task_mgmt_cmd) {
4947 	case FCP_ABORT_TASK_SET:
4948 		return "ABORT_TASK_SET";
4949 	case FCP_CLEAR_TASK_SET:
4950 		return "FCP_CLEAR_TASK_SET";
4951 	case FCP_BUS_RESET:
4952 		return "FCP_BUS_RESET";
4953 	case FCP_LUN_RESET:
4954 		return "FCP_LUN_RESET";
4955 	case FCP_TARGET_RESET:
4956 		return "FCP_TARGET_RESET";
4957 	case FCP_CLEAR_ACA:
4958 		return "FCP_CLEAR_ACA";
4959 	case FCP_TERMINATE_TASK:
4960 		return "FCP_TERMINATE_TASK";
4961 	default:
4962 		return "unknown";
4963 	}
4964 }
4965 
4966 
4967 /**
4968  * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4969  * @vport: The virtual port for which this call is being executed.
4970  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4971  *
4972  * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
4973  *
4974  * Return code :
4975  *   0x2003 - Error
4976  *   0x2002 - Success
4977  **/
4978 static int
4979 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
4980 {
4981 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4982 	uint32_t rsp_info;
4983 	uint32_t rsp_len;
4984 	uint8_t  rsp_info_code;
4985 	int ret = FAILED;
4986 
4987 
4988 	if (fcprsp == NULL)
4989 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4990 				 "0703 fcp_rsp is missing\n");
4991 	else {
4992 		rsp_info = fcprsp->rspStatus2;
4993 		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4994 		rsp_info_code = fcprsp->rspInfo3;
4995 
4996 
4997 		lpfc_printf_vlog(vport, KERN_INFO,
4998 				 LOG_FCP,
4999 				 "0706 fcp_rsp valid 0x%x,"
5000 				 " rsp len=%d code 0x%x\n",
5001 				 rsp_info,
5002 				 rsp_len, rsp_info_code);
5003 
5004 		if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
5005 			switch (rsp_info_code) {
5006 			case RSP_NO_FAILURE:
5007 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5008 						 "0715 Task Mgmt No Failure\n");
5009 				ret = SUCCESS;
5010 				break;
5011 			case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5012 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5013 						 "0716 Task Mgmt Target "
5014 						"reject\n");
5015 				break;
5016 			case RSP_TM_NOT_COMPLETED: /* TM failed */
5017 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5018 						 "0717 Task Mgmt Target "
5019 						"failed TM\n");
5020 				break;
5021 			case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5022 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5023 						 "0718 Task Mgmt to invalid "
5024 						"LUN\n");
5025 				break;
5026 			}
5027 		}
5028 	}
5029 	return ret;
5030 }
5031 
5032 
5033 /**
5034  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5035  * @vport: The virtual port for which this call is being executed.
5036  * @rdata: Pointer to remote port local data
5037  * @tgt_id: Target ID of remote device.
5038  * @lun_id: Lun number for the TMF
5039  * @task_mgmt_cmd: type of TMF to send
5040  *
5041  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5042  * a remote port.
5043  *
5044  * Return Code:
5045  *   0x2003 - Error
5046  *   0x2002 - Success.
5047  **/
5048 static int
5049 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
5050 		    unsigned  tgt_id, unsigned int lun_id,
5051 		    uint8_t task_mgmt_cmd)
5052 {
5053 	struct lpfc_hba   *phba = vport->phba;
5054 	struct lpfc_scsi_buf *lpfc_cmd;
5055 	struct lpfc_iocbq *iocbq;
5056 	struct lpfc_iocbq *iocbqrsp;
5057 	struct lpfc_nodelist *pnode = rdata->pnode;
5058 	int ret;
5059 	int status;
5060 
5061 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5062 		return FAILED;
5063 
5064 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
5065 	if (lpfc_cmd == NULL)
5066 		return FAILED;
5067 	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5068 	lpfc_cmd->rdata = rdata;
5069 
5070 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5071 					   task_mgmt_cmd);
5072 	if (!status) {
5073 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5074 		return FAILED;
5075 	}
5076 
5077 	iocbq = &lpfc_cmd->cur_iocbq;
5078 	iocbqrsp = lpfc_sli_get_iocbq(phba);
5079 	if (iocbqrsp == NULL) {
5080 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5081 		return FAILED;
5082 	}
5083 	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5084 
5085 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5086 			 "0702 Issue %s to TGT %d LUN %d "
5087 			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5088 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5089 			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5090 			 iocbq->iocb_flag);
5091 
5092 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5093 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
5094 	if ((status != IOCB_SUCCESS) ||
5095 	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5096 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5097 			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
5098 			 "iocb_flag x%x\n",
5099 			 lpfc_taskmgmt_name(task_mgmt_cmd),
5100 			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
5101 			 iocbqrsp->iocb.un.ulpWord[4],
5102 			 iocbq->iocb_flag);
5103 		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5104 		if (status == IOCB_SUCCESS) {
5105 			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5106 				/* Something in the FCP_RSP was invalid.
5107 				 * Check conditions */
5108 				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5109 			else
5110 				ret = FAILED;
5111 		} else if (status == IOCB_TIMEDOUT) {
5112 			ret = TIMEOUT_ERROR;
5113 		} else {
5114 			ret = FAILED;
5115 		}
5116 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
5117 	} else
5118 		ret = SUCCESS;
5119 
5120 	lpfc_sli_release_iocbq(phba, iocbqrsp);
5121 
5122 	if (ret != TIMEOUT_ERROR)
5123 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5124 
5125 	return ret;
5126 }
5127 
5128 /**
5129  * lpfc_chk_tgt_mapped -
5130  * @vport: The virtual port to check on
5131  * @cmnd: Pointer to scsi_cmnd data structure.
5132  *
5133  * This routine delays until the scsi target (aka rport) for the
5134  * command exists (is present and logged in) or we declare it non-existent.
5135  *
5136  * Return code :
5137  *  0x2003 - Error
5138  *  0x2002 - Success
5139  **/
5140 static int
5141 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5142 {
5143 	struct lpfc_rport_data *rdata;
5144 	struct lpfc_nodelist *pnode;
5145 	unsigned long later;
5146 
5147 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5148 	if (!rdata) {
5149 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5150 			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
5151 		return FAILED;
5152 	}
5153 	pnode = rdata->pnode;
5154 	/*
5155 	 * If target is not in a MAPPED state, delay until
5156 	 * target is rediscovered or devloss timeout expires.
5157 	 */
5158 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5159 	while (time_after(later, jiffies)) {
5160 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5161 			return FAILED;
5162 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5163 			return SUCCESS;
5164 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5165 		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5166 		if (!rdata)
5167 			return FAILED;
5168 		pnode = rdata->pnode;
5169 	}
5170 	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5171 	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5172 		return FAILED;
5173 	return SUCCESS;
5174 }
5175 
5176 /**
5177  * lpfc_reset_flush_io_context -
5178  * @vport: The virtual port (scsi_host) for the flush context
5179  * @tgt_id: If aborting by Target contect - specifies the target id
5180  * @lun_id: If aborting by Lun context - specifies the lun id
5181  * @context: specifies the context level to flush at.
5182  *
5183  * After a reset condition via TMF, we need to flush orphaned i/o
5184  * contexts from the adapter. This routine aborts any contexts
5185  * outstanding, then waits for their completions. The wait is
5186  * bounded by devloss_tmo though.
5187  *
5188  * Return code :
5189  *  0x2003 - Error
5190  *  0x2002 - Success
5191  **/
5192 static int
5193 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5194 			uint64_t lun_id, lpfc_ctx_cmd context)
5195 {
5196 	struct lpfc_hba   *phba = vport->phba;
5197 	unsigned long later;
5198 	int cnt;
5199 
5200 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5201 	if (cnt)
5202 		lpfc_sli_abort_taskmgmt(vport,
5203 					&phba->sli.ring[phba->sli.fcp_ring],
5204 					tgt_id, lun_id, context);
5205 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5206 	while (time_after(later, jiffies) && cnt) {
5207 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5208 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5209 	}
5210 	if (cnt) {
5211 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5212 			"0724 I/O flush failure for context %s : cnt x%x\n",
5213 			((context == LPFC_CTX_LUN) ? "LUN" :
5214 			 ((context == LPFC_CTX_TGT) ? "TGT" :
5215 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5216 			cnt);
5217 		return FAILED;
5218 	}
5219 	return SUCCESS;
5220 }
5221 
5222 /**
5223  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5224  * @cmnd: Pointer to scsi_cmnd data structure.
5225  *
5226  * This routine does a device reset by sending a LUN_RESET task management
5227  * command.
5228  *
5229  * Return code :
5230  *  0x2003 - Error
5231  *  0x2002 - Success
5232  **/
5233 static int
5234 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5235 {
5236 	struct Scsi_Host  *shost = cmnd->device->host;
5237 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5238 	struct lpfc_rport_data *rdata;
5239 	struct lpfc_nodelist *pnode;
5240 	unsigned tgt_id = cmnd->device->id;
5241 	unsigned int lun_id = cmnd->device->lun;
5242 	struct lpfc_scsi_event_header scsi_event;
5243 	int status;
5244 
5245 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5246 	if (!rdata) {
5247 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5248 			"0798 Device Reset rport failure: rdata x%p\n", rdata);
5249 		return FAILED;
5250 	}
5251 	pnode = rdata->pnode;
5252 	status = fc_block_scsi_eh(cmnd);
5253 	if (status != 0 && status != SUCCESS)
5254 		return status;
5255 
5256 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5257 	if (status == FAILED) {
5258 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5259 			"0721 Device Reset rport failure: rdata x%p\n", rdata);
5260 		return FAILED;
5261 	}
5262 
5263 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5264 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5265 	scsi_event.lun = lun_id;
5266 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5267 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5268 
5269 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5270 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5271 
5272 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5273 						FCP_LUN_RESET);
5274 
5275 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5276 			 "0713 SCSI layer issued Device Reset (%d, %d) "
5277 			 "return x%x\n", tgt_id, lun_id, status);
5278 
5279 	/*
5280 	 * We have to clean up i/o as : they may be orphaned by the TMF;
5281 	 * or if the TMF failed, they may be in an indeterminate state.
5282 	 * So, continue on.
5283 	 * We will report success if all the i/o aborts successfully.
5284 	 */
5285 	if (status == SUCCESS)
5286 		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5287 						LPFC_CTX_LUN);
5288 
5289 	return status;
5290 }
5291 
5292 /**
5293  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5294  * @cmnd: Pointer to scsi_cmnd data structure.
5295  *
5296  * This routine does a target reset by sending a TARGET_RESET task management
5297  * command.
5298  *
5299  * Return code :
5300  *  0x2003 - Error
5301  *  0x2002 - Success
5302  **/
5303 static int
5304 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5305 {
5306 	struct Scsi_Host  *shost = cmnd->device->host;
5307 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5308 	struct lpfc_rport_data *rdata;
5309 	struct lpfc_nodelist *pnode;
5310 	unsigned tgt_id = cmnd->device->id;
5311 	unsigned int lun_id = cmnd->device->lun;
5312 	struct lpfc_scsi_event_header scsi_event;
5313 	int status;
5314 
5315 	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5316 	if (!rdata) {
5317 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5318 			"0799 Target Reset rport failure: rdata x%p\n", rdata);
5319 		return FAILED;
5320 	}
5321 	pnode = rdata->pnode;
5322 	status = fc_block_scsi_eh(cmnd);
5323 	if (status != 0 && status != SUCCESS)
5324 		return status;
5325 
5326 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5327 	if (status == FAILED) {
5328 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5329 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
5330 		return FAILED;
5331 	}
5332 
5333 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5334 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5335 	scsi_event.lun = 0;
5336 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5337 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5338 
5339 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5340 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5341 
5342 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5343 					FCP_TARGET_RESET);
5344 
5345 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5346 			 "0723 SCSI layer issued Target Reset (%d, %d) "
5347 			 "return x%x\n", tgt_id, lun_id, status);
5348 
5349 	/*
5350 	 * We have to clean up i/o as : they may be orphaned by the TMF;
5351 	 * or if the TMF failed, they may be in an indeterminate state.
5352 	 * So, continue on.
5353 	 * We will report success if all the i/o aborts successfully.
5354 	 */
5355 	if (status == SUCCESS)
5356 		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5357 					  LPFC_CTX_TGT);
5358 	return status;
5359 }
5360 
5361 /**
5362  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5363  * @cmnd: Pointer to scsi_cmnd data structure.
5364  *
5365  * This routine does target reset to all targets on @cmnd->device->host.
5366  * This emulates Parallel SCSI Bus Reset Semantics.
5367  *
5368  * Return code :
5369  *  0x2003 - Error
5370  *  0x2002 - Success
5371  **/
5372 static int
5373 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5374 {
5375 	struct Scsi_Host  *shost = cmnd->device->host;
5376 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5377 	struct lpfc_nodelist *ndlp = NULL;
5378 	struct lpfc_scsi_event_header scsi_event;
5379 	int match;
5380 	int ret = SUCCESS, status, i;
5381 
5382 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5383 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5384 	scsi_event.lun = 0;
5385 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5386 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5387 
5388 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5389 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5390 
5391 	status = fc_block_scsi_eh(cmnd);
5392 	if (status != 0 && status != SUCCESS)
5393 		return status;
5394 
5395 	/*
5396 	 * Since the driver manages a single bus device, reset all
5397 	 * targets known to the driver.  Should any target reset
5398 	 * fail, this routine returns failure to the midlayer.
5399 	 */
5400 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
5401 		/* Search for mapped node by target ID */
5402 		match = 0;
5403 		spin_lock_irq(shost->host_lock);
5404 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5405 			if (!NLP_CHK_NODE_ACT(ndlp))
5406 				continue;
5407 			if (vport->phba->cfg_fcp2_no_tgt_reset &&
5408 			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5409 				continue;
5410 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5411 			    ndlp->nlp_sid == i &&
5412 			    ndlp->rport) {
5413 				match = 1;
5414 				break;
5415 			}
5416 		}
5417 		spin_unlock_irq(shost->host_lock);
5418 		if (!match)
5419 			continue;
5420 
5421 		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
5422 					i, 0, FCP_TARGET_RESET);
5423 
5424 		if (status != SUCCESS) {
5425 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5426 					 "0700 Bus Reset on target %d failed\n",
5427 					 i);
5428 			ret = FAILED;
5429 		}
5430 	}
5431 	/*
5432 	 * We have to clean up i/o as : they may be orphaned by the TMFs
5433 	 * above; or if any of the TMFs failed, they may be in an
5434 	 * indeterminate state.
5435 	 * We will report success if all the i/o aborts successfully.
5436 	 */
5437 
5438 	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5439 	if (status != SUCCESS)
5440 		ret = FAILED;
5441 
5442 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5443 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5444 	return ret;
5445 }
5446 
5447 /**
5448  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5449  * @cmnd: Pointer to scsi_cmnd data structure.
5450  *
5451  * This routine does host reset to the adaptor port. It brings the HBA
5452  * offline, performs a board restart, and then brings the board back online.
5453  * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
5454  * reject all outstanding SCSI commands to the host and error returned
5455  * back to SCSI mid-level. As this will be SCSI mid-level's last resort
5456  * of error handling, it will only return error if resetting of the adapter
5457  * is not successful; in all other cases, will return success.
5458  *
5459  * Return code :
5460  *  0x2003 - Error
5461  *  0x2002 - Success
5462  **/
5463 static int
5464 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5465 {
5466 	struct Scsi_Host *shost = cmnd->device->host;
5467 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5468 	struct lpfc_hba *phba = vport->phba;
5469 	int rc, ret = SUCCESS;
5470 
5471 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5472 			 "3172 SCSI layer issued Host Reset Data:\n");
5473 
5474 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5475 	lpfc_offline(phba);
5476 	rc = lpfc_sli_brdrestart(phba);
5477 	if (rc)
5478 		ret = FAILED;
5479 	rc = lpfc_online(phba);
5480 	if (rc)
5481 		ret = FAILED;
5482 	lpfc_unblock_mgmt_io(phba);
5483 
5484 	if (ret == FAILED) {
5485 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5486 				 "3323 Failed host reset, bring it offline\n");
5487 		lpfc_sli4_offline_eratt(phba);
5488 	}
5489 	return ret;
5490 }
5491 
5492 /**
5493  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5494  * @sdev: Pointer to scsi_device.
5495  *
5496  * This routine populates the cmds_per_lun count + 2 scsi_bufs into  this host's
5497  * globally available list of scsi buffers. This routine also makes sure scsi
5498  * buffer is not allocated more than HBA limit conveyed to midlayer. This list
5499  * of scsi buffer exists for the lifetime of the driver.
5500  *
5501  * Return codes:
5502  *   non-0 - Error
5503  *   0 - Success
5504  **/
5505 static int
5506 lpfc_slave_alloc(struct scsi_device *sdev)
5507 {
5508 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5509 	struct lpfc_hba   *phba = vport->phba;
5510 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5511 	uint32_t total = 0;
5512 	uint32_t num_to_alloc = 0;
5513 	int num_allocated = 0;
5514 	uint32_t sdev_cnt;
5515 	struct lpfc_device_data *device_data;
5516 	unsigned long flags;
5517 	struct lpfc_name target_wwpn;
5518 
5519 	if (!rport || fc_remote_port_chkready(rport))
5520 		return -ENXIO;
5521 
5522 	if (phba->cfg_fof) {
5523 
5524 		/*
5525 		 * Check to see if the device data structure for the lun
5526 		 * exists.  If not, create one.
5527 		 */
5528 
5529 		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5530 		spin_lock_irqsave(&phba->devicelock, flags);
5531 		device_data = __lpfc_get_device_data(phba,
5532 						     &phba->luns,
5533 						     &vport->fc_portname,
5534 						     &target_wwpn,
5535 						     sdev->lun);
5536 		if (!device_data) {
5537 			spin_unlock_irqrestore(&phba->devicelock, flags);
5538 			device_data = lpfc_create_device_data(phba,
5539 							&vport->fc_portname,
5540 							&target_wwpn,
5541 							sdev->lun, true);
5542 			if (!device_data)
5543 				return -ENOMEM;
5544 			spin_lock_irqsave(&phba->devicelock, flags);
5545 			list_add_tail(&device_data->listentry, &phba->luns);
5546 		}
5547 		device_data->rport_data = rport->dd_data;
5548 		device_data->available = true;
5549 		spin_unlock_irqrestore(&phba->devicelock, flags);
5550 		sdev->hostdata = device_data;
5551 	} else {
5552 		sdev->hostdata = rport->dd_data;
5553 	}
5554 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5555 
5556 	/*
5557 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5558 	 * available list of scsi buffers.  Don't allocate more than the
5559 	 * HBA limit conveyed to the midlayer via the host structure.  The
5560 	 * formula accounts for the lun_queue_depth + error handlers + 1
5561 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
5562 	 */
5563 	total = phba->total_scsi_bufs;
5564 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
5565 
5566 	/* If allocated buffers are enough do nothing */
5567 	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5568 		return 0;
5569 
5570 	/* Allow some exchanges to be available always to complete discovery */
5571 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5572 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5573 				 "0704 At limitation of %d preallocated "
5574 				 "command buffers\n", total);
5575 		return 0;
5576 	/* Allow some exchanges to be available always to complete discovery */
5577 	} else if (total + num_to_alloc >
5578 		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5579 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5580 				 "0705 Allocation request of %d "
5581 				 "command buffers will exceed max of %d.  "
5582 				 "Reducing allocation request to %d.\n",
5583 				 num_to_alloc, phba->cfg_hba_queue_depth,
5584 				 (phba->cfg_hba_queue_depth - total));
5585 		num_to_alloc = phba->cfg_hba_queue_depth - total;
5586 	}
5587 	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5588 	if (num_to_alloc != num_allocated) {
5589 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5590 					 "0708 Allocation request of %d "
5591 					 "command buffers did not succeed.  "
5592 					 "Allocated %d buffers.\n",
5593 					 num_to_alloc, num_allocated);
5594 	}
5595 	if (num_allocated > 0)
5596 		phba->total_scsi_bufs += num_allocated;
5597 	return 0;
5598 }
5599 
5600 /**
5601  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5602  * @sdev: Pointer to scsi_device.
5603  *
5604  * This routine configures following items
5605  *   - Tag command queuing support for @sdev if supported.
5606  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5607  *
5608  * Return codes:
5609  *   0 - Success
5610  **/
5611 static int
5612 lpfc_slave_configure(struct scsi_device *sdev)
5613 {
5614 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5615 	struct lpfc_hba   *phba = vport->phba;
5616 
5617 	if (sdev->tagged_supported)
5618 		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5619 	else
5620 		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5621 
5622 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5623 		lpfc_sli_handle_fast_ring_event(phba,
5624 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5625 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5626 			lpfc_poll_rearm_timer(phba);
5627 	}
5628 
5629 	return 0;
5630 }
5631 
5632 /**
5633  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5634  * @sdev: Pointer to scsi_device.
5635  *
5636  * This routine sets @sdev hostatdata filed to null.
5637  **/
5638 static void
5639 lpfc_slave_destroy(struct scsi_device *sdev)
5640 {
5641 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5642 	struct lpfc_hba   *phba = vport->phba;
5643 	unsigned long flags;
5644 	struct lpfc_device_data *device_data = sdev->hostdata;
5645 
5646 	atomic_dec(&phba->sdev_cnt);
5647 	if ((phba->cfg_fof) && (device_data)) {
5648 		spin_lock_irqsave(&phba->devicelock, flags);
5649 		device_data->available = false;
5650 		if (!device_data->oas_enabled)
5651 			lpfc_delete_device_data(phba, device_data);
5652 		spin_unlock_irqrestore(&phba->devicelock, flags);
5653 	}
5654 	sdev->hostdata = NULL;
5655 	return;
5656 }
5657 
5658 /**
5659  * lpfc_create_device_data - creates and initializes device data structure for OAS
5660  * @pha: Pointer to host bus adapter structure.
5661  * @vport_wwpn: Pointer to vport's wwpn information
5662  * @target_wwpn: Pointer to target's wwpn information
5663  * @lun: Lun on target
5664  * @atomic_create: Flag to indicate if memory should be allocated using the
5665  *		  GFP_ATOMIC flag or not.
5666  *
5667  * This routine creates a device data structure which will contain identifying
5668  * information for the device (host wwpn, target wwpn, lun), state of OAS,
5669  * whether or not the corresponding lun is available by the system,
5670  * and pointer to the rport data.
5671  *
5672  * Return codes:
5673  *   NULL - Error
5674  *   Pointer to lpfc_device_data - Success
5675  **/
5676 struct lpfc_device_data*
5677 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5678 			struct lpfc_name *target_wwpn, uint64_t lun,
5679 			bool atomic_create)
5680 {
5681 
5682 	struct lpfc_device_data *lun_info;
5683 	int memory_flags;
5684 
5685 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
5686 	    !(phba->cfg_fof))
5687 		return NULL;
5688 
5689 	/* Attempt to create the device data to contain lun info */
5690 
5691 	if (atomic_create)
5692 		memory_flags = GFP_ATOMIC;
5693 	else
5694 		memory_flags = GFP_KERNEL;
5695 	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5696 	if (!lun_info)
5697 		return NULL;
5698 	INIT_LIST_HEAD(&lun_info->listentry);
5699 	lun_info->rport_data  = NULL;
5700 	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5701 	       sizeof(struct lpfc_name));
5702 	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5703 	       sizeof(struct lpfc_name));
5704 	lun_info->device_id.lun = lun;
5705 	lun_info->oas_enabled = false;
5706 	lun_info->available = false;
5707 	return lun_info;
5708 }
5709 
5710 /**
5711  * lpfc_delete_device_data - frees a device data structure for OAS
5712  * @pha: Pointer to host bus adapter structure.
5713  * @lun_info: Pointer to device data structure to free.
5714  *
5715  * This routine frees the previously allocated device data structure passed.
5716  *
5717  **/
5718 void
5719 lpfc_delete_device_data(struct lpfc_hba *phba,
5720 			struct lpfc_device_data *lun_info)
5721 {
5722 
5723 	if (unlikely(!phba) || !lun_info  ||
5724 	    !(phba->cfg_fof))
5725 		return;
5726 
5727 	if (!list_empty(&lun_info->listentry))
5728 		list_del(&lun_info->listentry);
5729 	mempool_free(lun_info, phba->device_data_mem_pool);
5730 	return;
5731 }
5732 
5733 /**
5734  * __lpfc_get_device_data - returns the device data for the specified lun
5735  * @pha: Pointer to host bus adapter structure.
5736  * @list: Point to list to search.
5737  * @vport_wwpn: Pointer to vport's wwpn information
5738  * @target_wwpn: Pointer to target's wwpn information
5739  * @lun: Lun on target
5740  *
5741  * This routine searches the list passed for the specified lun's device data.
5742  * This function does not hold locks, it is the responsibility of the caller
5743  * to ensure the proper lock is held before calling the function.
5744  *
5745  * Return codes:
5746  *   NULL - Error
5747  *   Pointer to lpfc_device_data - Success
5748  **/
5749 struct lpfc_device_data*
5750 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5751 		       struct lpfc_name *vport_wwpn,
5752 		       struct lpfc_name *target_wwpn, uint64_t lun)
5753 {
5754 
5755 	struct lpfc_device_data *lun_info;
5756 
5757 	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5758 	    !phba->cfg_fof)
5759 		return NULL;
5760 
5761 	/* Check to see if the lun is already enabled for OAS. */
5762 
5763 	list_for_each_entry(lun_info, list, listentry) {
5764 		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5765 			    sizeof(struct lpfc_name)) == 0) &&
5766 		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5767 			    sizeof(struct lpfc_name)) == 0) &&
5768 		    (lun_info->device_id.lun == lun))
5769 			return lun_info;
5770 	}
5771 
5772 	return NULL;
5773 }
5774 
5775 /**
5776  * lpfc_find_next_oas_lun - searches for the next oas lun
5777  * @pha: Pointer to host bus adapter structure.
5778  * @vport_wwpn: Pointer to vport's wwpn information
5779  * @target_wwpn: Pointer to target's wwpn information
5780  * @starting_lun: Pointer to the lun to start searching for
5781  * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5782  * @found_target_wwpn: Pointer to the found lun's target wwpn information
5783  * @found_lun: Pointer to the found lun.
5784  * @found_lun_status: Pointer to status of the found lun.
5785  *
5786  * This routine searches the luns list for the specified lun
5787  * or the first lun for the vport/target.  If the vport wwpn contains
5788  * a zero value then a specific vport is not specified. In this case
5789  * any vport which contains the lun will be considered a match.  If the
5790  * target wwpn contains a zero value then a specific target is not specified.
5791  * In this case any target which contains the lun will be considered a
5792  * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status
5793  * are returned.  The function will also return the next lun if available.
5794  * If the next lun is not found, starting_lun parameter will be set to
5795  * NO_MORE_OAS_LUN.
5796  *
5797  * Return codes:
5798  *   non-0 - Error
5799  *   0 - Success
5800  **/
5801 bool
5802 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5803 		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5804 		       struct lpfc_name *found_vport_wwpn,
5805 		       struct lpfc_name *found_target_wwpn,
5806 		       uint64_t *found_lun,
5807 		       uint32_t *found_lun_status)
5808 {
5809 
5810 	unsigned long flags;
5811 	struct lpfc_device_data *lun_info;
5812 	struct lpfc_device_id *device_id;
5813 	uint64_t lun;
5814 	bool found = false;
5815 
5816 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5817 	    !starting_lun || !found_vport_wwpn ||
5818 	    !found_target_wwpn || !found_lun || !found_lun_status ||
5819 	    (*starting_lun == NO_MORE_OAS_LUN) ||
5820 	    !phba->cfg_fof)
5821 		return false;
5822 
5823 	lun = *starting_lun;
5824 	*found_lun = NO_MORE_OAS_LUN;
5825 	*starting_lun = NO_MORE_OAS_LUN;
5826 
5827 	/* Search for lun or the lun closet in value */
5828 
5829 	spin_lock_irqsave(&phba->devicelock, flags);
5830 	list_for_each_entry(lun_info, &phba->luns, listentry) {
5831 		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5832 		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5833 			    sizeof(struct lpfc_name)) == 0)) &&
5834 		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5835 		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5836 			    sizeof(struct lpfc_name)) == 0)) &&
5837 		    (lun_info->oas_enabled)) {
5838 			device_id = &lun_info->device_id;
5839 			if ((!found) &&
5840 			    ((lun == FIND_FIRST_OAS_LUN) ||
5841 			     (device_id->lun == lun))) {
5842 				*found_lun = device_id->lun;
5843 				memcpy(found_vport_wwpn,
5844 				       &device_id->vport_wwpn,
5845 				       sizeof(struct lpfc_name));
5846 				memcpy(found_target_wwpn,
5847 				       &device_id->target_wwpn,
5848 				       sizeof(struct lpfc_name));
5849 				if (lun_info->available)
5850 					*found_lun_status =
5851 						OAS_LUN_STATUS_EXISTS;
5852 				else
5853 					*found_lun_status = 0;
5854 				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5855 					memset(vport_wwpn, 0x0,
5856 					       sizeof(struct lpfc_name));
5857 				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5858 					memset(target_wwpn, 0x0,
5859 					       sizeof(struct lpfc_name));
5860 				found = true;
5861 			} else if (found) {
5862 				*starting_lun = device_id->lun;
5863 				memcpy(vport_wwpn, &device_id->vport_wwpn,
5864 				       sizeof(struct lpfc_name));
5865 				memcpy(target_wwpn, &device_id->target_wwpn,
5866 				       sizeof(struct lpfc_name));
5867 				break;
5868 			}
5869 		}
5870 	}
5871 	spin_unlock_irqrestore(&phba->devicelock, flags);
5872 	return found;
5873 }
5874 
5875 /**
5876  * lpfc_enable_oas_lun - enables a lun for OAS operations
5877  * @pha: Pointer to host bus adapter structure.
5878  * @vport_wwpn: Pointer to vport's wwpn information
5879  * @target_wwpn: Pointer to target's wwpn information
5880  * @lun: Lun
5881  *
5882  * This routine enables a lun for oas operations.  The routines does so by
5883  * doing the following :
5884  *
5885  *   1) Checks to see if the device data for the lun has been created.
5886  *   2) If found, sets the OAS enabled flag if not set and returns.
5887  *   3) Otherwise, creates a device data structure.
5888  *   4) If successfully created, indicates the device data is for an OAS lun,
5889  *   indicates the lun is not available and add to the list of luns.
5890  *
5891  * Return codes:
5892  *   false - Error
5893  *   true - Success
5894  **/
5895 bool
5896 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5897 		    struct lpfc_name *target_wwpn, uint64_t lun)
5898 {
5899 
5900 	struct lpfc_device_data *lun_info;
5901 	unsigned long flags;
5902 
5903 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5904 	    !phba->cfg_fof)
5905 		return false;
5906 
5907 	spin_lock_irqsave(&phba->devicelock, flags);
5908 
5909 	/* Check to see if the device data for the lun has been created */
5910 	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5911 					  target_wwpn, lun);
5912 	if (lun_info) {
5913 		if (!lun_info->oas_enabled)
5914 			lun_info->oas_enabled = true;
5915 		spin_unlock_irqrestore(&phba->devicelock, flags);
5916 		return true;
5917 	}
5918 
5919 	/* Create an lun info structure and add to list of luns */
5920 	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5921 					   false);
5922 	if (lun_info) {
5923 		lun_info->oas_enabled = true;
5924 		lun_info->available = false;
5925 		list_add_tail(&lun_info->listentry, &phba->luns);
5926 		spin_unlock_irqrestore(&phba->devicelock, flags);
5927 		return true;
5928 	}
5929 	spin_unlock_irqrestore(&phba->devicelock, flags);
5930 	return false;
5931 }
5932 
5933 /**
5934  * lpfc_disable_oas_lun - disables a lun for OAS operations
5935  * @pha: Pointer to host bus adapter structure.
5936  * @vport_wwpn: Pointer to vport's wwpn information
5937  * @target_wwpn: Pointer to target's wwpn information
5938  * @lun: Lun
5939  *
5940  * This routine disables a lun for oas operations.  The routines does so by
5941  * doing the following :
5942  *
5943  *   1) Checks to see if the device data for the lun is created.
5944  *   2) If present, clears the flag indicating this lun is for OAS.
5945  *   3) If the lun is not available by the system, the device data is
5946  *   freed.
5947  *
5948  * Return codes:
5949  *   false - Error
5950  *   true - Success
5951  **/
5952 bool
5953 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5954 		     struct lpfc_name *target_wwpn, uint64_t lun)
5955 {
5956 
5957 	struct lpfc_device_data *lun_info;
5958 	unsigned long flags;
5959 
5960 	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5961 	    !phba->cfg_fof)
5962 		return false;
5963 
5964 	spin_lock_irqsave(&phba->devicelock, flags);
5965 
5966 	/* Check to see if the lun is available. */
5967 	lun_info = __lpfc_get_device_data(phba,
5968 					  &phba->luns, vport_wwpn,
5969 					  target_wwpn, lun);
5970 	if (lun_info) {
5971 		lun_info->oas_enabled = false;
5972 		if (!lun_info->available)
5973 			lpfc_delete_device_data(phba, lun_info);
5974 		spin_unlock_irqrestore(&phba->devicelock, flags);
5975 		return true;
5976 	}
5977 
5978 	spin_unlock_irqrestore(&phba->devicelock, flags);
5979 	return false;
5980 }
5981 
5982 struct scsi_host_template lpfc_template = {
5983 	.module			= THIS_MODULE,
5984 	.name			= LPFC_DRIVER_NAME,
5985 	.info			= lpfc_info,
5986 	.queuecommand		= lpfc_queuecommand,
5987 	.eh_abort_handler	= lpfc_abort_handler,
5988 	.eh_device_reset_handler = lpfc_device_reset_handler,
5989 	.eh_target_reset_handler = lpfc_target_reset_handler,
5990 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5991 	.eh_host_reset_handler  = lpfc_host_reset_handler,
5992 	.slave_alloc		= lpfc_slave_alloc,
5993 	.slave_configure	= lpfc_slave_configure,
5994 	.slave_destroy		= lpfc_slave_destroy,
5995 	.scan_finished		= lpfc_scan_finished,
5996 	.this_id		= -1,
5997 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5998 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5999 	.use_clustering		= ENABLE_CLUSTERING,
6000 	.shost_attrs		= lpfc_hba_attrs,
6001 	.max_sectors		= 0xFFFF,
6002 	.vendor_id		= LPFC_NL_VENDOR_ID,
6003 	.change_queue_depth	= lpfc_change_queue_depth,
6004 	.change_queue_type	= lpfc_change_queue_type,
6005 };
6006 
6007 struct scsi_host_template lpfc_vport_template = {
6008 	.module			= THIS_MODULE,
6009 	.name			= LPFC_DRIVER_NAME,
6010 	.info			= lpfc_info,
6011 	.queuecommand		= lpfc_queuecommand,
6012 	.eh_abort_handler	= lpfc_abort_handler,
6013 	.eh_device_reset_handler = lpfc_device_reset_handler,
6014 	.eh_target_reset_handler = lpfc_target_reset_handler,
6015 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
6016 	.slave_alloc		= lpfc_slave_alloc,
6017 	.slave_configure	= lpfc_slave_configure,
6018 	.slave_destroy		= lpfc_slave_destroy,
6019 	.scan_finished		= lpfc_scan_finished,
6020 	.this_id		= -1,
6021 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
6022 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
6023 	.use_clustering		= ENABLE_CLUSTERING,
6024 	.shost_attrs		= lpfc_vport_attrs,
6025 	.max_sectors		= 0xFFFF,
6026 	.change_queue_depth	= lpfc_change_queue_depth,
6027 	.change_queue_type	= lpfc_change_queue_type,
6028 };
6029