xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_scsi.c (revision ee89bd6b)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <asm/unaligned.h>
27 #include <linux/crc-t10dif.h>
28 #include <net/checksum.h>
29 
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_tcq.h>
35 #include <scsi/scsi_transport_fc.h>
36 
37 #include "lpfc_version.h"
38 #include "lpfc_hw4.h"
39 #include "lpfc_hw.h"
40 #include "lpfc_sli.h"
41 #include "lpfc_sli4.h"
42 #include "lpfc_nl.h"
43 #include "lpfc_disc.h"
44 #include "lpfc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc_logmsg.h"
47 #include "lpfc_crtn.h"
48 #include "lpfc_vport.h"
49 
50 #define LPFC_RESET_WAIT  2
51 #define LPFC_ABORT_WAIT  2
52 
53 int _dump_buf_done = 1;
54 
55 static char *dif_op_str[] = {
56 	"PROT_NORMAL",
57 	"PROT_READ_INSERT",
58 	"PROT_WRITE_STRIP",
59 	"PROT_READ_STRIP",
60 	"PROT_WRITE_INSERT",
61 	"PROT_READ_PASS",
62 	"PROT_WRITE_PASS",
63 };
64 
65 struct scsi_dif_tuple {
66 	__be16 guard_tag;       /* Checksum */
67 	__be16 app_tag;         /* Opaque storage */
68 	__be32 ref_tag;         /* Target LBA or indirect LBA */
69 };
70 
71 #if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
72 #define scsi_prot_flagged(sc, flg)	sc
73 #endif
74 
75 static void
76 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
77 static void
78 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
79 
80 static void
81 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
82 {
83 	void *src, *dst;
84 	struct scatterlist *sgde = scsi_sglist(cmnd);
85 
86 	if (!_dump_buf_data) {
87 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
88 			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
89 				__func__);
90 		return;
91 	}
92 
93 
94 	if (!sgde) {
95 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
96 			"9051 BLKGRD: ERROR: data scatterlist is null\n");
97 		return;
98 	}
99 
100 	dst = (void *) _dump_buf_data;
101 	while (sgde) {
102 		src = sg_virt(sgde);
103 		memcpy(dst, src, sgde->length);
104 		dst += sgde->length;
105 		sgde = sg_next(sgde);
106 	}
107 }
108 
109 static void
110 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
111 {
112 	void *src, *dst;
113 	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
114 
115 	if (!_dump_buf_dif) {
116 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
117 			"9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
118 				__func__);
119 		return;
120 	}
121 
122 	if (!sgde) {
123 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
124 			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
125 		return;
126 	}
127 
128 	dst = _dump_buf_dif;
129 	while (sgde) {
130 		src = sg_virt(sgde);
131 		memcpy(dst, src, sgde->length);
132 		dst += sgde->length;
133 		sgde = sg_next(sgde);
134 	}
135 }
136 
137 /**
138  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
139  * @phba: Pointer to HBA object.
140  * @lpfc_cmd: lpfc scsi command object pointer.
141  *
142  * This function is called from the lpfc_prep_task_mgmt_cmd function to
143  * set the last bit in the response sge entry.
144  **/
145 static void
146 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
147 				struct lpfc_scsi_buf *lpfc_cmd)
148 {
149 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
150 	if (sgl) {
151 		sgl += 1;
152 		sgl->word2 = le32_to_cpu(sgl->word2);
153 		bf_set(lpfc_sli4_sge_last, sgl, 1);
154 		sgl->word2 = cpu_to_le32(sgl->word2);
155 	}
156 }
157 
158 /**
159  * lpfc_update_stats - Update statistical data for the command completion
160  * @phba: Pointer to HBA object.
161  * @lpfc_cmd: lpfc scsi command object pointer.
162  *
163  * This function is called when there is a command completion and this
164  * function updates the statistical data for the command completion.
165  **/
166 static void
167 lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
168 {
169 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
170 	struct lpfc_nodelist *pnode = rdata->pnode;
171 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
172 	unsigned long flags;
173 	struct Scsi_Host  *shost = cmd->device->host;
174 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
175 	unsigned long latency;
176 	int i;
177 
178 	if (cmd->result)
179 		return;
180 
181 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
182 
183 	spin_lock_irqsave(shost->host_lock, flags);
184 	if (!vport->stat_data_enabled ||
185 		vport->stat_data_blocked ||
186 		!pnode ||
187 		!pnode->lat_data ||
188 		(phba->bucket_type == LPFC_NO_BUCKET)) {
189 		spin_unlock_irqrestore(shost->host_lock, flags);
190 		return;
191 	}
192 
193 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
194 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
195 			phba->bucket_step;
196 		/* check array subscript bounds */
197 		if (i < 0)
198 			i = 0;
199 		else if (i >= LPFC_MAX_BUCKET_COUNT)
200 			i = LPFC_MAX_BUCKET_COUNT - 1;
201 	} else {
202 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
203 			if (latency <= (phba->bucket_base +
204 				((1<<i)*phba->bucket_step)))
205 				break;
206 	}
207 
208 	pnode->lat_data[i].cmd_count++;
209 	spin_unlock_irqrestore(shost->host_lock, flags);
210 }
211 
212 /**
213  * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
214  * @phba: Pointer to HBA context object.
215  * @vport: Pointer to vport object.
216  * @ndlp: Pointer to FC node associated with the target.
217  * @lun: Lun number of the scsi device.
218  * @old_val: Old value of the queue depth.
219  * @new_val: New value of the queue depth.
220  *
221  * This function sends an event to the mgmt application indicating
222  * there is a change in the scsi device queue depth.
223  **/
224 static void
225 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
226 		struct lpfc_vport  *vport,
227 		struct lpfc_nodelist *ndlp,
228 		uint32_t lun,
229 		uint32_t old_val,
230 		uint32_t new_val)
231 {
232 	struct lpfc_fast_path_event *fast_path_evt;
233 	unsigned long flags;
234 
235 	fast_path_evt = lpfc_alloc_fast_evt(phba);
236 	if (!fast_path_evt)
237 		return;
238 
239 	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
240 		FC_REG_SCSI_EVENT;
241 	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
242 		LPFC_EVENT_VARQUEDEPTH;
243 
244 	/* Report all luns with change in queue depth */
245 	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
246 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
247 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
248 			&ndlp->nlp_portname, sizeof(struct lpfc_name));
249 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
250 			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
251 	}
252 
253 	fast_path_evt->un.queue_depth_evt.oldval = old_val;
254 	fast_path_evt->un.queue_depth_evt.newval = new_val;
255 	fast_path_evt->vport = vport;
256 
257 	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
258 	spin_lock_irqsave(&phba->hbalock, flags);
259 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
260 	spin_unlock_irqrestore(&phba->hbalock, flags);
261 	lpfc_worker_wake_up(phba);
262 
263 	return;
264 }
265 
266 /**
267  * lpfc_change_queue_depth - Alter scsi device queue depth
268  * @sdev: Pointer the scsi device on which to change the queue depth.
269  * @qdepth: New queue depth to set the sdev to.
270  * @reason: The reason for the queue depth change.
271  *
272  * This function is called by the midlayer and the LLD to alter the queue
273  * depth for a scsi device. This function sets the queue depth to the new
274  * value and sends an event out to log the queue depth change.
275  **/
276 int
277 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
278 {
279 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
280 	struct lpfc_hba   *phba = vport->phba;
281 	struct lpfc_rport_data *rdata;
282 	unsigned long new_queue_depth, old_queue_depth;
283 
284 	old_queue_depth = sdev->queue_depth;
285 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
286 	new_queue_depth = sdev->queue_depth;
287 	rdata = sdev->hostdata;
288 	if (rdata)
289 		lpfc_send_sdev_queuedepth_change_event(phba, vport,
290 						       rdata->pnode, sdev->lun,
291 						       old_queue_depth,
292 						       new_queue_depth);
293 	return sdev->queue_depth;
294 }
295 
296 /**
297  * lpfc_change_queue_type() - Change a device's scsi tag queuing type
298  * @sdev: Pointer the scsi device whose queue depth is to change
299  * @tag_type: Identifier for queue tag type
300  */
301 static int
302 lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
303 {
304 	if (sdev->tagged_supported) {
305 		scsi_set_tag_type(sdev, tag_type);
306 		if (tag_type)
307 			scsi_activate_tcq(sdev, sdev->queue_depth);
308 		else
309 			scsi_deactivate_tcq(sdev, sdev->queue_depth);
310 	} else
311 		tag_type = 0;
312 
313 	return tag_type;
314 }
315 
316 /**
317  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
318  * @phba: The Hba for which this call is being executed.
319  *
320  * This routine is called when there is resource error in driver or firmware.
321  * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
322  * posts at most 1 event each second. This routine wakes up worker thread of
323  * @phba to process WORKER_RAM_DOWN_EVENT event.
324  *
325  * This routine should be called with no lock held.
326  **/
327 void
328 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
329 {
330 	unsigned long flags;
331 	uint32_t evt_posted;
332 
333 	spin_lock_irqsave(&phba->hbalock, flags);
334 	atomic_inc(&phba->num_rsrc_err);
335 	phba->last_rsrc_error_time = jiffies;
336 
337 	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
338 		spin_unlock_irqrestore(&phba->hbalock, flags);
339 		return;
340 	}
341 
342 	phba->last_ramp_down_time = jiffies;
343 
344 	spin_unlock_irqrestore(&phba->hbalock, flags);
345 
346 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
347 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
348 	if (!evt_posted)
349 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
350 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
351 
352 	if (!evt_posted)
353 		lpfc_worker_wake_up(phba);
354 	return;
355 }
356 
357 /**
358  * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
359  * @phba: The Hba for which this call is being executed.
360  *
361  * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine
362  * post at most 1 event every 5 minute after last_ramp_up_time or
363  * last_rsrc_error_time.  This routine wakes up worker thread of @phba
364  * to process WORKER_RAM_DOWN_EVENT event.
365  *
366  * This routine should be called with no lock held.
367  **/
368 static inline void
369 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
370 			uint32_t queue_depth)
371 {
372 	unsigned long flags;
373 	struct lpfc_hba *phba = vport->phba;
374 	uint32_t evt_posted;
375 	atomic_inc(&phba->num_cmd_success);
376 
377 	if (vport->cfg_lun_queue_depth <= queue_depth)
378 		return;
379 	spin_lock_irqsave(&phba->hbalock, flags);
380 	if (time_before(jiffies,
381 			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
382 	    time_before(jiffies,
383 			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
384 		spin_unlock_irqrestore(&phba->hbalock, flags);
385 		return;
386 	}
387 	phba->last_ramp_up_time = jiffies;
388 	spin_unlock_irqrestore(&phba->hbalock, flags);
389 
390 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
391 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
392 	if (!evt_posted)
393 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
394 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
395 
396 	if (!evt_posted)
397 		lpfc_worker_wake_up(phba);
398 	return;
399 }
400 
401 /**
402  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
403  * @phba: The Hba for which this call is being executed.
404  *
405  * This routine is called to  process WORKER_RAMP_DOWN_QUEUE event for worker
406  * thread.This routine reduces queue depth for all scsi device on each vport
407  * associated with @phba.
408  **/
409 void
410 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
411 {
412 	struct lpfc_vport **vports;
413 	struct Scsi_Host  *shost;
414 	struct scsi_device *sdev;
415 	unsigned long new_queue_depth;
416 	unsigned long num_rsrc_err, num_cmd_success;
417 	int i;
418 
419 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
420 	num_cmd_success = atomic_read(&phba->num_cmd_success);
421 
422 	/*
423 	 * The error and success command counters are global per
424 	 * driver instance.  If another handler has already
425 	 * operated on this error event, just exit.
426 	 */
427 	if (num_rsrc_err == 0)
428 		return;
429 
430 	vports = lpfc_create_vport_work_array(phba);
431 	if (vports != NULL)
432 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
433 			shost = lpfc_shost_from_vport(vports[i]);
434 			shost_for_each_device(sdev, shost) {
435 				new_queue_depth =
436 					sdev->queue_depth * num_rsrc_err /
437 					(num_rsrc_err + num_cmd_success);
438 				if (!new_queue_depth)
439 					new_queue_depth = sdev->queue_depth - 1;
440 				else
441 					new_queue_depth = sdev->queue_depth -
442 								new_queue_depth;
443 				lpfc_change_queue_depth(sdev, new_queue_depth,
444 							SCSI_QDEPTH_DEFAULT);
445 			}
446 		}
447 	lpfc_destroy_vport_work_array(phba, vports);
448 	atomic_set(&phba->num_rsrc_err, 0);
449 	atomic_set(&phba->num_cmd_success, 0);
450 }
451 
452 /**
453  * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
454  * @phba: The Hba for which this call is being executed.
455  *
456  * This routine is called to  process WORKER_RAMP_UP_QUEUE event for worker
457  * thread.This routine increases queue depth for all scsi device on each vport
458  * associated with @phba by 1. This routine also sets @phba num_rsrc_err and
459  * num_cmd_success to zero.
460  **/
461 void
462 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
463 {
464 	struct lpfc_vport **vports;
465 	struct Scsi_Host  *shost;
466 	struct scsi_device *sdev;
467 	int i;
468 
469 	vports = lpfc_create_vport_work_array(phba);
470 	if (vports != NULL)
471 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
472 			shost = lpfc_shost_from_vport(vports[i]);
473 			shost_for_each_device(sdev, shost) {
474 				if (vports[i]->cfg_lun_queue_depth <=
475 				    sdev->queue_depth)
476 					continue;
477 				lpfc_change_queue_depth(sdev,
478 							sdev->queue_depth+1,
479 							SCSI_QDEPTH_RAMP_UP);
480 			}
481 		}
482 	lpfc_destroy_vport_work_array(phba, vports);
483 	atomic_set(&phba->num_rsrc_err, 0);
484 	atomic_set(&phba->num_cmd_success, 0);
485 }
486 
487 /**
488  * lpfc_scsi_dev_block - set all scsi hosts to block state
489  * @phba: Pointer to HBA context object.
490  *
491  * This function walks vport list and set each SCSI host to block state
492  * by invoking fc_remote_port_delete() routine. This function is invoked
493  * with EEH when device's PCI slot has been permanently disabled.
494  **/
495 void
496 lpfc_scsi_dev_block(struct lpfc_hba *phba)
497 {
498 	struct lpfc_vport **vports;
499 	struct Scsi_Host  *shost;
500 	struct scsi_device *sdev;
501 	struct fc_rport *rport;
502 	int i;
503 
504 	vports = lpfc_create_vport_work_array(phba);
505 	if (vports != NULL)
506 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
507 			shost = lpfc_shost_from_vport(vports[i]);
508 			shost_for_each_device(sdev, shost) {
509 				rport = starget_to_rport(scsi_target(sdev));
510 				fc_remote_port_delete(rport);
511 			}
512 		}
513 	lpfc_destroy_vport_work_array(phba, vports);
514 }
515 
516 /**
517  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
518  * @vport: The virtual port for which this call being executed.
519  * @num_to_allocate: The requested number of buffers to allocate.
520  *
521  * This routine allocates a scsi buffer for device with SLI-3 interface spec,
522  * the scsi buffer contains all the necessary information needed to initiate
523  * a SCSI I/O. The non-DMAable buffer region contains information to build
524  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
525  * and the initial BPL. In addition to allocating memory, the FCP CMND and
526  * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
527  *
528  * Return codes:
529  *   int - number of scsi buffers that were allocated.
530  *   0 = failure, less than num_to_alloc is a partial failure.
531  **/
532 static int
533 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
534 {
535 	struct lpfc_hba *phba = vport->phba;
536 	struct lpfc_scsi_buf *psb;
537 	struct ulp_bde64 *bpl;
538 	IOCB_t *iocb;
539 	dma_addr_t pdma_phys_fcp_cmd;
540 	dma_addr_t pdma_phys_fcp_rsp;
541 	dma_addr_t pdma_phys_bpl;
542 	uint16_t iotag;
543 	int bcnt, bpl_size;
544 
545 	bpl_size = phba->cfg_sg_dma_buf_size -
546 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
547 
548 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
549 			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
550 			 num_to_alloc, phba->cfg_sg_dma_buf_size,
551 			 (int)sizeof(struct fcp_cmnd),
552 			 (int)sizeof(struct fcp_rsp), bpl_size);
553 
554 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
555 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
556 		if (!psb)
557 			break;
558 
559 		/*
560 		 * Get memory from the pci pool to map the virt space to pci
561 		 * bus space for an I/O.  The DMA buffer includes space for the
562 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
563 		 * necessary to support the sg_tablesize.
564 		 */
565 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
566 					GFP_KERNEL, &psb->dma_handle);
567 		if (!psb->data) {
568 			kfree(psb);
569 			break;
570 		}
571 
572 		/* Initialize virtual ptrs to dma_buf region. */
573 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
574 
575 		/* Allocate iotag for psb->cur_iocbq. */
576 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
577 		if (iotag == 0) {
578 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
579 					psb->data, psb->dma_handle);
580 			kfree(psb);
581 			break;
582 		}
583 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
584 
585 		psb->fcp_cmnd = psb->data;
586 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
587 		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
588 			sizeof(struct fcp_rsp);
589 
590 		/* Initialize local short-hand pointers. */
591 		bpl = psb->fcp_bpl;
592 		pdma_phys_fcp_cmd = psb->dma_handle;
593 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
594 		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
595 			sizeof(struct fcp_rsp);
596 
597 		/*
598 		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
599 		 * are sg list bdes.  Initialize the first two and leave the
600 		 * rest for queuecommand.
601 		 */
602 		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
603 		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
604 		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
605 		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
606 		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
607 
608 		/* Setup the physical region for the FCP RSP */
609 		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
610 		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
611 		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
612 		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
613 		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
614 
615 		/*
616 		 * Since the IOCB for the FCP I/O is built into this
617 		 * lpfc_scsi_buf, initialize it with all known data now.
618 		 */
619 		iocb = &psb->cur_iocbq.iocb;
620 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
621 		if ((phba->sli_rev == 3) &&
622 				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
623 			/* fill in immediate fcp command BDE */
624 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
625 			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
626 			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
627 					unsli3.fcp_ext.icd);
628 			iocb->un.fcpi64.bdl.addrHigh = 0;
629 			iocb->ulpBdeCount = 0;
630 			iocb->ulpLe = 0;
631 			/* fill in response BDE */
632 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
633 							BUFF_TYPE_BDE_64;
634 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
635 				sizeof(struct fcp_rsp);
636 			iocb->unsli3.fcp_ext.rbde.addrLow =
637 				putPaddrLow(pdma_phys_fcp_rsp);
638 			iocb->unsli3.fcp_ext.rbde.addrHigh =
639 				putPaddrHigh(pdma_phys_fcp_rsp);
640 		} else {
641 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
642 			iocb->un.fcpi64.bdl.bdeSize =
643 					(2 * sizeof(struct ulp_bde64));
644 			iocb->un.fcpi64.bdl.addrLow =
645 					putPaddrLow(pdma_phys_bpl);
646 			iocb->un.fcpi64.bdl.addrHigh =
647 					putPaddrHigh(pdma_phys_bpl);
648 			iocb->ulpBdeCount = 1;
649 			iocb->ulpLe = 1;
650 		}
651 		iocb->ulpClass = CLASS3;
652 		psb->status = IOSTAT_SUCCESS;
653 		/* Put it back into the SCSI buffer list */
654 		psb->cur_iocbq.context1  = psb;
655 		lpfc_release_scsi_buf_s3(phba, psb);
656 
657 	}
658 
659 	return bcnt;
660 }
661 
662 /**
663  * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
664  * @vport: pointer to lpfc vport data structure.
665  *
666  * This routine is invoked by the vport cleanup for deletions and the cleanup
667  * for an ndlp on removal.
668  **/
669 void
670 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
671 {
672 	struct lpfc_hba *phba = vport->phba;
673 	struct lpfc_scsi_buf *psb, *next_psb;
674 	unsigned long iflag = 0;
675 
676 	spin_lock_irqsave(&phba->hbalock, iflag);
677 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
678 	list_for_each_entry_safe(psb, next_psb,
679 				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
680 		if (psb->rdata && psb->rdata->pnode
681 			&& psb->rdata->pnode->vport == vport)
682 			psb->rdata = NULL;
683 	}
684 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
685 	spin_unlock_irqrestore(&phba->hbalock, iflag);
686 }
687 
688 /**
689  * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
690  * @phba: pointer to lpfc hba data structure.
691  * @axri: pointer to the fcp xri abort wcqe structure.
692  *
693  * This routine is invoked by the worker thread to process a SLI4 fast-path
694  * FCP aborted xri.
695  **/
696 void
697 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
698 			  struct sli4_wcqe_xri_aborted *axri)
699 {
700 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
701 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
702 	struct lpfc_scsi_buf *psb, *next_psb;
703 	unsigned long iflag = 0;
704 	struct lpfc_iocbq *iocbq;
705 	int i;
706 	struct lpfc_nodelist *ndlp;
707 	int rrq_empty = 0;
708 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
709 
710 	spin_lock_irqsave(&phba->hbalock, iflag);
711 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
712 	list_for_each_entry_safe(psb, next_psb,
713 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
714 		if (psb->cur_iocbq.sli4_xritag == xri) {
715 			list_del(&psb->list);
716 			psb->exch_busy = 0;
717 			psb->status = IOSTAT_SUCCESS;
718 			spin_unlock(
719 				&phba->sli4_hba.abts_scsi_buf_list_lock);
720 			if (psb->rdata && psb->rdata->pnode)
721 				ndlp = psb->rdata->pnode;
722 			else
723 				ndlp = NULL;
724 
725 			rrq_empty = list_empty(&phba->active_rrq_list);
726 			spin_unlock_irqrestore(&phba->hbalock, iflag);
727 			if (ndlp) {
728 				lpfc_set_rrq_active(phba, ndlp,
729 					psb->cur_iocbq.sli4_lxritag, rxid, 1);
730 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
731 			}
732 			lpfc_release_scsi_buf_s4(phba, psb);
733 			if (rrq_empty)
734 				lpfc_worker_wake_up(phba);
735 			return;
736 		}
737 	}
738 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
739 	for (i = 1; i <= phba->sli.last_iotag; i++) {
740 		iocbq = phba->sli.iocbq_lookup[i];
741 
742 		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
743 			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
744 			continue;
745 		if (iocbq->sli4_xritag != xri)
746 			continue;
747 		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
748 		psb->exch_busy = 0;
749 		spin_unlock_irqrestore(&phba->hbalock, iflag);
750 		if (!list_empty(&pring->txq))
751 			lpfc_worker_wake_up(phba);
752 		return;
753 
754 	}
755 	spin_unlock_irqrestore(&phba->hbalock, iflag);
756 }
757 
758 /**
759  * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
760  * @phba: pointer to lpfc hba data structure.
761  * @post_sblist: pointer to the scsi buffer list.
762  *
763  * This routine walks a list of scsi buffers that was passed in. It attempts
764  * to construct blocks of scsi buffer sgls which contains contiguous xris and
765  * uses the non-embedded SGL block post mailbox commands to post to the port.
766  * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
767  * embedded SGL post mailbox command for posting. The @post_sblist passed in
768  * must be local list, thus no lock is needed when manipulate the list.
769  *
770  * Returns: 0 = failure, non-zero number of successfully posted buffers.
771  **/
772 int
773 lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
774 			     struct list_head *post_sblist, int sb_count)
775 {
776 	struct lpfc_scsi_buf *psb, *psb_next;
777 	int status, sgl_size;
778 	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
779 	dma_addr_t pdma_phys_bpl1;
780 	int last_xritag = NO_XRI;
781 	LIST_HEAD(prep_sblist);
782 	LIST_HEAD(blck_sblist);
783 	LIST_HEAD(scsi_sblist);
784 
785 	/* sanity check */
786 	if (sb_count <= 0)
787 		return -EINVAL;
788 
789 	sgl_size = phba->cfg_sg_dma_buf_size -
790 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
791 
792 	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
793 		list_del_init(&psb->list);
794 		block_cnt++;
795 		if ((last_xritag != NO_XRI) &&
796 		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
797 			/* a hole in xri block, form a sgl posting block */
798 			list_splice_init(&prep_sblist, &blck_sblist);
799 			post_cnt = block_cnt - 1;
800 			/* prepare list for next posting block */
801 			list_add_tail(&psb->list, &prep_sblist);
802 			block_cnt = 1;
803 		} else {
804 			/* prepare list for next posting block */
805 			list_add_tail(&psb->list, &prep_sblist);
806 			/* enough sgls for non-embed sgl mbox command */
807 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
808 				list_splice_init(&prep_sblist, &blck_sblist);
809 				post_cnt = block_cnt;
810 				block_cnt = 0;
811 			}
812 		}
813 		num_posting++;
814 		last_xritag = psb->cur_iocbq.sli4_xritag;
815 
816 		/* end of repost sgl list condition for SCSI buffers */
817 		if (num_posting == sb_count) {
818 			if (post_cnt == 0) {
819 				/* last sgl posting block */
820 				list_splice_init(&prep_sblist, &blck_sblist);
821 				post_cnt = block_cnt;
822 			} else if (block_cnt == 1) {
823 				/* last single sgl with non-contiguous xri */
824 				if (sgl_size > SGL_PAGE_SIZE)
825 					pdma_phys_bpl1 = psb->dma_phys_bpl +
826 								SGL_PAGE_SIZE;
827 				else
828 					pdma_phys_bpl1 = 0;
829 				status = lpfc_sli4_post_sgl(phba,
830 						psb->dma_phys_bpl,
831 						pdma_phys_bpl1,
832 						psb->cur_iocbq.sli4_xritag);
833 				if (status) {
834 					/* failure, put on abort scsi list */
835 					psb->exch_busy = 1;
836 				} else {
837 					/* success, put on SCSI buffer list */
838 					psb->exch_busy = 0;
839 					psb->status = IOSTAT_SUCCESS;
840 					num_posted++;
841 				}
842 				/* success, put on SCSI buffer sgl list */
843 				list_add_tail(&psb->list, &scsi_sblist);
844 			}
845 		}
846 
847 		/* continue until a nembed page worth of sgls */
848 		if (post_cnt == 0)
849 			continue;
850 
851 		/* post block of SCSI buffer list sgls */
852 		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
853 						       post_cnt);
854 
855 		/* don't reset xirtag due to hole in xri block */
856 		if (block_cnt == 0)
857 			last_xritag = NO_XRI;
858 
859 		/* reset SCSI buffer post count for next round of posting */
860 		post_cnt = 0;
861 
862 		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
863 		while (!list_empty(&blck_sblist)) {
864 			list_remove_head(&blck_sblist, psb,
865 					 struct lpfc_scsi_buf, list);
866 			if (status) {
867 				/* failure, put on abort scsi list */
868 				psb->exch_busy = 1;
869 			} else {
870 				/* success, put on SCSI buffer list */
871 				psb->exch_busy = 0;
872 				psb->status = IOSTAT_SUCCESS;
873 				num_posted++;
874 			}
875 			list_add_tail(&psb->list, &scsi_sblist);
876 		}
877 	}
878 	/* Push SCSI buffers with sgl posted to the availble list */
879 	while (!list_empty(&scsi_sblist)) {
880 		list_remove_head(&scsi_sblist, psb,
881 				 struct lpfc_scsi_buf, list);
882 		lpfc_release_scsi_buf_s4(phba, psb);
883 	}
884 	return num_posted;
885 }
886 
887 /**
888  * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
889  * @phba: pointer to lpfc hba data structure.
890  *
891  * This routine walks the list of scsi buffers that have been allocated and
892  * repost them to the port by using SGL block post. This is needed after a
893  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
894  * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
895  * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
896  *
897  * Returns: 0 = success, non-zero failure.
898  **/
899 int
900 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
901 {
902 	LIST_HEAD(post_sblist);
903 	int num_posted, rc = 0;
904 
905 	/* get all SCSI buffers need to repost to a local list */
906 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
907 	spin_lock_irq(&phba->scsi_buf_list_put_lock);
908 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
909 	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
910 	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
911 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
912 
913 	/* post the list of scsi buffer sgls to port if available */
914 	if (!list_empty(&post_sblist)) {
915 		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
916 						phba->sli4_hba.scsi_xri_cnt);
917 		/* failed to post any scsi buffer, return error */
918 		if (num_posted == 0)
919 			rc = -EIO;
920 	}
921 	return rc;
922 }
923 
924 /**
925  * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
926  * @vport: The virtual port for which this call being executed.
927  * @num_to_allocate: The requested number of buffers to allocate.
928  *
929  * This routine allocates scsi buffers for device with SLI-4 interface spec,
930  * the scsi buffer contains all the necessary information needed to initiate
931  * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
932  * them on a list, it post them to the port by using SGL block post.
933  *
934  * Return codes:
935  *   int - number of scsi buffers that were allocated and posted.
936  *   0 = failure, less than num_to_alloc is a partial failure.
937  **/
938 static int
939 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
940 {
941 	struct lpfc_hba *phba = vport->phba;
942 	struct lpfc_scsi_buf *psb;
943 	struct sli4_sge *sgl;
944 	IOCB_t *iocb;
945 	dma_addr_t pdma_phys_fcp_cmd;
946 	dma_addr_t pdma_phys_fcp_rsp;
947 	dma_addr_t pdma_phys_bpl;
948 	uint16_t iotag, lxri = 0;
949 	int bcnt, num_posted, sgl_size;
950 	LIST_HEAD(prep_sblist);
951 	LIST_HEAD(post_sblist);
952 	LIST_HEAD(scsi_sblist);
953 
954 	sgl_size = phba->cfg_sg_dma_buf_size -
955 		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
956 
957 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
958 			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
959 			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
960 			 (int)sizeof(struct fcp_cmnd),
961 			 (int)sizeof(struct fcp_rsp));
962 
963 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
964 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
965 		if (!psb)
966 			break;
967 		/*
968 		 * Get memory from the pci pool to map the virt space to
969 		 * pci bus space for an I/O. The DMA buffer includes space
970 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
971 		 * of bde's necessary to support the sg_tablesize.
972 		 */
973 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
974 						GFP_KERNEL, &psb->dma_handle);
975 		if (!psb->data) {
976 			kfree(psb);
977 			break;
978 		}
979 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
980 
981 		/* Page alignment is CRITICAL, double check to be sure */
982 		if (((unsigned long)(psb->data) &
983 		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
984 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
985 				      psb->data, psb->dma_handle);
986 			kfree(psb);
987 			break;
988 		}
989 
990 		/* Allocate iotag for psb->cur_iocbq. */
991 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
992 		if (iotag == 0) {
993 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
994 				psb->data, psb->dma_handle);
995 			kfree(psb);
996 			break;
997 		}
998 
999 		lxri = lpfc_sli4_next_xritag(phba);
1000 		if (lxri == NO_XRI) {
1001 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
1002 			      psb->data, psb->dma_handle);
1003 			kfree(psb);
1004 			break;
1005 		}
1006 		psb->cur_iocbq.sli4_lxritag = lxri;
1007 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
1008 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
1009 		psb->fcp_bpl = psb->data;
1010 		psb->fcp_cmnd = (psb->data + sgl_size);
1011 		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
1012 					sizeof(struct fcp_cmnd));
1013 
1014 		/* Initialize local short-hand pointers. */
1015 		sgl = (struct sli4_sge *)psb->fcp_bpl;
1016 		pdma_phys_bpl = psb->dma_handle;
1017 		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
1018 		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
1019 
1020 		/*
1021 		 * The first two bdes are the FCP_CMD and FCP_RSP.
1022 		 * The balance are sg list bdes. Initialize the
1023 		 * first two and leave the rest for queuecommand.
1024 		 */
1025 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
1026 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
1027 		sgl->word2 = le32_to_cpu(sgl->word2);
1028 		bf_set(lpfc_sli4_sge_last, sgl, 0);
1029 		sgl->word2 = cpu_to_le32(sgl->word2);
1030 		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
1031 		sgl++;
1032 
1033 		/* Setup the physical region for the FCP RSP */
1034 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
1035 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
1036 		sgl->word2 = le32_to_cpu(sgl->word2);
1037 		bf_set(lpfc_sli4_sge_last, sgl, 1);
1038 		sgl->word2 = cpu_to_le32(sgl->word2);
1039 		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
1040 
1041 		/*
1042 		 * Since the IOCB for the FCP I/O is built into this
1043 		 * lpfc_scsi_buf, initialize it with all known data now.
1044 		 */
1045 		iocb = &psb->cur_iocbq.iocb;
1046 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
1047 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
1048 		/* setting the BLP size to 2 * sizeof BDE may not be correct.
1049 		 * We are setting the bpl to point to out sgl. An sgl's
1050 		 * entries are 16 bytes, a bpl entries are 12 bytes.
1051 		 */
1052 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
1053 		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
1054 		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
1055 		iocb->ulpBdeCount = 1;
1056 		iocb->ulpLe = 1;
1057 		iocb->ulpClass = CLASS3;
1058 		psb->cur_iocbq.context1 = psb;
1059 		psb->dma_phys_bpl = pdma_phys_bpl;
1060 
1061 		/* add the scsi buffer to a post list */
1062 		list_add_tail(&psb->list, &post_sblist);
1063 		spin_lock_irq(&phba->scsi_buf_list_get_lock);
1064 		phba->sli4_hba.scsi_xri_cnt++;
1065 		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
1066 	}
1067 	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1068 			"3021 Allocate %d out of %d requested new SCSI "
1069 			"buffers\n", bcnt, num_to_alloc);
1070 
1071 	/* post the list of scsi buffer sgls to port if available */
1072 	if (!list_empty(&post_sblist))
1073 		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
1074 							  &post_sblist, bcnt);
1075 	else
1076 		num_posted = 0;
1077 
1078 	return num_posted;
1079 }
1080 
1081 /**
1082  * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator
1083  * @vport: The virtual port for which this call being executed.
1084  * @num_to_allocate: The requested number of buffers to allocate.
1085  *
1086  * This routine wraps the actual SCSI buffer allocator function pointer from
1087  * the lpfc_hba struct.
1088  *
1089  * Return codes:
1090  *   int - number of scsi buffers that were allocated.
1091  *   0 = failure, less than num_to_alloc is a partial failure.
1092  **/
1093 static inline int
1094 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
1095 {
1096 	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
1097 }
1098 
1099 /**
1100  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1101  * @phba: The HBA for which this call is being executed.
1102  *
1103  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1104  * and returns to caller.
1105  *
1106  * Return codes:
1107  *   NULL - Error
1108  *   Pointer to lpfc_scsi_buf - Success
1109  **/
1110 static struct lpfc_scsi_buf*
1111 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1112 {
1113 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
1114 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
1115 	unsigned long gflag = 0;
1116 	unsigned long pflag = 0;
1117 
1118 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1119 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
1120 			 list);
1121 	if (!lpfc_cmd) {
1122 		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1123 		list_splice(&phba->lpfc_scsi_buf_list_put,
1124 			    &phba->lpfc_scsi_buf_list_get);
1125 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1126 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
1127 				 struct lpfc_scsi_buf, list);
1128 		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1129 	}
1130 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1131 	return  lpfc_cmd;
1132 }
1133 /**
1134  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1135  * @phba: The HBA for which this call is being executed.
1136  *
1137  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1138  * and returns to caller.
1139  *
1140  * Return codes:
1141  *   NULL - Error
1142  *   Pointer to lpfc_scsi_buf - Success
1143  **/
1144 static struct lpfc_scsi_buf*
1145 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1146 {
1147 	struct lpfc_scsi_buf *lpfc_cmd ;
1148 	unsigned long gflag = 0;
1149 	unsigned long pflag = 0;
1150 	int found = 0;
1151 
1152 	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
1153 	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
1154 		if (lpfc_test_rrq_active(phba, ndlp,
1155 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
1156 			continue;
1157 		list_del(&lpfc_cmd->list);
1158 		found = 1;
1159 		break;
1160 	}
1161 	if (!found) {
1162 		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
1163 		list_splice(&phba->lpfc_scsi_buf_list_put,
1164 			    &phba->lpfc_scsi_buf_list_get);
1165 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
1166 		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
1167 		list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
1168 				    list) {
1169 			if (lpfc_test_rrq_active(
1170 				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1171 				continue;
1172 			list_del(&lpfc_cmd->list);
1173 			found = 1;
1174 			break;
1175 		}
1176 	}
1177 	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
1178 	if (!found)
1179 		return NULL;
1180 	return  lpfc_cmd;
1181 }
1182 /**
1183  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1184  * @phba: The HBA for which this call is being executed.
1185  *
1186  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1187  * and returns to caller.
1188  *
1189  * Return codes:
1190  *   NULL - Error
1191  *   Pointer to lpfc_scsi_buf - Success
1192  **/
1193 static struct lpfc_scsi_buf*
1194 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1195 {
1196 	return  phba->lpfc_get_scsi_buf(phba, ndlp);
1197 }
1198 
1199 /**
1200  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
1201  * @phba: The Hba for which this call is being executed.
1202  * @psb: The scsi buffer which is being released.
1203  *
1204  * This routine releases @psb scsi buffer by adding it to tail of @phba
1205  * lpfc_scsi_buf_list list.
1206  **/
1207 static void
1208 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1209 {
1210 	unsigned long iflag = 0;
1211 
1212 	psb->seg_cnt = 0;
1213 	psb->nonsg_phys = 0;
1214 	psb->prot_seg_cnt = 0;
1215 
1216 	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1217 	psb->pCmd = NULL;
1218 	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1219 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1220 	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1221 }
1222 
1223 /**
1224  * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
1225  * @phba: The Hba for which this call is being executed.
1226  * @psb: The scsi buffer which is being released.
1227  *
1228  * This routine releases @psb scsi buffer by adding it to tail of @phba
1229  * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
1230  * and cannot be reused for at least RA_TOV amount of time if it was
1231  * aborted.
1232  **/
1233 static void
1234 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1235 {
1236 	unsigned long iflag = 0;
1237 
1238 	psb->seg_cnt = 0;
1239 	psb->nonsg_phys = 0;
1240 	psb->prot_seg_cnt = 0;
1241 
1242 	if (psb->exch_busy) {
1243 		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1244 					iflag);
1245 		psb->pCmd = NULL;
1246 		list_add_tail(&psb->list,
1247 			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
1248 		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1249 					iflag);
1250 	} else {
1251 		psb->pCmd = NULL;
1252 		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1253 		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1254 		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1255 		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1256 	}
1257 }
1258 
1259 /**
1260  * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
1261  * @phba: The Hba for which this call is being executed.
1262  * @psb: The scsi buffer which is being released.
1263  *
1264  * This routine releases @psb scsi buffer by adding it to tail of @phba
1265  * lpfc_scsi_buf_list list.
1266  **/
1267 static void
1268 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1269 {
1270 
1271 	phba->lpfc_release_scsi_buf(phba, psb);
1272 }
1273 
1274 /**
1275  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
1276  * @phba: The Hba for which this call is being executed.
1277  * @lpfc_cmd: The scsi buffer which is going to be mapped.
1278  *
1279  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1280  * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
1281  * through sg elements and format the bdea. This routine also initializes all
1282  * IOCB fields which are dependent on scsi command request buffer.
1283  *
1284  * Return codes:
1285  *   1 - Error
1286  *   0 - Success
1287  **/
1288 static int
1289 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1290 {
1291 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1292 	struct scatterlist *sgel = NULL;
1293 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1294 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1295 	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1296 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1297 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1298 	dma_addr_t physaddr;
1299 	uint32_t num_bde = 0;
1300 	int nseg, datadir = scsi_cmnd->sc_data_direction;
1301 
1302 	/*
1303 	 * There are three possibilities here - use scatter-gather segment, use
1304 	 * the single mapping, or neither.  Start the lpfc command prep by
1305 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1306 	 * data bde entry.
1307 	 */
1308 	bpl += 2;
1309 	if (scsi_sg_count(scsi_cmnd)) {
1310 		/*
1311 		 * The driver stores the segment count returned from pci_map_sg
1312 		 * because this a count of dma-mappings used to map the use_sg
1313 		 * pages.  They are not guaranteed to be the same for those
1314 		 * architectures that implement an IOMMU.
1315 		 */
1316 
1317 		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1318 				  scsi_sg_count(scsi_cmnd), datadir);
1319 		if (unlikely(!nseg))
1320 			return 1;
1321 
1322 		lpfc_cmd->seg_cnt = nseg;
1323 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1324 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1325 				"9064 BLKGRD: %s: Too many sg segments from "
1326 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
1327 			       __func__, phba->cfg_sg_seg_cnt,
1328 			       lpfc_cmd->seg_cnt);
1329 			lpfc_cmd->seg_cnt = 0;
1330 			scsi_dma_unmap(scsi_cmnd);
1331 			return 1;
1332 		}
1333 
1334 		/*
1335 		 * The driver established a maximum scatter-gather segment count
1336 		 * during probe that limits the number of sg elements in any
1337 		 * single scsi command.  Just run through the seg_cnt and format
1338 		 * the bde's.
1339 		 * When using SLI-3 the driver will try to fit all the BDEs into
1340 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
1341 		 * does for SLI-2 mode.
1342 		 */
1343 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1344 			physaddr = sg_dma_address(sgel);
1345 			if (phba->sli_rev == 3 &&
1346 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1347 			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1348 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1349 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1350 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1351 				data_bde->addrLow = putPaddrLow(physaddr);
1352 				data_bde->addrHigh = putPaddrHigh(physaddr);
1353 				data_bde++;
1354 			} else {
1355 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1356 				bpl->tus.f.bdeSize = sg_dma_len(sgel);
1357 				bpl->tus.w = le32_to_cpu(bpl->tus.w);
1358 				bpl->addrLow =
1359 					le32_to_cpu(putPaddrLow(physaddr));
1360 				bpl->addrHigh =
1361 					le32_to_cpu(putPaddrHigh(physaddr));
1362 				bpl++;
1363 			}
1364 		}
1365 	}
1366 
1367 	/*
1368 	 * Finish initializing those IOCB fields that are dependent on the
1369 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
1370 	 * explicitly reinitialized and for SLI-3 the extended bde count is
1371 	 * explicitly reinitialized since all iocb memory resources are reused.
1372 	 */
1373 	if (phba->sli_rev == 3 &&
1374 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1375 	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1376 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1377 			/*
1378 			 * The extended IOCB format can only fit 3 BDE or a BPL.
1379 			 * This I/O has more than 3 BDE so the 1st data bde will
1380 			 * be a BPL that is filled in here.
1381 			 */
1382 			physaddr = lpfc_cmd->dma_handle;
1383 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1384 			data_bde->tus.f.bdeSize = (num_bde *
1385 						   sizeof(struct ulp_bde64));
1386 			physaddr += (sizeof(struct fcp_cmnd) +
1387 				     sizeof(struct fcp_rsp) +
1388 				     (2 * sizeof(struct ulp_bde64)));
1389 			data_bde->addrHigh = putPaddrHigh(physaddr);
1390 			data_bde->addrLow = putPaddrLow(physaddr);
1391 			/* ebde count includes the response bde and data bpl */
1392 			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1393 		} else {
1394 			/* ebde count includes the response bde and data bdes */
1395 			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1396 		}
1397 	} else {
1398 		iocb_cmd->un.fcpi64.bdl.bdeSize =
1399 			((num_bde + 2) * sizeof(struct ulp_bde64));
1400 		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1401 	}
1402 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1403 
1404 	/*
1405 	 * Due to difference in data length between DIF/non-DIF paths,
1406 	 * we need to set word 4 of IOCB here
1407 	 */
1408 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1409 	return 0;
1410 }
1411 
1412 static inline unsigned
1413 lpfc_cmd_blksize(struct scsi_cmnd *sc)
1414 {
1415 	return sc->device->sector_size;
1416 }
1417 
1418 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1419 
1420 /* Return if if error injection is detected by Initiator */
1421 #define BG_ERR_INIT	0x1
1422 /* Return if if error injection is detected by Target */
1423 #define BG_ERR_TGT	0x2
1424 /* Return if if swapping CSUM<-->CRC is required for error injection */
1425 #define BG_ERR_SWAP	0x10
1426 /* Return if disabling Guard/Ref/App checking is required for error injection */
1427 #define BG_ERR_CHECK	0x20
1428 
1429 /**
1430  * lpfc_bg_err_inject - Determine if we should inject an error
1431  * @phba: The Hba for which this call is being executed.
1432  * @sc: The SCSI command to examine
1433  * @reftag: (out) BlockGuard reference tag for transmitted data
1434  * @apptag: (out) BlockGuard application tag for transmitted data
1435  * @new_guard (in) Value to replace CRC with if needed
1436  *
1437  * Returns BG_ERR_* bit mask or 0 if request ignored
1438  **/
1439 static int
1440 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1441 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1442 {
1443 	struct scatterlist *sgpe; /* s/g prot entry */
1444 	struct scatterlist *sgde; /* s/g data entry */
1445 	struct lpfc_scsi_buf *lpfc_cmd = NULL;
1446 	struct scsi_dif_tuple *src = NULL;
1447 	struct lpfc_nodelist *ndlp;
1448 	struct lpfc_rport_data *rdata;
1449 	uint32_t op = scsi_get_prot_op(sc);
1450 	uint32_t blksize;
1451 	uint32_t numblks;
1452 	sector_t lba;
1453 	int rc = 0;
1454 	int blockoff = 0;
1455 
1456 	if (op == SCSI_PROT_NORMAL)
1457 		return 0;
1458 
1459 	sgpe = scsi_prot_sglist(sc);
1460 	sgde = scsi_sglist(sc);
1461 	lba = scsi_get_lba(sc);
1462 
1463 	/* First check if we need to match the LBA */
1464 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1465 		blksize = lpfc_cmd_blksize(sc);
1466 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1467 
1468 		/* Make sure we have the right LBA if one is specified */
1469 		if ((phba->lpfc_injerr_lba < lba) ||
1470 			(phba->lpfc_injerr_lba >= (lba + numblks)))
1471 			return 0;
1472 		if (sgpe) {
1473 			blockoff = phba->lpfc_injerr_lba - lba;
1474 			numblks = sg_dma_len(sgpe) /
1475 				sizeof(struct scsi_dif_tuple);
1476 			if (numblks < blockoff)
1477 				blockoff = numblks;
1478 		}
1479 	}
1480 
1481 	/* Next check if we need to match the remote NPortID or WWPN */
1482 	rdata = sc->device->hostdata;
1483 	if (rdata && rdata->pnode) {
1484 		ndlp = rdata->pnode;
1485 
1486 		/* Make sure we have the right NPortID if one is specified */
1487 		if (phba->lpfc_injerr_nportid  &&
1488 			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1489 			return 0;
1490 
1491 		/*
1492 		 * Make sure we have the right WWPN if one is specified.
1493 		 * wwn[0] should be a non-zero NAA in a good WWPN.
1494 		 */
1495 		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1496 			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1497 				sizeof(struct lpfc_name)) != 0))
1498 			return 0;
1499 	}
1500 
1501 	/* Setup a ptr to the protection data if the SCSI host provides it */
1502 	if (sgpe) {
1503 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1504 		src += blockoff;
1505 		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1506 	}
1507 
1508 	/* Should we change the Reference Tag */
1509 	if (reftag) {
1510 		if (phba->lpfc_injerr_wref_cnt) {
1511 			switch (op) {
1512 			case SCSI_PROT_WRITE_PASS:
1513 				if (src) {
1514 					/*
1515 					 * For WRITE_PASS, force the error
1516 					 * to be sent on the wire. It should
1517 					 * be detected by the Target.
1518 					 * If blockoff != 0 error will be
1519 					 * inserted in middle of the IO.
1520 					 */
1521 
1522 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1523 					"9076 BLKGRD: Injecting reftag error: "
1524 					"write lba x%lx + x%x oldrefTag x%x\n",
1525 					(unsigned long)lba, blockoff,
1526 					be32_to_cpu(src->ref_tag));
1527 
1528 					/*
1529 					 * Save the old ref_tag so we can
1530 					 * restore it on completion.
1531 					 */
1532 					if (lpfc_cmd) {
1533 						lpfc_cmd->prot_data_type =
1534 							LPFC_INJERR_REFTAG;
1535 						lpfc_cmd->prot_data_segment =
1536 							src;
1537 						lpfc_cmd->prot_data =
1538 							src->ref_tag;
1539 					}
1540 					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1541 					phba->lpfc_injerr_wref_cnt--;
1542 					if (phba->lpfc_injerr_wref_cnt == 0) {
1543 						phba->lpfc_injerr_nportid = 0;
1544 						phba->lpfc_injerr_lba =
1545 							LPFC_INJERR_LBA_OFF;
1546 						memset(&phba->lpfc_injerr_wwpn,
1547 						  0, sizeof(struct lpfc_name));
1548 					}
1549 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1550 
1551 					break;
1552 				}
1553 				/* Drop thru */
1554 			case SCSI_PROT_WRITE_INSERT:
1555 				/*
1556 				 * For WRITE_INSERT, force the error
1557 				 * to be sent on the wire. It should be
1558 				 * detected by the Target.
1559 				 */
1560 				/* DEADBEEF will be the reftag on the wire */
1561 				*reftag = 0xDEADBEEF;
1562 				phba->lpfc_injerr_wref_cnt--;
1563 				if (phba->lpfc_injerr_wref_cnt == 0) {
1564 					phba->lpfc_injerr_nportid = 0;
1565 					phba->lpfc_injerr_lba =
1566 					LPFC_INJERR_LBA_OFF;
1567 					memset(&phba->lpfc_injerr_wwpn,
1568 						0, sizeof(struct lpfc_name));
1569 				}
1570 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1571 
1572 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1573 					"9078 BLKGRD: Injecting reftag error: "
1574 					"write lba x%lx\n", (unsigned long)lba);
1575 				break;
1576 			case SCSI_PROT_WRITE_STRIP:
1577 				/*
1578 				 * For WRITE_STRIP and WRITE_PASS,
1579 				 * force the error on data
1580 				 * being copied from SLI-Host to SLI-Port.
1581 				 */
1582 				*reftag = 0xDEADBEEF;
1583 				phba->lpfc_injerr_wref_cnt--;
1584 				if (phba->lpfc_injerr_wref_cnt == 0) {
1585 					phba->lpfc_injerr_nportid = 0;
1586 					phba->lpfc_injerr_lba =
1587 						LPFC_INJERR_LBA_OFF;
1588 					memset(&phba->lpfc_injerr_wwpn,
1589 						0, sizeof(struct lpfc_name));
1590 				}
1591 				rc = BG_ERR_INIT;
1592 
1593 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1594 					"9077 BLKGRD: Injecting reftag error: "
1595 					"write lba x%lx\n", (unsigned long)lba);
1596 				break;
1597 			}
1598 		}
1599 		if (phba->lpfc_injerr_rref_cnt) {
1600 			switch (op) {
1601 			case SCSI_PROT_READ_INSERT:
1602 			case SCSI_PROT_READ_STRIP:
1603 			case SCSI_PROT_READ_PASS:
1604 				/*
1605 				 * For READ_STRIP and READ_PASS, force the
1606 				 * error on data being read off the wire. It
1607 				 * should force an IO error to the driver.
1608 				 */
1609 				*reftag = 0xDEADBEEF;
1610 				phba->lpfc_injerr_rref_cnt--;
1611 				if (phba->lpfc_injerr_rref_cnt == 0) {
1612 					phba->lpfc_injerr_nportid = 0;
1613 					phba->lpfc_injerr_lba =
1614 						LPFC_INJERR_LBA_OFF;
1615 					memset(&phba->lpfc_injerr_wwpn,
1616 						0, sizeof(struct lpfc_name));
1617 				}
1618 				rc = BG_ERR_INIT;
1619 
1620 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1621 					"9079 BLKGRD: Injecting reftag error: "
1622 					"read lba x%lx\n", (unsigned long)lba);
1623 				break;
1624 			}
1625 		}
1626 	}
1627 
1628 	/* Should we change the Application Tag */
1629 	if (apptag) {
1630 		if (phba->lpfc_injerr_wapp_cnt) {
1631 			switch (op) {
1632 			case SCSI_PROT_WRITE_PASS:
1633 				if (src) {
1634 					/*
1635 					 * For WRITE_PASS, force the error
1636 					 * to be sent on the wire. It should
1637 					 * be detected by the Target.
1638 					 * If blockoff != 0 error will be
1639 					 * inserted in middle of the IO.
1640 					 */
1641 
1642 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1643 					"9080 BLKGRD: Injecting apptag error: "
1644 					"write lba x%lx + x%x oldappTag x%x\n",
1645 					(unsigned long)lba, blockoff,
1646 					be16_to_cpu(src->app_tag));
1647 
1648 					/*
1649 					 * Save the old app_tag so we can
1650 					 * restore it on completion.
1651 					 */
1652 					if (lpfc_cmd) {
1653 						lpfc_cmd->prot_data_type =
1654 							LPFC_INJERR_APPTAG;
1655 						lpfc_cmd->prot_data_segment =
1656 							src;
1657 						lpfc_cmd->prot_data =
1658 							src->app_tag;
1659 					}
1660 					src->app_tag = cpu_to_be16(0xDEAD);
1661 					phba->lpfc_injerr_wapp_cnt--;
1662 					if (phba->lpfc_injerr_wapp_cnt == 0) {
1663 						phba->lpfc_injerr_nportid = 0;
1664 						phba->lpfc_injerr_lba =
1665 							LPFC_INJERR_LBA_OFF;
1666 						memset(&phba->lpfc_injerr_wwpn,
1667 						  0, sizeof(struct lpfc_name));
1668 					}
1669 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1670 					break;
1671 				}
1672 				/* Drop thru */
1673 			case SCSI_PROT_WRITE_INSERT:
1674 				/*
1675 				 * For WRITE_INSERT, force the
1676 				 * error to be sent on the wire. It should be
1677 				 * detected by the Target.
1678 				 */
1679 				/* DEAD will be the apptag on the wire */
1680 				*apptag = 0xDEAD;
1681 				phba->lpfc_injerr_wapp_cnt--;
1682 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1683 					phba->lpfc_injerr_nportid = 0;
1684 					phba->lpfc_injerr_lba =
1685 						LPFC_INJERR_LBA_OFF;
1686 					memset(&phba->lpfc_injerr_wwpn,
1687 						0, sizeof(struct lpfc_name));
1688 				}
1689 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1690 
1691 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1692 					"0813 BLKGRD: Injecting apptag error: "
1693 					"write lba x%lx\n", (unsigned long)lba);
1694 				break;
1695 			case SCSI_PROT_WRITE_STRIP:
1696 				/*
1697 				 * For WRITE_STRIP and WRITE_PASS,
1698 				 * force the error on data
1699 				 * being copied from SLI-Host to SLI-Port.
1700 				 */
1701 				*apptag = 0xDEAD;
1702 				phba->lpfc_injerr_wapp_cnt--;
1703 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1704 					phba->lpfc_injerr_nportid = 0;
1705 					phba->lpfc_injerr_lba =
1706 						LPFC_INJERR_LBA_OFF;
1707 					memset(&phba->lpfc_injerr_wwpn,
1708 						0, sizeof(struct lpfc_name));
1709 				}
1710 				rc = BG_ERR_INIT;
1711 
1712 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1713 					"0812 BLKGRD: Injecting apptag error: "
1714 					"write lba x%lx\n", (unsigned long)lba);
1715 				break;
1716 			}
1717 		}
1718 		if (phba->lpfc_injerr_rapp_cnt) {
1719 			switch (op) {
1720 			case SCSI_PROT_READ_INSERT:
1721 			case SCSI_PROT_READ_STRIP:
1722 			case SCSI_PROT_READ_PASS:
1723 				/*
1724 				 * For READ_STRIP and READ_PASS, force the
1725 				 * error on data being read off the wire. It
1726 				 * should force an IO error to the driver.
1727 				 */
1728 				*apptag = 0xDEAD;
1729 				phba->lpfc_injerr_rapp_cnt--;
1730 				if (phba->lpfc_injerr_rapp_cnt == 0) {
1731 					phba->lpfc_injerr_nportid = 0;
1732 					phba->lpfc_injerr_lba =
1733 						LPFC_INJERR_LBA_OFF;
1734 					memset(&phba->lpfc_injerr_wwpn,
1735 						0, sizeof(struct lpfc_name));
1736 				}
1737 				rc = BG_ERR_INIT;
1738 
1739 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1740 					"0814 BLKGRD: Injecting apptag error: "
1741 					"read lba x%lx\n", (unsigned long)lba);
1742 				break;
1743 			}
1744 		}
1745 	}
1746 
1747 
1748 	/* Should we change the Guard Tag */
1749 	if (new_guard) {
1750 		if (phba->lpfc_injerr_wgrd_cnt) {
1751 			switch (op) {
1752 			case SCSI_PROT_WRITE_PASS:
1753 				rc = BG_ERR_CHECK;
1754 				/* Drop thru */
1755 
1756 			case SCSI_PROT_WRITE_INSERT:
1757 				/*
1758 				 * For WRITE_INSERT, force the
1759 				 * error to be sent on the wire. It should be
1760 				 * detected by the Target.
1761 				 */
1762 				phba->lpfc_injerr_wgrd_cnt--;
1763 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1764 					phba->lpfc_injerr_nportid = 0;
1765 					phba->lpfc_injerr_lba =
1766 						LPFC_INJERR_LBA_OFF;
1767 					memset(&phba->lpfc_injerr_wwpn,
1768 						0, sizeof(struct lpfc_name));
1769 				}
1770 
1771 				rc |= BG_ERR_TGT | BG_ERR_SWAP;
1772 				/* Signals the caller to swap CRC->CSUM */
1773 
1774 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1775 					"0817 BLKGRD: Injecting guard error: "
1776 					"write lba x%lx\n", (unsigned long)lba);
1777 				break;
1778 			case SCSI_PROT_WRITE_STRIP:
1779 				/*
1780 				 * For WRITE_STRIP and WRITE_PASS,
1781 				 * force the error on data
1782 				 * being copied from SLI-Host to SLI-Port.
1783 				 */
1784 				phba->lpfc_injerr_wgrd_cnt--;
1785 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1786 					phba->lpfc_injerr_nportid = 0;
1787 					phba->lpfc_injerr_lba =
1788 						LPFC_INJERR_LBA_OFF;
1789 					memset(&phba->lpfc_injerr_wwpn,
1790 						0, sizeof(struct lpfc_name));
1791 				}
1792 
1793 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1794 				/* Signals the caller to swap CRC->CSUM */
1795 
1796 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1797 					"0816 BLKGRD: Injecting guard error: "
1798 					"write lba x%lx\n", (unsigned long)lba);
1799 				break;
1800 			}
1801 		}
1802 		if (phba->lpfc_injerr_rgrd_cnt) {
1803 			switch (op) {
1804 			case SCSI_PROT_READ_INSERT:
1805 			case SCSI_PROT_READ_STRIP:
1806 			case SCSI_PROT_READ_PASS:
1807 				/*
1808 				 * For READ_STRIP and READ_PASS, force the
1809 				 * error on data being read off the wire. It
1810 				 * should force an IO error to the driver.
1811 				 */
1812 				phba->lpfc_injerr_rgrd_cnt--;
1813 				if (phba->lpfc_injerr_rgrd_cnt == 0) {
1814 					phba->lpfc_injerr_nportid = 0;
1815 					phba->lpfc_injerr_lba =
1816 						LPFC_INJERR_LBA_OFF;
1817 					memset(&phba->lpfc_injerr_wwpn,
1818 						0, sizeof(struct lpfc_name));
1819 				}
1820 
1821 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1822 				/* Signals the caller to swap CRC->CSUM */
1823 
1824 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1825 					"0818 BLKGRD: Injecting guard error: "
1826 					"read lba x%lx\n", (unsigned long)lba);
1827 			}
1828 		}
1829 	}
1830 
1831 	return rc;
1832 }
1833 #endif
1834 
1835 /**
1836  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1837  * the specified SCSI command.
1838  * @phba: The Hba for which this call is being executed.
1839  * @sc: The SCSI command to examine
1840  * @txopt: (out) BlockGuard operation for transmitted data
1841  * @rxopt: (out) BlockGuard operation for received data
1842  *
1843  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1844  *
1845  **/
1846 static int
1847 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1848 		uint8_t *txop, uint8_t *rxop)
1849 {
1850 	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1851 	uint8_t ret = 0;
1852 
1853 	if (guard_type == SHOST_DIX_GUARD_IP) {
1854 		switch (scsi_get_prot_op(sc)) {
1855 		case SCSI_PROT_READ_INSERT:
1856 		case SCSI_PROT_WRITE_STRIP:
1857 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1858 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1859 			break;
1860 
1861 		case SCSI_PROT_READ_STRIP:
1862 		case SCSI_PROT_WRITE_INSERT:
1863 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1864 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1865 			break;
1866 
1867 		case SCSI_PROT_READ_PASS:
1868 		case SCSI_PROT_WRITE_PASS:
1869 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
1870 			*txop = BG_OP_IN_CSUM_OUT_CRC;
1871 			break;
1872 
1873 		case SCSI_PROT_NORMAL:
1874 		default:
1875 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1876 				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1877 					scsi_get_prot_op(sc));
1878 			ret = 1;
1879 			break;
1880 
1881 		}
1882 	} else {
1883 		switch (scsi_get_prot_op(sc)) {
1884 		case SCSI_PROT_READ_STRIP:
1885 		case SCSI_PROT_WRITE_INSERT:
1886 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1887 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1888 			break;
1889 
1890 		case SCSI_PROT_READ_PASS:
1891 		case SCSI_PROT_WRITE_PASS:
1892 			*rxop = BG_OP_IN_CRC_OUT_CRC;
1893 			*txop = BG_OP_IN_CRC_OUT_CRC;
1894 			break;
1895 
1896 		case SCSI_PROT_READ_INSERT:
1897 		case SCSI_PROT_WRITE_STRIP:
1898 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1899 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1900 			break;
1901 
1902 		case SCSI_PROT_NORMAL:
1903 		default:
1904 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1905 				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1906 					scsi_get_prot_op(sc));
1907 			ret = 1;
1908 			break;
1909 		}
1910 	}
1911 
1912 	return ret;
1913 }
1914 
1915 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1916 /**
1917  * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
1918  * the specified SCSI command in order to force a guard tag error.
1919  * @phba: The Hba for which this call is being executed.
1920  * @sc: The SCSI command to examine
1921  * @txopt: (out) BlockGuard operation for transmitted data
1922  * @rxopt: (out) BlockGuard operation for received data
1923  *
1924  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1925  *
1926  **/
1927 static int
1928 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1929 		uint8_t *txop, uint8_t *rxop)
1930 {
1931 	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1932 	uint8_t ret = 0;
1933 
1934 	if (guard_type == SHOST_DIX_GUARD_IP) {
1935 		switch (scsi_get_prot_op(sc)) {
1936 		case SCSI_PROT_READ_INSERT:
1937 		case SCSI_PROT_WRITE_STRIP:
1938 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1939 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1940 			break;
1941 
1942 		case SCSI_PROT_READ_STRIP:
1943 		case SCSI_PROT_WRITE_INSERT:
1944 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1945 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1946 			break;
1947 
1948 		case SCSI_PROT_READ_PASS:
1949 		case SCSI_PROT_WRITE_PASS:
1950 			*rxop = BG_OP_IN_CSUM_OUT_CRC;
1951 			*txop = BG_OP_IN_CRC_OUT_CSUM;
1952 			break;
1953 
1954 		case SCSI_PROT_NORMAL:
1955 		default:
1956 			break;
1957 
1958 		}
1959 	} else {
1960 		switch (scsi_get_prot_op(sc)) {
1961 		case SCSI_PROT_READ_STRIP:
1962 		case SCSI_PROT_WRITE_INSERT:
1963 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1964 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1965 			break;
1966 
1967 		case SCSI_PROT_READ_PASS:
1968 		case SCSI_PROT_WRITE_PASS:
1969 			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
1970 			*txop = BG_OP_IN_CSUM_OUT_CSUM;
1971 			break;
1972 
1973 		case SCSI_PROT_READ_INSERT:
1974 		case SCSI_PROT_WRITE_STRIP:
1975 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1976 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1977 			break;
1978 
1979 		case SCSI_PROT_NORMAL:
1980 		default:
1981 			break;
1982 		}
1983 	}
1984 
1985 	return ret;
1986 }
1987 #endif
1988 
1989 /**
1990  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1991  * @phba: The Hba for which this call is being executed.
1992  * @sc: pointer to scsi command we're working on
1993  * @bpl: pointer to buffer list for protection groups
1994  * @datacnt: number of segments of data that have been dma mapped
1995  *
1996  * This function sets up BPL buffer list for protection groups of
1997  * type LPFC_PG_TYPE_NO_DIF
1998  *
1999  * This is usually used when the HBA is instructed to generate
2000  * DIFs and insert them into data stream (or strip DIF from
2001  * incoming data stream)
2002  *
2003  * The buffer list consists of just one protection group described
2004  * below:
2005  *                                +-------------------------+
2006  *   start of prot group  -->     |          PDE_5          |
2007  *                                +-------------------------+
2008  *                                |          PDE_6          |
2009  *                                +-------------------------+
2010  *                                |         Data BDE        |
2011  *                                +-------------------------+
2012  *                                |more Data BDE's ... (opt)|
2013  *                                +-------------------------+
2014  *
2015  *
2016  * Note: Data s/g buffers have been dma mapped
2017  *
2018  * Returns the number of BDEs added to the BPL.
2019  **/
2020 static int
2021 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2022 		struct ulp_bde64 *bpl, int datasegcnt)
2023 {
2024 	struct scatterlist *sgde = NULL; /* s/g data entry */
2025 	struct lpfc_pde5 *pde5 = NULL;
2026 	struct lpfc_pde6 *pde6 = NULL;
2027 	dma_addr_t physaddr;
2028 	int i = 0, num_bde = 0, status;
2029 	int datadir = sc->sc_data_direction;
2030 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2031 	uint32_t rc;
2032 #endif
2033 	uint32_t checking = 1;
2034 	uint32_t reftag;
2035 	unsigned blksize;
2036 	uint8_t txop, rxop;
2037 
2038 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2039 	if (status)
2040 		goto out;
2041 
2042 	/* extract some info from the scsi command for pde*/
2043 	blksize = lpfc_cmd_blksize(sc);
2044 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2045 
2046 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2047 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2048 	if (rc) {
2049 		if (rc & BG_ERR_SWAP)
2050 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2051 		if (rc & BG_ERR_CHECK)
2052 			checking = 0;
2053 	}
2054 #endif
2055 
2056 	/* setup PDE5 with what we have */
2057 	pde5 = (struct lpfc_pde5 *) bpl;
2058 	memset(pde5, 0, sizeof(struct lpfc_pde5));
2059 	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2060 
2061 	/* Endianness conversion if necessary for PDE5 */
2062 	pde5->word0 = cpu_to_le32(pde5->word0);
2063 	pde5->reftag = cpu_to_le32(reftag);
2064 
2065 	/* advance bpl and increment bde count */
2066 	num_bde++;
2067 	bpl++;
2068 	pde6 = (struct lpfc_pde6 *) bpl;
2069 
2070 	/* setup PDE6 with the rest of the info */
2071 	memset(pde6, 0, sizeof(struct lpfc_pde6));
2072 	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2073 	bf_set(pde6_optx, pde6, txop);
2074 	bf_set(pde6_oprx, pde6, rxop);
2075 
2076 	/*
2077 	 * We only need to check the data on READs, for WRITEs
2078 	 * protection data is automatically generated, not checked.
2079 	 */
2080 	if (datadir == DMA_FROM_DEVICE) {
2081 		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2082 			bf_set(pde6_ce, pde6, checking);
2083 		else
2084 			bf_set(pde6_ce, pde6, 0);
2085 
2086 		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2087 			bf_set(pde6_re, pde6, checking);
2088 		else
2089 			bf_set(pde6_re, pde6, 0);
2090 	}
2091 	bf_set(pde6_ai, pde6, 1);
2092 	bf_set(pde6_ae, pde6, 0);
2093 	bf_set(pde6_apptagval, pde6, 0);
2094 
2095 	/* Endianness conversion if necessary for PDE6 */
2096 	pde6->word0 = cpu_to_le32(pde6->word0);
2097 	pde6->word1 = cpu_to_le32(pde6->word1);
2098 	pde6->word2 = cpu_to_le32(pde6->word2);
2099 
2100 	/* advance bpl and increment bde count */
2101 	num_bde++;
2102 	bpl++;
2103 
2104 	/* assumption: caller has already run dma_map_sg on command data */
2105 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2106 		physaddr = sg_dma_address(sgde);
2107 		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2108 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2109 		bpl->tus.f.bdeSize = sg_dma_len(sgde);
2110 		if (datadir == DMA_TO_DEVICE)
2111 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2112 		else
2113 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2114 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2115 		bpl++;
2116 		num_bde++;
2117 	}
2118 
2119 out:
2120 	return num_bde;
2121 }
2122 
2123 /**
2124  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2125  * @phba: The Hba for which this call is being executed.
2126  * @sc: pointer to scsi command we're working on
2127  * @bpl: pointer to buffer list for protection groups
2128  * @datacnt: number of segments of data that have been dma mapped
2129  * @protcnt: number of segment of protection data that have been dma mapped
2130  *
2131  * This function sets up BPL buffer list for protection groups of
2132  * type LPFC_PG_TYPE_DIF
2133  *
2134  * This is usually used when DIFs are in their own buffers,
2135  * separate from the data. The HBA can then by instructed
2136  * to place the DIFs in the outgoing stream.  For read operations,
2137  * The HBA could extract the DIFs and place it in DIF buffers.
2138  *
2139  * The buffer list for this type consists of one or more of the
2140  * protection groups described below:
2141  *                                    +-------------------------+
2142  *   start of first prot group  -->   |          PDE_5          |
2143  *                                    +-------------------------+
2144  *                                    |          PDE_6          |
2145  *                                    +-------------------------+
2146  *                                    |      PDE_7 (Prot BDE)   |
2147  *                                    +-------------------------+
2148  *                                    |        Data BDE         |
2149  *                                    +-------------------------+
2150  *                                    |more Data BDE's ... (opt)|
2151  *                                    +-------------------------+
2152  *   start of new  prot group  -->    |          PDE_5          |
2153  *                                    +-------------------------+
2154  *                                    |          ...            |
2155  *                                    +-------------------------+
2156  *
2157  * Note: It is assumed that both data and protection s/g buffers have been
2158  *       mapped for DMA
2159  *
2160  * Returns the number of BDEs added to the BPL.
2161  **/
2162 static int
2163 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2164 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
2165 {
2166 	struct scatterlist *sgde = NULL; /* s/g data entry */
2167 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2168 	struct lpfc_pde5 *pde5 = NULL;
2169 	struct lpfc_pde6 *pde6 = NULL;
2170 	struct lpfc_pde7 *pde7 = NULL;
2171 	dma_addr_t dataphysaddr, protphysaddr;
2172 	unsigned short curr_data = 0, curr_prot = 0;
2173 	unsigned int split_offset;
2174 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2175 	unsigned int protgrp_blks, protgrp_bytes;
2176 	unsigned int remainder, subtotal;
2177 	int status;
2178 	int datadir = sc->sc_data_direction;
2179 	unsigned char pgdone = 0, alldone = 0;
2180 	unsigned blksize;
2181 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2182 	uint32_t rc;
2183 #endif
2184 	uint32_t checking = 1;
2185 	uint32_t reftag;
2186 	uint8_t txop, rxop;
2187 	int num_bde = 0;
2188 
2189 	sgpe = scsi_prot_sglist(sc);
2190 	sgde = scsi_sglist(sc);
2191 
2192 	if (!sgpe || !sgde) {
2193 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2194 				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2195 				sgpe, sgde);
2196 		return 0;
2197 	}
2198 
2199 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2200 	if (status)
2201 		goto out;
2202 
2203 	/* extract some info from the scsi command */
2204 	blksize = lpfc_cmd_blksize(sc);
2205 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2206 
2207 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2208 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2209 	if (rc) {
2210 		if (rc & BG_ERR_SWAP)
2211 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2212 		if (rc & BG_ERR_CHECK)
2213 			checking = 0;
2214 	}
2215 #endif
2216 
2217 	split_offset = 0;
2218 	do {
2219 		/* Check to see if we ran out of space */
2220 		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2221 			return num_bde + 3;
2222 
2223 		/* setup PDE5 with what we have */
2224 		pde5 = (struct lpfc_pde5 *) bpl;
2225 		memset(pde5, 0, sizeof(struct lpfc_pde5));
2226 		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2227 
2228 		/* Endianness conversion if necessary for PDE5 */
2229 		pde5->word0 = cpu_to_le32(pde5->word0);
2230 		pde5->reftag = cpu_to_le32(reftag);
2231 
2232 		/* advance bpl and increment bde count */
2233 		num_bde++;
2234 		bpl++;
2235 		pde6 = (struct lpfc_pde6 *) bpl;
2236 
2237 		/* setup PDE6 with the rest of the info */
2238 		memset(pde6, 0, sizeof(struct lpfc_pde6));
2239 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2240 		bf_set(pde6_optx, pde6, txop);
2241 		bf_set(pde6_oprx, pde6, rxop);
2242 
2243 		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2244 			bf_set(pde6_ce, pde6, checking);
2245 		else
2246 			bf_set(pde6_ce, pde6, 0);
2247 
2248 		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2249 			bf_set(pde6_re, pde6, checking);
2250 		else
2251 			bf_set(pde6_re, pde6, 0);
2252 
2253 		bf_set(pde6_ai, pde6, 1);
2254 		bf_set(pde6_ae, pde6, 0);
2255 		bf_set(pde6_apptagval, pde6, 0);
2256 
2257 		/* Endianness conversion if necessary for PDE6 */
2258 		pde6->word0 = cpu_to_le32(pde6->word0);
2259 		pde6->word1 = cpu_to_le32(pde6->word1);
2260 		pde6->word2 = cpu_to_le32(pde6->word2);
2261 
2262 		/* advance bpl and increment bde count */
2263 		num_bde++;
2264 		bpl++;
2265 
2266 		/* setup the first BDE that points to protection buffer */
2267 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2268 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2269 
2270 		/* must be integer multiple of the DIF block length */
2271 		BUG_ON(protgroup_len % 8);
2272 
2273 		pde7 = (struct lpfc_pde7 *) bpl;
2274 		memset(pde7, 0, sizeof(struct lpfc_pde7));
2275 		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2276 
2277 		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2278 		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2279 
2280 		protgrp_blks = protgroup_len / 8;
2281 		protgrp_bytes = protgrp_blks * blksize;
2282 
2283 		/* check if this pde is crossing the 4K boundary; if so split */
2284 		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2285 			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2286 			protgroup_offset += protgroup_remainder;
2287 			protgrp_blks = protgroup_remainder / 8;
2288 			protgrp_bytes = protgrp_blks * blksize;
2289 		} else {
2290 			protgroup_offset = 0;
2291 			curr_prot++;
2292 		}
2293 
2294 		num_bde++;
2295 
2296 		/* setup BDE's for data blocks associated with DIF data */
2297 		pgdone = 0;
2298 		subtotal = 0; /* total bytes processed for current prot grp */
2299 		while (!pgdone) {
2300 			/* Check to see if we ran out of space */
2301 			if (num_bde >= phba->cfg_total_seg_cnt)
2302 				return num_bde + 1;
2303 
2304 			if (!sgde) {
2305 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2306 					"9065 BLKGRD:%s Invalid data segment\n",
2307 						__func__);
2308 				return 0;
2309 			}
2310 			bpl++;
2311 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2312 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2313 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2314 
2315 			remainder = sg_dma_len(sgde) - split_offset;
2316 
2317 			if ((subtotal + remainder) <= protgrp_bytes) {
2318 				/* we can use this whole buffer */
2319 				bpl->tus.f.bdeSize = remainder;
2320 				split_offset = 0;
2321 
2322 				if ((subtotal + remainder) == protgrp_bytes)
2323 					pgdone = 1;
2324 			} else {
2325 				/* must split this buffer with next prot grp */
2326 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2327 				split_offset += bpl->tus.f.bdeSize;
2328 			}
2329 
2330 			subtotal += bpl->tus.f.bdeSize;
2331 
2332 			if (datadir == DMA_TO_DEVICE)
2333 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2334 			else
2335 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2336 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2337 
2338 			num_bde++;
2339 			curr_data++;
2340 
2341 			if (split_offset)
2342 				break;
2343 
2344 			/* Move to the next s/g segment if possible */
2345 			sgde = sg_next(sgde);
2346 
2347 		}
2348 
2349 		if (protgroup_offset) {
2350 			/* update the reference tag */
2351 			reftag += protgrp_blks;
2352 			bpl++;
2353 			continue;
2354 		}
2355 
2356 		/* are we done ? */
2357 		if (curr_prot == protcnt) {
2358 			alldone = 1;
2359 		} else if (curr_prot < protcnt) {
2360 			/* advance to next prot buffer */
2361 			sgpe = sg_next(sgpe);
2362 			bpl++;
2363 
2364 			/* update the reference tag */
2365 			reftag += protgrp_blks;
2366 		} else {
2367 			/* if we're here, we have a bug */
2368 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2369 				"9054 BLKGRD: bug in %s\n", __func__);
2370 		}
2371 
2372 	} while (!alldone);
2373 out:
2374 
2375 	return num_bde;
2376 }
2377 
2378 /**
2379  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2380  * @phba: The Hba for which this call is being executed.
2381  * @sc: pointer to scsi command we're working on
2382  * @sgl: pointer to buffer list for protection groups
2383  * @datacnt: number of segments of data that have been dma mapped
2384  *
2385  * This function sets up SGL buffer list for protection groups of
2386  * type LPFC_PG_TYPE_NO_DIF
2387  *
2388  * This is usually used when the HBA is instructed to generate
2389  * DIFs and insert them into data stream (or strip DIF from
2390  * incoming data stream)
2391  *
2392  * The buffer list consists of just one protection group described
2393  * below:
2394  *                                +-------------------------+
2395  *   start of prot group  -->     |         DI_SEED         |
2396  *                                +-------------------------+
2397  *                                |         Data SGE        |
2398  *                                +-------------------------+
2399  *                                |more Data SGE's ... (opt)|
2400  *                                +-------------------------+
2401  *
2402  *
2403  * Note: Data s/g buffers have been dma mapped
2404  *
2405  * Returns the number of SGEs added to the SGL.
2406  **/
2407 static int
2408 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2409 		struct sli4_sge *sgl, int datasegcnt)
2410 {
2411 	struct scatterlist *sgde = NULL; /* s/g data entry */
2412 	struct sli4_sge_diseed *diseed = NULL;
2413 	dma_addr_t physaddr;
2414 	int i = 0, num_sge = 0, status;
2415 	uint32_t reftag;
2416 	unsigned blksize;
2417 	uint8_t txop, rxop;
2418 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2419 	uint32_t rc;
2420 #endif
2421 	uint32_t checking = 1;
2422 	uint32_t dma_len;
2423 	uint32_t dma_offset = 0;
2424 
2425 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2426 	if (status)
2427 		goto out;
2428 
2429 	/* extract some info from the scsi command for pde*/
2430 	blksize = lpfc_cmd_blksize(sc);
2431 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2432 
2433 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2434 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2435 	if (rc) {
2436 		if (rc & BG_ERR_SWAP)
2437 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2438 		if (rc & BG_ERR_CHECK)
2439 			checking = 0;
2440 	}
2441 #endif
2442 
2443 	/* setup DISEED with what we have */
2444 	diseed = (struct sli4_sge_diseed *) sgl;
2445 	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2446 	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2447 
2448 	/* Endianness conversion if necessary */
2449 	diseed->ref_tag = cpu_to_le32(reftag);
2450 	diseed->ref_tag_tran = diseed->ref_tag;
2451 
2452 	/*
2453 	 * We only need to check the data on READs, for WRITEs
2454 	 * protection data is automatically generated, not checked.
2455 	 */
2456 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2457 		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
2458 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2459 		else
2460 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2461 
2462 		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2463 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2464 		else
2465 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2466 	}
2467 
2468 	/* setup DISEED with the rest of the info */
2469 	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2470 	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2471 
2472 	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2473 	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2474 
2475 	/* Endianness conversion if necessary for DISEED */
2476 	diseed->word2 = cpu_to_le32(diseed->word2);
2477 	diseed->word3 = cpu_to_le32(diseed->word3);
2478 
2479 	/* advance bpl and increment sge count */
2480 	num_sge++;
2481 	sgl++;
2482 
2483 	/* assumption: caller has already run dma_map_sg on command data */
2484 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2485 		physaddr = sg_dma_address(sgde);
2486 		dma_len = sg_dma_len(sgde);
2487 		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2488 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2489 		if ((i + 1) == datasegcnt)
2490 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2491 		else
2492 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2493 		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2494 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2495 
2496 		sgl->sge_len = cpu_to_le32(dma_len);
2497 		dma_offset += dma_len;
2498 
2499 		sgl++;
2500 		num_sge++;
2501 	}
2502 
2503 out:
2504 	return num_sge;
2505 }
2506 
2507 /**
2508  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2509  * @phba: The Hba for which this call is being executed.
2510  * @sc: pointer to scsi command we're working on
2511  * @sgl: pointer to buffer list for protection groups
2512  * @datacnt: number of segments of data that have been dma mapped
2513  * @protcnt: number of segment of protection data that have been dma mapped
2514  *
2515  * This function sets up SGL buffer list for protection groups of
2516  * type LPFC_PG_TYPE_DIF
2517  *
2518  * This is usually used when DIFs are in their own buffers,
2519  * separate from the data. The HBA can then by instructed
2520  * to place the DIFs in the outgoing stream.  For read operations,
2521  * The HBA could extract the DIFs and place it in DIF buffers.
2522  *
2523  * The buffer list for this type consists of one or more of the
2524  * protection groups described below:
2525  *                                    +-------------------------+
2526  *   start of first prot group  -->   |         DISEED          |
2527  *                                    +-------------------------+
2528  *                                    |      DIF (Prot SGE)     |
2529  *                                    +-------------------------+
2530  *                                    |        Data SGE         |
2531  *                                    +-------------------------+
2532  *                                    |more Data SGE's ... (opt)|
2533  *                                    +-------------------------+
2534  *   start of new  prot group  -->    |         DISEED          |
2535  *                                    +-------------------------+
2536  *                                    |          ...            |
2537  *                                    +-------------------------+
2538  *
2539  * Note: It is assumed that both data and protection s/g buffers have been
2540  *       mapped for DMA
2541  *
2542  * Returns the number of SGEs added to the SGL.
2543  **/
2544 static int
2545 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2546 		struct sli4_sge *sgl, int datacnt, int protcnt)
2547 {
2548 	struct scatterlist *sgde = NULL; /* s/g data entry */
2549 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2550 	struct sli4_sge_diseed *diseed = NULL;
2551 	dma_addr_t dataphysaddr, protphysaddr;
2552 	unsigned short curr_data = 0, curr_prot = 0;
2553 	unsigned int split_offset;
2554 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2555 	unsigned int protgrp_blks, protgrp_bytes;
2556 	unsigned int remainder, subtotal;
2557 	int status;
2558 	unsigned char pgdone = 0, alldone = 0;
2559 	unsigned blksize;
2560 	uint32_t reftag;
2561 	uint8_t txop, rxop;
2562 	uint32_t dma_len;
2563 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2564 	uint32_t rc;
2565 #endif
2566 	uint32_t checking = 1;
2567 	uint32_t dma_offset = 0;
2568 	int num_sge = 0;
2569 
2570 	sgpe = scsi_prot_sglist(sc);
2571 	sgde = scsi_sglist(sc);
2572 
2573 	if (!sgpe || !sgde) {
2574 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2575 				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2576 				sgpe, sgde);
2577 		return 0;
2578 	}
2579 
2580 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2581 	if (status)
2582 		goto out;
2583 
2584 	/* extract some info from the scsi command */
2585 	blksize = lpfc_cmd_blksize(sc);
2586 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2587 
2588 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2589 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2590 	if (rc) {
2591 		if (rc & BG_ERR_SWAP)
2592 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2593 		if (rc & BG_ERR_CHECK)
2594 			checking = 0;
2595 	}
2596 #endif
2597 
2598 	split_offset = 0;
2599 	do {
2600 		/* Check to see if we ran out of space */
2601 		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2602 			return num_sge + 3;
2603 
2604 		/* setup DISEED with what we have */
2605 		diseed = (struct sli4_sge_diseed *) sgl;
2606 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2607 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2608 
2609 		/* Endianness conversion if necessary */
2610 		diseed->ref_tag = cpu_to_le32(reftag);
2611 		diseed->ref_tag_tran = diseed->ref_tag;
2612 
2613 		if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
2614 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2615 
2616 		} else {
2617 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2618 			/*
2619 			 * When in this mode, the hardware will replace
2620 			 * the guard tag from the host with a
2621 			 * newly generated good CRC for the wire.
2622 			 * Switch to raw mode here to avoid this
2623 			 * behavior. What the host sends gets put on the wire.
2624 			 */
2625 			if (txop == BG_OP_IN_CRC_OUT_CRC) {
2626 				txop = BG_OP_RAW_MODE;
2627 				rxop = BG_OP_RAW_MODE;
2628 			}
2629 		}
2630 
2631 
2632 		if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
2633 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2634 		else
2635 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2636 
2637 		/* setup DISEED with the rest of the info */
2638 		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2639 		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2640 
2641 		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2642 		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2643 
2644 		/* Endianness conversion if necessary for DISEED */
2645 		diseed->word2 = cpu_to_le32(diseed->word2);
2646 		diseed->word3 = cpu_to_le32(diseed->word3);
2647 
2648 		/* advance sgl and increment bde count */
2649 		num_sge++;
2650 		sgl++;
2651 
2652 		/* setup the first BDE that points to protection buffer */
2653 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2654 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2655 
2656 		/* must be integer multiple of the DIF block length */
2657 		BUG_ON(protgroup_len % 8);
2658 
2659 		/* Now setup DIF SGE */
2660 		sgl->word2 = 0;
2661 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2662 		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2663 		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2664 		sgl->word2 = cpu_to_le32(sgl->word2);
2665 
2666 		protgrp_blks = protgroup_len / 8;
2667 		protgrp_bytes = protgrp_blks * blksize;
2668 
2669 		/* check if DIF SGE is crossing the 4K boundary; if so split */
2670 		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2671 			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2672 			protgroup_offset += protgroup_remainder;
2673 			protgrp_blks = protgroup_remainder / 8;
2674 			protgrp_bytes = protgrp_blks * blksize;
2675 		} else {
2676 			protgroup_offset = 0;
2677 			curr_prot++;
2678 		}
2679 
2680 		num_sge++;
2681 
2682 		/* setup SGE's for data blocks associated with DIF data */
2683 		pgdone = 0;
2684 		subtotal = 0; /* total bytes processed for current prot grp */
2685 		while (!pgdone) {
2686 			/* Check to see if we ran out of space */
2687 			if (num_sge >= phba->cfg_total_seg_cnt)
2688 				return num_sge + 1;
2689 
2690 			if (!sgde) {
2691 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2692 					"9086 BLKGRD:%s Invalid data segment\n",
2693 						__func__);
2694 				return 0;
2695 			}
2696 			sgl++;
2697 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2698 
2699 			remainder = sg_dma_len(sgde) - split_offset;
2700 
2701 			if ((subtotal + remainder) <= protgrp_bytes) {
2702 				/* we can use this whole buffer */
2703 				dma_len = remainder;
2704 				split_offset = 0;
2705 
2706 				if ((subtotal + remainder) == protgrp_bytes)
2707 					pgdone = 1;
2708 			} else {
2709 				/* must split this buffer with next prot grp */
2710 				dma_len = protgrp_bytes - subtotal;
2711 				split_offset += dma_len;
2712 			}
2713 
2714 			subtotal += dma_len;
2715 
2716 			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2717 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2718 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2719 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2720 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2721 
2722 			sgl->sge_len = cpu_to_le32(dma_len);
2723 			dma_offset += dma_len;
2724 
2725 			num_sge++;
2726 			curr_data++;
2727 
2728 			if (split_offset)
2729 				break;
2730 
2731 			/* Move to the next s/g segment if possible */
2732 			sgde = sg_next(sgde);
2733 		}
2734 
2735 		if (protgroup_offset) {
2736 			/* update the reference tag */
2737 			reftag += protgrp_blks;
2738 			sgl++;
2739 			continue;
2740 		}
2741 
2742 		/* are we done ? */
2743 		if (curr_prot == protcnt) {
2744 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2745 			alldone = 1;
2746 		} else if (curr_prot < protcnt) {
2747 			/* advance to next prot buffer */
2748 			sgpe = sg_next(sgpe);
2749 			sgl++;
2750 
2751 			/* update the reference tag */
2752 			reftag += protgrp_blks;
2753 		} else {
2754 			/* if we're here, we have a bug */
2755 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2756 				"9085 BLKGRD: bug in %s\n", __func__);
2757 		}
2758 
2759 	} while (!alldone);
2760 
2761 out:
2762 
2763 	return num_sge;
2764 }
2765 
2766 /**
2767  * lpfc_prot_group_type - Get prtotection group type of SCSI command
2768  * @phba: The Hba for which this call is being executed.
2769  * @sc: pointer to scsi command we're working on
2770  *
2771  * Given a SCSI command that supports DIF, determine composition of protection
2772  * groups involved in setting up buffer lists
2773  *
2774  * Returns: Protection group type (with or without DIF)
2775  *
2776  **/
2777 static int
2778 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2779 {
2780 	int ret = LPFC_PG_TYPE_INVALID;
2781 	unsigned char op = scsi_get_prot_op(sc);
2782 
2783 	switch (op) {
2784 	case SCSI_PROT_READ_STRIP:
2785 	case SCSI_PROT_WRITE_INSERT:
2786 		ret = LPFC_PG_TYPE_NO_DIF;
2787 		break;
2788 	case SCSI_PROT_READ_INSERT:
2789 	case SCSI_PROT_WRITE_STRIP:
2790 	case SCSI_PROT_READ_PASS:
2791 	case SCSI_PROT_WRITE_PASS:
2792 		ret = LPFC_PG_TYPE_DIF_BUF;
2793 		break;
2794 	default:
2795 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2796 				"9021 Unsupported protection op:%d\n", op);
2797 		break;
2798 	}
2799 
2800 	return ret;
2801 }
2802 
2803 /**
2804  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2805  * @phba: The Hba for which this call is being executed.
2806  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2807  *
2808  * Adjust the data length to account for how much data
2809  * is actually on the wire.
2810  *
2811  * returns the adjusted data length
2812  **/
2813 static int
2814 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2815 		       struct lpfc_scsi_buf *lpfc_cmd)
2816 {
2817 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2818 	int fcpdl;
2819 
2820 	fcpdl = scsi_bufflen(sc);
2821 
2822 	/* Check if there is protection data on the wire */
2823 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2824 		/* Read */
2825 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2826 			return fcpdl;
2827 
2828 	} else {
2829 		/* Write */
2830 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2831 			return fcpdl;
2832 	}
2833 
2834 	/*
2835 	 * If we are in DIF Type 1 mode every data block has a 8 byte
2836 	 * DIF (trailer) attached to it. Must ajust FCP data length.
2837 	 */
2838 	if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
2839 		fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2840 
2841 	return fcpdl;
2842 }
2843 
2844 /**
2845  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2846  * @phba: The Hba for which this call is being executed.
2847  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2848  *
2849  * This is the protection/DIF aware version of
2850  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2851  * two functions eventually, but for now, it's here
2852  **/
2853 static int
2854 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2855 		struct lpfc_scsi_buf *lpfc_cmd)
2856 {
2857 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2858 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2859 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2860 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2861 	uint32_t num_bde = 0;
2862 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2863 	int prot_group_type = 0;
2864 	int fcpdl;
2865 
2866 	/*
2867 	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2868 	 *  fcp_rsp regions to the first data bde entry
2869 	 */
2870 	bpl += 2;
2871 	if (scsi_sg_count(scsi_cmnd)) {
2872 		/*
2873 		 * The driver stores the segment count returned from pci_map_sg
2874 		 * because this a count of dma-mappings used to map the use_sg
2875 		 * pages.  They are not guaranteed to be the same for those
2876 		 * architectures that implement an IOMMU.
2877 		 */
2878 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
2879 					scsi_sglist(scsi_cmnd),
2880 					scsi_sg_count(scsi_cmnd), datadir);
2881 		if (unlikely(!datasegcnt))
2882 			return 1;
2883 
2884 		lpfc_cmd->seg_cnt = datasegcnt;
2885 
2886 		/* First check if data segment count from SCSI Layer is good */
2887 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2888 			goto err;
2889 
2890 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2891 
2892 		switch (prot_group_type) {
2893 		case LPFC_PG_TYPE_NO_DIF:
2894 
2895 			/* Here we need to add a PDE5 and PDE6 to the count */
2896 			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2897 				goto err;
2898 
2899 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2900 					datasegcnt);
2901 			/* we should have 2 or more entries in buffer list */
2902 			if (num_bde < 2)
2903 				goto err;
2904 			break;
2905 
2906 		case LPFC_PG_TYPE_DIF_BUF:
2907 			/*
2908 			 * This type indicates that protection buffers are
2909 			 * passed to the driver, so that needs to be prepared
2910 			 * for DMA
2911 			 */
2912 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
2913 					scsi_prot_sglist(scsi_cmnd),
2914 					scsi_prot_sg_count(scsi_cmnd), datadir);
2915 			if (unlikely(!protsegcnt)) {
2916 				scsi_dma_unmap(scsi_cmnd);
2917 				return 1;
2918 			}
2919 
2920 			lpfc_cmd->prot_seg_cnt = protsegcnt;
2921 
2922 			/*
2923 			 * There is a minimun of 4 BPLs used for every
2924 			 * protection data segment.
2925 			 */
2926 			if ((lpfc_cmd->prot_seg_cnt * 4) >
2927 			    (phba->cfg_total_seg_cnt - 2))
2928 				goto err;
2929 
2930 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2931 					datasegcnt, protsegcnt);
2932 			/* we should have 3 or more entries in buffer list */
2933 			if ((num_bde < 3) ||
2934 			    (num_bde > phba->cfg_total_seg_cnt))
2935 				goto err;
2936 			break;
2937 
2938 		case LPFC_PG_TYPE_INVALID:
2939 		default:
2940 			scsi_dma_unmap(scsi_cmnd);
2941 			lpfc_cmd->seg_cnt = 0;
2942 
2943 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2944 					"9022 Unexpected protection group %i\n",
2945 					prot_group_type);
2946 			return 1;
2947 		}
2948 	}
2949 
2950 	/*
2951 	 * Finish initializing those IOCB fields that are dependent on the
2952 	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2953 	 * reinitialized since all iocb memory resources are used many times
2954 	 * for transmit, receive, and continuation bpl's.
2955 	 */
2956 	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2957 	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2958 	iocb_cmd->ulpBdeCount = 1;
2959 	iocb_cmd->ulpLe = 1;
2960 
2961 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2962 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2963 
2964 	/*
2965 	 * Due to difference in data length between DIF/non-DIF paths,
2966 	 * we need to set word 4 of IOCB here
2967 	 */
2968 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2969 
2970 	return 0;
2971 err:
2972 	if (lpfc_cmd->seg_cnt)
2973 		scsi_dma_unmap(scsi_cmnd);
2974 	if (lpfc_cmd->prot_seg_cnt)
2975 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2976 			     scsi_prot_sg_count(scsi_cmnd),
2977 			     scsi_cmnd->sc_data_direction);
2978 
2979 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2980 			"9023 Cannot setup S/G List for HBA"
2981 			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2982 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2983 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2984 			prot_group_type, num_bde);
2985 
2986 	lpfc_cmd->seg_cnt = 0;
2987 	lpfc_cmd->prot_seg_cnt = 0;
2988 	return 1;
2989 }
2990 
2991 /*
2992  * This function calcuates the T10 DIF guard tag
2993  * on the specified data using a CRC algorithmn
2994  * using crc_t10dif.
2995  */
2996 uint16_t
2997 lpfc_bg_crc(uint8_t *data, int count)
2998 {
2999 	uint16_t crc = 0;
3000 	uint16_t x;
3001 
3002 	crc = crc_t10dif(data, count);
3003 	x = cpu_to_be16(crc);
3004 	return x;
3005 }
3006 
3007 /*
3008  * This function calcuates the T10 DIF guard tag
3009  * on the specified data using a CSUM algorithmn
3010  * using ip_compute_csum.
3011  */
3012 uint16_t
3013 lpfc_bg_csum(uint8_t *data, int count)
3014 {
3015 	uint16_t ret;
3016 
3017 	ret = ip_compute_csum(data, count);
3018 	return ret;
3019 }
3020 
3021 /*
3022  * This function examines the protection data to try to determine
3023  * what type of T10-DIF error occurred.
3024  */
3025 void
3026 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3027 {
3028 	struct scatterlist *sgpe; /* s/g prot entry */
3029 	struct scatterlist *sgde; /* s/g data entry */
3030 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3031 	struct scsi_dif_tuple *src = NULL;
3032 	uint8_t *data_src = NULL;
3033 	uint16_t guard_tag, guard_type;
3034 	uint16_t start_app_tag, app_tag;
3035 	uint32_t start_ref_tag, ref_tag;
3036 	int prot, protsegcnt;
3037 	int err_type, len, data_len;
3038 	int chk_ref, chk_app, chk_guard;
3039 	uint16_t sum;
3040 	unsigned blksize;
3041 
3042 	err_type = BGS_GUARD_ERR_MASK;
3043 	sum = 0;
3044 	guard_tag = 0;
3045 
3046 	/* First check to see if there is protection data to examine */
3047 	prot = scsi_get_prot_op(cmd);
3048 	if ((prot == SCSI_PROT_READ_STRIP) ||
3049 	    (prot == SCSI_PROT_WRITE_INSERT) ||
3050 	    (prot == SCSI_PROT_NORMAL))
3051 		goto out;
3052 
3053 	/* Currently the driver just supports ref_tag and guard_tag checking */
3054 	chk_ref = 1;
3055 	chk_app = 0;
3056 	chk_guard = 0;
3057 
3058 	/* Setup a ptr to the protection data provided by the SCSI host */
3059 	sgpe = scsi_prot_sglist(cmd);
3060 	protsegcnt = lpfc_cmd->prot_seg_cnt;
3061 
3062 	if (sgpe && protsegcnt) {
3063 
3064 		/*
3065 		 * We will only try to verify guard tag if the segment
3066 		 * data length is a multiple of the blksize.
3067 		 */
3068 		sgde = scsi_sglist(cmd);
3069 		blksize = lpfc_cmd_blksize(cmd);
3070 		data_src = (uint8_t *)sg_virt(sgde);
3071 		data_len = sgde->length;
3072 		if ((data_len & (blksize - 1)) == 0)
3073 			chk_guard = 1;
3074 		guard_type = scsi_host_get_guard(cmd->device->host);
3075 
3076 		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
3077 		start_app_tag = src->app_tag;
3078 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3079 		len = sgpe->length;
3080 		while (src && protsegcnt) {
3081 			while (len) {
3082 
3083 				/*
3084 				 * First check to see if a protection data
3085 				 * check is valid
3086 				 */
3087 				if ((src->ref_tag == 0xffffffff) ||
3088 				    (src->app_tag == 0xffff)) {
3089 					start_ref_tag++;
3090 					goto skipit;
3091 				}
3092 
3093 				/* App Tag checking */
3094 				app_tag = src->app_tag;
3095 				if (chk_app && (app_tag != start_app_tag)) {
3096 					err_type = BGS_APPTAG_ERR_MASK;
3097 					goto out;
3098 				}
3099 
3100 				/* Reference Tag checking */
3101 				ref_tag = be32_to_cpu(src->ref_tag);
3102 				if (chk_ref && (ref_tag != start_ref_tag)) {
3103 					err_type = BGS_REFTAG_ERR_MASK;
3104 					goto out;
3105 				}
3106 				start_ref_tag++;
3107 
3108 				/* Guard Tag checking */
3109 				if (chk_guard) {
3110 					guard_tag = src->guard_tag;
3111 					if (guard_type == SHOST_DIX_GUARD_IP)
3112 						sum = lpfc_bg_csum(data_src,
3113 								   blksize);
3114 					else
3115 						sum = lpfc_bg_crc(data_src,
3116 								  blksize);
3117 					if ((guard_tag != sum)) {
3118 						err_type = BGS_GUARD_ERR_MASK;
3119 						goto out;
3120 					}
3121 				}
3122 skipit:
3123 				len -= sizeof(struct scsi_dif_tuple);
3124 				if (len < 0)
3125 					len = 0;
3126 				src++;
3127 
3128 				data_src += blksize;
3129 				data_len -= blksize;
3130 
3131 				/*
3132 				 * Are we at the end of the Data segment?
3133 				 * The data segment is only used for Guard
3134 				 * tag checking.
3135 				 */
3136 				if (chk_guard && (data_len == 0)) {
3137 					chk_guard = 0;
3138 					sgde = sg_next(sgde);
3139 					if (!sgde)
3140 						goto out;
3141 
3142 					data_src = (uint8_t *)sg_virt(sgde);
3143 					data_len = sgde->length;
3144 					if ((data_len & (blksize - 1)) == 0)
3145 						chk_guard = 1;
3146 				}
3147 			}
3148 
3149 			/* Goto the next Protection data segment */
3150 			sgpe = sg_next(sgpe);
3151 			if (sgpe) {
3152 				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3153 				len = sgpe->length;
3154 			} else {
3155 				src = NULL;
3156 			}
3157 			protsegcnt--;
3158 		}
3159 	}
3160 out:
3161 	if (err_type == BGS_GUARD_ERR_MASK) {
3162 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3163 					0x10, 0x1);
3164 		cmd->result = DRIVER_SENSE << 24
3165 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3166 		phba->bg_guard_err_cnt++;
3167 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3168 				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3169 				(unsigned long)scsi_get_lba(cmd),
3170 				sum, guard_tag);
3171 
3172 	} else if (err_type == BGS_REFTAG_ERR_MASK) {
3173 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3174 					0x10, 0x3);
3175 		cmd->result = DRIVER_SENSE << 24
3176 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3177 
3178 		phba->bg_reftag_err_cnt++;
3179 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3180 				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3181 				(unsigned long)scsi_get_lba(cmd),
3182 				ref_tag, start_ref_tag);
3183 
3184 	} else if (err_type == BGS_APPTAG_ERR_MASK) {
3185 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3186 					0x10, 0x2);
3187 		cmd->result = DRIVER_SENSE << 24
3188 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3189 
3190 		phba->bg_apptag_err_cnt++;
3191 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3192 				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3193 				(unsigned long)scsi_get_lba(cmd),
3194 				app_tag, start_app_tag);
3195 	}
3196 }
3197 
3198 
3199 /*
3200  * This function checks for BlockGuard errors detected by
3201  * the HBA.  In case of errors, the ASC/ASCQ fields in the
3202  * sense buffer will be set accordingly, paired with
3203  * ILLEGAL_REQUEST to signal to the kernel that the HBA
3204  * detected corruption.
3205  *
3206  * Returns:
3207  *  0 - No error found
3208  *  1 - BlockGuard error found
3209  * -1 - Internal error (bad profile, ...etc)
3210  */
3211 static int
3212 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
3213 			struct lpfc_iocbq *pIocbOut)
3214 {
3215 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3216 	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3217 	int ret = 0;
3218 	uint32_t bghm = bgf->bghm;
3219 	uint32_t bgstat = bgf->bgstat;
3220 	uint64_t failing_sector = 0;
3221 
3222 	spin_lock(&_dump_buf_lock);
3223 	if (!_dump_buf_done) {
3224 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
3225 			" Data for %u blocks to debugfs\n",
3226 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3227 		lpfc_debug_save_data(phba, cmd);
3228 
3229 		/* If we have a prot sgl, save the DIF buffer */
3230 		if (lpfc_prot_group_type(phba, cmd) ==
3231 				LPFC_PG_TYPE_DIF_BUF) {
3232 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
3233 				"Saving DIF for %u blocks to debugfs\n",
3234 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3235 			lpfc_debug_save_dif(phba, cmd);
3236 		}
3237 
3238 		_dump_buf_done = 1;
3239 	}
3240 	spin_unlock(&_dump_buf_lock);
3241 
3242 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
3243 		cmd->result = ScsiResult(DID_ERROR, 0);
3244 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3245 				"9072 BLKGRD: Invalid BG Profile in cmd"
3246 				" 0x%x lba 0x%llx blk cnt 0x%x "
3247 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3248 				(unsigned long long)scsi_get_lba(cmd),
3249 				blk_rq_sectors(cmd->request), bgstat, bghm);
3250 		ret = (-1);
3251 		goto out;
3252 	}
3253 
3254 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3255 		cmd->result = ScsiResult(DID_ERROR, 0);
3256 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3257 				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
3258 				" 0x%x lba 0x%llx blk cnt 0x%x "
3259 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3260 				(unsigned long long)scsi_get_lba(cmd),
3261 				blk_rq_sectors(cmd->request), bgstat, bghm);
3262 		ret = (-1);
3263 		goto out;
3264 	}
3265 
3266 	if (lpfc_bgs_get_guard_err(bgstat)) {
3267 		ret = 1;
3268 
3269 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3270 				0x10, 0x1);
3271 		cmd->result = DRIVER_SENSE << 24
3272 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3273 		phba->bg_guard_err_cnt++;
3274 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3275 				"9055 BLKGRD: Guard Tag error in cmd"
3276 				" 0x%x lba 0x%llx blk cnt 0x%x "
3277 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3278 				(unsigned long long)scsi_get_lba(cmd),
3279 				blk_rq_sectors(cmd->request), bgstat, bghm);
3280 	}
3281 
3282 	if (lpfc_bgs_get_reftag_err(bgstat)) {
3283 		ret = 1;
3284 
3285 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3286 				0x10, 0x3);
3287 		cmd->result = DRIVER_SENSE << 24
3288 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3289 
3290 		phba->bg_reftag_err_cnt++;
3291 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3292 				"9056 BLKGRD: Ref Tag error in cmd"
3293 				" 0x%x lba 0x%llx blk cnt 0x%x "
3294 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3295 				(unsigned long long)scsi_get_lba(cmd),
3296 				blk_rq_sectors(cmd->request), bgstat, bghm);
3297 	}
3298 
3299 	if (lpfc_bgs_get_apptag_err(bgstat)) {
3300 		ret = 1;
3301 
3302 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3303 				0x10, 0x2);
3304 		cmd->result = DRIVER_SENSE << 24
3305 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3306 
3307 		phba->bg_apptag_err_cnt++;
3308 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3309 				"9061 BLKGRD: App Tag error in cmd"
3310 				" 0x%x lba 0x%llx blk cnt 0x%x "
3311 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3312 				(unsigned long long)scsi_get_lba(cmd),
3313 				blk_rq_sectors(cmd->request), bgstat, bghm);
3314 	}
3315 
3316 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3317 		/*
3318 		 * setup sense data descriptor 0 per SPC-4 as an information
3319 		 * field, and put the failing LBA in it.
3320 		 * This code assumes there was also a guard/app/ref tag error
3321 		 * indication.
3322 		 */
3323 		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3324 		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3325 		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3326 		cmd->sense_buffer[10] = 0x80; /* Validity bit */
3327 
3328 		/* bghm is a "on the wire" FC frame based count */
3329 		switch (scsi_get_prot_op(cmd)) {
3330 		case SCSI_PROT_READ_INSERT:
3331 		case SCSI_PROT_WRITE_STRIP:
3332 			bghm /= cmd->device->sector_size;
3333 			break;
3334 		case SCSI_PROT_READ_STRIP:
3335 		case SCSI_PROT_WRITE_INSERT:
3336 		case SCSI_PROT_READ_PASS:
3337 		case SCSI_PROT_WRITE_PASS:
3338 			bghm /= (cmd->device->sector_size +
3339 				sizeof(struct scsi_dif_tuple));
3340 			break;
3341 		}
3342 
3343 		failing_sector = scsi_get_lba(cmd);
3344 		failing_sector += bghm;
3345 
3346 		/* Descriptor Information */
3347 		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3348 	}
3349 
3350 	if (!ret) {
3351 		/* No error was reported - problem in FW? */
3352 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3353 				"9057 BLKGRD: Unknown error in cmd"
3354 				" 0x%x lba 0x%llx blk cnt 0x%x "
3355 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3356 				(unsigned long long)scsi_get_lba(cmd),
3357 				blk_rq_sectors(cmd->request), bgstat, bghm);
3358 
3359 		/* Calcuate what type of error it was */
3360 		lpfc_calc_bg_err(phba, lpfc_cmd);
3361 	}
3362 out:
3363 	return ret;
3364 }
3365 
3366 /**
3367  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3368  * @phba: The Hba for which this call is being executed.
3369  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3370  *
3371  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3372  * field of @lpfc_cmd for device with SLI-4 interface spec.
3373  *
3374  * Return codes:
3375  *	1 - Error
3376  *	0 - Success
3377  **/
3378 static int
3379 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3380 {
3381 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3382 	struct scatterlist *sgel = NULL;
3383 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3384 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
3385 	struct sli4_sge *first_data_sgl;
3386 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3387 	dma_addr_t physaddr;
3388 	uint32_t num_bde = 0;
3389 	uint32_t dma_len;
3390 	uint32_t dma_offset = 0;
3391 	int nseg;
3392 	struct ulp_bde64 *bde;
3393 
3394 	/*
3395 	 * There are three possibilities here - use scatter-gather segment, use
3396 	 * the single mapping, or neither.  Start the lpfc command prep by
3397 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3398 	 * data bde entry.
3399 	 */
3400 	if (scsi_sg_count(scsi_cmnd)) {
3401 		/*
3402 		 * The driver stores the segment count returned from pci_map_sg
3403 		 * because this a count of dma-mappings used to map the use_sg
3404 		 * pages.  They are not guaranteed to be the same for those
3405 		 * architectures that implement an IOMMU.
3406 		 */
3407 
3408 		nseg = scsi_dma_map(scsi_cmnd);
3409 		if (unlikely(!nseg))
3410 			return 1;
3411 		sgl += 1;
3412 		/* clear the last flag in the fcp_rsp map entry */
3413 		sgl->word2 = le32_to_cpu(sgl->word2);
3414 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3415 		sgl->word2 = cpu_to_le32(sgl->word2);
3416 		sgl += 1;
3417 		first_data_sgl = sgl;
3418 		lpfc_cmd->seg_cnt = nseg;
3419 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3420 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3421 				" %s: Too many sg segments from "
3422 				"dma_map_sg.  Config %d, seg_cnt %d\n",
3423 				__func__, phba->cfg_sg_seg_cnt,
3424 			       lpfc_cmd->seg_cnt);
3425 			lpfc_cmd->seg_cnt = 0;
3426 			scsi_dma_unmap(scsi_cmnd);
3427 			return 1;
3428 		}
3429 
3430 		/*
3431 		 * The driver established a maximum scatter-gather segment count
3432 		 * during probe that limits the number of sg elements in any
3433 		 * single scsi command.  Just run through the seg_cnt and format
3434 		 * the sge's.
3435 		 * When using SLI-3 the driver will try to fit all the BDEs into
3436 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
3437 		 * does for SLI-2 mode.
3438 		 */
3439 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3440 			physaddr = sg_dma_address(sgel);
3441 			dma_len = sg_dma_len(sgel);
3442 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3443 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3444 			sgl->word2 = le32_to_cpu(sgl->word2);
3445 			if ((num_bde + 1) == nseg)
3446 				bf_set(lpfc_sli4_sge_last, sgl, 1);
3447 			else
3448 				bf_set(lpfc_sli4_sge_last, sgl, 0);
3449 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3450 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3451 			sgl->word2 = cpu_to_le32(sgl->word2);
3452 			sgl->sge_len = cpu_to_le32(dma_len);
3453 			dma_offset += dma_len;
3454 			sgl++;
3455 		}
3456 		/* setup the performance hint (first data BDE) if enabled */
3457 		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3458 			bde = (struct ulp_bde64 *)
3459 					&(iocb_cmd->unsli3.sli3Words[5]);
3460 			bde->addrLow = first_data_sgl->addr_lo;
3461 			bde->addrHigh = first_data_sgl->addr_hi;
3462 			bde->tus.f.bdeSize =
3463 					le32_to_cpu(first_data_sgl->sge_len);
3464 			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3465 			bde->tus.w = cpu_to_le32(bde->tus.w);
3466 		}
3467 	} else {
3468 		sgl += 1;
3469 		/* clear the last flag in the fcp_rsp map entry */
3470 		sgl->word2 = le32_to_cpu(sgl->word2);
3471 		bf_set(lpfc_sli4_sge_last, sgl, 1);
3472 		sgl->word2 = cpu_to_le32(sgl->word2);
3473 	}
3474 
3475 	/*
3476 	 * Finish initializing those IOCB fields that are dependent on the
3477 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3478 	 * explicitly reinitialized.
3479 	 * all iocb memory resources are reused.
3480 	 */
3481 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3482 
3483 	/*
3484 	 * Due to difference in data length between DIF/non-DIF paths,
3485 	 * we need to set word 4 of IOCB here
3486 	 */
3487 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3488 	return 0;
3489 }
3490 
3491 /**
3492  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3493  * @phba: The Hba for which this call is being executed.
3494  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3495  *
3496  * This is the protection/DIF aware version of
3497  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3498  * two functions eventually, but for now, it's here
3499  **/
3500 static int
3501 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3502 		struct lpfc_scsi_buf *lpfc_cmd)
3503 {
3504 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3505 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3506 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3507 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3508 	uint32_t num_sge = 0;
3509 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3510 	int prot_group_type = 0;
3511 	int fcpdl;
3512 
3513 	/*
3514 	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3515 	 *  fcp_rsp regions to the first data sge entry
3516 	 */
3517 	if (scsi_sg_count(scsi_cmnd)) {
3518 		/*
3519 		 * The driver stores the segment count returned from pci_map_sg
3520 		 * because this a count of dma-mappings used to map the use_sg
3521 		 * pages.  They are not guaranteed to be the same for those
3522 		 * architectures that implement an IOMMU.
3523 		 */
3524 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3525 					scsi_sglist(scsi_cmnd),
3526 					scsi_sg_count(scsi_cmnd), datadir);
3527 		if (unlikely(!datasegcnt))
3528 			return 1;
3529 
3530 		sgl += 1;
3531 		/* clear the last flag in the fcp_rsp map entry */
3532 		sgl->word2 = le32_to_cpu(sgl->word2);
3533 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3534 		sgl->word2 = cpu_to_le32(sgl->word2);
3535 
3536 		sgl += 1;
3537 		lpfc_cmd->seg_cnt = datasegcnt;
3538 
3539 		/* First check if data segment count from SCSI Layer is good */
3540 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3541 			goto err;
3542 
3543 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3544 
3545 		switch (prot_group_type) {
3546 		case LPFC_PG_TYPE_NO_DIF:
3547 			/* Here we need to add a DISEED to the count */
3548 			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3549 				goto err;
3550 
3551 			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3552 					datasegcnt);
3553 
3554 			/* we should have 2 or more entries in buffer list */
3555 			if (num_sge < 2)
3556 				goto err;
3557 			break;
3558 
3559 		case LPFC_PG_TYPE_DIF_BUF:
3560 			/*
3561 			 * This type indicates that protection buffers are
3562 			 * passed to the driver, so that needs to be prepared
3563 			 * for DMA
3564 			 */
3565 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3566 					scsi_prot_sglist(scsi_cmnd),
3567 					scsi_prot_sg_count(scsi_cmnd), datadir);
3568 			if (unlikely(!protsegcnt)) {
3569 				scsi_dma_unmap(scsi_cmnd);
3570 				return 1;
3571 			}
3572 
3573 			lpfc_cmd->prot_seg_cnt = protsegcnt;
3574 			/*
3575 			 * There is a minimun of 3 SGEs used for every
3576 			 * protection data segment.
3577 			 */
3578 			if ((lpfc_cmd->prot_seg_cnt * 3) >
3579 			    (phba->cfg_total_seg_cnt - 2))
3580 				goto err;
3581 
3582 			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3583 					datasegcnt, protsegcnt);
3584 
3585 			/* we should have 3 or more entries in buffer list */
3586 			if ((num_sge < 3) ||
3587 			    (num_sge > phba->cfg_total_seg_cnt))
3588 				goto err;
3589 			break;
3590 
3591 		case LPFC_PG_TYPE_INVALID:
3592 		default:
3593 			scsi_dma_unmap(scsi_cmnd);
3594 			lpfc_cmd->seg_cnt = 0;
3595 
3596 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3597 					"9083 Unexpected protection group %i\n",
3598 					prot_group_type);
3599 			return 1;
3600 		}
3601 	}
3602 
3603 	switch (scsi_get_prot_op(scsi_cmnd)) {
3604 	case SCSI_PROT_WRITE_STRIP:
3605 	case SCSI_PROT_READ_STRIP:
3606 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3607 		break;
3608 	case SCSI_PROT_WRITE_INSERT:
3609 	case SCSI_PROT_READ_INSERT:
3610 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3611 		break;
3612 	case SCSI_PROT_WRITE_PASS:
3613 	case SCSI_PROT_READ_PASS:
3614 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3615 		break;
3616 	}
3617 
3618 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3619 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3620 
3621 	/*
3622 	 * Due to difference in data length between DIF/non-DIF paths,
3623 	 * we need to set word 4 of IOCB here
3624 	 */
3625 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3626 
3627 	return 0;
3628 err:
3629 	if (lpfc_cmd->seg_cnt)
3630 		scsi_dma_unmap(scsi_cmnd);
3631 	if (lpfc_cmd->prot_seg_cnt)
3632 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3633 			     scsi_prot_sg_count(scsi_cmnd),
3634 			     scsi_cmnd->sc_data_direction);
3635 
3636 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3637 			"9084 Cannot setup S/G List for HBA"
3638 			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3639 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3640 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3641 			prot_group_type, num_sge);
3642 
3643 	lpfc_cmd->seg_cnt = 0;
3644 	lpfc_cmd->prot_seg_cnt = 0;
3645 	return 1;
3646 }
3647 
3648 /**
3649  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3650  * @phba: The Hba for which this call is being executed.
3651  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3652  *
3653  * This routine wraps the actual DMA mapping function pointer from the
3654  * lpfc_hba struct.
3655  *
3656  * Return codes:
3657  *	1 - Error
3658  *	0 - Success
3659  **/
3660 static inline int
3661 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3662 {
3663 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3664 }
3665 
3666 /**
3667  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3668  * using BlockGuard.
3669  * @phba: The Hba for which this call is being executed.
3670  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3671  *
3672  * This routine wraps the actual DMA mapping function pointer from the
3673  * lpfc_hba struct.
3674  *
3675  * Return codes:
3676  *	1 - Error
3677  *	0 - Success
3678  **/
3679 static inline int
3680 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3681 {
3682 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3683 }
3684 
3685 /**
3686  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3687  * @phba: Pointer to hba context object.
3688  * @vport: Pointer to vport object.
3689  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3690  * @rsp_iocb: Pointer to response iocb object which reported error.
3691  *
3692  * This function posts an event when there is a SCSI command reporting
3693  * error from the scsi device.
3694  **/
3695 static void
3696 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3697 		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3698 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3699 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3700 	uint32_t resp_info = fcprsp->rspStatus2;
3701 	uint32_t scsi_status = fcprsp->rspStatus3;
3702 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3703 	struct lpfc_fast_path_event *fast_path_evt = NULL;
3704 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3705 	unsigned long flags;
3706 
3707 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3708 		return;
3709 
3710 	/* If there is queuefull or busy condition send a scsi event */
3711 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3712 		(cmnd->result == SAM_STAT_BUSY)) {
3713 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3714 		if (!fast_path_evt)
3715 			return;
3716 		fast_path_evt->un.scsi_evt.event_type =
3717 			FC_REG_SCSI_EVENT;
3718 		fast_path_evt->un.scsi_evt.subcategory =
3719 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3720 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3721 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3722 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3723 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3724 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3725 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3726 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3727 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3728 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3729 		if (!fast_path_evt)
3730 			return;
3731 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3732 			FC_REG_SCSI_EVENT;
3733 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3734 			LPFC_EVENT_CHECK_COND;
3735 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
3736 			cmnd->device->lun;
3737 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3738 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3739 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3740 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3741 		fast_path_evt->un.check_cond_evt.sense_key =
3742 			cmnd->sense_buffer[2] & 0xf;
3743 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3744 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3745 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3746 		     fcpi_parm &&
3747 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3748 			((scsi_status == SAM_STAT_GOOD) &&
3749 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
3750 		/*
3751 		 * If status is good or resid does not match with fcp_param and
3752 		 * there is valid fcpi_parm, then there is a read_check error
3753 		 */
3754 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3755 		if (!fast_path_evt)
3756 			return;
3757 		fast_path_evt->un.read_check_error.header.event_type =
3758 			FC_REG_FABRIC_EVENT;
3759 		fast_path_evt->un.read_check_error.header.subcategory =
3760 			LPFC_EVENT_FCPRDCHKERR;
3761 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3762 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3763 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3764 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3765 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3766 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3767 		fast_path_evt->un.read_check_error.fcpiparam =
3768 			fcpi_parm;
3769 	} else
3770 		return;
3771 
3772 	fast_path_evt->vport = vport;
3773 	spin_lock_irqsave(&phba->hbalock, flags);
3774 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3775 	spin_unlock_irqrestore(&phba->hbalock, flags);
3776 	lpfc_worker_wake_up(phba);
3777 	return;
3778 }
3779 
3780 /**
3781  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3782  * @phba: The HBA for which this call is being executed.
3783  * @psb: The scsi buffer which is going to be un-mapped.
3784  *
3785  * This routine does DMA un-mapping of scatter gather list of scsi command
3786  * field of @lpfc_cmd for device with SLI-3 interface spec.
3787  **/
3788 static void
3789 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3790 {
3791 	/*
3792 	 * There are only two special cases to consider.  (1) the scsi command
3793 	 * requested scatter-gather usage or (2) the scsi command allocated
3794 	 * a request buffer, but did not request use_sg.  There is a third
3795 	 * case, but it does not require resource deallocation.
3796 	 */
3797 	if (psb->seg_cnt > 0)
3798 		scsi_dma_unmap(psb->pCmd);
3799 	if (psb->prot_seg_cnt > 0)
3800 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3801 				scsi_prot_sg_count(psb->pCmd),
3802 				psb->pCmd->sc_data_direction);
3803 }
3804 
3805 /**
3806  * lpfc_handler_fcp_err - FCP response handler
3807  * @vport: The virtual port for which this call is being executed.
3808  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3809  * @rsp_iocb: The response IOCB which contains FCP error.
3810  *
3811  * This routine is called to process response IOCB with status field
3812  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3813  * based upon SCSI and FCP error.
3814  **/
3815 static void
3816 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3817 		    struct lpfc_iocbq *rsp_iocb)
3818 {
3819 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3820 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3821 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3822 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3823 	uint32_t resp_info = fcprsp->rspStatus2;
3824 	uint32_t scsi_status = fcprsp->rspStatus3;
3825 	uint32_t *lp;
3826 	uint32_t host_status = DID_OK;
3827 	uint32_t rsplen = 0;
3828 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3829 
3830 
3831 	/*
3832 	 *  If this is a task management command, there is no
3833 	 *  scsi packet associated with this lpfc_cmd.  The driver
3834 	 *  consumes it.
3835 	 */
3836 	if (fcpcmd->fcpCntl2) {
3837 		scsi_status = 0;
3838 		goto out;
3839 	}
3840 
3841 	if (resp_info & RSP_LEN_VALID) {
3842 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3843 		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3844 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3845 				 "2719 Invalid response length: "
3846 				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3847 				 cmnd->device->id,
3848 				 cmnd->device->lun, cmnd->cmnd[0],
3849 				 rsplen);
3850 			host_status = DID_ERROR;
3851 			goto out;
3852 		}
3853 		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3854 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3855 				 "2757 Protocol failure detected during "
3856 				 "processing of FCP I/O op: "
3857 				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3858 				 cmnd->device->id,
3859 				 cmnd->device->lun, cmnd->cmnd[0],
3860 				 fcprsp->rspInfo3);
3861 			host_status = DID_ERROR;
3862 			goto out;
3863 		}
3864 	}
3865 
3866 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3867 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3868 		if (snslen > SCSI_SENSE_BUFFERSIZE)
3869 			snslen = SCSI_SENSE_BUFFERSIZE;
3870 
3871 		if (resp_info & RSP_LEN_VALID)
3872 		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
3873 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3874 	}
3875 	lp = (uint32_t *)cmnd->sense_buffer;
3876 
3877 	/* special handling for under run conditions */
3878 	if (!scsi_status && (resp_info & RESID_UNDER)) {
3879 		/* don't log under runs if fcp set... */
3880 		if (vport->cfg_log_verbose & LOG_FCP)
3881 			logit = LOG_FCP_ERROR;
3882 		/* unless operator says so */
3883 		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3884 			logit = LOG_FCP_UNDER;
3885 	}
3886 
3887 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3888 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3889 			 "Data: x%x x%x x%x x%x x%x\n",
3890 			 cmnd->cmnd[0], scsi_status,
3891 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3892 			 be32_to_cpu(fcprsp->rspResId),
3893 			 be32_to_cpu(fcprsp->rspSnsLen),
3894 			 be32_to_cpu(fcprsp->rspRspLen),
3895 			 fcprsp->rspInfo3);
3896 
3897 	scsi_set_resid(cmnd, 0);
3898 	if (resp_info & RESID_UNDER) {
3899 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3900 
3901 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3902 				 "9025 FCP Read Underrun, expected %d, "
3903 				 "residual %d Data: x%x x%x x%x\n",
3904 				 be32_to_cpu(fcpcmd->fcpDl),
3905 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3906 				 cmnd->underflow);
3907 
3908 		/*
3909 		 * If there is an under run check if under run reported by
3910 		 * storage array is same as the under run reported by HBA.
3911 		 * If this is not same, there is a dropped frame.
3912 		 */
3913 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3914 			fcpi_parm &&
3915 			(scsi_get_resid(cmnd) != fcpi_parm)) {
3916 			lpfc_printf_vlog(vport, KERN_WARNING,
3917 					 LOG_FCP | LOG_FCP_ERROR,
3918 					 "9026 FCP Read Check Error "
3919 					 "and Underrun Data: x%x x%x x%x x%x\n",
3920 					 be32_to_cpu(fcpcmd->fcpDl),
3921 					 scsi_get_resid(cmnd), fcpi_parm,
3922 					 cmnd->cmnd[0]);
3923 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3924 			host_status = DID_ERROR;
3925 		}
3926 		/*
3927 		 * The cmnd->underflow is the minimum number of bytes that must
3928 		 * be transferred for this command.  Provided a sense condition
3929 		 * is not present, make sure the actual amount transferred is at
3930 		 * least the underflow value or fail.
3931 		 */
3932 		if (!(resp_info & SNS_LEN_VALID) &&
3933 		    (scsi_status == SAM_STAT_GOOD) &&
3934 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3935 		     < cmnd->underflow)) {
3936 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3937 					 "9027 FCP command x%x residual "
3938 					 "underrun converted to error "
3939 					 "Data: x%x x%x x%x\n",
3940 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
3941 					 scsi_get_resid(cmnd), cmnd->underflow);
3942 			host_status = DID_ERROR;
3943 		}
3944 	} else if (resp_info & RESID_OVER) {
3945 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3946 				 "9028 FCP command x%x residual overrun error. "
3947 				 "Data: x%x x%x\n", cmnd->cmnd[0],
3948 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3949 		host_status = DID_ERROR;
3950 
3951 	/*
3952 	 * Check SLI validation that all the transfer was actually done
3953 	 * (fcpi_parm should be zero).
3954 	 */
3955 	} else if (fcpi_parm) {
3956 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3957 				 "9029 FCP Data Transfer Check Error: "
3958 				 "x%x x%x x%x x%x x%x\n",
3959 				 be32_to_cpu(fcpcmd->fcpDl),
3960 				 be32_to_cpu(fcprsp->rspResId),
3961 				 fcpi_parm, cmnd->cmnd[0], scsi_status);
3962 		switch (scsi_status) {
3963 		case SAM_STAT_GOOD:
3964 		case SAM_STAT_CHECK_CONDITION:
3965 			/* Fabric dropped a data frame. Fail any successful
3966 			 * command in which we detected dropped frames.
3967 			 * A status of good or some check conditions could
3968 			 * be considered a successful command.
3969 			 */
3970 			host_status = DID_ERROR;
3971 			break;
3972 		}
3973 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3974 	}
3975 
3976  out:
3977 	cmnd->result = ScsiResult(host_status, scsi_status);
3978 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3979 }
3980 
3981 /**
3982  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3983  * @phba: The Hba for which this call is being executed.
3984  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3985  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3986  *
3987  * This routine assigns scsi command result by looking into response IOCB
3988  * status field appropriately. This routine handles QUEUE FULL condition as
3989  * well by ramping down device queue depth.
3990  **/
3991 static void
3992 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3993 			struct lpfc_iocbq *pIocbOut)
3994 {
3995 	struct lpfc_scsi_buf *lpfc_cmd =
3996 		(struct lpfc_scsi_buf *) pIocbIn->context1;
3997 	struct lpfc_vport      *vport = pIocbIn->vport;
3998 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3999 	struct lpfc_nodelist *pnode = rdata->pnode;
4000 	struct scsi_cmnd *cmd;
4001 	int result;
4002 	struct scsi_device *tmp_sdev;
4003 	int depth;
4004 	unsigned long flags;
4005 	struct lpfc_fast_path_event *fast_path_evt;
4006 	struct Scsi_Host *shost;
4007 	uint32_t queue_depth, scsi_id;
4008 	uint32_t logit = LOG_FCP;
4009 
4010 	/* Sanity check on return of outstanding command */
4011 	if (!(lpfc_cmd->pCmd))
4012 		return;
4013 	cmd = lpfc_cmd->pCmd;
4014 	shost = cmd->device->host;
4015 
4016 	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4017 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4018 	/* pick up SLI4 exhange busy status from HBA */
4019 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
4020 
4021 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4022 	if (lpfc_cmd->prot_data_type) {
4023 		struct scsi_dif_tuple *src = NULL;
4024 
4025 		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4026 		/*
4027 		 * Used to restore any changes to protection
4028 		 * data for error injection.
4029 		 */
4030 		switch (lpfc_cmd->prot_data_type) {
4031 		case LPFC_INJERR_REFTAG:
4032 			src->ref_tag =
4033 				lpfc_cmd->prot_data;
4034 			break;
4035 		case LPFC_INJERR_APPTAG:
4036 			src->app_tag =
4037 				(uint16_t)lpfc_cmd->prot_data;
4038 			break;
4039 		case LPFC_INJERR_GUARD:
4040 			src->guard_tag =
4041 				(uint16_t)lpfc_cmd->prot_data;
4042 			break;
4043 		default:
4044 			break;
4045 		}
4046 
4047 		lpfc_cmd->prot_data = 0;
4048 		lpfc_cmd->prot_data_type = 0;
4049 		lpfc_cmd->prot_data_segment = NULL;
4050 	}
4051 #endif
4052 	if (pnode && NLP_CHK_NODE_ACT(pnode))
4053 		atomic_dec(&pnode->cmd_pending);
4054 
4055 	if (lpfc_cmd->status) {
4056 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4057 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
4058 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4059 		else if (lpfc_cmd->status >= IOSTAT_CNT)
4060 			lpfc_cmd->status = IOSTAT_DEFAULT;
4061 		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4062 		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4063 		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4064 		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4065 			logit = 0;
4066 		else
4067 			logit = LOG_FCP | LOG_FCP_UNDER;
4068 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4069 			 "9030 FCP cmd x%x failed <%d/%d> "
4070 			 "status: x%x result: x%x "
4071 			 "sid: x%x did: x%x oxid: x%x "
4072 			 "Data: x%x x%x\n",
4073 			 cmd->cmnd[0],
4074 			 cmd->device ? cmd->device->id : 0xffff,
4075 			 cmd->device ? cmd->device->lun : 0xffff,
4076 			 lpfc_cmd->status, lpfc_cmd->result,
4077 			 vport->fc_myDID, pnode->nlp_DID,
4078 			 phba->sli_rev == LPFC_SLI_REV4 ?
4079 			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4080 			 pIocbOut->iocb.ulpContext,
4081 			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4082 
4083 		switch (lpfc_cmd->status) {
4084 		case IOSTAT_FCP_RSP_ERROR:
4085 			/* Call FCP RSP handler to determine result */
4086 			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
4087 			break;
4088 		case IOSTAT_NPORT_BSY:
4089 		case IOSTAT_FABRIC_BSY:
4090 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
4091 			fast_path_evt = lpfc_alloc_fast_evt(phba);
4092 			if (!fast_path_evt)
4093 				break;
4094 			fast_path_evt->un.fabric_evt.event_type =
4095 				FC_REG_FABRIC_EVENT;
4096 			fast_path_evt->un.fabric_evt.subcategory =
4097 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4098 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4099 			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4100 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4101 					&pnode->nlp_portname,
4102 					sizeof(struct lpfc_name));
4103 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4104 					&pnode->nlp_nodename,
4105 					sizeof(struct lpfc_name));
4106 			}
4107 			fast_path_evt->vport = vport;
4108 			fast_path_evt->work_evt.evt =
4109 				LPFC_EVT_FASTPATH_MGMT_EVT;
4110 			spin_lock_irqsave(&phba->hbalock, flags);
4111 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
4112 				&phba->work_list);
4113 			spin_unlock_irqrestore(&phba->hbalock, flags);
4114 			lpfc_worker_wake_up(phba);
4115 			break;
4116 		case IOSTAT_LOCAL_REJECT:
4117 		case IOSTAT_REMOTE_STOP:
4118 			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4119 			    lpfc_cmd->result ==
4120 					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4121 			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4122 			    lpfc_cmd->result ==
4123 					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4124 				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
4125 				break;
4126 			}
4127 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4128 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4129 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4130 			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4131 				cmd->result = ScsiResult(DID_REQUEUE, 0);
4132 				break;
4133 			}
4134 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4135 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4136 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4137 				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4138 					/*
4139 					 * This is a response for a BG enabled
4140 					 * cmd. Parse BG error
4141 					 */
4142 					lpfc_parse_bg_err(phba, lpfc_cmd,
4143 							pIocbOut);
4144 					break;
4145 				} else {
4146 					lpfc_printf_vlog(vport, KERN_WARNING,
4147 							LOG_BG,
4148 							"9031 non-zero BGSTAT "
4149 							"on unprotected cmd\n");
4150 				}
4151 			}
4152 			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4153 				&& (phba->sli_rev == LPFC_SLI_REV4)
4154 				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
4155 				/* This IO was aborted by the target, we don't
4156 				 * know the rxid and because we did not send the
4157 				 * ABTS we cannot generate and RRQ.
4158 				 */
4159 				lpfc_set_rrq_active(phba, pnode,
4160 					lpfc_cmd->cur_iocbq.sli4_lxritag,
4161 					0, 0);
4162 			}
4163 		/* else: fall through */
4164 		default:
4165 			cmd->result = ScsiResult(DID_ERROR, 0);
4166 			break;
4167 		}
4168 
4169 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
4170 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4171 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
4172 						 SAM_STAT_BUSY);
4173 	} else
4174 		cmd->result = ScsiResult(DID_OK, 0);
4175 
4176 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4177 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4178 
4179 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4180 				 "0710 Iodone <%d/%d> cmd %p, error "
4181 				 "x%x SNS x%x x%x Data: x%x x%x\n",
4182 				 cmd->device->id, cmd->device->lun, cmd,
4183 				 cmd->result, *lp, *(lp + 3), cmd->retries,
4184 				 scsi_get_resid(cmd));
4185 	}
4186 
4187 	lpfc_update_stats(phba, lpfc_cmd);
4188 	result = cmd->result;
4189 	if (vport->cfg_max_scsicmpl_time &&
4190 	   time_after(jiffies, lpfc_cmd->start_time +
4191 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4192 		spin_lock_irqsave(shost->host_lock, flags);
4193 		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4194 			if (pnode->cmd_qdepth >
4195 				atomic_read(&pnode->cmd_pending) &&
4196 				(atomic_read(&pnode->cmd_pending) >
4197 				LPFC_MIN_TGT_QDEPTH) &&
4198 				((cmd->cmnd[0] == READ_10) ||
4199 				(cmd->cmnd[0] == WRITE_10)))
4200 				pnode->cmd_qdepth =
4201 					atomic_read(&pnode->cmd_pending);
4202 
4203 			pnode->last_change_time = jiffies;
4204 		}
4205 		spin_unlock_irqrestore(shost->host_lock, flags);
4206 	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4207 		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
4208 		   time_after(jiffies, pnode->last_change_time +
4209 			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
4210 			spin_lock_irqsave(shost->host_lock, flags);
4211 			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
4212 				/ 100;
4213 			depth = depth ? depth : 1;
4214 			pnode->cmd_qdepth += depth;
4215 			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
4216 				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
4217 			pnode->last_change_time = jiffies;
4218 			spin_unlock_irqrestore(shost->host_lock, flags);
4219 		}
4220 	}
4221 
4222 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4223 
4224 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4225 	queue_depth = cmd->device->queue_depth;
4226 	scsi_id = cmd->device->id;
4227 	cmd->scsi_done(cmd);
4228 
4229 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4230 		spin_lock_irqsave(&phba->hbalock, flags);
4231 		lpfc_cmd->pCmd = NULL;
4232 		spin_unlock_irqrestore(&phba->hbalock, flags);
4233 
4234 		/*
4235 		 * If there is a thread waiting for command completion
4236 		 * wake up the thread.
4237 		 */
4238 		spin_lock_irqsave(shost->host_lock, flags);
4239 		if (lpfc_cmd->waitq)
4240 			wake_up(lpfc_cmd->waitq);
4241 		spin_unlock_irqrestore(shost->host_lock, flags);
4242 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4243 		return;
4244 	}
4245 
4246 	if (!result)
4247 		lpfc_rampup_queue_depth(vport, queue_depth);
4248 
4249 	/*
4250 	 * Check for queue full.  If the lun is reporting queue full, then
4251 	 * back off the lun queue depth to prevent target overloads.
4252 	 */
4253 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
4254 	    NLP_CHK_NODE_ACT(pnode)) {
4255 		shost_for_each_device(tmp_sdev, shost) {
4256 			if (tmp_sdev->id != scsi_id)
4257 				continue;
4258 			depth = scsi_track_queue_full(tmp_sdev,
4259 						      tmp_sdev->queue_depth-1);
4260 			if (depth <= 0)
4261 				continue;
4262 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4263 					 "0711 detected queue full - lun queue "
4264 					 "depth adjusted to %d.\n", depth);
4265 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
4266 							       pnode,
4267 							       tmp_sdev->lun,
4268 							       depth+1, depth);
4269 		}
4270 	}
4271 
4272 	spin_lock_irqsave(&phba->hbalock, flags);
4273 	lpfc_cmd->pCmd = NULL;
4274 	spin_unlock_irqrestore(&phba->hbalock, flags);
4275 
4276 	/*
4277 	 * If there is a thread waiting for command completion
4278 	 * wake up the thread.
4279 	 */
4280 	spin_lock_irqsave(shost->host_lock, flags);
4281 	if (lpfc_cmd->waitq)
4282 		wake_up(lpfc_cmd->waitq);
4283 	spin_unlock_irqrestore(shost->host_lock, flags);
4284 
4285 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4286 }
4287 
4288 /**
4289  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4290  * @data: A pointer to the immediate command data portion of the IOCB.
4291  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4292  *
4293  * The routine copies the entire FCP command from @fcp_cmnd to @data while
4294  * byte swapping the data to big endian format for transmission on the wire.
4295  **/
4296 static void
4297 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4298 {
4299 	int i, j;
4300 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4301 	     i += sizeof(uint32_t), j++) {
4302 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4303 	}
4304 }
4305 
4306 /**
4307  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
4308  * @vport: The virtual port for which this call is being executed.
4309  * @lpfc_cmd: The scsi command which needs to send.
4310  * @pnode: Pointer to lpfc_nodelist.
4311  *
4312  * This routine initializes fcp_cmnd and iocb data structure from scsi command
4313  * to transfer for device with SLI3 interface spec.
4314  **/
4315 static void
4316 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4317 		    struct lpfc_nodelist *pnode)
4318 {
4319 	struct lpfc_hba *phba = vport->phba;
4320 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4321 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4322 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4323 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4324 	int datadir = scsi_cmnd->sc_data_direction;
4325 	char tag[2];
4326 	uint8_t *ptr;
4327 	bool sli4;
4328 
4329 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4330 		return;
4331 
4332 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4333 	/* clear task management bits */
4334 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4335 
4336 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4337 			&lpfc_cmd->fcp_cmnd->fcp_lun);
4338 
4339 	ptr = &fcp_cmnd->fcpCdb[0];
4340 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4341 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4342 		ptr += scsi_cmnd->cmd_len;
4343 		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4344 	}
4345 
4346 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
4347 		switch (tag[0]) {
4348 		case HEAD_OF_QUEUE_TAG:
4349 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
4350 			break;
4351 		case ORDERED_QUEUE_TAG:
4352 			fcp_cmnd->fcpCntl1 = ORDERED_Q;
4353 			break;
4354 		default:
4355 			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4356 			break;
4357 		}
4358 	} else
4359 		fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4360 
4361 	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4362 
4363 	/*
4364 	 * There are three possibilities here - use scatter-gather segment, use
4365 	 * the single mapping, or neither.  Start the lpfc command prep by
4366 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4367 	 * data bde entry.
4368 	 */
4369 	if (scsi_sg_count(scsi_cmnd)) {
4370 		if (datadir == DMA_TO_DEVICE) {
4371 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4372 			if (sli4)
4373 				iocb_cmd->ulpPU = PARM_READ_CHECK;
4374 			else {
4375 				iocb_cmd->un.fcpi.fcpi_parm = 0;
4376 				iocb_cmd->ulpPU = 0;
4377 			}
4378 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4379 			phba->fc4OutputRequests++;
4380 		} else {
4381 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4382 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4383 			fcp_cmnd->fcpCntl3 = READ_DATA;
4384 			phba->fc4InputRequests++;
4385 		}
4386 	} else {
4387 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4388 		iocb_cmd->un.fcpi.fcpi_parm = 0;
4389 		iocb_cmd->ulpPU = 0;
4390 		fcp_cmnd->fcpCntl3 = 0;
4391 		phba->fc4ControlRequests++;
4392 	}
4393 	if (phba->sli_rev == 3 &&
4394 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4395 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4396 	/*
4397 	 * Finish initializing those IOCB fields that are independent
4398 	 * of the scsi_cmnd request_buffer
4399 	 */
4400 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
4401 	if (sli4)
4402 		piocbq->iocb.ulpContext =
4403 		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4404 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4405 		piocbq->iocb.ulpFCP2Rcvy = 1;
4406 	else
4407 		piocbq->iocb.ulpFCP2Rcvy = 0;
4408 
4409 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4410 	piocbq->context1  = lpfc_cmd;
4411 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4412 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4413 	piocbq->vport = vport;
4414 }
4415 
4416 /**
4417  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4418  * @vport: The virtual port for which this call is being executed.
4419  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4420  * @lun: Logical unit number.
4421  * @task_mgmt_cmd: SCSI task management command.
4422  *
4423  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4424  * for device with SLI-3 interface spec.
4425  *
4426  * Return codes:
4427  *   0 - Error
4428  *   1 - Success
4429  **/
4430 static int
4431 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4432 			     struct lpfc_scsi_buf *lpfc_cmd,
4433 			     unsigned int lun,
4434 			     uint8_t task_mgmt_cmd)
4435 {
4436 	struct lpfc_iocbq *piocbq;
4437 	IOCB_t *piocb;
4438 	struct fcp_cmnd *fcp_cmnd;
4439 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4440 	struct lpfc_nodelist *ndlp = rdata->pnode;
4441 
4442 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4443 	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4444 		return 0;
4445 
4446 	piocbq = &(lpfc_cmd->cur_iocbq);
4447 	piocbq->vport = vport;
4448 
4449 	piocb = &piocbq->iocb;
4450 
4451 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4452 	/* Clear out any old data in the FCP command area */
4453 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4454 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4455 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4456 	if (vport->phba->sli_rev == 3 &&
4457 	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4458 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4459 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4460 	piocb->ulpContext = ndlp->nlp_rpi;
4461 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4462 		piocb->ulpContext =
4463 		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4464 	}
4465 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4466 		piocb->ulpFCP2Rcvy = 1;
4467 	}
4468 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4469 
4470 	/* ulpTimeout is only one byte */
4471 	if (lpfc_cmd->timeout > 0xff) {
4472 		/*
4473 		 * Do not timeout the command at the firmware level.
4474 		 * The driver will provide the timeout mechanism.
4475 		 */
4476 		piocb->ulpTimeout = 0;
4477 	} else
4478 		piocb->ulpTimeout = lpfc_cmd->timeout;
4479 
4480 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
4481 		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4482 
4483 	return 1;
4484 }
4485 
4486 /**
4487  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4488  * @phba: The hba struct for which this call is being executed.
4489  * @dev_grp: The HBA PCI-Device group number.
4490  *
4491  * This routine sets up the SCSI interface API function jump table in @phba
4492  * struct.
4493  * Returns: 0 - success, -ENODEV - failure.
4494  **/
4495 int
4496 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4497 {
4498 
4499 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4500 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4501 
4502 	switch (dev_grp) {
4503 	case LPFC_PCI_DEV_LP:
4504 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4505 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4506 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4507 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4508 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4509 		break;
4510 	case LPFC_PCI_DEV_OC:
4511 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4512 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4513 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4514 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4515 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4516 		break;
4517 	default:
4518 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4519 				"1418 Invalid HBA PCI-device group: 0x%x\n",
4520 				dev_grp);
4521 		return -ENODEV;
4522 		break;
4523 	}
4524 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4525 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4526 	return 0;
4527 }
4528 
4529 /**
4530  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
4531  * @phba: The Hba for which this call is being executed.
4532  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4533  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4534  *
4535  * This routine is IOCB completion routine for device reset and target reset
4536  * routine. This routine release scsi buffer associated with lpfc_cmd.
4537  **/
4538 static void
4539 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4540 			struct lpfc_iocbq *cmdiocbq,
4541 			struct lpfc_iocbq *rspiocbq)
4542 {
4543 	struct lpfc_scsi_buf *lpfc_cmd =
4544 		(struct lpfc_scsi_buf *) cmdiocbq->context1;
4545 	if (lpfc_cmd)
4546 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4547 	return;
4548 }
4549 
4550 /**
4551  * lpfc_info - Info entry point of scsi_host_template data structure
4552  * @host: The scsi host for which this call is being executed.
4553  *
4554  * This routine provides module information about hba.
4555  *
4556  * Reutrn code:
4557  *   Pointer to char - Success.
4558  **/
4559 const char *
4560 lpfc_info(struct Scsi_Host *host)
4561 {
4562 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4563 	struct lpfc_hba   *phba = vport->phba;
4564 	int len, link_speed = 0;
4565 	static char  lpfcinfobuf[384];
4566 
4567 	memset(lpfcinfobuf,0,384);
4568 	if (phba && phba->pcidev){
4569 		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4570 		len = strlen(lpfcinfobuf);
4571 		snprintf(lpfcinfobuf + len,
4572 			384-len,
4573 			" on PCI bus %02x device %02x irq %d",
4574 			phba->pcidev->bus->number,
4575 			phba->pcidev->devfn,
4576 			phba->pcidev->irq);
4577 		len = strlen(lpfcinfobuf);
4578 		if (phba->Port[0]) {
4579 			snprintf(lpfcinfobuf + len,
4580 				 384-len,
4581 				 " port %s",
4582 				 phba->Port);
4583 		}
4584 		len = strlen(lpfcinfobuf);
4585 		if (phba->sli_rev <= LPFC_SLI_REV3) {
4586 			link_speed = lpfc_sli_port_speed_get(phba);
4587 		} else {
4588 			if (phba->sli4_hba.link_state.logical_speed)
4589 				link_speed =
4590 				      phba->sli4_hba.link_state.logical_speed;
4591 			else
4592 				link_speed = phba->sli4_hba.link_state.speed;
4593 		}
4594 		if (link_speed != 0)
4595 			snprintf(lpfcinfobuf + len, 384-len,
4596 				 " Logical Link Speed: %d Mbps", link_speed);
4597 	}
4598 	return lpfcinfobuf;
4599 }
4600 
4601 /**
4602  * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
4603  * @phba: The Hba for which this call is being executed.
4604  *
4605  * This routine modifies fcp_poll_timer  field of @phba by cfg_poll_tmo.
4606  * The default value of cfg_poll_tmo is 10 milliseconds.
4607  **/
4608 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4609 {
4610 	unsigned long  poll_tmo_expires =
4611 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4612 
4613 	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
4614 		mod_timer(&phba->fcp_poll_timer,
4615 			  poll_tmo_expires);
4616 }
4617 
4618 /**
4619  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4620  * @phba: The Hba for which this call is being executed.
4621  *
4622  * This routine starts the fcp_poll_timer of @phba.
4623  **/
4624 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4625 {
4626 	lpfc_poll_rearm_timer(phba);
4627 }
4628 
4629 /**
4630  * lpfc_poll_timeout - Restart polling timer
4631  * @ptr: Map to lpfc_hba data structure pointer.
4632  *
4633  * This routine restarts fcp_poll timer, when FCP ring  polling is enable
4634  * and FCP Ring interrupt is disable.
4635  **/
4636 
4637 void lpfc_poll_timeout(unsigned long ptr)
4638 {
4639 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4640 
4641 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4642 		lpfc_sli_handle_fast_ring_event(phba,
4643 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4644 
4645 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4646 			lpfc_poll_rearm_timer(phba);
4647 	}
4648 }
4649 
4650 /**
4651  * lpfc_queuecommand - scsi_host_template queuecommand entry point
4652  * @cmnd: Pointer to scsi_cmnd data structure.
4653  * @done: Pointer to done routine.
4654  *
4655  * Driver registers this routine to scsi midlayer to submit a @cmd to process.
4656  * This routine prepares an IOCB from scsi command and provides to firmware.
4657  * The @done callback is invoked after driver finished processing the command.
4658  *
4659  * Return value :
4660  *   0 - Success
4661  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4662  **/
4663 static int
4664 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4665 {
4666 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4667 	struct lpfc_hba   *phba = vport->phba;
4668 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4669 	struct lpfc_nodelist *ndlp;
4670 	struct lpfc_scsi_buf *lpfc_cmd;
4671 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4672 	int err;
4673 
4674 	err = fc_remote_port_chkready(rport);
4675 	if (err) {
4676 		cmnd->result = err;
4677 		goto out_fail_command;
4678 	}
4679 	ndlp = rdata->pnode;
4680 
4681 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4682 		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4683 
4684 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4685 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4686 				" op:%02x str=%s without registering for"
4687 				" BlockGuard - Rejecting command\n",
4688 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4689 				dif_op_str[scsi_get_prot_op(cmnd)]);
4690 		goto out_fail_command;
4691 	}
4692 
4693 	/*
4694 	 * Catch race where our node has transitioned, but the
4695 	 * transport is still transitioning.
4696 	 */
4697 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4698 		goto out_tgt_busy;
4699 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4700 		goto out_tgt_busy;
4701 
4702 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4703 	if (lpfc_cmd == NULL) {
4704 		lpfc_rampdown_queue_depth(phba);
4705 
4706 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4707 				 "0707 driver's buffer pool is empty, "
4708 				 "IO busied\n");
4709 		goto out_host_busy;
4710 	}
4711 
4712 	/*
4713 	 * Store the midlayer's command structure for the completion phase
4714 	 * and complete the command initialization.
4715 	 */
4716 	lpfc_cmd->pCmd  = cmnd;
4717 	lpfc_cmd->rdata = rdata;
4718 	lpfc_cmd->timeout = 0;
4719 	lpfc_cmd->start_time = jiffies;
4720 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4721 
4722 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4723 		if (vport->phba->cfg_enable_bg) {
4724 			lpfc_printf_vlog(vport,
4725 					 KERN_INFO, LOG_SCSI_CMD,
4726 					 "9033 BLKGRD: rcvd %s cmd:x%x "
4727 					 "sector x%llx cnt %u pt %x\n",
4728 					 dif_op_str[scsi_get_prot_op(cmnd)],
4729 					 cmnd->cmnd[0],
4730 					 (unsigned long long)scsi_get_lba(cmnd),
4731 					 blk_rq_sectors(cmnd->request),
4732 					 (cmnd->cmnd[1]>>5));
4733 		}
4734 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4735 	} else {
4736 		if (vport->phba->cfg_enable_bg) {
4737 			lpfc_printf_vlog(vport,
4738 					 KERN_INFO, LOG_SCSI_CMD,
4739 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4740 					 "x%x sector x%llx cnt %u pt %x\n",
4741 					 cmnd->cmnd[0],
4742 					 (unsigned long long)scsi_get_lba(cmnd),
4743 					 blk_rq_sectors(cmnd->request),
4744 					 (cmnd->cmnd[1]>>5));
4745 		}
4746 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4747 	}
4748 
4749 	if (err)
4750 		goto out_host_busy_free_buf;
4751 
4752 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4753 
4754 	atomic_inc(&ndlp->cmd_pending);
4755 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4756 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4757 	if (err) {
4758 		atomic_dec(&ndlp->cmd_pending);
4759 		goto out_host_busy_free_buf;
4760 	}
4761 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4762 		lpfc_sli_handle_fast_ring_event(phba,
4763 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4764 
4765 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4766 			lpfc_poll_rearm_timer(phba);
4767 	}
4768 
4769 	return 0;
4770 
4771  out_host_busy_free_buf:
4772 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4773 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4774  out_host_busy:
4775 	return SCSI_MLQUEUE_HOST_BUSY;
4776 
4777  out_tgt_busy:
4778 	return SCSI_MLQUEUE_TARGET_BUSY;
4779 
4780  out_fail_command:
4781 	cmnd->scsi_done(cmnd);
4782 	return 0;
4783 }
4784 
4785 
4786 /**
4787  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4788  * @cmnd: Pointer to scsi_cmnd data structure.
4789  *
4790  * This routine aborts @cmnd pending in base driver.
4791  *
4792  * Return code :
4793  *   0x2003 - Error
4794  *   0x2002 - Success
4795  **/
4796 static int
4797 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4798 {
4799 	struct Scsi_Host  *shost = cmnd->device->host;
4800 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4801 	struct lpfc_hba   *phba = vport->phba;
4802 	struct lpfc_iocbq *iocb;
4803 	struct lpfc_iocbq *abtsiocb;
4804 	struct lpfc_scsi_buf *lpfc_cmd;
4805 	IOCB_t *cmd, *icmd;
4806 	int ret = SUCCESS, status = 0;
4807 	unsigned long flags;
4808 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4809 
4810 	status = fc_block_scsi_eh(cmnd);
4811 	if (status != 0 && status != SUCCESS)
4812 		return status;
4813 
4814 	spin_lock_irqsave(&phba->hbalock, flags);
4815 	/* driver queued commands are in process of being flushed */
4816 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4817 		spin_unlock_irqrestore(&phba->hbalock, flags);
4818 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4819 			"3168 SCSI Layer abort requested I/O has been "
4820 			"flushed by LLD.\n");
4821 		return FAILED;
4822 	}
4823 
4824 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4825 	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4826 		spin_unlock_irqrestore(&phba->hbalock, flags);
4827 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4828 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4829 			 "x%x ID %d LUN %d\n",
4830 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
4831 		return SUCCESS;
4832 	}
4833 
4834 	iocb = &lpfc_cmd->cur_iocbq;
4835 	/* the command is in process of being cancelled */
4836 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4837 		spin_unlock_irqrestore(&phba->hbalock, flags);
4838 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4839 			"3169 SCSI Layer abort requested I/O has been "
4840 			"cancelled by LLD.\n");
4841 		return FAILED;
4842 	}
4843 	/*
4844 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
4845 	 * points to a different SCSI command, then the driver has
4846 	 * already completed this command, but the midlayer did not
4847 	 * see the completion before the eh fired. Just return SUCCESS.
4848 	 */
4849 	if (lpfc_cmd->pCmd != cmnd) {
4850 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4851 			"3170 SCSI Layer abort requested I/O has been "
4852 			"completed by LLD.\n");
4853 		goto out_unlock;
4854 	}
4855 
4856 	BUG_ON(iocb->context1 != lpfc_cmd);
4857 
4858 	abtsiocb = __lpfc_sli_get_iocbq(phba);
4859 	if (abtsiocb == NULL) {
4860 		ret = FAILED;
4861 		goto out_unlock;
4862 	}
4863 
4864 	/*
4865 	 * The scsi command can not be in txq and it is in flight because the
4866 	 * pCmd is still pointig at the SCSI command we have to abort. There
4867 	 * is no need to search the txcmplq. Just send an abort to the FW.
4868 	 */
4869 
4870 	cmd = &iocb->iocb;
4871 	icmd = &abtsiocb->iocb;
4872 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4873 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
4874 	if (phba->sli_rev == LPFC_SLI_REV4)
4875 		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4876 	else
4877 		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4878 
4879 	icmd->ulpLe = 1;
4880 	icmd->ulpClass = cmd->ulpClass;
4881 
4882 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
4883 	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4884 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4885 
4886 	if (lpfc_is_link_up(phba))
4887 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
4888 	else
4889 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4890 
4891 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4892 	abtsiocb->vport = vport;
4893 	/* no longer need the lock after this point */
4894 	spin_unlock_irqrestore(&phba->hbalock, flags);
4895 
4896 	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4897 	    IOCB_ERROR) {
4898 		lpfc_sli_release_iocbq(phba, abtsiocb);
4899 		ret = FAILED;
4900 		goto out;
4901 	}
4902 
4903 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4904 		lpfc_sli_handle_fast_ring_event(phba,
4905 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4906 
4907 	lpfc_cmd->waitq = &waitq;
4908 	/* Wait for abort to complete */
4909 	wait_event_timeout(waitq,
4910 			  (lpfc_cmd->pCmd != cmnd),
4911 			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4912 	lpfc_cmd->waitq = NULL;
4913 
4914 	if (lpfc_cmd->pCmd == cmnd) {
4915 		ret = FAILED;
4916 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4917 				 "0748 abort handler timed out waiting "
4918 				 "for abortng I/O (xri:x%x) to complete: "
4919 				 "ret %#x, ID %d, LUN %d\n",
4920 				 iocb->sli4_xritag, ret,
4921 				 cmnd->device->id, cmnd->device->lun);
4922 	}
4923 	goto out;
4924 
4925 out_unlock:
4926 	spin_unlock_irqrestore(&phba->hbalock, flags);
4927 out:
4928 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4929 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4930 			 "LUN %d\n", ret, cmnd->device->id,
4931 			 cmnd->device->lun);
4932 	return ret;
4933 }
4934 
4935 static char *
4936 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4937 {
4938 	switch (task_mgmt_cmd) {
4939 	case FCP_ABORT_TASK_SET:
4940 		return "ABORT_TASK_SET";
4941 	case FCP_CLEAR_TASK_SET:
4942 		return "FCP_CLEAR_TASK_SET";
4943 	case FCP_BUS_RESET:
4944 		return "FCP_BUS_RESET";
4945 	case FCP_LUN_RESET:
4946 		return "FCP_LUN_RESET";
4947 	case FCP_TARGET_RESET:
4948 		return "FCP_TARGET_RESET";
4949 	case FCP_CLEAR_ACA:
4950 		return "FCP_CLEAR_ACA";
4951 	case FCP_TERMINATE_TASK:
4952 		return "FCP_TERMINATE_TASK";
4953 	default:
4954 		return "unknown";
4955 	}
4956 }
4957 
4958 /**
4959  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4960  * @vport: The virtual port for which this call is being executed.
4961  * @rdata: Pointer to remote port local data
4962  * @tgt_id: Target ID of remote device.
4963  * @lun_id: Lun number for the TMF
4964  * @task_mgmt_cmd: type of TMF to send
4965  *
4966  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4967  * a remote port.
4968  *
4969  * Return Code:
4970  *   0x2003 - Error
4971  *   0x2002 - Success.
4972  **/
4973 static int
4974 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4975 		    unsigned  tgt_id, unsigned int lun_id,
4976 		    uint8_t task_mgmt_cmd)
4977 {
4978 	struct lpfc_hba   *phba = vport->phba;
4979 	struct lpfc_scsi_buf *lpfc_cmd;
4980 	struct lpfc_iocbq *iocbq;
4981 	struct lpfc_iocbq *iocbqrsp;
4982 	struct lpfc_nodelist *pnode = rdata->pnode;
4983 	int ret;
4984 	int status;
4985 
4986 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4987 		return FAILED;
4988 
4989 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
4990 	if (lpfc_cmd == NULL)
4991 		return FAILED;
4992 	lpfc_cmd->timeout = 60;
4993 	lpfc_cmd->rdata = rdata;
4994 
4995 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4996 					   task_mgmt_cmd);
4997 	if (!status) {
4998 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4999 		return FAILED;
5000 	}
5001 
5002 	iocbq = &lpfc_cmd->cur_iocbq;
5003 	iocbqrsp = lpfc_sli_get_iocbq(phba);
5004 	if (iocbqrsp == NULL) {
5005 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5006 		return FAILED;
5007 	}
5008 
5009 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5010 			 "0702 Issue %s to TGT %d LUN %d "
5011 			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5012 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5013 			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5014 			 iocbq->iocb_flag);
5015 
5016 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5017 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
5018 	if (status != IOCB_SUCCESS) {
5019 		if (status == IOCB_TIMEDOUT) {
5020 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5021 			ret = TIMEOUT_ERROR;
5022 		} else
5023 			ret = FAILED;
5024 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
5025 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5026 			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
5027 			 "iocb_flag x%x\n",
5028 			 lpfc_taskmgmt_name(task_mgmt_cmd),
5029 			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
5030 			 iocbqrsp->iocb.un.ulpWord[4],
5031 			 iocbq->iocb_flag);
5032 	} else if (status == IOCB_BUSY)
5033 		ret = FAILED;
5034 	else
5035 		ret = SUCCESS;
5036 
5037 	lpfc_sli_release_iocbq(phba, iocbqrsp);
5038 
5039 	if (ret != TIMEOUT_ERROR)
5040 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5041 
5042 	return ret;
5043 }
5044 
5045 /**
5046  * lpfc_chk_tgt_mapped -
5047  * @vport: The virtual port to check on
5048  * @cmnd: Pointer to scsi_cmnd data structure.
5049  *
5050  * This routine delays until the scsi target (aka rport) for the
5051  * command exists (is present and logged in) or we declare it non-existent.
5052  *
5053  * Return code :
5054  *  0x2003 - Error
5055  *  0x2002 - Success
5056  **/
5057 static int
5058 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5059 {
5060 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
5061 	struct lpfc_nodelist *pnode;
5062 	unsigned long later;
5063 
5064 	if (!rdata) {
5065 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5066 			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
5067 		return FAILED;
5068 	}
5069 	pnode = rdata->pnode;
5070 	/*
5071 	 * If target is not in a MAPPED state, delay until
5072 	 * target is rediscovered or devloss timeout expires.
5073 	 */
5074 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5075 	while (time_after(later, jiffies)) {
5076 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5077 			return FAILED;
5078 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5079 			return SUCCESS;
5080 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5081 		rdata = cmnd->device->hostdata;
5082 		if (!rdata)
5083 			return FAILED;
5084 		pnode = rdata->pnode;
5085 	}
5086 	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5087 	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5088 		return FAILED;
5089 	return SUCCESS;
5090 }
5091 
5092 /**
5093  * lpfc_reset_flush_io_context -
5094  * @vport: The virtual port (scsi_host) for the flush context
5095  * @tgt_id: If aborting by Target contect - specifies the target id
5096  * @lun_id: If aborting by Lun context - specifies the lun id
5097  * @context: specifies the context level to flush at.
5098  *
5099  * After a reset condition via TMF, we need to flush orphaned i/o
5100  * contexts from the adapter. This routine aborts any contexts
5101  * outstanding, then waits for their completions. The wait is
5102  * bounded by devloss_tmo though.
5103  *
5104  * Return code :
5105  *  0x2003 - Error
5106  *  0x2002 - Success
5107  **/
5108 static int
5109 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5110 			uint64_t lun_id, lpfc_ctx_cmd context)
5111 {
5112 	struct lpfc_hba   *phba = vport->phba;
5113 	unsigned long later;
5114 	int cnt;
5115 
5116 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5117 	if (cnt)
5118 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
5119 				    tgt_id, lun_id, context);
5120 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5121 	while (time_after(later, jiffies) && cnt) {
5122 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5123 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5124 	}
5125 	if (cnt) {
5126 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5127 			"0724 I/O flush failure for context %s : cnt x%x\n",
5128 			((context == LPFC_CTX_LUN) ? "LUN" :
5129 			 ((context == LPFC_CTX_TGT) ? "TGT" :
5130 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5131 			cnt);
5132 		return FAILED;
5133 	}
5134 	return SUCCESS;
5135 }
5136 
5137 /**
5138  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5139  * @cmnd: Pointer to scsi_cmnd data structure.
5140  *
5141  * This routine does a device reset by sending a LUN_RESET task management
5142  * command.
5143  *
5144  * Return code :
5145  *  0x2003 - Error
5146  *  0x2002 - Success
5147  **/
5148 static int
5149 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5150 {
5151 	struct Scsi_Host  *shost = cmnd->device->host;
5152 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5153 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
5154 	struct lpfc_nodelist *pnode;
5155 	unsigned tgt_id = cmnd->device->id;
5156 	unsigned int lun_id = cmnd->device->lun;
5157 	struct lpfc_scsi_event_header scsi_event;
5158 	int status, ret = SUCCESS;
5159 
5160 	if (!rdata) {
5161 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5162 			"0798 Device Reset rport failure: rdata x%p\n", rdata);
5163 		return FAILED;
5164 	}
5165 	pnode = rdata->pnode;
5166 	status = fc_block_scsi_eh(cmnd);
5167 	if (status != 0 && status != SUCCESS)
5168 		return status;
5169 
5170 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5171 	if (status == FAILED) {
5172 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5173 			"0721 Device Reset rport failure: rdata x%p\n", rdata);
5174 		return FAILED;
5175 	}
5176 
5177 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5178 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5179 	scsi_event.lun = lun_id;
5180 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5181 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5182 
5183 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5184 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5185 
5186 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5187 						FCP_LUN_RESET);
5188 
5189 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5190 			 "0713 SCSI layer issued Device Reset (%d, %d) "
5191 			 "return x%x\n", tgt_id, lun_id, status);
5192 
5193 	/*
5194 	 * We have to clean up i/o as : they may be orphaned by the TMF;
5195 	 * or if the TMF failed, they may be in an indeterminate state.
5196 	 * So, continue on.
5197 	 * We will report success if all the i/o aborts successfully.
5198 	 */
5199 	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5200 						LPFC_CTX_LUN);
5201 	return ret;
5202 }
5203 
5204 /**
5205  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5206  * @cmnd: Pointer to scsi_cmnd data structure.
5207  *
5208  * This routine does a target reset by sending a TARGET_RESET task management
5209  * command.
5210  *
5211  * Return code :
5212  *  0x2003 - Error
5213  *  0x2002 - Success
5214  **/
5215 static int
5216 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5217 {
5218 	struct Scsi_Host  *shost = cmnd->device->host;
5219 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5220 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
5221 	struct lpfc_nodelist *pnode;
5222 	unsigned tgt_id = cmnd->device->id;
5223 	unsigned int lun_id = cmnd->device->lun;
5224 	struct lpfc_scsi_event_header scsi_event;
5225 	int status, ret = SUCCESS;
5226 
5227 	if (!rdata) {
5228 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5229 			"0799 Target Reset rport failure: rdata x%p\n", rdata);
5230 		return FAILED;
5231 	}
5232 	pnode = rdata->pnode;
5233 	status = fc_block_scsi_eh(cmnd);
5234 	if (status != 0 && status != SUCCESS)
5235 		return status;
5236 
5237 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5238 	if (status == FAILED) {
5239 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5240 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
5241 		return FAILED;
5242 	}
5243 
5244 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5245 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5246 	scsi_event.lun = 0;
5247 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5248 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5249 
5250 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5251 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5252 
5253 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5254 					FCP_TARGET_RESET);
5255 
5256 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5257 			 "0723 SCSI layer issued Target Reset (%d, %d) "
5258 			 "return x%x\n", tgt_id, lun_id, status);
5259 
5260 	/*
5261 	 * We have to clean up i/o as : they may be orphaned by the TMF;
5262 	 * or if the TMF failed, they may be in an indeterminate state.
5263 	 * So, continue on.
5264 	 * We will report success if all the i/o aborts successfully.
5265 	 */
5266 	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5267 					  LPFC_CTX_TGT);
5268 	return ret;
5269 }
5270 
5271 /**
5272  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5273  * @cmnd: Pointer to scsi_cmnd data structure.
5274  *
5275  * This routine does target reset to all targets on @cmnd->device->host.
5276  * This emulates Parallel SCSI Bus Reset Semantics.
5277  *
5278  * Return code :
5279  *  0x2003 - Error
5280  *  0x2002 - Success
5281  **/
5282 static int
5283 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5284 {
5285 	struct Scsi_Host  *shost = cmnd->device->host;
5286 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5287 	struct lpfc_nodelist *ndlp = NULL;
5288 	struct lpfc_scsi_event_header scsi_event;
5289 	int match;
5290 	int ret = SUCCESS, status, i;
5291 
5292 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5293 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5294 	scsi_event.lun = 0;
5295 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5296 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5297 
5298 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5299 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5300 
5301 	status = fc_block_scsi_eh(cmnd);
5302 	if (status != 0 && status != SUCCESS)
5303 		return status;
5304 
5305 	/*
5306 	 * Since the driver manages a single bus device, reset all
5307 	 * targets known to the driver.  Should any target reset
5308 	 * fail, this routine returns failure to the midlayer.
5309 	 */
5310 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
5311 		/* Search for mapped node by target ID */
5312 		match = 0;
5313 		spin_lock_irq(shost->host_lock);
5314 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5315 			if (!NLP_CHK_NODE_ACT(ndlp))
5316 				continue;
5317 			if (vport->phba->cfg_fcp2_no_tgt_reset &&
5318 			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5319 				continue;
5320 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5321 			    ndlp->nlp_sid == i &&
5322 			    ndlp->rport) {
5323 				match = 1;
5324 				break;
5325 			}
5326 		}
5327 		spin_unlock_irq(shost->host_lock);
5328 		if (!match)
5329 			continue;
5330 
5331 		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
5332 					i, 0, FCP_TARGET_RESET);
5333 
5334 		if (status != SUCCESS) {
5335 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5336 					 "0700 Bus Reset on target %d failed\n",
5337 					 i);
5338 			ret = FAILED;
5339 		}
5340 	}
5341 	/*
5342 	 * We have to clean up i/o as : they may be orphaned by the TMFs
5343 	 * above; or if any of the TMFs failed, they may be in an
5344 	 * indeterminate state.
5345 	 * We will report success if all the i/o aborts successfully.
5346 	 */
5347 
5348 	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5349 	if (status != SUCCESS)
5350 		ret = FAILED;
5351 
5352 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5353 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5354 	return ret;
5355 }
5356 
5357 /**
5358  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5359  * @cmnd: Pointer to scsi_cmnd data structure.
5360  *
5361  * This routine does host reset to the adaptor port. It brings the HBA
5362  * offline, performs a board restart, and then brings the board back online.
5363  * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
5364  * reject all outstanding SCSI commands to the host and error returned
5365  * back to SCSI mid-level. As this will be SCSI mid-level's last resort
5366  * of error handling, it will only return error if resetting of the adapter
5367  * is not successful; in all other cases, will return success.
5368  *
5369  * Return code :
5370  *  0x2003 - Error
5371  *  0x2002 - Success
5372  **/
5373 static int
5374 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5375 {
5376 	struct Scsi_Host *shost = cmnd->device->host;
5377 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5378 	struct lpfc_hba *phba = vport->phba;
5379 	int rc, ret = SUCCESS;
5380 
5381 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5382 			 "3172 SCSI layer issued Host Reset Data:\n");
5383 
5384 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5385 	lpfc_offline(phba);
5386 	rc = lpfc_sli_brdrestart(phba);
5387 	if (rc)
5388 		ret = FAILED;
5389 	rc = lpfc_online(phba);
5390 	if (rc)
5391 		ret = FAILED;
5392 	lpfc_unblock_mgmt_io(phba);
5393 
5394 	if (ret == FAILED) {
5395 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5396 				 "3323 Failed host reset, bring it offline\n");
5397 		lpfc_sli4_offline_eratt(phba);
5398 	}
5399 	return ret;
5400 }
5401 
5402 /**
5403  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5404  * @sdev: Pointer to scsi_device.
5405  *
5406  * This routine populates the cmds_per_lun count + 2 scsi_bufs into  this host's
5407  * globally available list of scsi buffers. This routine also makes sure scsi
5408  * buffer is not allocated more than HBA limit conveyed to midlayer. This list
5409  * of scsi buffer exists for the lifetime of the driver.
5410  *
5411  * Return codes:
5412  *   non-0 - Error
5413  *   0 - Success
5414  **/
5415 static int
5416 lpfc_slave_alloc(struct scsi_device *sdev)
5417 {
5418 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5419 	struct lpfc_hba   *phba = vport->phba;
5420 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5421 	uint32_t total = 0;
5422 	uint32_t num_to_alloc = 0;
5423 	int num_allocated = 0;
5424 	uint32_t sdev_cnt;
5425 
5426 	if (!rport || fc_remote_port_chkready(rport))
5427 		return -ENXIO;
5428 
5429 	sdev->hostdata = rport->dd_data;
5430 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5431 
5432 	/*
5433 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5434 	 * available list of scsi buffers.  Don't allocate more than the
5435 	 * HBA limit conveyed to the midlayer via the host structure.  The
5436 	 * formula accounts for the lun_queue_depth + error handlers + 1
5437 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
5438 	 */
5439 	total = phba->total_scsi_bufs;
5440 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
5441 
5442 	/* If allocated buffers are enough do nothing */
5443 	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5444 		return 0;
5445 
5446 	/* Allow some exchanges to be available always to complete discovery */
5447 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5448 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5449 				 "0704 At limitation of %d preallocated "
5450 				 "command buffers\n", total);
5451 		return 0;
5452 	/* Allow some exchanges to be available always to complete discovery */
5453 	} else if (total + num_to_alloc >
5454 		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5455 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5456 				 "0705 Allocation request of %d "
5457 				 "command buffers will exceed max of %d.  "
5458 				 "Reducing allocation request to %d.\n",
5459 				 num_to_alloc, phba->cfg_hba_queue_depth,
5460 				 (phba->cfg_hba_queue_depth - total));
5461 		num_to_alloc = phba->cfg_hba_queue_depth - total;
5462 	}
5463 	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5464 	if (num_to_alloc != num_allocated) {
5465 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5466 					 "0708 Allocation request of %d "
5467 					 "command buffers did not succeed.  "
5468 					 "Allocated %d buffers.\n",
5469 					 num_to_alloc, num_allocated);
5470 	}
5471 	if (num_allocated > 0)
5472 		phba->total_scsi_bufs += num_allocated;
5473 	return 0;
5474 }
5475 
5476 /**
5477  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5478  * @sdev: Pointer to scsi_device.
5479  *
5480  * This routine configures following items
5481  *   - Tag command queuing support for @sdev if supported.
5482  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5483  *
5484  * Return codes:
5485  *   0 - Success
5486  **/
5487 static int
5488 lpfc_slave_configure(struct scsi_device *sdev)
5489 {
5490 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5491 	struct lpfc_hba   *phba = vport->phba;
5492 
5493 	if (sdev->tagged_supported)
5494 		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5495 	else
5496 		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5497 
5498 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5499 		lpfc_sli_handle_fast_ring_event(phba,
5500 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5501 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5502 			lpfc_poll_rearm_timer(phba);
5503 	}
5504 
5505 	return 0;
5506 }
5507 
5508 /**
5509  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5510  * @sdev: Pointer to scsi_device.
5511  *
5512  * This routine sets @sdev hostatdata filed to null.
5513  **/
5514 static void
5515 lpfc_slave_destroy(struct scsi_device *sdev)
5516 {
5517 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5518 	struct lpfc_hba   *phba = vport->phba;
5519 	atomic_dec(&phba->sdev_cnt);
5520 	sdev->hostdata = NULL;
5521 	return;
5522 }
5523 
5524 
5525 struct scsi_host_template lpfc_template = {
5526 	.module			= THIS_MODULE,
5527 	.name			= LPFC_DRIVER_NAME,
5528 	.info			= lpfc_info,
5529 	.queuecommand		= lpfc_queuecommand,
5530 	.eh_abort_handler	= lpfc_abort_handler,
5531 	.eh_device_reset_handler = lpfc_device_reset_handler,
5532 	.eh_target_reset_handler = lpfc_target_reset_handler,
5533 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5534 	.eh_host_reset_handler  = lpfc_host_reset_handler,
5535 	.slave_alloc		= lpfc_slave_alloc,
5536 	.slave_configure	= lpfc_slave_configure,
5537 	.slave_destroy		= lpfc_slave_destroy,
5538 	.scan_finished		= lpfc_scan_finished,
5539 	.this_id		= -1,
5540 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5541 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5542 	.use_clustering		= ENABLE_CLUSTERING,
5543 	.shost_attrs		= lpfc_hba_attrs,
5544 	.max_sectors		= 0xFFFF,
5545 	.vendor_id		= LPFC_NL_VENDOR_ID,
5546 	.change_queue_depth	= lpfc_change_queue_depth,
5547 	.change_queue_type	= lpfc_change_queue_type,
5548 };
5549 
5550 struct scsi_host_template lpfc_vport_template = {
5551 	.module			= THIS_MODULE,
5552 	.name			= LPFC_DRIVER_NAME,
5553 	.info			= lpfc_info,
5554 	.queuecommand		= lpfc_queuecommand,
5555 	.eh_abort_handler	= lpfc_abort_handler,
5556 	.eh_device_reset_handler = lpfc_device_reset_handler,
5557 	.eh_target_reset_handler = lpfc_target_reset_handler,
5558 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5559 	.slave_alloc		= lpfc_slave_alloc,
5560 	.slave_configure	= lpfc_slave_configure,
5561 	.slave_destroy		= lpfc_slave_destroy,
5562 	.scan_finished		= lpfc_scan_finished,
5563 	.this_id		= -1,
5564 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5565 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5566 	.use_clustering		= ENABLE_CLUSTERING,
5567 	.shost_attrs		= lpfc_vport_attrs,
5568 	.max_sectors		= 0xFFFF,
5569 	.change_queue_depth	= lpfc_change_queue_depth,
5570 	.change_queue_type	= lpfc_change_queue_type,
5571 };
5572