xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_scsi.c (revision 840ef8b7cc584a23c4f9d05352f4dbaf8e56e5ab)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/export.h>
25 #include <linux/delay.h>
26 #include <asm/unaligned.h>
27 
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_eh.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_tcq.h>
33 #include <scsi/scsi_transport_fc.h>
34 
35 #include "lpfc_version.h"
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_vport.h"
47 
48 #define LPFC_RESET_WAIT  2
49 #define LPFC_ABORT_WAIT  2
50 
51 int _dump_buf_done;
52 
53 static char *dif_op_str[] = {
54 	"PROT_NORMAL",
55 	"PROT_READ_INSERT",
56 	"PROT_WRITE_STRIP",
57 	"PROT_READ_STRIP",
58 	"PROT_WRITE_INSERT",
59 	"PROT_READ_PASS",
60 	"PROT_WRITE_PASS",
61 };
62 
63 struct scsi_dif_tuple {
64 	__be16 guard_tag;       /* Checksum */
65 	__be16 app_tag;         /* Opaque storage */
66 	__be32 ref_tag;         /* Target LBA or indirect LBA */
67 };
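/*
 * Note: this 8-byte layout mirrors the T10 Protection Information
 * tuple (guard, app tag, ref tag) carried with each logical block
 * when DIF is in use.
 */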
68 
69 static void
70 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
71 static void
72 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
73 
74 static void
75 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
76 {
77 	void *src, *dst;
78 	struct scatterlist *sgde = scsi_sglist(cmnd);
79 
80 	if (!_dump_buf_data) {
81 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
82 			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
83 				__func__);
84 		return;
85 	}
86 
87 
88 	if (!sgde) {
89 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
90 			"9051 BLKGRD: ERROR: data scatterlist is null\n");
91 		return;
92 	}
93 
94 	dst = (void *) _dump_buf_data;
95 	while (sgde) {
96 		src = sg_virt(sgde);
97 		memcpy(dst, src, sgde->length);
98 		dst += sgde->length;
99 		sgde = sg_next(sgde);
100 	}
101 }
102 
103 static void
104 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
105 {
106 	void *src, *dst;
107 	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
108 
109 	if (!_dump_buf_dif) {
110 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
111 			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
112 				__func__);
113 		return;
114 	}
115 
116 	if (!sgde) {
117 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
118 			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
119 		return;
120 	}
121 
122 	dst = _dump_buf_dif;
123 	while (sgde) {
124 		src = sg_virt(sgde);
125 		memcpy(dst, src, sgde->length);
126 		dst += sgde->length;
127 		sgde = sg_next(sgde);
128 	}
129 }
130 
131 /**
132  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
133  * @phba: Pointer to HBA object.
134  * @lpfc_cmd: lpfc scsi command object pointer.
135  *
136  * This function is called from the lpfc_prep_task_mgmt_cmd function to
137  * set the last bit in the response sge entry.
138  **/
139 static void
140 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
141 				struct lpfc_scsi_buf *lpfc_cmd)
142 {
143 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
144 	if (sgl) {
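		/* sgl + 1 is the FCP_RSP entry; mark it as the last sge */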
145 		sgl += 1;
146 		sgl->word2 = le32_to_cpu(sgl->word2);
147 		bf_set(lpfc_sli4_sge_last, sgl, 1);
148 		sgl->word2 = cpu_to_le32(sgl->word2);
149 	}
150 }
151 
152 /**
153  * lpfc_update_stats - Update statistical data for the command completion
154  * @phba: Pointer to HBA object.
155  * @lpfc_cmd: lpfc scsi command object pointer.
156  *
157  * This function is called when there is a command completion and this
158  * function updates the statistical data for the command completion.
159  **/
160 static void
161 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
162 {
163 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
164 	struct lpfc_nodelist *pnode = rdata->pnode;
165 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
166 	unsigned long flags;
167 	struct Scsi_Host  *shost = cmd->device->host;
168 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
169 	unsigned long latency;
170 	int i;
171 
172 	if (cmd->result)
173 		return;
174 
175 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
176 
177 	spin_lock_irqsave(shost->host_lock, flags);
178 	if (!vport->stat_data_enabled ||
179 		vport->stat_data_blocked ||
180 		!pnode ||
181 		!pnode->lat_data ||
182 		(phba->bucket_type == LPFC_NO_BUCKET)) {
183 		spin_unlock_irqrestore(shost->host_lock, flags);
184 		return;
185 	}
186 
187 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
188 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
189 			phba->bucket_step;
190 		/* check array subscript bounds */
191 		if (i < 0)
192 			i = 0;
193 		else if (i >= LPFC_MAX_BUCKET_COUNT)
194 			i = LPFC_MAX_BUCKET_COUNT - 1;
195 	} else {
196 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
197 			if (latency <= (phba->bucket_base +
198 				((1<<i)*phba->bucket_step)))
199 				break;
200 	}
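	/*
	 * Worked example (hypothetical values): with bucket_base = 0 and
	 * bucket_step = 50, a latency of 120 ms maps to linear bucket
	 * (120 + 50 - 1 - 0) / 50 = 3, while the power-of-2 bucket type
	 * picks the first i with 120 <= 0 + (1 << i) * 50, i.e. i = 2.
	 */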
201 
202 	pnode->lat_data[i].cmd_count++;
203 	spin_unlock_irqrestore(shost->host_lock, flags);
204 }
205 
206 /**
207  * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
208  * @phba: Pointer to HBA context object.
209  * @vport: Pointer to vport object.
210  * @ndlp: Pointer to FC node associated with the target.
211  * @lun: Lun number of the scsi device.
212  * @old_val: Old value of the queue depth.
213  * @new_val: New value of the queue depth.
214  *
215  * This function sends an event to the mgmt application indicating
216  * there is a change in the scsi device queue depth.
217  **/
218 static void
219 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
220 		struct lpfc_vport  *vport,
221 		struct lpfc_nodelist *ndlp,
222 		uint32_t lun,
223 		uint32_t old_val,
224 		uint32_t new_val)
225 {
226 	struct lpfc_fast_path_event *fast_path_evt;
227 	unsigned long flags;
228 
229 	fast_path_evt = lpfc_alloc_fast_evt(phba);
230 	if (!fast_path_evt)
231 		return;
232 
233 	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
234 		FC_REG_SCSI_EVENT;
235 	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
236 		LPFC_EVENT_VARQUEDEPTH;
237 
238 	/* Report all luns with change in queue depth */
239 	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
240 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
241 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
242 			&ndlp->nlp_portname, sizeof(struct lpfc_name));
243 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
244 			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
245 	}
246 
247 	fast_path_evt->un.queue_depth_evt.oldval = old_val;
248 	fast_path_evt->un.queue_depth_evt.newval = new_val;
249 	fast_path_evt->vport = vport;
250 
251 	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
252 	spin_lock_irqsave(&phba->hbalock, flags);
253 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
254 	spin_unlock_irqrestore(&phba->hbalock, flags);
255 	lpfc_worker_wake_up(phba);
256 
257 	return;
258 }
259 
260 /**
261  * lpfc_change_queue_depth - Alter scsi device queue depth
262  * @sdev: Pointer to the scsi device on which to change the queue depth.
263  * @qdepth: New queue depth to set the sdev to.
264  * @reason: The reason for the queue depth change.
265  *
266  * This function is called by the midlayer and the LLD to alter the queue
267  * depth for a scsi device. This function sets the queue depth to the new
268  * value and sends an event out to log the queue depth change.
269  **/
270 int
271 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
272 {
273 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
274 	struct lpfc_hba   *phba = vport->phba;
275 	struct lpfc_rport_data *rdata;
276 	unsigned long new_queue_depth, old_queue_depth;
277 
278 	old_queue_depth = sdev->queue_depth;
279 	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
280 	new_queue_depth = sdev->queue_depth;
281 	rdata = sdev->hostdata;
282 	if (rdata)
283 		lpfc_send_sdev_queuedepth_change_event(phba, vport,
284 						       rdata->pnode, sdev->lun,
285 						       old_queue_depth,
286 						       new_queue_depth);
287 	return sdev->queue_depth;
288 }
289 
290 /**
291  * lpfc_change_queue_type() - Change a device's scsi tag queuing type
292  * @sdev: Pointer to the scsi device whose queue depth is to be changed
293  * @tag_type: Identifier for queue tag type
294  */
295 static int
296 lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
297 {
298 	if (sdev->tagged_supported) {
299 		scsi_set_tag_type(sdev, tag_type);
300 		if (tag_type)
301 			scsi_activate_tcq(sdev, sdev->queue_depth);
302 		else
303 			scsi_deactivate_tcq(sdev, sdev->queue_depth);
304 	} else
305 		tag_type = 0;
306 
307 	return tag_type;
308 }
309 
310 /**
311  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
312  * @phba: The Hba for which this call is being executed.
313  *
314  * This routine is called when there is a resource error in the driver or
315  * firmware. This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba,
316  * at most 1 event each second, and wakes up the worker thread of @phba
317  * to process the WORKER_RAMP_DOWN_QUEUE event.
318  *
319  * This routine should be called with no lock held.
320  **/
321 void
322 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
323 {
324 	unsigned long flags;
325 	uint32_t evt_posted;
326 
327 	spin_lock_irqsave(&phba->hbalock, flags);
328 	atomic_inc(&phba->num_rsrc_err);
329 	phba->last_rsrc_error_time = jiffies;
330 
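	/*
	 * QUEUE_RAMP_DOWN_INTERVAL (one second, per the comment above)
	 * throttles ramp-down event posting to at most one per second.
	 */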
331 	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
332 		spin_unlock_irqrestore(&phba->hbalock, flags);
333 		return;
334 	}
335 
336 	phba->last_ramp_down_time = jiffies;
337 
338 	spin_unlock_irqrestore(&phba->hbalock, flags);
339 
340 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
341 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
342 	if (!evt_posted)
343 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
344 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
345 
346 	if (!evt_posted)
347 		lpfc_worker_wake_up(phba);
348 	return;
349 }
350 
351 /**
352  * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
353  * @vport: The vport for which this call is being executed.
354  * @queue_depth: Current queue depth of the scsi device.
355  *
356  * This routine posts a WORKER_RAMP_UP_QUEUE event for the phba of @vport.
357  * It posts at most 1 event every 5 minutes after last_ramp_up_time or
358  * last_rsrc_error_time, and wakes up the worker thread to process it.
359  *
360  * This routine should be called with no lock held.
361  **/
362 static inline void
363 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
364 			uint32_t queue_depth)
365 {
366 	unsigned long flags;
367 	struct lpfc_hba *phba = vport->phba;
368 	uint32_t evt_posted;
369 	atomic_inc(&phba->num_cmd_success);
370 
371 	if (vport->cfg_lun_queue_depth <= queue_depth)
372 		return;
373 	spin_lock_irqsave(&phba->hbalock, flags);
374 	if (time_before(jiffies,
375 			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
376 	    time_before(jiffies,
377 			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
378 		spin_unlock_irqrestore(&phba->hbalock, flags);
379 		return;
380 	}
381 	phba->last_ramp_up_time = jiffies;
382 	spin_unlock_irqrestore(&phba->hbalock, flags);
383 
384 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
385 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
386 	if (!evt_posted)
387 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
388 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
389 
390 	if (!evt_posted)
391 		lpfc_worker_wake_up(phba);
392 	return;
393 }
394 
395 /**
396  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
397  * @phba: The Hba for which this call is being executed.
398  *
399  * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
400  * worker thread. It reduces the queue depth for all scsi devices on each
401  * vport associated with @phba.
402  **/
403 void
404 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
405 {
406 	struct lpfc_vport **vports;
407 	struct Scsi_Host  *shost;
408 	struct scsi_device *sdev;
409 	unsigned long new_queue_depth;
410 	unsigned long num_rsrc_err, num_cmd_success;
411 	int i;
412 
413 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
414 	num_cmd_success = atomic_read(&phba->num_cmd_success);
415 
416 	/*
417 	 * The error and success command counters are global per
418 	 * driver instance.  If another handler has already
419 	 * operated on this error event, just exit.
420 	 */
421 	if (num_rsrc_err == 0)
422 		return;
423 
424 	vports = lpfc_create_vport_work_array(phba);
425 	if (vports != NULL)
426 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
427 			shost = lpfc_shost_from_vport(vports[i]);
428 			shost_for_each_device(sdev, shost) {
429 				new_queue_depth =
430 					sdev->queue_depth * num_rsrc_err /
431 					(num_rsrc_err + num_cmd_success);
432 				if (!new_queue_depth)
433 					new_queue_depth = sdev->queue_depth - 1;
434 				else
435 					new_queue_depth = sdev->queue_depth -
436 								new_queue_depth;
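				/*
				 * Illustration (hypothetical numbers): with
				 * queue_depth = 32, num_rsrc_err = 1 and
				 * num_cmd_success = 3, the proportional cut is
				 * 32 * 1 / 4 = 8, so the depth ramps down to
				 * 32 - 8 = 24.
				 */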
437 				lpfc_change_queue_depth(sdev, new_queue_depth,
438 							SCSI_QDEPTH_DEFAULT);
439 			}
440 		}
441 	lpfc_destroy_vport_work_array(phba, vports);
442 	atomic_set(&phba->num_rsrc_err, 0);
443 	atomic_set(&phba->num_cmd_success, 0);
444 }
445 
446 /**
447  * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
448  * @phba: The Hba for which this call is being executed.
449  *
450  * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
451  * worker thread. It increases the queue depth by 1 for all scsi devices on
452  * each vport associated with @phba. This routine also sets @phba num_rsrc_err and
453  * num_cmd_success to zero.
454  **/
455 void
456 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
457 {
458 	struct lpfc_vport **vports;
459 	struct Scsi_Host  *shost;
460 	struct scsi_device *sdev;
461 	int i;
462 
463 	vports = lpfc_create_vport_work_array(phba);
464 	if (vports != NULL)
465 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
466 			shost = lpfc_shost_from_vport(vports[i]);
467 			shost_for_each_device(sdev, shost) {
468 				if (vports[i]->cfg_lun_queue_depth <=
469 				    sdev->queue_depth)
470 					continue;
471 				lpfc_change_queue_depth(sdev,
472 							sdev->queue_depth+1,
473 							SCSI_QDEPTH_RAMP_UP);
474 			}
475 		}
476 	lpfc_destroy_vport_work_array(phba, vports);
477 	atomic_set(&phba->num_rsrc_err, 0);
478 	atomic_set(&phba->num_cmd_success, 0);
479 }
480 
481 /**
482  * lpfc_scsi_dev_block - set all scsi hosts to block state
483  * @phba: Pointer to HBA context object.
484  *
485  * This function walks the vport list and sets each SCSI host to the block
486  * state by invoking the fc_remote_port_delete() routine. This function is
487  * invoked by EEH when the device's PCI slot has been permanently disabled.
488  **/
489 void
490 lpfc_scsi_dev_block(struct lpfc_hba *phba)
491 {
492 	struct lpfc_vport **vports;
493 	struct Scsi_Host  *shost;
494 	struct scsi_device *sdev;
495 	struct fc_rport *rport;
496 	int i;
497 
498 	vports = lpfc_create_vport_work_array(phba);
499 	if (vports != NULL)
500 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
501 			shost = lpfc_shost_from_vport(vports[i]);
502 			shost_for_each_device(sdev, shost) {
503 				rport = starget_to_rport(scsi_target(sdev));
504 				fc_remote_port_delete(rport);
505 			}
506 		}
507 	lpfc_destroy_vport_work_array(phba, vports);
508 }
509 
510 /**
511  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
512  * @vport: The virtual port for which this call is being executed.
513  * @num_to_alloc: The requested number of buffers to allocate.
514  *
515  * This routine allocates scsi buffers for a device with the SLI-3 interface
516  * spec. Each scsi buffer contains all the information needed to initiate a
517  * SCSI I/O. The non-DMAable buffer region contains information to build
518  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
519  * and the initial BPL. In addition to allocating memory, the FCP CMND and
520  * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
521  *
522  * Return codes:
523  *   int - number of scsi buffers that were allocated.
524  *   0 = failure, less than num_to_alloc is a partial failure.
525  **/
526 static int
527 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
528 {
529 	struct lpfc_hba *phba = vport->phba;
530 	struct lpfc_scsi_buf *psb;
531 	struct ulp_bde64 *bpl;
532 	IOCB_t *iocb;
533 	dma_addr_t pdma_phys_fcp_cmd;
534 	dma_addr_t pdma_phys_fcp_rsp;
535 	dma_addr_t pdma_phys_bpl;
536 	uint16_t iotag;
537 	int bcnt;
538 
539 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
540 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
541 		if (!psb)
542 			break;
543 
544 		/*
545 		 * Get memory from the pci pool to map the virt space to pci
546 		 * bus space for an I/O.  The DMA buffer includes space for the
547 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
548 		 * necessary to support the sg_tablesize.
549 		 */
550 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
551 					GFP_KERNEL, &psb->dma_handle);
552 		if (!psb->data) {
553 			kfree(psb);
554 			break;
555 		}
556 
557 		/* Initialize virtual ptrs to dma_buf region. */
558 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
559 
560 		/* Allocate iotag for psb->cur_iocbq. */
561 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
562 		if (iotag == 0) {
563 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
564 					psb->data, psb->dma_handle);
565 			kfree(psb);
566 			break;
567 		}
568 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
569 
570 		psb->fcp_cmnd = psb->data;
571 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
572 		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
573 			sizeof(struct fcp_rsp);
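		/*
		 * Resulting SLI-3 DMA buffer layout (a sketch):
		 *   offset 0                         : struct fcp_cmnd
		 *   + sizeof(fcp_cmnd)               : struct fcp_rsp
		 *   + sizeof(fcp_cmnd) + sizeof(rsp) : BPL entries (sg list)
		 */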
574 
575 		/* Initialize local short-hand pointers. */
576 		bpl = psb->fcp_bpl;
577 		pdma_phys_fcp_cmd = psb->dma_handle;
578 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
579 		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
580 			sizeof(struct fcp_rsp);
581 
582 		/*
583 		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
584 		 * are sg list bdes.  Initialize the first two and leave the
585 		 * rest for queuecommand.
586 		 */
587 		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
588 		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
589 		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
590 		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
591 		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
592 
593 		/* Setup the physical region for the FCP RSP */
594 		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
595 		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
596 		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
597 		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
598 		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
599 
600 		/*
601 		 * Since the IOCB for the FCP I/O is built into this
602 		 * lpfc_scsi_buf, initialize it with all known data now.
603 		 */
604 		iocb = &psb->cur_iocbq.iocb;
605 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
606 		if ((phba->sli_rev == 3) &&
607 				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
608 			/* fill in immediate fcp command BDE */
609 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
610 			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
611 			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
612 					unsli3.fcp_ext.icd);
613 			iocb->un.fcpi64.bdl.addrHigh = 0;
614 			iocb->ulpBdeCount = 0;
615 			iocb->ulpLe = 0;
616 			/* fill in response BDE */
617 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
618 							BUFF_TYPE_BDE_64;
619 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
620 				sizeof(struct fcp_rsp);
621 			iocb->unsli3.fcp_ext.rbde.addrLow =
622 				putPaddrLow(pdma_phys_fcp_rsp);
623 			iocb->unsli3.fcp_ext.rbde.addrHigh =
624 				putPaddrHigh(pdma_phys_fcp_rsp);
625 		} else {
626 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
627 			iocb->un.fcpi64.bdl.bdeSize =
628 					(2 * sizeof(struct ulp_bde64));
629 			iocb->un.fcpi64.bdl.addrLow =
630 					putPaddrLow(pdma_phys_bpl);
631 			iocb->un.fcpi64.bdl.addrHigh =
632 					putPaddrHigh(pdma_phys_bpl);
633 			iocb->ulpBdeCount = 1;
634 			iocb->ulpLe = 1;
635 		}
636 		iocb->ulpClass = CLASS3;
637 		psb->status = IOSTAT_SUCCESS;
638 		/* Put it back into the SCSI buffer list */
639 		psb->cur_iocbq.context1  = psb;
640 		lpfc_release_scsi_buf_s3(phba, psb);
641 
642 	}
643 
644 	return bcnt;
645 }
646 
647 /**
648  * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
649  * @vport: pointer to lpfc vport data structure.
650  *
651  * This routine is invoked by the vport cleanup for deletions and the cleanup
652  * for an ndlp on removal.
653  **/
654 void
655 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
656 {
657 	struct lpfc_hba *phba = vport->phba;
658 	struct lpfc_scsi_buf *psb, *next_psb;
659 	unsigned long iflag = 0;
660 
661 	spin_lock_irqsave(&phba->hbalock, iflag);
662 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
663 	list_for_each_entry_safe(psb, next_psb,
664 				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
665 		if (psb->rdata && psb->rdata->pnode
666 			&& psb->rdata->pnode->vport == vport)
667 			psb->rdata = NULL;
668 	}
669 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
670 	spin_unlock_irqrestore(&phba->hbalock, iflag);
671 }
672 
673 /**
674  * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
675  * @phba: pointer to lpfc hba data structure.
676  * @axri: pointer to the fcp xri abort wcqe structure.
677  *
678  * This routine is invoked by the worker thread to process a SLI4 fast-path
679  * FCP aborted xri.
680  **/
681 void
682 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
683 			  struct sli4_wcqe_xri_aborted *axri)
684 {
685 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
686 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
687 	struct lpfc_scsi_buf *psb, *next_psb;
688 	unsigned long iflag = 0;
689 	struct lpfc_iocbq *iocbq;
690 	int i;
691 	struct lpfc_nodelist *ndlp;
692 	int rrq_empty = 0;
693 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
694 
695 	spin_lock_irqsave(&phba->hbalock, iflag);
696 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
697 	list_for_each_entry_safe(psb, next_psb,
698 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
699 		if (psb->cur_iocbq.sli4_xritag == xri) {
700 			list_del(&psb->list);
701 			psb->exch_busy = 0;
702 			psb->status = IOSTAT_SUCCESS;
703 			spin_unlock(
704 				&phba->sli4_hba.abts_scsi_buf_list_lock);
705 			if (psb->rdata && psb->rdata->pnode)
706 				ndlp = psb->rdata->pnode;
707 			else
708 				ndlp = NULL;
709 
710 			rrq_empty = list_empty(&phba->active_rrq_list);
711 			spin_unlock_irqrestore(&phba->hbalock, iflag);
712 			if (ndlp) {
713 				lpfc_set_rrq_active(phba, ndlp,
714 					psb->cur_iocbq.sli4_lxritag, rxid, 1);
715 				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
716 			}
717 			lpfc_release_scsi_buf_s4(phba, psb);
718 			if (rrq_empty)
719 				lpfc_worker_wake_up(phba);
720 			return;
721 		}
722 	}
723 	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
724 	for (i = 1; i <= phba->sli.last_iotag; i++) {
725 		iocbq = phba->sli.iocbq_lookup[i];
726 
727 		if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
728 			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
729 			continue;
730 		if (iocbq->sli4_xritag != xri)
731 			continue;
732 		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
733 		psb->exch_busy = 0;
734 		spin_unlock_irqrestore(&phba->hbalock, iflag);
735 		if (pring->txq_cnt)
736 			lpfc_worker_wake_up(phba);
737 		return;
738 
739 	}
740 	spin_unlock_irqrestore(&phba->hbalock, iflag);
741 }
742 
743 /**
744  * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
745  * @phba: pointer to lpfc hba data structure.
746  * @post_sblist: pointer to the scsi buffer list.
747  *
748  * This routine walks a list of scsi buffers that was passed in. It attempts
749  * to construct blocks of scsi buffer sgls which contain contiguous xris and
750  * uses the non-embedded SGL block post mailbox commands to post to the port.
751  * For a single SCSI buffer sgl with a non-contiguous xri, if any, it uses
752  * the embedded SGL post mailbox command for posting. The @post_sblist passed
753  * in must be a local list, thus no lock is needed when manipulating the list.
754  *
755  * Returns: 0 = failure, non-zero number of successfully posted buffers.
756  **/
757 int
758 lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
759 			     struct list_head *post_sblist, int sb_count)
760 {
761 	struct lpfc_scsi_buf *psb, *psb_next;
762 	int status;
763 	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
764 	dma_addr_t pdma_phys_bpl1;
765 	int last_xritag = NO_XRI;
766 	LIST_HEAD(prep_sblist);
767 	LIST_HEAD(blck_sblist);
768 	LIST_HEAD(scsi_sblist);
769 
770 	/* sanity check */
771 	if (sb_count <= 0)
772 		return -EINVAL;
773 
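	/*
	 * Illustration (hypothetical xri sequence): buffers carrying xris
	 * 100, 101, 102, 200 yield one non-embedded block post of
	 * {100, 101, 102} when the hole before 200 is seen, and 200 is
	 * then posted by itself with the embedded SGL post mailbox command.
	 */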
774 	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
775 		list_del_init(&psb->list);
776 		block_cnt++;
777 		if ((last_xritag != NO_XRI) &&
778 		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
779 			/* a hole in xri block, form a sgl posting block */
780 			list_splice_init(&prep_sblist, &blck_sblist);
781 			post_cnt = block_cnt - 1;
782 			/* prepare list for next posting block */
783 			list_add_tail(&psb->list, &prep_sblist);
784 			block_cnt = 1;
785 		} else {
786 			/* prepare list for next posting block */
787 			list_add_tail(&psb->list, &prep_sblist);
788 			/* enough sgls for non-embed sgl mbox command */
789 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
790 				list_splice_init(&prep_sblist, &blck_sblist);
791 				post_cnt = block_cnt;
792 				block_cnt = 0;
793 			}
794 		}
795 		num_posting++;
796 		last_xritag = psb->cur_iocbq.sli4_xritag;
797 
798 		/* end of repost sgl list condition for SCSI buffers */
799 		if (num_posting == sb_count) {
800 			if (post_cnt == 0) {
801 				/* last sgl posting block */
802 				list_splice_init(&prep_sblist, &blck_sblist);
803 				post_cnt = block_cnt;
804 			} else if (block_cnt == 1) {
805 				/* last single sgl with non-contiguous xri */
806 				if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
807 					pdma_phys_bpl1 = psb->dma_phys_bpl +
808 								SGL_PAGE_SIZE;
809 				else
810 					pdma_phys_bpl1 = 0;
811 				status = lpfc_sli4_post_sgl(phba,
812 						psb->dma_phys_bpl,
813 						pdma_phys_bpl1,
814 						psb->cur_iocbq.sli4_xritag);
815 				if (status) {
816 					/* failure, put on abort scsi list */
817 					psb->exch_busy = 1;
818 				} else {
819 					/* success, put on SCSI buffer list */
820 					psb->exch_busy = 0;
821 					psb->status = IOSTAT_SUCCESS;
822 					num_posted++;
823 				}
824 				/* success, put on SCSI buffer sgl list */
825 				list_add_tail(&psb->list, &scsi_sblist);
826 			}
827 		}
828 
829 		/* continue until a non-embedded page worth of sgls */
830 		if (post_cnt == 0)
831 			continue;
832 
833 		/* post block of SCSI buffer list sgls */
834 		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
835 						       post_cnt);
836 
837 		/* don't reset xritag due to hole in xri block */
838 		if (block_cnt == 0)
839 			last_xritag = NO_XRI;
840 
841 		/* reset SCSI buffer post count for next round of posting */
842 		post_cnt = 0;
843 
844 		/* put SCSI buffers with posted sgls on the SCSI buffer sgl list */
845 		while (!list_empty(&blck_sblist)) {
846 			list_remove_head(&blck_sblist, psb,
847 					 struct lpfc_scsi_buf, list);
848 			if (status) {
849 				/* failure, put on abort scsi list */
850 				psb->exch_busy = 1;
851 			} else {
852 				/* success, put on SCSI buffer list */
853 				psb->exch_busy = 0;
854 				psb->status = IOSTAT_SUCCESS;
855 				num_posted++;
856 			}
857 			list_add_tail(&psb->list, &scsi_sblist);
858 		}
859 	}
860 	/* Push SCSI buffers with sgl posted to the available list */
861 	while (!list_empty(&scsi_sblist)) {
862 		list_remove_head(&scsi_sblist, psb,
863 				 struct lpfc_scsi_buf, list);
864 		lpfc_release_scsi_buf_s4(phba, psb);
865 	}
866 	return num_posted;
867 }
868 
869 /**
870  * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
871  * @phba: pointer to lpfc hba data structure.
872  *
873  * This routine walks the list of scsi buffers that have been allocated and
874  * reposts them to the port by using SGL block post. This is needed after a
875  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
876  * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
877  * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
878  *
879  * Returns: 0 = success, non-zero failure.
880  **/
881 int
882 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
883 {
884 	LIST_HEAD(post_sblist);
885 	int num_posted, rc = 0;
886 
887 	/* get all SCSI buffers that need to be reposted onto a local list */
888 	spin_lock(&phba->scsi_buf_list_lock);
889 	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
890 	spin_unlock(&phba->scsi_buf_list_lock);
891 
892 	/* post the list of scsi buffer sgls to port if available */
893 	if (!list_empty(&post_sblist)) {
894 		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
895 						phba->sli4_hba.scsi_xri_cnt);
896 		/* failed to post any scsi buffer, return error */
897 		if (num_posted == 0)
898 			rc = -EIO;
899 	}
900 	return rc;
901 }
902 
903 /**
904  * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
905  * @vport: The virtual port for which this call is being executed.
906  * @num_to_alloc: The requested number of buffers to allocate.
907  *
908  * This routine allocates scsi buffers for a device with the SLI-4 interface
909  * spec. Each scsi buffer contains all the information needed to initiate a
910  * SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
911  * them on a list, it posts them to the port by using SGL block post.
912  *
913  * Return codes:
914  *   int - number of scsi buffers that were allocated and posted.
915  *   0 = failure, less than num_to_alloc is a partial failure.
916  **/
917 static int
918 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
919 {
920 	struct lpfc_hba *phba = vport->phba;
921 	struct lpfc_scsi_buf *psb;
922 	struct sli4_sge *sgl;
923 	IOCB_t *iocb;
924 	dma_addr_t pdma_phys_fcp_cmd;
925 	dma_addr_t pdma_phys_fcp_rsp;
926 	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
927 	uint16_t iotag, lxri = 0;
928 	int bcnt, num_posted;
929 	LIST_HEAD(prep_sblist);
930 	LIST_HEAD(post_sblist);
931 	LIST_HEAD(scsi_sblist);
932 
933 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
934 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
935 		if (!psb)
936 			break;
937 		/*
938 		 * Get memory from the pci pool to map the virt space to
939 		 * pci bus space for an I/O. The DMA buffer includes space
940 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
941 		 * of bde's necessary to support the sg_tablesize.
942 		 */
943 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
944 						GFP_KERNEL, &psb->dma_handle);
945 		if (!psb->data) {
946 			kfree(psb);
947 			break;
948 		}
949 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
950 
951 		/* Allocate iotag for psb->cur_iocbq. */
952 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
953 		if (iotag == 0) {
954 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
955 				psb->data, psb->dma_handle);
956 			kfree(psb);
957 			break;
958 		}
959 
960 		lxri = lpfc_sli4_next_xritag(phba);
961 		if (lxri == NO_XRI) {
962 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
963 			      psb->data, psb->dma_handle);
964 			kfree(psb);
965 			break;
966 		}
967 		psb->cur_iocbq.sli4_lxritag = lxri;
968 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
969 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
970 		psb->fcp_bpl = psb->data;
971 		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
972 			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
973 		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
974 					sizeof(struct fcp_cmnd));
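		/*
		 * SLI-4 DMA buffer layout (a sketch): the SGL starts at
		 * offset 0 and the fcp_cmnd/fcp_rsp pair is carved from
		 * the tail of the buffer:
		 *   offset 0            : SGEs
		 *   size - (cmnd + rsp) : struct fcp_cmnd
		 *   size - rsp          : struct fcp_rsp
		 */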
975 
976 		/* Initialize local short-hand pointers. */
977 		sgl = (struct sli4_sge *)psb->fcp_bpl;
978 		pdma_phys_bpl = psb->dma_handle;
979 		pdma_phys_fcp_cmd =
980 			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
981 			 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
982 		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
983 
984 		/*
985 		 * The first two bdes are the FCP_CMD and FCP_RSP.
986 		 * The balance are sg list bdes. Initialize the
987 		 * first two and leave the rest for queuecommand.
988 		 */
989 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
990 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
991 		sgl->word2 = le32_to_cpu(sgl->word2);
992 		bf_set(lpfc_sli4_sge_last, sgl, 0);
993 		sgl->word2 = cpu_to_le32(sgl->word2);
994 		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
995 		sgl++;
996 
997 		/* Setup the physical region for the FCP RSP */
998 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
999 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
1000 		sgl->word2 = le32_to_cpu(sgl->word2);
1001 		bf_set(lpfc_sli4_sge_last, sgl, 1);
1002 		sgl->word2 = cpu_to_le32(sgl->word2);
1003 		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
1004 
1005 		/*
1006 		 * Since the IOCB for the FCP I/O is built into this
1007 		 * lpfc_scsi_buf, initialize it with all known data now.
1008 		 */
1009 		iocb = &psb->cur_iocbq.iocb;
1010 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
1011 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
1012 		/* setting the BLP size to 2 * sizeof BDE may not be correct.
1013 		 * We are setting the bpl to point to our sgl. An sgl's
1014 		 * entries are 16 bytes, while bpl entries are 12 bytes.
1015 		 */
1016 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
1017 		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
1018 		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
1019 		iocb->ulpBdeCount = 1;
1020 		iocb->ulpLe = 1;
1021 		iocb->ulpClass = CLASS3;
1022 		psb->cur_iocbq.context1 = psb;
1023 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1024 			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
1025 		else
1026 			pdma_phys_bpl1 = 0;
1027 		psb->dma_phys_bpl = pdma_phys_bpl;
1028 
1029 		/* add the scsi buffer to a post list */
1030 		list_add_tail(&psb->list, &post_sblist);
1031 		spin_lock_irq(&phba->scsi_buf_list_lock);
1032 		phba->sli4_hba.scsi_xri_cnt++;
1033 		spin_unlock_irq(&phba->scsi_buf_list_lock);
1034 	}
1035 	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
1036 			"3021 Allocate %d out of %d requested new SCSI "
1037 			"buffers\n", bcnt, num_to_alloc);
1038 
1039 	/* post the list of scsi buffer sgls to port if available */
1040 	if (!list_empty(&post_sblist))
1041 		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
1042 							  &post_sblist, bcnt);
1043 	else
1044 		num_posted = 0;
1045 
1046 	return num_posted;
1047 }
1048 
1049 /**
1050  * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
1051  * @vport: The virtual port for which this call is being executed.
1052  * @num_to_alloc: The requested number of buffers to allocate.
1053  *
1054  * This routine wraps the actual SCSI buffer allocator function pointer from
1055  * the lpfc_hba struct.
1056  *
1057  * Return codes:
1058  *   int - number of scsi buffers that were allocated.
1059  *   0 = failure, less than num_to_alloc is a partial failure.
1060  **/
1061 static inline int
1062 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
1063 {
1064 	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
1065 }
1066 
1067 /**
1068  * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1069  * @phba: The HBA for which this call is being executed.
1070  *
1071  * This routine removes a scsi buffer from the head of the @phba
1072  * lpfc_scsi_buf_list and returns it to the caller.
1073  *
1074  * Return codes:
1075  *   NULL - Error
1076  *   Pointer to lpfc_scsi_buf - Success
1077  **/
1078 static struct lpfc_scsi_buf*
1079 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1080 {
1081 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
1082 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
1083 	unsigned long iflag = 0;
1084 
1085 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1086 	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
1087 	if (lpfc_cmd) {
1088 		lpfc_cmd->seg_cnt = 0;
1089 		lpfc_cmd->nonsg_phys = 0;
1090 		lpfc_cmd->prot_seg_cnt = 0;
1091 	}
1092 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
1093 	return lpfc_cmd;
1094 }
1095 /**
1096  * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1097  * @phba: The HBA for which this call is being executed.
1098  *
1099  * This routine removes from the @phba lpfc_scsi_buf_list the first scsi
1100  * buffer whose XRI is not still RRQ-active for @ndlp and returns it.
1101  *
1102  * Return codes:
1103  *   NULL - Error
1104  *   Pointer to lpfc_scsi_buf - Success
1105  **/
1106 static struct lpfc_scsi_buf*
1107 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1108 {
1109 	struct lpfc_scsi_buf *lpfc_cmd;
1110 	unsigned long iflag = 0;
1111 	int found = 0;
1112 
1113 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1114 	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
1115 							list) {
1116 		if (lpfc_test_rrq_active(phba, ndlp,
1117 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
1118 			continue;
1119 		list_del(&lpfc_cmd->list);
1120 		found = 1;
1121 		lpfc_cmd->seg_cnt = 0;
1122 		lpfc_cmd->nonsg_phys = 0;
1123 		lpfc_cmd->prot_seg_cnt = 0;
1124 		break;
1125 	}
1126 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
1127 						 iflag);
1128 	if (!found)
1129 		return NULL;
1130 	else
1131 		return lpfc_cmd;
1132 }
1133 /**
1134  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1135  * @phba: The HBA for which this call is being executed.
1136  *
1137  * This routine removes a scsi buffer from the head of the @phba
1138  * lpfc_scsi_buf_list and returns it to the caller.
1139  *
1140  * Return codes:
1141  *   NULL - Error
1142  *   Pointer to lpfc_scsi_buf - Success
1143  **/
1144 static struct lpfc_scsi_buf*
1145 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1146 {
1147 	return phba->lpfc_get_scsi_buf(phba, ndlp);
1148 }
1149 
1150 /**
1151  * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
1152  * @phba: The Hba for which this call is being executed.
1153  * @psb: The scsi buffer which is being released.
1154  *
1155  * This routine releases @psb scsi buffer by adding it to tail of @phba
1156  * lpfc_scsi_buf_list list.
1157  **/
1158 static void
1159 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1160 {
1161 	unsigned long iflag = 0;
1162 
1163 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1164 	psb->pCmd = NULL;
1165 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
1166 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
1167 }
1168 
1169 /**
1170  * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
1171  * @phba: The Hba for which this call is being executed.
1172  * @psb: The scsi buffer which is being released.
1173  *
1174  * This routine releases @psb scsi buffer by adding it to tail of @phba
1175  * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
1176  * and cannot be reused for at least RA_TOV amount of time if it was
1177  * aborted.
1178  **/
1179 static void
1180 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1181 {
1182 	unsigned long iflag = 0;
1183 
1184 	if (psb->exch_busy) {
1185 		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1186 					iflag);
1187 		psb->pCmd = NULL;
1188 		list_add_tail(&psb->list,
1189 			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
1190 		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1191 					iflag);
1192 	} else {
1193 
1194 		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
1195 		psb->pCmd = NULL;
1196 		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
1197 		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
1198 	}
1199 }
1200 
1201 /**
1202  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
1203  * @phba: The Hba for which this call is being executed.
1204  * @psb: The scsi buffer which is being released.
1205  *
1206  * This routine releases @psb scsi buffer by adding it to tail of @phba
1207  * lpfc_scsi_buf_list list.
1208  **/
1209 static void
1210 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1211 {
1212 
1213 	phba->lpfc_release_scsi_buf(phba, psb);
1214 }
1215 
1216 /**
1217  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
1218  * @phba: The Hba for which this call is being executed.
1219  * @lpfc_cmd: The scsi buffer which is going to be mapped.
1220  *
1221  * This routine does the pci dma mapping for the scatter-gather list of the
1222  * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec.
1223  * It scans through the sg elements and formats the BDEs, and also
1224  * initializes all IOCB fields which depend on the scsi command request buffer.
1225  *
1226  * Return codes:
1227  *   1 - Error
1228  *   0 - Success
1229  **/
1230 static int
1231 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1232 {
1233 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1234 	struct scatterlist *sgel = NULL;
1235 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1236 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1237 	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
1238 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1239 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1240 	dma_addr_t physaddr;
1241 	uint32_t num_bde = 0;
1242 	int nseg, datadir = scsi_cmnd->sc_data_direction;
1243 
1244 	/*
1245 	 * There are three possibilities here - use scatter-gather segment, use
1246 	 * the single mapping, or neither.  Start the lpfc command prep by
1247 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1248 	 * data bde entry.
1249 	 */
1250 	bpl += 2;
1251 	if (scsi_sg_count(scsi_cmnd)) {
1252 		/*
1253 		 * because this is a count of dma-mappings used to map the use_sg
1254 		 * because this a count of dma-mappings used to map the use_sg
1255 		 * pages.  They are not guaranteed to be the same for those
1256 		 * architectures that implement an IOMMU.
1257 		 */
1258 
1259 		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1260 				  scsi_sg_count(scsi_cmnd), datadir);
1261 		if (unlikely(!nseg))
1262 			return 1;
1263 
1264 		lpfc_cmd->seg_cnt = nseg;
1265 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1266 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1267 				"9064 BLKGRD: %s: Too many sg segments from "
1268 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
1269 			       __func__, phba->cfg_sg_seg_cnt,
1270 			       lpfc_cmd->seg_cnt);
1271 			scsi_dma_unmap(scsi_cmnd);
1272 			return 1;
1273 		}
1274 
1275 		/*
1276 		 * The driver established a maximum scatter-gather segment count
1277 		 * during probe that limits the number of sg elements in any
1278 		 * single scsi command.  Just run through the seg_cnt and format
1279 		 * the bde's.
1280 		 * When using SLI-3 the driver will try to fit all the BDEs into
1281 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
1282 		 * does for SLI-2 mode.
1283 		 */
1284 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1285 			physaddr = sg_dma_address(sgel);
1286 			if (phba->sli_rev == 3 &&
1287 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1288 			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
1289 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1290 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1291 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1292 				data_bde->addrLow = putPaddrLow(physaddr);
1293 				data_bde->addrHigh = putPaddrHigh(physaddr);
1294 				data_bde++;
1295 			} else {
1296 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1297 				bpl->tus.f.bdeSize = sg_dma_len(sgel);
1298 				bpl->tus.w = le32_to_cpu(bpl->tus.w);
1299 				bpl->addrLow =
1300 					le32_to_cpu(putPaddrLow(physaddr));
1301 				bpl->addrHigh =
1302 					le32_to_cpu(putPaddrHigh(physaddr));
1303 				bpl++;
1304 			}
1305 		}
1306 	}
1307 
1308 	/*
1309 	 * Finish initializing those IOCB fields that are dependent on the
1310 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
1311 	 * explicitly reinitialized and for SLI-3 the extended bde count is
1312 	 * explicitly reinitialized since all iocb memory resources are reused.
1313 	 */
1314 	if (phba->sli_rev == 3 &&
1315 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1316 	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
1317 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1318 			/*
1319 			 * The extended IOCB format can only fit 3 BDE or a BPL.
1320 			 * This I/O has more than 3 BDE so the 1st data bde will
1321 			 * be a BPL that is filled in here.
1322 			 */
1323 			physaddr = lpfc_cmd->dma_handle;
1324 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1325 			data_bde->tus.f.bdeSize = (num_bde *
1326 						   sizeof(struct ulp_bde64));
1327 			physaddr += (sizeof(struct fcp_cmnd) +
1328 				     sizeof(struct fcp_rsp) +
1329 				     (2 * sizeof(struct ulp_bde64)));
1330 			data_bde->addrHigh = putPaddrHigh(physaddr);
1331 			data_bde->addrLow = putPaddrLow(physaddr);
1332 			/* ebde count includes the response bde and data bpl */
1333 			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1334 		} else {
1335 			/* ebde count includes the response bde and data bdes */
1336 			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1337 		}
1338 	} else {
1339 		iocb_cmd->un.fcpi64.bdl.bdeSize =
1340 			((num_bde + 2) * sizeof(struct ulp_bde64));
1341 		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1342 	}
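	/*
	 * Example (hypothetical): on the BPL path above, an I/O mapped into
	 * 4 data BDEs gets bdeSize = (4 + 2) * sizeof(struct ulp_bde64) =
	 * 72 bytes, the extra 2 entries covering the FCP_CMD and FCP_RSP.
	 */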
1343 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1344 
1345 	/*
1346 	 * Due to difference in data length between DIF/non-DIF paths,
1347 	 * we need to set word 4 of IOCB here
1348 	 */
1349 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1350 	return 0;
1351 }
1352 
1353 static inline unsigned
1354 lpfc_cmd_blksize(struct scsi_cmnd *sc)
1355 {
1356 	return sc->device->sector_size;
1357 }
1358 
1359 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1360 
1361 /* Return BG_ERR_INIT if error injection is detected by Initiator */
1362 #define BG_ERR_INIT	0x1
1363 /* Return BG_ERR_TGT if error injection is detected by Target */
1364 #define BG_ERR_TGT	0x2
1365 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
1366 #define BG_ERR_SWAP	0x10
1367 /* Return if disabling Guard/Ref/App checking is required for error injection */
1368 #define BG_ERR_CHECK	0x20
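/*
 * Illustration: a WRITE_PASS reftag injection below returns
 * (BG_ERR_TGT | BG_ERR_CHECK) = 0x22, i.e. the corruption should be
 * detected by the Target and guard/ref/app checking must be disabled
 * so the corrupted tag actually goes out on the wire.
 */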
1369 
1370 /**
1371  * lpfc_bg_err_inject - Determine if we should inject an error
1372  * @phba: The Hba for which this call is being executed.
1373  * @sc: The SCSI command to examine
1374  * @reftag: (out) BlockGuard reference tag for transmitted data
1375  * @apptag: (out) BlockGuard application tag for transmitted data
1376  * @new_guard: (in) Value to replace CRC with if needed
1377  *
1378  * Returns BG_ERR_* bit mask or 0 if request ignored
1379  **/
1380 static int
1381 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1382 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1383 {
1384 	struct scatterlist *sgpe; /* s/g prot entry */
1385 	struct scatterlist *sgde; /* s/g data entry */
1386 	struct lpfc_scsi_buf *lpfc_cmd = NULL;
1387 	struct scsi_dif_tuple *src = NULL;
1388 	struct lpfc_nodelist *ndlp;
1389 	struct lpfc_rport_data *rdata;
1390 	uint32_t op = scsi_get_prot_op(sc);
1391 	uint32_t blksize;
1392 	uint32_t numblks;
1393 	sector_t lba;
1394 	int rc = 0;
1395 	int blockoff = 0;
1396 
1397 	if (op == SCSI_PROT_NORMAL)
1398 		return 0;
1399 
1400 	sgpe = scsi_prot_sglist(sc);
1401 	sgde = scsi_sglist(sc);
1402 	lba = scsi_get_lba(sc);
1403 
1404 	/* First check if we need to match the LBA */
1405 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1406 		blksize = lpfc_cmd_blksize(sc);
1407 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1408 
1409 		/* Make sure we have the right LBA if one is specified */
1410 		if ((phba->lpfc_injerr_lba < lba) ||
1411 			(phba->lpfc_injerr_lba >= (lba + numblks)))
1412 			return 0;
1413 		if (sgpe) {
1414 			blockoff = phba->lpfc_injerr_lba - lba;
1415 			numblks = sg_dma_len(sgpe) /
1416 				sizeof(struct scsi_dif_tuple);
1417 			if (numblks < blockoff)
1418 				blockoff = numblks;
1419 		}
1420 	}
1421 
1422 	/* Next check if we need to match the remote NPortID or WWPN */
1423 	rdata = sc->device->hostdata;
1424 	if (rdata && rdata->pnode) {
1425 		ndlp = rdata->pnode;
1426 
1427 		/* Make sure we have the right NPortID if one is specified */
1428 		if (phba->lpfc_injerr_nportid  &&
1429 			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1430 			return 0;
1431 
1432 		/*
1433 		 * Make sure we have the right WWPN if one is specified.
1434 		 * wwn[0] should be a non-zero NAA in a good WWPN.
1435 		 */
1436 		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1437 			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1438 				sizeof(struct lpfc_name)) != 0))
1439 			return 0;
1440 	}
1441 
1442 	/* Setup a ptr to the protection data if the SCSI host provides it */
1443 	if (sgpe) {
1444 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1445 		src += blockoff;
1446 		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1447 	}
1448 
1449 	/* Should we change the Reference Tag */
1450 	if (reftag) {
1451 		if (phba->lpfc_injerr_wref_cnt) {
1452 			switch (op) {
1453 			case SCSI_PROT_WRITE_PASS:
1454 				if (src) {
1455 					/*
1456 					 * For WRITE_PASS, force the error
1457 					 * to be sent on the wire. It should
1458 					 * be detected by the Target.
1459 					 * If blockoff != 0 error will be
1460 					 * inserted in middle of the IO.
1461 					 */
1462 
1463 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1464 					"9076 BLKGRD: Injecting reftag error: "
1465 					"write lba x%lx + x%x oldrefTag x%x\n",
1466 					(unsigned long)lba, blockoff,
1467 					be32_to_cpu(src->ref_tag));
1468 
1469 					/*
1470 					 * Save the old ref_tag so we can
1471 					 * restore it on completion.
1472 					 */
1473 					if (lpfc_cmd) {
1474 						lpfc_cmd->prot_data_type =
1475 							LPFC_INJERR_REFTAG;
1476 						lpfc_cmd->prot_data_segment =
1477 							src;
1478 						lpfc_cmd->prot_data =
1479 							src->ref_tag;
1480 					}
1481 					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1482 					phba->lpfc_injerr_wref_cnt--;
1483 					if (phba->lpfc_injerr_wref_cnt == 0) {
1484 						phba->lpfc_injerr_nportid = 0;
1485 						phba->lpfc_injerr_lba =
1486 							LPFC_INJERR_LBA_OFF;
1487 						memset(&phba->lpfc_injerr_wwpn,
1488 						  0, sizeof(struct lpfc_name));
1489 					}
1490 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1491 
1492 					break;
1493 				}
1494 				/* Drop thru */
1495 			case SCSI_PROT_WRITE_INSERT:
1496 				/*
1497 				 * For WRITE_INSERT, force the error
1498 				 * to be sent on the wire. It should be
1499 				 * detected by the Target.
1500 				 */
1501 				/* DEADBEEF will be the reftag on the wire */
1502 				*reftag = 0xDEADBEEF;
1503 				phba->lpfc_injerr_wref_cnt--;
1504 				if (phba->lpfc_injerr_wref_cnt == 0) {
1505 					phba->lpfc_injerr_nportid = 0;
1506 					phba->lpfc_injerr_lba =
1507 					LPFC_INJERR_LBA_OFF;
1508 					memset(&phba->lpfc_injerr_wwpn,
1509 						0, sizeof(struct lpfc_name));
1510 				}
1511 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1512 
1513 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1514 					"9078 BLKGRD: Injecting reftag error: "
1515 					"write lba x%lx\n", (unsigned long)lba);
1516 				break;
1517 			case SCSI_PROT_WRITE_STRIP:
1518 				/*
1519 				 * For WRITE_STRIP and WRITE_PASS,
1520 				 * force the error on data
1521 				 * being copied from SLI-Host to SLI-Port.
1522 				 */
1523 				*reftag = 0xDEADBEEF;
1524 				phba->lpfc_injerr_wref_cnt--;
1525 				if (phba->lpfc_injerr_wref_cnt == 0) {
1526 					phba->lpfc_injerr_nportid = 0;
1527 					phba->lpfc_injerr_lba =
1528 						LPFC_INJERR_LBA_OFF;
1529 					memset(&phba->lpfc_injerr_wwpn,
1530 						0, sizeof(struct lpfc_name));
1531 				}
1532 				rc = BG_ERR_INIT;
1533 
1534 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1535 					"9077 BLKGRD: Injecting reftag error: "
1536 					"write lba x%lx\n", (unsigned long)lba);
1537 				break;
1538 			}
1539 		}
1540 		if (phba->lpfc_injerr_rref_cnt) {
1541 			switch (op) {
1542 			case SCSI_PROT_READ_INSERT:
1543 			case SCSI_PROT_READ_STRIP:
1544 			case SCSI_PROT_READ_PASS:
1545 				/*
1546 				 * For READ_STRIP and READ_PASS, force the
1547 				 * error on data being read off the wire. It
1548 				 * should force an IO error to the driver.
1549 				 */
1550 				*reftag = 0xDEADBEEF;
1551 				phba->lpfc_injerr_rref_cnt--;
1552 				if (phba->lpfc_injerr_rref_cnt == 0) {
1553 					phba->lpfc_injerr_nportid = 0;
1554 					phba->lpfc_injerr_lba =
1555 						LPFC_INJERR_LBA_OFF;
1556 					memset(&phba->lpfc_injerr_wwpn,
1557 						0, sizeof(struct lpfc_name));
1558 				}
1559 				rc = BG_ERR_INIT;
1560 
1561 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1562 					"9079 BLKGRD: Injecting reftag error: "
1563 					"read lba x%lx\n", (unsigned long)lba);
1564 				break;
1565 			}
1566 		}
1567 	}
1568 
1569 	/* Should we change the Application Tag */
1570 	if (apptag) {
1571 		if (phba->lpfc_injerr_wapp_cnt) {
1572 			switch (op) {
1573 			case SCSI_PROT_WRITE_PASS:
1574 				if (src) {
1575 					/*
1576 					 * For WRITE_PASS, force the error
1577 					 * to be sent on the wire. It should
1578 					 * be detected by the Target.
1579 					 * If blockoff != 0 error will be
1580 					 * inserted in middle of the IO.
1581 					 */
1582 
1583 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1584 					"9080 BLKGRD: Injecting apptag error: "
1585 					"write lba x%lx + x%x oldappTag x%x\n",
1586 					(unsigned long)lba, blockoff,
1587 					be16_to_cpu(src->app_tag));
1588 
1589 					/*
1590 					 * Save the old app_tag so we can
1591 					 * restore it on completion.
1592 					 */
1593 					if (lpfc_cmd) {
1594 						lpfc_cmd->prot_data_type =
1595 							LPFC_INJERR_APPTAG;
1596 						lpfc_cmd->prot_data_segment =
1597 							src;
1598 						lpfc_cmd->prot_data =
1599 							src->app_tag;
1600 					}
1601 					src->app_tag = cpu_to_be16(0xDEAD);
1602 					phba->lpfc_injerr_wapp_cnt--;
1603 					if (phba->lpfc_injerr_wapp_cnt == 0) {
1604 						phba->lpfc_injerr_nportid = 0;
1605 						phba->lpfc_injerr_lba =
1606 							LPFC_INJERR_LBA_OFF;
1607 						memset(&phba->lpfc_injerr_wwpn,
1608 						  0, sizeof(struct lpfc_name));
1609 					}
1610 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1611 					break;
1612 				}
1613 				/* Drop thru */
1614 			case SCSI_PROT_WRITE_INSERT:
1615 				/*
1616 				 * For WRITE_INSERT, force the
1617 				 * error to be sent on the wire. It should be
1618 				 * detected by the Target.
1619 				 */
1620 				/* DEAD will be the apptag on the wire */
1621 				*apptag = 0xDEAD;
1622 				phba->lpfc_injerr_wapp_cnt--;
1623 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1624 					phba->lpfc_injerr_nportid = 0;
1625 					phba->lpfc_injerr_lba =
1626 						LPFC_INJERR_LBA_OFF;
1627 					memset(&phba->lpfc_injerr_wwpn,
1628 						0, sizeof(struct lpfc_name));
1629 				}
1630 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1631 
1632 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1633 					"0813 BLKGRD: Injecting apptag error: "
1634 					"write lba x%lx\n", (unsigned long)lba);
1635 				break;
1636 			case SCSI_PROT_WRITE_STRIP:
1637 				/*
1638 				 * For WRITE_STRIP and WRITE_PASS,
1639 				 * force the error on data
1640 				 * being copied from SLI-Host to SLI-Port.
1641 				 */
1642 				*apptag = 0xDEAD;
1643 				phba->lpfc_injerr_wapp_cnt--;
1644 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1645 					phba->lpfc_injerr_nportid = 0;
1646 					phba->lpfc_injerr_lba =
1647 						LPFC_INJERR_LBA_OFF;
1648 					memset(&phba->lpfc_injerr_wwpn,
1649 						0, sizeof(struct lpfc_name));
1650 				}
1651 				rc = BG_ERR_INIT;
1652 
1653 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1654 					"0812 BLKGRD: Injecting apptag error: "
1655 					"write lba x%lx\n", (unsigned long)lba);
1656 				break;
1657 			}
1658 		}
1659 		if (phba->lpfc_injerr_rapp_cnt) {
1660 			switch (op) {
1661 			case SCSI_PROT_READ_INSERT:
1662 			case SCSI_PROT_READ_STRIP:
1663 			case SCSI_PROT_READ_PASS:
1664 				/*
1665 				 * For READ_STRIP and READ_PASS, force the
1666 				 * error on data being read off the wire. It
1667 				 * should force an IO error to the driver.
1668 				 */
1669 				*apptag = 0xDEAD;
1670 				phba->lpfc_injerr_rapp_cnt--;
1671 				if (phba->lpfc_injerr_rapp_cnt == 0) {
1672 					phba->lpfc_injerr_nportid = 0;
1673 					phba->lpfc_injerr_lba =
1674 						LPFC_INJERR_LBA_OFF;
1675 					memset(&phba->lpfc_injerr_wwpn,
1676 						0, sizeof(struct lpfc_name));
1677 				}
1678 				rc = BG_ERR_INIT;
1679 
1680 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1681 					"0814 BLKGRD: Injecting apptag error: "
1682 					"read lba x%lx\n", (unsigned long)lba);
1683 				break;
1684 			}
1685 		}
1686 	}
1687 
1688 
1689 	/* Should we change the Guard Tag */
1690 	if (new_guard) {
1691 		if (phba->lpfc_injerr_wgrd_cnt) {
1692 			switch (op) {
1693 			case SCSI_PROT_WRITE_PASS:
1694 				rc = BG_ERR_CHECK;
1695 				/* Drop thru */
1696 
1697 			case SCSI_PROT_WRITE_INSERT:
1698 				/*
1699 				 * For WRITE_INSERT, force the
1700 				 * error to be sent on the wire. It should be
1701 				 * detected by the Target.
1702 				 */
1703 				phba->lpfc_injerr_wgrd_cnt--;
1704 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1705 					phba->lpfc_injerr_nportid = 0;
1706 					phba->lpfc_injerr_lba =
1707 						LPFC_INJERR_LBA_OFF;
1708 					memset(&phba->lpfc_injerr_wwpn,
1709 						0, sizeof(struct lpfc_name));
1710 				}
1711 
1712 				rc |= BG_ERR_TGT | BG_ERR_SWAP;
1713 				/* Signals the caller to swap CRC->CSUM */
1714 
1715 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1716 					"0817 BLKGRD: Injecting guard error: "
1717 					"write lba x%lx\n", (unsigned long)lba);
1718 				break;
1719 			case SCSI_PROT_WRITE_STRIP:
1720 				/*
1721 				 * For WRITE_STRIP and WRITE_PASS,
1722 				 * force the error on data
1723 				 * being copied from SLI-Host to SLI-Port.
1724 				 */
1725 				phba->lpfc_injerr_wgrd_cnt--;
1726 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1727 					phba->lpfc_injerr_nportid = 0;
1728 					phba->lpfc_injerr_lba =
1729 						LPFC_INJERR_LBA_OFF;
1730 					memset(&phba->lpfc_injerr_wwpn,
1731 						0, sizeof(struct lpfc_name));
1732 				}
1733 
1734 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1735 				/* Signals the caller to swap CRC->CSUM */
1736 
1737 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1738 					"0816 BLKGRD: Injecting guard error: "
1739 					"write lba x%lx\n", (unsigned long)lba);
1740 				break;
1741 			}
1742 		}
1743 		if (phba->lpfc_injerr_rgrd_cnt) {
1744 			switch (op) {
1745 			case SCSI_PROT_READ_INSERT:
1746 			case SCSI_PROT_READ_STRIP:
1747 			case SCSI_PROT_READ_PASS:
1748 				/*
1749 				 * For READ_STRIP and READ_PASS, force the
1750 				 * error on data being read off the wire. It
1751 				 * should force an IO error to the driver.
1752 				 */
1753 				phba->lpfc_injerr_rgrd_cnt--;
1754 				if (phba->lpfc_injerr_rgrd_cnt == 0) {
1755 					phba->lpfc_injerr_nportid = 0;
1756 					phba->lpfc_injerr_lba =
1757 						LPFC_INJERR_LBA_OFF;
1758 					memset(&phba->lpfc_injerr_wwpn,
1759 						0, sizeof(struct lpfc_name));
1760 				}
1761 
1762 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1763 				/* Signals the caller to swap CRC->CSUM */
1764 
1765 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1766 					"0818 BLKGRD: Injecting guard error: "
1767 					"read lba x%lx\n", (unsigned long)lba);
1768 			}
1769 		}
1770 	}
1771 
1772 	return rc;
1773 }
1774 #endif
1775 
1776 /**
1777  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1778  * the specified SCSI command.
1779  * @phba: The Hba for which this call is being executed.
1780  * @sc: The SCSI command to examine
1781  * @txop: (out) BlockGuard operation for transmitted data
1782  * @rxop: (out) BlockGuard operation for received data
1783  *
1784  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1785  *
1786  **/
1787 static int
1788 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1789 		uint8_t *txop, uint8_t *rxop)
1790 {
1791 	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1792 	uint8_t ret = 0;
1793 
1794 	if (guard_type == SHOST_DIX_GUARD_IP) {
1795 		switch (scsi_get_prot_op(sc)) {
1796 		case SCSI_PROT_READ_INSERT:
1797 		case SCSI_PROT_WRITE_STRIP:
1798 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1799 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1800 			break;
1801 
1802 		case SCSI_PROT_READ_STRIP:
1803 		case SCSI_PROT_WRITE_INSERT:
1804 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1805 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1806 			break;
1807 
1808 		case SCSI_PROT_READ_PASS:
1809 		case SCSI_PROT_WRITE_PASS:
1810 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
1811 			*txop = BG_OP_IN_CSUM_OUT_CRC;
1812 			break;
1813 
1814 		case SCSI_PROT_NORMAL:
1815 		default:
1816 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1817 				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1818 					scsi_get_prot_op(sc));
1819 			ret = 1;
1820 			break;
1821 
1822 		}
1823 	} else {
1824 		switch (scsi_get_prot_op(sc)) {
1825 		case SCSI_PROT_READ_STRIP:
1826 		case SCSI_PROT_WRITE_INSERT:
1827 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1828 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1829 			break;
1830 
1831 		case SCSI_PROT_READ_PASS:
1832 		case SCSI_PROT_WRITE_PASS:
1833 			*rxop = BG_OP_IN_CRC_OUT_CRC;
1834 			*txop = BG_OP_IN_CRC_OUT_CRC;
1835 			break;
1836 
1837 		case SCSI_PROT_READ_INSERT:
1838 		case SCSI_PROT_WRITE_STRIP:
1839 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1840 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1841 			break;
1842 
1843 		case SCSI_PROT_NORMAL:
1844 		default:
1845 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1846 				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1847 					scsi_get_prot_op(sc));
1848 			ret = 1;
1849 			break;
1850 		}
1851 	}
1852 
1853 	return ret;
1854 }
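
/*
 * Illustration of the mapping above (drawn from the IP-guard branch):
 * a READ_PASS under an IP-checksum DIX guard selects
 * rxop = BG_OP_IN_CRC_OUT_CSUM (check the T10 CRC arriving off the
 * wire, hand an IP checksum up to the host) and
 * txop = BG_OP_IN_CSUM_OUT_CRC for the transmit direction.
 */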
1855 
1856 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1857 /**
1858  * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1859  * the specified SCSI command in order to force a guard tag error.
1860  * @phba: The Hba for which this call is being executed.
1861  * @sc: The SCSI command to examine
1862  * @txop: (out) BlockGuard operation for transmitted data
1863  * @rxop: (out) BlockGuard operation for received data
1864  *
1865  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1866  *
1867  **/
1868 static int
1869 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1870 		uint8_t *txop, uint8_t *rxop)
1871 {
1872 	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1873 	uint8_t ret = 0;
1874 
1875 	if (guard_type == SHOST_DIX_GUARD_IP) {
1876 		switch (scsi_get_prot_op(sc)) {
1877 		case SCSI_PROT_READ_INSERT:
1878 		case SCSI_PROT_WRITE_STRIP:
1879 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1880 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1881 			break;
1882 
1883 		case SCSI_PROT_READ_STRIP:
1884 		case SCSI_PROT_WRITE_INSERT:
1885 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1886 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1887 			break;
1888 
1889 		case SCSI_PROT_READ_PASS:
1890 		case SCSI_PROT_WRITE_PASS:
1891 			*rxop = BG_OP_IN_CSUM_OUT_CRC;
1892 			*txop = BG_OP_IN_CRC_OUT_CSUM;
1893 			break;
1894 
1895 		case SCSI_PROT_NORMAL:
1896 		default:
1897 			break;
1898 
1899 		}
1900 	} else {
1901 		switch (scsi_get_prot_op(sc)) {
1902 		case SCSI_PROT_READ_STRIP:
1903 		case SCSI_PROT_WRITE_INSERT:
1904 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1905 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1906 			break;
1907 
1908 		case SCSI_PROT_READ_PASS:
1909 		case SCSI_PROT_WRITE_PASS:
1910 			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
1911 			*txop = BG_OP_IN_CSUM_OUT_CSUM;
1912 			break;
1913 
1914 		case SCSI_PROT_READ_INSERT:
1915 		case SCSI_PROT_WRITE_STRIP:
1916 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1917 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1918 			break;
1919 
1920 		case SCSI_PROT_NORMAL:
1921 		default:
1922 			break;
1923 		}
1924 	}
1925 
1926 	return ret;
1927 }
1928 #endif
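
/*
 * Consumption sketch (hedged, matching the callers below): when
 * lpfc_bg_err_inject() reports BG_ERR_SWAP, the BPL/SGL setup routines
 * re-run opcode selection through lpfc_bg_err_opcodes(), which
 * deliberately swaps the CRC and IP-checksum guards so one side computes
 * a guard the other side will reject, forcing a guard-tag error without
 * touching the data.  BG_ERR_CHECK likewise tells them to clear the
 * checking (ce/re) bits so the corrupted tags reach the target
 * unverified.
 */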
1929 
1930 /**
1931  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1932  * @phba: The Hba for which this call is being executed.
1933  * @sc: pointer to scsi command we're working on
1934  * @bpl: pointer to buffer list for protection groups
1935  * @datasegcnt: number of segments of data that have been dma mapped
1936  *
1937  * This function sets up BPL buffer list for protection groups of
1938  * type LPFC_PG_TYPE_NO_DIF
1939  *
1940  * This is usually used when the HBA is instructed to generate
1941  * DIFs and insert them into the data stream (or strip DIFs from
1942  * the incoming data stream).
1943  *
1944  * The buffer list consists of just one protection group described
1945  * below:
1946  *                                +-------------------------+
1947  *   start of prot group  -->     |          PDE_5          |
1948  *                                +-------------------------+
1949  *                                |          PDE_6          |
1950  *                                +-------------------------+
1951  *                                |         Data BDE        |
1952  *                                +-------------------------+
1953  *                                |more Data BDE's ... (opt)|
1954  *                                +-------------------------+
1955  *
1956  *
1957  * Note: Data s/g buffers have been dma mapped
1958  *
1959  * Returns the number of BDEs added to the BPL.
1960  **/
1961 static int
1962 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1963 		struct ulp_bde64 *bpl, int datasegcnt)
1964 {
1965 	struct scatterlist *sgde = NULL; /* s/g data entry */
1966 	struct lpfc_pde5 *pde5 = NULL;
1967 	struct lpfc_pde6 *pde6 = NULL;
1968 	dma_addr_t physaddr;
1969 	int i = 0, num_bde = 0, status;
1970 	int datadir = sc->sc_data_direction;
1971 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1972 	uint32_t rc;
1973 #endif
1974 	uint32_t checking = 1;
1975 	uint32_t reftag;
1976 	unsigned blksize;
1977 	uint8_t txop, rxop;
1978 
1979 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1980 	if (status)
1981 		goto out;
1982 
1983 	/* extract some info from the scsi command for pde*/
1984 	blksize = lpfc_cmd_blksize(sc);
1985 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1986 
1987 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1988 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1989 	if (rc) {
1990 		if (rc & BG_ERR_SWAP)
1991 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1992 		if (rc & BG_ERR_CHECK)
1993 			checking = 0;
1994 	}
1995 #endif
1996 
1997 	/* setup PDE5 with what we have */
1998 	pde5 = (struct lpfc_pde5 *) bpl;
1999 	memset(pde5, 0, sizeof(struct lpfc_pde5));
2000 	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2001 
2002 	/* Endianness conversion if necessary for PDE5 */
2003 	pde5->word0 = cpu_to_le32(pde5->word0);
2004 	pde5->reftag = cpu_to_le32(reftag);
2005 
2006 	/* advance bpl and increment bde count */
2007 	num_bde++;
2008 	bpl++;
2009 	pde6 = (struct lpfc_pde6 *) bpl;
2010 
2011 	/* setup PDE6 with the rest of the info */
2012 	memset(pde6, 0, sizeof(struct lpfc_pde6));
2013 	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2014 	bf_set(pde6_optx, pde6, txop);
2015 	bf_set(pde6_oprx, pde6, rxop);
2016 	if (datadir == DMA_FROM_DEVICE) {
2017 		bf_set(pde6_ce, pde6, checking);
2018 		bf_set(pde6_re, pde6, checking);
2019 	}
2020 	bf_set(pde6_ai, pde6, 1);
2021 	bf_set(pde6_ae, pde6, 0);
2022 	bf_set(pde6_apptagval, pde6, 0);
2023 
2024 	/* Endianness conversion if necessary for PDE6 */
2025 	pde6->word0 = cpu_to_le32(pde6->word0);
2026 	pde6->word1 = cpu_to_le32(pde6->word1);
2027 	pde6->word2 = cpu_to_le32(pde6->word2);
2028 
2029 	/* advance bpl and increment bde count */
2030 	num_bde++;
2031 	bpl++;
2032 
2033 	/* assumption: caller has already run dma_map_sg on command data */
2034 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2035 		physaddr = sg_dma_address(sgde);
2036 		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2037 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2038 		bpl->tus.f.bdeSize = sg_dma_len(sgde);
2039 		if (datadir == DMA_TO_DEVICE)
2040 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2041 		else
2042 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2043 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2044 		bpl++;
2045 		num_bde++;
2046 	}
2047 
2048 out:
2049 	return num_bde;
2050 }
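
/*
 * Sizing example for the routine above: a request whose data maps to
 * three DMA segments produces PDE5 + PDE6 + 3 data BDEs, i.e. a return
 * value of 5.  A return below 2 (e.g. when opcode selection fails and
 * we jump straight to "out") is treated as an error by the caller.
 */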
2051 
2052 /**
2053  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2054  * @phba: The Hba for which this call is being executed.
2055  * @sc: pointer to scsi command we're working on
2056  * @bpl: pointer to buffer list for protection groups
2057  * @datacnt: number of segments of data that have been dma mapped
2058  * @protcnt: number of segments of protection data that have been dma mapped
2059  *
2060  * This function sets up BPL buffer list for protection groups of
2061  * type LPFC_PG_TYPE_DIF
2062  *
2063  * This is usually used when DIFs are in their own buffers,
2064  * separate from the data. The HBA can then be instructed
2065  * to place the DIFs in the outgoing stream.  For read operations,
2066  * the HBA can extract the DIFs and place them in DIF buffers.
2067  *
2068  * The buffer list for this type consists of one or more of the
2069  * protection groups described below:
2070  *                                    +-------------------------+
2071  *   start of first prot group  -->   |          PDE_5          |
2072  *                                    +-------------------------+
2073  *                                    |          PDE_6          |
2074  *                                    +-------------------------+
2075  *                                    |      PDE_7 (Prot BDE)   |
2076  *                                    +-------------------------+
2077  *                                    |        Data BDE         |
2078  *                                    +-------------------------+
2079  *                                    |more Data BDE's ... (opt)|
2080  *                                    +-------------------------+
2081  *   start of new  prot group  -->    |          PDE_5          |
2082  *                                    +-------------------------+
2083  *                                    |          ...            |
2084  *                                    +-------------------------+
2085  *
2086  * Note: It is assumed that both data and protection s/g buffers have been
2087  *       mapped for DMA
2088  *
2089  * Returns the number of BDEs added to the BPL.
2090  **/
2091 static int
2092 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2093 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
2094 {
2095 	struct scatterlist *sgde = NULL; /* s/g data entry */
2096 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2097 	struct lpfc_pde5 *pde5 = NULL;
2098 	struct lpfc_pde6 *pde6 = NULL;
2099 	struct lpfc_pde7 *pde7 = NULL;
2100 	dma_addr_t dataphysaddr, protphysaddr;
2101 	unsigned short curr_data = 0, curr_prot = 0;
2102 	unsigned int split_offset;
2103 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2104 	unsigned int protgrp_blks, protgrp_bytes;
2105 	unsigned int remainder, subtotal;
2106 	int status;
2107 	int datadir = sc->sc_data_direction;
2108 	unsigned char pgdone = 0, alldone = 0;
2109 	unsigned blksize;
2110 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2111 	uint32_t rc;
2112 #endif
2113 	uint32_t checking = 1;
2114 	uint32_t reftag;
2115 	uint8_t txop, rxop;
2116 	int num_bde = 0;
2117 
2118 	sgpe = scsi_prot_sglist(sc);
2119 	sgde = scsi_sglist(sc);
2120 
2121 	if (!sgpe || !sgde) {
2122 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2123 				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2124 				sgpe, sgde);
2125 		return 0;
2126 	}
2127 
2128 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2129 	if (status)
2130 		goto out;
2131 
2132 	/* extract some info from the scsi command */
2133 	blksize = lpfc_cmd_blksize(sc);
2134 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2135 
2136 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2137 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2138 	if (rc) {
2139 		if (rc & BG_ERR_SWAP)
2140 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2141 		if (rc & BG_ERR_CHECK)
2142 			checking = 0;
2143 	}
2144 #endif
2145 
2146 	split_offset = 0;
2147 	do {
2148 		/* setup PDE5 with what we have */
2149 		pde5 = (struct lpfc_pde5 *) bpl;
2150 		memset(pde5, 0, sizeof(struct lpfc_pde5));
2151 		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2152 
2153 		/* Endianness conversion if necessary for PDE5 */
2154 		pde5->word0 = cpu_to_le32(pde5->word0);
2155 		pde5->reftag = cpu_to_le32(reftag);
2156 
2157 		/* advance bpl and increment bde count */
2158 		num_bde++;
2159 		bpl++;
2160 		pde6 = (struct lpfc_pde6 *) bpl;
2161 
2162 		/* setup PDE6 with the rest of the info */
2163 		memset(pde6, 0, sizeof(struct lpfc_pde6));
2164 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2165 		bf_set(pde6_optx, pde6, txop);
2166 		bf_set(pde6_oprx, pde6, rxop);
2167 		bf_set(pde6_ce, pde6, checking);
2168 		bf_set(pde6_re, pde6, checking);
2169 		bf_set(pde6_ai, pde6, 1);
2170 		bf_set(pde6_ae, pde6, 0);
2171 		bf_set(pde6_apptagval, pde6, 0);
2172 
2173 		/* Endianness conversion if necessary for PDE6 */
2174 		pde6->word0 = cpu_to_le32(pde6->word0);
2175 		pde6->word1 = cpu_to_le32(pde6->word1);
2176 		pde6->word2 = cpu_to_le32(pde6->word2);
2177 
2178 		/* advance bpl and increment bde count */
2179 		num_bde++;
2180 		bpl++;
2181 
2182 		/* setup the first BDE that points to protection buffer */
2183 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2184 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2185 
2186 		/* must be integer multiple of the DIF block length */
2187 		BUG_ON(protgroup_len % 8);
2188 
2189 		pde7 = (struct lpfc_pde7 *) bpl;
2190 		memset(pde7, 0, sizeof(struct lpfc_pde7));
2191 		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2192 
2193 		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2194 		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2195 
2196 		protgrp_blks = protgroup_len / 8;
2197 		protgrp_bytes = protgrp_blks * blksize;
2198 
2199 		/* check if this pde is crossing the 4K boundary; if so split */
2200 		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2201 			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2202 			protgroup_offset += protgroup_remainder;
2203 			protgrp_blks = protgroup_remainder / 8;
2204 			protgrp_bytes = protgrp_blks * blksize;
2205 		} else {
2206 			protgroup_offset = 0;
2207 			curr_prot++;
2208 		}
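		/*
		 * Worked example (illustrative numbers, not from any spec):
		 * with 512-byte data blocks, a 520-byte protection segment
		 * holds 65 DIF tuples, so protgrp_blks = 65 and
		 * protgrp_bytes = 33280.  If that segment starts at offset
		 * 0xf00 within a 4K page, only 0x100 bytes (32 tuples) fit
		 * below the boundary; the group is clipped to 32 blocks and
		 * the remainder is emitted as a fresh PDE5/PDE6/PDE7 set on
		 * the next pass via protgroup_offset.
		 */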
2209 
2210 		num_bde++;
2211 
2212 		/* setup BDE's for data blocks associated with DIF data */
2213 		pgdone = 0;
2214 		subtotal = 0; /* total bytes processed for current prot grp */
2215 		while (!pgdone) {
2216 			if (!sgde) {
2217 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2218 					"9065 BLKGRD:%s Invalid data segment\n",
2219 						__func__);
2220 				return 0;
2221 			}
2222 			bpl++;
2223 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2224 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2225 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2226 
2227 			remainder = sg_dma_len(sgde) - split_offset;
2228 
2229 			if ((subtotal + remainder) <= protgrp_bytes) {
2230 				/* we can use this whole buffer */
2231 				bpl->tus.f.bdeSize = remainder;
2232 				split_offset = 0;
2233 
2234 				if ((subtotal + remainder) == protgrp_bytes)
2235 					pgdone = 1;
2236 			} else {
2237 				/* must split this buffer with next prot grp */
2238 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2239 				split_offset += bpl->tus.f.bdeSize;
2240 			}
2241 
2242 			subtotal += bpl->tus.f.bdeSize;
2243 
2244 			if (datadir == DMA_TO_DEVICE)
2245 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2246 			else
2247 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2248 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2249 
2250 			num_bde++;
2251 			curr_data++;
2252 
2253 			if (split_offset)
2254 				break;
2255 
2256 			/* Move to the next s/g segment if possible */
2257 			sgde = sg_next(sgde);
2258 
2259 		}
2260 
2261 		if (protgroup_offset) {
2262 			/* update the reference tag */
2263 			reftag += protgrp_blks;
2264 			bpl++;
2265 			continue;
2266 		}
2267 
2268 		/* are we done ? */
2269 		if (curr_prot == protcnt) {
2270 			alldone = 1;
2271 		} else if (curr_prot < protcnt) {
2272 			/* advance to next prot buffer */
2273 			sgpe = sg_next(sgpe);
2274 			bpl++;
2275 
2276 			/* update the reference tag */
2277 			reftag += protgrp_blks;
2278 		} else {
2279 			/* if we're here, we have a bug */
2280 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2281 				"9054 BLKGRD: bug in %s\n", __func__);
2282 		}
2283 
2284 	} while (!alldone);
2285 out:
2286 
2287 	return num_bde;
2288 }
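
/*
 * Sizing note for the routine above: every protection group costs at
 * least three BDEs (PDE5 + PDE6 + PDE7) before any data BDEs, which is
 * why callers sanity-check for a minimum of three entries in the
 * buffer list on the DIF path.
 */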
2289 
2290 /**
2291  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2292  * @phba: The Hba for which this call is being executed.
2293  * @sc: pointer to scsi command we're working on
2294  * @sgl: pointer to buffer list for protection groups
2295  * @datasegcnt: number of segments of data that have been dma mapped
2296  *
2297  * This function sets up SGL buffer list for protection groups of
2298  * type LPFC_PG_TYPE_NO_DIF
2299  *
2300  * This is usually used when the HBA is instructed to generate
2301  * DIFs and insert them into the data stream (or strip DIFs from
2302  * the incoming data stream).
2303  *
2304  * The buffer list consists of just one protection group described
2305  * below:
2306  *                                +-------------------------+
2307  *   start of prot group  -->     |         DI_SEED         |
2308  *                                +-------------------------+
2309  *                                |         Data SGE        |
2310  *                                +-------------------------+
2311  *                                |more Data SGE's ... (opt)|
2312  *                                +-------------------------+
2313  *
2314  *
2315  * Note: Data s/g buffers have been dma mapped
2316  *
2317  * Returns the number of SGEs added to the SGL.
2318  **/
2319 static int
2320 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2321 		struct sli4_sge *sgl, int datasegcnt)
2322 {
2323 	struct scatterlist *sgde = NULL; /* s/g data entry */
2324 	struct sli4_sge_diseed *diseed = NULL;
2325 	dma_addr_t physaddr;
2326 	int i = 0, num_sge = 0, status;
2327 	int datadir = sc->sc_data_direction;
2328 	uint32_t reftag;
2329 	unsigned blksize;
2330 	uint8_t txop, rxop;
2331 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2332 	uint32_t rc;
2333 #endif
2334 	uint32_t checking = 1;
2335 	uint32_t dma_len;
2336 	uint32_t dma_offset = 0;
2337 
2338 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2339 	if (status)
2340 		goto out;
2341 
2342 	/* extract some info from the scsi command for pde*/
2343 	blksize = lpfc_cmd_blksize(sc);
2344 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2345 
2346 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2347 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2348 	if (rc) {
2349 		if (rc & BG_ERR_SWAP)
2350 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2351 		if (rc & BG_ERR_CHECK)
2352 			checking = 0;
2353 	}
2354 #endif
2355 
2356 	/* setup DISEED with what we have */
2357 	diseed = (struct sli4_sge_diseed *) sgl;
2358 	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2359 	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2360 
2361 	/* Endianness conversion if necessary */
2362 	diseed->ref_tag = cpu_to_le32(reftag);
2363 	diseed->ref_tag_tran = diseed->ref_tag;
2364 
2365 	/* setup DISEED with the rest of the info */
2366 	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2367 	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2368 	if (datadir == DMA_FROM_DEVICE) {
2369 		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2370 		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2371 	}
2372 	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2373 	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2374 
2375 	/* Endianness conversion if necessary for DISEED */
2376 	diseed->word2 = cpu_to_le32(diseed->word2);
2377 	diseed->word3 = cpu_to_le32(diseed->word3);
2378 
2379 	/* advance bpl and increment sge count */
2380 	num_sge++;
2381 	sgl++;
2382 
2383 	/* assumption: caller has already run dma_map_sg on command data */
2384 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2385 		physaddr = sg_dma_address(sgde);
2386 		dma_len = sg_dma_len(sgde);
2387 		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2388 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2389 		if ((i + 1) == datasegcnt)
2390 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2391 		else
2392 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2393 		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2394 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2395 
2396 		sgl->sge_len = cpu_to_le32(dma_len);
2397 		dma_offset += dma_len;
2398 
2399 		sgl++;
2400 		num_sge++;
2401 	}
2402 
2403 out:
2404 	return num_sge;
2405 }
2406 
2407 /**
2408  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2409  * @phba: The Hba for which this call is being executed.
2410  * @sc: pointer to scsi command we're working on
2411  * @sgl: pointer to buffer list for protection groups
2412  * @datacnt: number of segments of data that have been dma mapped
2413  * @protcnt: number of segments of protection data that have been dma mapped
2414  *
2415  * This function sets up SGL buffer list for protection groups of
2416  * type LPFC_PG_TYPE_DIF
2417  *
2418  * This is usually used when DIFs are in their own buffers,
2419  * separate from the data. The HBA can then be instructed
2420  * to place the DIFs in the outgoing stream.  For read operations,
2421  * the HBA can extract the DIFs and place them in DIF buffers.
2422  *
2423  * The buffer list for this type consists of one or more of the
2424  * protection groups described below:
2425  *                                    +-------------------------+
2426  *   start of first prot group  -->   |         DISEED          |
2427  *                                    +-------------------------+
2428  *                                    |      DIF (Prot SGE)     |
2429  *                                    +-------------------------+
2430  *                                    |        Data SGE         |
2431  *                                    +-------------------------+
2432  *                                    |more Data SGE's ... (opt)|
2433  *                                    +-------------------------+
2434  *   start of new  prot group  -->    |         DISEED          |
2435  *                                    +-------------------------+
2436  *                                    |          ...            |
2437  *                                    +-------------------------+
2438  *
2439  * Note: It is assumed that both data and protection s/g buffers have been
2440  *       mapped for DMA
2441  *
2442  * Returns the number of SGEs added to the SGL.
2443  **/
2444 static int
2445 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2446 		struct sli4_sge *sgl, int datacnt, int protcnt)
2447 {
2448 	struct scatterlist *sgde = NULL; /* s/g data entry */
2449 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2450 	struct sli4_sge_diseed *diseed = NULL;
2451 	dma_addr_t dataphysaddr, protphysaddr;
2452 	unsigned short curr_data = 0, curr_prot = 0;
2453 	unsigned int split_offset;
2454 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2455 	unsigned int protgrp_blks, protgrp_bytes;
2456 	unsigned int remainder, subtotal;
2457 	int status;
2458 	unsigned char pgdone = 0, alldone = 0;
2459 	unsigned blksize;
2460 	uint32_t reftag;
2461 	uint8_t txop, rxop;
2462 	uint32_t dma_len;
2463 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2464 	uint32_t rc;
2465 #endif
2466 	uint32_t checking = 1;
2467 	uint32_t dma_offset = 0;
2468 	int num_sge = 0;
2469 
2470 	sgpe = scsi_prot_sglist(sc);
2471 	sgde = scsi_sglist(sc);
2472 
2473 	if (!sgpe || !sgde) {
2474 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2475 				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2476 				sgpe, sgde);
2477 		return 0;
2478 	}
2479 
2480 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2481 	if (status)
2482 		goto out;
2483 
2484 	/* extract some info from the scsi command */
2485 	blksize = lpfc_cmd_blksize(sc);
2486 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2487 
2488 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2489 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2490 	if (rc) {
2491 		if (rc & BG_ERR_SWAP)
2492 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2493 		if (rc & BG_ERR_CHECK)
2494 			checking = 0;
2495 	}
2496 #endif
2497 
2498 	split_offset = 0;
2499 	do {
2500 		/* setup DISEED with what we have */
2501 		diseed = (struct sli4_sge_diseed *) sgl;
2502 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2503 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2504 
2505 		/* Endianness conversion if necessary */
2506 		diseed->ref_tag = cpu_to_le32(reftag);
2507 		diseed->ref_tag_tran = diseed->ref_tag;
2508 
2509 		/* setup DISEED with the rest of the info */
2510 		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2511 		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2512 		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2513 		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2514 		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2515 		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2516 
2517 		/* Endianness conversion if necessary for DISEED */
2518 		diseed->word2 = cpu_to_le32(diseed->word2);
2519 		diseed->word3 = cpu_to_le32(diseed->word3);
2520 
2521 		/* advance sgl and increment bde count */
2522 		num_sge++;
2523 		sgl++;
2524 
2525 		/* setup the first BDE that points to protection buffer */
2526 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2527 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2528 
2529 		/* must be integer multiple of the DIF block length */
2530 		BUG_ON(protgroup_len % 8);
2531 
2532 		/* Now setup DIF SGE */
2533 		sgl->word2 = 0;
2534 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2535 		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2536 		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2537 		sgl->word2 = cpu_to_le32(sgl->word2);
2538 
2539 		protgrp_blks = protgroup_len / 8;
2540 		protgrp_bytes = protgrp_blks * blksize;
2541 
2542 		/* check if DIF SGE is crossing the 4K boundary; if so split */
2543 		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2544 			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2545 			protgroup_offset += protgroup_remainder;
2546 			protgrp_blks = protgroup_remainder / 8;
2547 			protgrp_bytes = protgrp_blks * blksize;
2548 		} else {
2549 			protgroup_offset = 0;
2550 			curr_prot++;
2551 		}
2552 
2553 		num_sge++;
2554 
2555 		/* setup SGE's for data blocks associated with DIF data */
2556 		pgdone = 0;
2557 		subtotal = 0; /* total bytes processed for current prot grp */
2558 		while (!pgdone) {
2559 			if (!sgde) {
2560 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2561 					"9086 BLKGRD:%s Invalid data segment\n",
2562 						__func__);
2563 				return 0;
2564 			}
2565 			sgl++;
2566 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2567 
2568 			remainder = sg_dma_len(sgde) - split_offset;
2569 
2570 			if ((subtotal + remainder) <= protgrp_bytes) {
2571 				/* we can use this whole buffer */
2572 				dma_len = remainder;
2573 				split_offset = 0;
2574 
2575 				if ((subtotal + remainder) == protgrp_bytes)
2576 					pgdone = 1;
2577 			} else {
2578 				/* must split this buffer with next prot grp */
2579 				dma_len = protgrp_bytes - subtotal;
2580 				split_offset += dma_len;
2581 			}
2582 
2583 			subtotal += dma_len;
2584 
2585 			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2586 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2587 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2588 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2589 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2590 
2591 			sgl->sge_len = cpu_to_le32(dma_len);
2592 			dma_offset += dma_len;
2593 
2594 			num_sge++;
2595 			curr_data++;
2596 
2597 			if (split_offset)
2598 				break;
2599 
2600 			/* Move to the next s/g segment if possible */
2601 			sgde = sg_next(sgde);
2602 		}
2603 
2604 		if (protgroup_offset) {
2605 			/* update the reference tag */
2606 			reftag += protgrp_blks;
2607 			sgl++;
2608 			continue;
2609 		}
2610 
2611 		/* are we done ? */
2612 		if (curr_prot == protcnt) {
2613 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2614 			alldone = 1;
2615 		} else if (curr_prot < protcnt) {
2616 			/* advance to next prot buffer */
2617 			sgpe = sg_next(sgpe);
2618 			sgl++;
2619 
2620 			/* update the reference tag */
2621 			reftag += protgrp_blks;
2622 		} else {
2623 			/* if we're here, we have a bug */
2624 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2625 				"9085 BLKGRD: bug in %s\n", __func__);
2626 		}
2627 
2628 	} while (!alldone);
2629 
2630 out:
2631 
2632 	return num_sge;
2633 }
2634 
2635 /**
2636  * lpfc_prot_group_type - Get protection group type of SCSI command
2637  * @phba: The Hba for which this call is being executed.
2638  * @sc: pointer to scsi command we're working on
2639  *
2640  * Given a SCSI command that supports DIF, determine the composition of
2641  * groups involved in setting up buffer lists
2642  *
2643  * Returns: Protection group type (with or without DIF)
2644  *
2645  **/
2646 static int
2647 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2648 {
2649 	int ret = LPFC_PG_TYPE_INVALID;
2650 	unsigned char op = scsi_get_prot_op(sc);
2651 
2652 	switch (op) {
2653 	case SCSI_PROT_READ_STRIP:
2654 	case SCSI_PROT_WRITE_INSERT:
2655 		ret = LPFC_PG_TYPE_NO_DIF;
2656 		break;
2657 	case SCSI_PROT_READ_INSERT:
2658 	case SCSI_PROT_WRITE_STRIP:
2659 	case SCSI_PROT_READ_PASS:
2660 	case SCSI_PROT_WRITE_PASS:
2661 		ret = LPFC_PG_TYPE_DIF_BUF;
2662 		break;
2663 	default:
2664 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2665 				"9021 Unsupported protection op:%d\n", op);
2666 		break;
2667 	}
2668 
2669 	return ret;
2670 }
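
/*
 * Example of the classification above: READ_STRIP and WRITE_INSERT keep
 * all DIF handling inside the HBA (no host-visible protection buffer),
 * hence LPFC_PG_TYPE_NO_DIF; the PASS ops and READ_INSERT/WRITE_STRIP
 * move tuples to or from separate host protection buffers, hence
 * LPFC_PG_TYPE_DIF_BUF.
 */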
2671 
2672 /**
2673  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2674  * @phba: The Hba for which this call is being executed.
2675  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2676  *
2677  * This is the protection/DIF aware version of
2678  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2679  * two functions eventually, but for now, it's here
2680  **/
2681 static int
2682 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2683 		struct lpfc_scsi_buf *lpfc_cmd)
2684 {
2685 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2686 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2687 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2688 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2689 	uint32_t num_bde = 0;
2690 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2691 	int prot_group_type = 0;
2692 	int diflen, fcpdl;
2693 	unsigned blksize;
2694 
2695 	/*
2696 	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2697 	 * and fcp_rsp regions to the first data bde entry
2698 	 */
2699 	bpl += 2;
2700 	if (scsi_sg_count(scsi_cmnd)) {
2701 		/*
2702 		 * The driver stores the segment count returned from pci_map_sg
2703 		 * because this is a count of dma-mappings used to map the use_sg
2704 		 * pages.  They are not guaranteed to be the same for those
2705 		 * architectures that implement an IOMMU.
2706 		 */
2707 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
2708 					scsi_sglist(scsi_cmnd),
2709 					scsi_sg_count(scsi_cmnd), datadir);
2710 		if (unlikely(!datasegcnt))
2711 			return 1;
2712 
2713 		lpfc_cmd->seg_cnt = datasegcnt;
2714 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2715 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2716 					"9067 BLKGRD: %s: Too many sg segments"
2717 					" from dma_map_sg.  Config %d, seg_cnt"
2718 					" %d\n",
2719 					__func__, phba->cfg_sg_seg_cnt,
2720 					lpfc_cmd->seg_cnt);
2721 			scsi_dma_unmap(scsi_cmnd);
2722 			return 1;
2723 		}
2724 
2725 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2726 
2727 		switch (prot_group_type) {
2728 		case LPFC_PG_TYPE_NO_DIF:
2729 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2730 					datasegcnt);
2731 			/* we should have 2 or more entries in buffer list */
2732 			if (num_bde < 2)
2733 				goto err;
2734 			break;
2735 		case LPFC_PG_TYPE_DIF_BUF:{
2736 			/*
2737 			 * This type indicates that protection buffers are
2738 			 * passed to the driver, so that needs to be prepared
2739 			 * for DMA
2740 			 */
2741 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
2742 					scsi_prot_sglist(scsi_cmnd),
2743 					scsi_prot_sg_count(scsi_cmnd), datadir);
2744 			if (unlikely(!protsegcnt)) {
2745 				scsi_dma_unmap(scsi_cmnd);
2746 				return 1;
2747 			}
2748 
2749 			lpfc_cmd->prot_seg_cnt = protsegcnt;
2750 			if (lpfc_cmd->prot_seg_cnt
2751 			    > phba->cfg_prot_sg_seg_cnt) {
2752 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2753 					"9068 BLKGRD: %s: Too many prot sg "
2754 					"segments from dma_map_sg.  Config %d,"
2755 						"prot_seg_cnt %d\n", __func__,
2756 						phba->cfg_prot_sg_seg_cnt,
2757 						lpfc_cmd->prot_seg_cnt);
2758 				dma_unmap_sg(&phba->pcidev->dev,
2759 					     scsi_prot_sglist(scsi_cmnd),
2760 					     scsi_prot_sg_count(scsi_cmnd),
2761 					     datadir);
2762 				scsi_dma_unmap(scsi_cmnd);
2763 				return 1;
2764 			}
2765 
2766 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2767 					datasegcnt, protsegcnt);
2768 			/* we should have 3 or more entries in buffer list */
2769 			if (num_bde < 3)
2770 				goto err;
2771 			break;
2772 		}
2773 		case LPFC_PG_TYPE_INVALID:
2774 		default:
2775 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2776 					"9022 Unexpected protection group %i\n",
2777 					prot_group_type);
2778 			return 1;
2779 		}
2780 	}
2781 
2782 	/*
2783 	 * Finish initializing those IOCB fields that are dependent on the
2784 	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2785 	 * reinitialized since all iocb memory resources are used many times
2786 	 * for transmit, receive, and continuation bpl's.
2787 	 */
2788 	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2789 	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2790 	iocb_cmd->ulpBdeCount = 1;
2791 	iocb_cmd->ulpLe = 1;
2792 
2793 	fcpdl = scsi_bufflen(scsi_cmnd);
2794 
2795 	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
2796 		/*
2797 		 * We are in DIF Type 1 mode
2798 		 * Every data block has a 8 byte DIF (trailer)
2799 		 * Every data block has an 8-byte DIF (trailer)
2800 		 * attached to it.  Must adjust the FCP data length
2801 		blksize = lpfc_cmd_blksize(scsi_cmnd);
2802 		diflen = (fcpdl / blksize) * 8;
2803 		fcpdl += diflen;
2804 	}
2805 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2806 
2807 	/*
2808 	 * Due to difference in data length between DIF/non-DIF paths,
2809 	 * we need to set word 4 of IOCB here
2810 	 */
2811 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2812 
2813 	return 0;
2814 err:
2815 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2816 			"9023 Could not setup all needed BDE's "
2817 			"prot_group_type=%d, num_bde=%d\n",
2818 			prot_group_type, num_bde);
2819 	return 1;
2820 }
2821 
2822 /*
2823  * This function checks for BlockGuard errors detected by
2824  * the HBA.  In case of errors, the ASC/ASCQ fields in the
2825  * sense buffer will be set accordingly, paired with
2826  * ILLEGAL_REQUEST to signal to the kernel that the HBA
2827  * detected corruption.
2828  *
2829  * Returns:
2830  *  0 - No error found
2831  *  1 - BlockGuard error found
2832  * -1 - Internal error (bad profile, ...etc)
2833  */
2834 static int
2835 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2836 			struct lpfc_iocbq *pIocbOut)
2837 {
2838 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2839 	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2840 	int ret = 0;
2841 	uint32_t bghm = bgf->bghm;
2842 	uint32_t bgstat = bgf->bgstat;
2843 	uint64_t failing_sector = 0;
2844 
2845 	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
2846 			" 0x%x lba 0x%llx blk cnt 0x%x "
2847 			"bgstat=0x%x bghm=0x%x\n",
2848 			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
2849 			blk_rq_sectors(cmd->request), bgstat, bghm);
2850 
2851 	spin_lock(&_dump_buf_lock);
2852 	if (!_dump_buf_done) {
2853 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
2854 			" data for %u blocks to debugfs\n",
2855 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2856 		lpfc_debug_save_data(phba, cmd);
2857 
2858 		/* If we have a prot sgl, save the DIF buffer */
2859 		if (lpfc_prot_group_type(phba, cmd) ==
2860 				LPFC_PG_TYPE_DIF_BUF) {
2861 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
2862 				"Saving DIF for %u blocks to debugfs\n",
2863 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2864 			lpfc_debug_save_dif(phba, cmd);
2865 		}
2866 
2867 		_dump_buf_done = 1;
2868 	}
2869 	spin_unlock(&_dump_buf_lock);
2870 
2871 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
2872 		cmd->result = ScsiResult(DID_ERROR, 0);
2873 		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
2874 			" BlockGuard profile. bgstat:0x%x\n",
2875 			bgstat);
2876 		ret = (-1);
2877 		goto out;
2878 	}
2879 
2880 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2881 		cmd->result = ScsiResult(DID_ERROR, 0);
2882 		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
2883 				"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
2884 				bgstat);
2885 		ret = (-1);
2886 		goto out;
2887 	}
2888 
2889 	if (lpfc_bgs_get_guard_err(bgstat)) {
2890 		ret = 1;
2891 
2892 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2893 				0x10, 0x1);
2894 		cmd->result = DRIVER_SENSE << 24
2895 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2896 		phba->bg_guard_err_cnt++;
2897 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2898 			"9055 BLKGRD: guard_tag error\n");
2899 	}
2900 
2901 	if (lpfc_bgs_get_reftag_err(bgstat)) {
2902 		ret = 1;
2903 
2904 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2905 				0x10, 0x3);
2906 		cmd->result = DRIVER_SENSE << 24
2907 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2908 
2909 		phba->bg_reftag_err_cnt++;
2910 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2911 			"9056 BLKGRD: ref_tag error\n");
2912 	}
2913 
2914 	if (lpfc_bgs_get_apptag_err(bgstat)) {
2915 		ret = 1;
2916 
2917 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2918 				0x10, 0x2);
2919 		cmd->result = DRIVER_SENSE << 24
2920 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
2921 
2922 		phba->bg_apptag_err_cnt++;
2923 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2924 			"9061 BLKGRD: app_tag error\n");
2925 	}
2926 
2927 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2928 		/*
2929 		 * setup sense data descriptor 0 per SPC-4 as an information
2930 		 * field, and put the failing LBA in it.
2931 		 * This code assumes there was also a guard/app/ref tag error
2932 		 * indication.
2933 		 */
2934 		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
2935 		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
2936 		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
2937 		cmd->sense_buffer[10] = 0x80; /* Validity bit */
2938 
2939 		/* bghm is an "on the wire" FC frame-based count */
2940 		switch (scsi_get_prot_op(cmd)) {
2941 		case SCSI_PROT_READ_INSERT:
2942 		case SCSI_PROT_WRITE_STRIP:
2943 			bghm /= cmd->device->sector_size;
2944 			break;
2945 		case SCSI_PROT_READ_STRIP:
2946 		case SCSI_PROT_WRITE_INSERT:
2947 		case SCSI_PROT_READ_PASS:
2948 		case SCSI_PROT_WRITE_PASS:
2949 			bghm /= (cmd->device->sector_size +
2950 				sizeof(struct scsi_dif_tuple));
2951 			break;
2952 		}
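		/*
		 * Worked example (illustrative only): on a READ_PASS with
		 * 512-byte sectors, each sector travels with its 8-byte DIF
		 * tuple, so a bghm of 5200 bytes corresponds to
		 * 5200 / (512 + 8) = 10 sectors beyond the starting LBA.
		 */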
2953 
2954 		failing_sector = scsi_get_lba(cmd);
2955 		failing_sector += bghm;
2956 
2957 		/* Descriptor Information */
2958 		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2959 	}
2960 
2961 	if (!ret) {
2962 		/* No error was reported - problem in FW? */
2963 		cmd->result = ScsiResult(DID_ERROR, 0);
2964 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2965 			"9057 BLKGRD: Unknown error reported!\n");
2966 	}
2967 
2968 out:
2969 	return ret;
2970 }
2971 
2972 /**
2973  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
2974  * @phba: The Hba for which this call is being executed.
2975  * @lpfc_cmd: The scsi buffer which is going to be mapped.
2976  *
2977  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
2978  * field of @lpfc_cmd for device with SLI-4 interface spec.
2979  *
2980  * Return codes:
2981  *	1 - Error
2982  *	0 - Success
2983  **/
2984 static int
2985 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2986 {
2987 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2988 	struct scatterlist *sgel = NULL;
2989 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2990 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
2991 	struct sli4_sge *first_data_sgl;
2992 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2993 	dma_addr_t physaddr;
2994 	uint32_t num_bde = 0;
2995 	uint32_t dma_len;
2996 	uint32_t dma_offset = 0;
2997 	int nseg;
2998 	struct ulp_bde64 *bde;
2999 
3000 	/*
3001 	 * There are three possibilities here - use scatter-gather segment, use
3002 	 * the single mapping, or neither.  Start the lpfc command prep by
3003 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3004 	 * data bde entry.
3005 	 */
3006 	if (scsi_sg_count(scsi_cmnd)) {
3007 		/*
3008 		 * The driver stores the segment count returned from pci_map_sg
3009 		 * because this is a count of dma-mappings used to map the use_sg
3010 		 * pages.  They are not guaranteed to be the same for those
3011 		 * architectures that implement an IOMMU.
3012 		 */
3013 
3014 		nseg = scsi_dma_map(scsi_cmnd);
3015 		if (unlikely(!nseg))
3016 			return 1;
3017 		sgl += 1;
3018 		/* clear the last flag in the fcp_rsp map entry */
3019 		sgl->word2 = le32_to_cpu(sgl->word2);
3020 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3021 		sgl->word2 = cpu_to_le32(sgl->word2);
3022 		sgl += 1;
3023 		first_data_sgl = sgl;
3024 		lpfc_cmd->seg_cnt = nseg;
3025 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3026 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3027 				" %s: Too many sg segments from "
3028 				"dma_map_sg.  Config %d, seg_cnt %d\n",
3029 				__func__, phba->cfg_sg_seg_cnt,
3030 			       lpfc_cmd->seg_cnt);
3031 			scsi_dma_unmap(scsi_cmnd);
3032 			return 1;
3033 		}
3034 
3035 		/*
3036 		 * The driver established a maximum scatter-gather segment count
3037 		 * during probe that limits the number of sg elements in any
3038 		 * single scsi command.  Just run through the seg_cnt and format
3039 		 * the sge's.
3040 		 * When using SLI-3 the driver will try to fit all the BDEs into
3041 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
3042 		 * does for SLI-2 mode.
3043 		 */
3044 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3045 			physaddr = sg_dma_address(sgel);
3046 			dma_len = sg_dma_len(sgel);
3047 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3048 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3049 			sgl->word2 = le32_to_cpu(sgl->word2);
3050 			if ((num_bde + 1) == nseg)
3051 				bf_set(lpfc_sli4_sge_last, sgl, 1);
3052 			else
3053 				bf_set(lpfc_sli4_sge_last, sgl, 0);
3054 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3055 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3056 			sgl->word2 = cpu_to_le32(sgl->word2);
3057 			sgl->sge_len = cpu_to_le32(dma_len);
3058 			dma_offset += dma_len;
3059 			sgl++;
3060 		}
3061 		/* setup the performance hint (first data BDE) if enabled */
3062 		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3063 			bde = (struct ulp_bde64 *)
3064 					&(iocb_cmd->unsli3.sli3Words[5]);
3065 			bde->addrLow = first_data_sgl->addr_lo;
3066 			bde->addrHigh = first_data_sgl->addr_hi;
3067 			bde->tus.f.bdeSize =
3068 					le32_to_cpu(first_data_sgl->sge_len);
3069 			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3070 			bde->tus.w = cpu_to_le32(bde->tus.w);
3071 		}
3072 	} else {
3073 		sgl += 1;
3074 		/* clear the last flag in the fcp_rsp map entry */
3075 		sgl->word2 = le32_to_cpu(sgl->word2);
3076 		bf_set(lpfc_sli4_sge_last, sgl, 1);
3077 		sgl->word2 = cpu_to_le32(sgl->word2);
3078 	}
3079 
3080 	/*
3081 	 * Finish initializing those IOCB fields that are dependent on the
3082 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3083 	 * explicitly reinitialized since all iocb memory resources
3084 	 * are reused.
3085 	 */
3086 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3087 
3088 	/*
3089 	 * Due to difference in data length between DIF/non-DIF paths,
3090 	 * we need to set word 4 of IOCB here
3091 	 */
3092 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3093 	return 0;
3094 }
3095 
3096 /**
3097  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
3098  * @phba: The Hba for which this call is being executed.
3099  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
3100  *
3101  * Adjust the data length to account for how much data
3102  * is actually on the wire.
3103  *
3104  * returns the adjusted data length
3105  **/
3106 static int
3107 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
3108 		struct lpfc_scsi_buf *lpfc_cmd)
3109 {
3110 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
3111 	int diflen, fcpdl;
3112 	unsigned blksize;
3113 
3114 	fcpdl = scsi_bufflen(sc);
3115 
3116 	/* Check if there is protection data on the wire */
3117 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
3118 		/* Read */
3119 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
3120 			return fcpdl;
3121 
3122 	} else {
3123 		/* Write */
3124 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
3125 			return fcpdl;
3126 	}
3127 
3128 	/* If protection data on the wire, adjust the count accordingly */
3129 	blksize = lpfc_cmd_blksize(sc);
3130 	diflen = (fcpdl / blksize) * 8;
3131 	fcpdl += diflen;
3132 	return fcpdl;
3133 }
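
/*
 * Worked example for the adjustment above (illustrative values only):
 * a 64KB WRITE_PASS of 512-byte blocks gives diflen = (65536 / 512) * 8
 * = 1024, so the wire-level fcpdl becomes 66560.  READ_INSERT and
 * WRITE_STRIP carry no DIF on the wire and return scsi_bufflen()
 * unchanged.
 */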
3134 
3135 /**
3136  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3137  * @phba: The Hba for which this call is being executed.
3138  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3139  *
3140  * This is the protection/DIF aware version of
3141  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3142  * two functions eventually, but for now, it's here
3143  **/
3144 static int
3145 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3146 		struct lpfc_scsi_buf *lpfc_cmd)
3147 {
3148 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3149 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3150 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3151 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3152 	uint32_t num_bde = 0;
3153 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3154 	int prot_group_type = 0;
3155 	int fcpdl;
3156 
3157 	/*
3158 	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
3159 	 * and fcp_rsp regions to the first data bde entry
3160 	 */
3161 	if (scsi_sg_count(scsi_cmnd)) {
3162 		/*
3163 		 * The driver stores the segment count returned from pci_map_sg
3164 		 * because this is a count of dma-mappings used to map the use_sg
3165 		 * pages.  They are not guaranteed to be the same for those
3166 		 * architectures that implement an IOMMU.
3167 		 */
3168 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3169 					scsi_sglist(scsi_cmnd),
3170 					scsi_sg_count(scsi_cmnd), datadir);
3171 		if (unlikely(!datasegcnt))
3172 			return 1;
3173 
3174 		sgl += 1;
3175 		/* clear the last flag in the fcp_rsp map entry */
3176 		sgl->word2 = le32_to_cpu(sgl->word2);
3177 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3178 		sgl->word2 = cpu_to_le32(sgl->word2);
3179 
3180 		sgl += 1;
3181 		lpfc_cmd->seg_cnt = datasegcnt;
3182 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3183 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3184 					"9087 BLKGRD: %s: Too many sg segments"
3185 					" from dma_map_sg.  Config %d, seg_cnt"
3186 					" %d\n",
3187 					__func__, phba->cfg_sg_seg_cnt,
3188 					lpfc_cmd->seg_cnt);
3189 			scsi_dma_unmap(scsi_cmnd);
3190 			return 1;
3191 		}
3192 
3193 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3194 
3195 		switch (prot_group_type) {
3196 		case LPFC_PG_TYPE_NO_DIF:
3197 			num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3198 					datasegcnt);
3199 			/* we should have 2 or more entries in buffer list */
3200 			if (num_bde < 2)
3201 				goto err;
3202 			break;
3203 		case LPFC_PG_TYPE_DIF_BUF:{
3204 			/*
3205 			 * This type indicates that protection buffers are
3206 			 * passed to the driver, so that needs to be prepared
3207 			 * for DMA
3208 			 */
3209 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3210 					scsi_prot_sglist(scsi_cmnd),
3211 					scsi_prot_sg_count(scsi_cmnd), datadir);
3212 			if (unlikely(!protsegcnt)) {
3213 				scsi_dma_unmap(scsi_cmnd);
3214 				return 1;
3215 			}
3216 
3217 			lpfc_cmd->prot_seg_cnt = protsegcnt;
3218 			if (lpfc_cmd->prot_seg_cnt
3219 			    > phba->cfg_prot_sg_seg_cnt) {
3220 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3221 					"9088 BLKGRD: %s: Too many prot sg "
3222 					"segments from dma_map_sg.  Config %d,"
3223 						"prot_seg_cnt %d\n", __func__,
3224 						phba->cfg_prot_sg_seg_cnt,
3225 						lpfc_cmd->prot_seg_cnt);
3226 				dma_unmap_sg(&phba->pcidev->dev,
3227 					     scsi_prot_sglist(scsi_cmnd),
3228 					     scsi_prot_sg_count(scsi_cmnd),
3229 					     datadir);
3230 				scsi_dma_unmap(scsi_cmnd);
3231 				return 1;
3232 			}
3233 
3234 			num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3235 					datasegcnt, protsegcnt);
3236 			/* we should have 3 or more entries in buffer list */
3237 			if (num_bde < 3)
3238 				goto err;
3239 			break;
3240 		}
3241 		case LPFC_PG_TYPE_INVALID:
3242 		default:
3243 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3244 					"9083 Unexpected protection group %i\n",
3245 					prot_group_type);
3246 			return 1;
3247 		}
3248 	}
3249 
3250 	switch (scsi_get_prot_op(scsi_cmnd)) {
3251 	case SCSI_PROT_WRITE_STRIP:
3252 	case SCSI_PROT_READ_STRIP:
3253 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3254 		break;
3255 	case SCSI_PROT_WRITE_INSERT:
3256 	case SCSI_PROT_READ_INSERT:
3257 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3258 		break;
3259 	case SCSI_PROT_WRITE_PASS:
3260 	case SCSI_PROT_READ_PASS:
3261 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3262 		break;
3263 	}
3264 
3265 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3266 
3267 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3268 
3269 	/*
3270 	 * Due to difference in data length between DIF/non-DIF paths,
3271 	 * we need to set word 4 of IOCB here
3272 	 */
3273 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3274 
3275 	return 0;
3276 err:
3277 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3278 			"9084 Could not set up all needed BDEs: "
3279 			"prot_group_type=%d, num_bde=%d\n",
3280 			prot_group_type, num_bde);
3281 	return 1;
3282 }
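
/*
 * A minimal sketch (hypothetical helper, not part of the driver) of the
 * endianness rule applied above: fcpDl crosses the wire big-endian, while
 * the IOCB's fcpi_parm stays in CPU byte order for the driver/firmware.
 */
static inline void example_set_transfer_len(struct fcp_cmnd *fc,
					    IOCB_t *iocb, uint32_t fcpdl)
{
	fc->fcpDl = cpu_to_be32(fcpdl);		/* wire (big-endian) format */
	iocb->un.fcpi.fcpi_parm = fcpdl;	/* CPU byte order */
}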
3283 
3284 /**
3285  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3286  * @phba: The Hba for which this call is being executed.
3287  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3288  *
3289  * This routine wraps the actual DMA mapping function pointer from the
3290  * lpfc_hba struct.
3291  *
3292  * Return codes:
3293  *	1 - Error
3294  *	0 - Success
3295  **/
3296 static inline int
3297 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3298 {
3299 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3300 }
3301 
3302 /**
3303  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3304  * using BlockGuard.
3305  * @phba: The Hba for which this call is being executed.
3306  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3307  *
3308  * This routine wraps the actual DMA mapping function pointer from the
3309  * lpfc_hba struct.
3310  *
3311  * Return codes:
3312  *	1 - Error
3313  *	0 - Success
3314  **/
3315 static inline int
3316 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3317 {
3318 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3319 }
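
/*
 * A minimal sketch of the dispatch pattern behind these wrappers: the
 * SLI-3 or SLI-4 specific routine is selected once at setup time (see
 * lpfc_scsi_api_table_setup() below) and called indirectly thereafter.
 * The helper name is hypothetical.
 */
static inline int example_prep_dispatch(struct lpfc_hba *phba,
					struct lpfc_scsi_buf *lpfc_cmd,
					bool blockguard)
{
	return blockguard ? phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd)
			  : phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}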
3320 
3321 /**
3322  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3323  * @phba: Pointer to hba context object.
3324  * @vport: Pointer to vport object.
3325  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3326  * @rsp_iocb: Pointer to response iocb object which reported error.
3327  *
3328  * This function posts an event when there is a SCSI command reporting
3329  * error from the scsi device.
3330  **/
3331 static void
3332 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3333 		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3334 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3335 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3336 	uint32_t resp_info = fcprsp->rspStatus2;
3337 	uint32_t scsi_status = fcprsp->rspStatus3;
3338 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3339 	struct lpfc_fast_path_event *fast_path_evt = NULL;
3340 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3341 	unsigned long flags;
3342 
3343 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3344 		return;
3345 
3346 	/* If there is a queue-full or busy condition, send a scsi event */
3347 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3348 		(cmnd->result == SAM_STAT_BUSY)) {
3349 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3350 		if (!fast_path_evt)
3351 			return;
3352 		fast_path_evt->un.scsi_evt.event_type =
3353 			FC_REG_SCSI_EVENT;
3354 		fast_path_evt->un.scsi_evt.subcategory =
3355 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3356 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3357 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3358 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3359 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3360 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3361 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3362 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3363 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3364 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3365 		if (!fast_path_evt)
3366 			return;
3367 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3368 			FC_REG_SCSI_EVENT;
3369 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3370 			LPFC_EVENT_CHECK_COND;
3371 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
3372 			cmnd->device->lun;
3373 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3374 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3375 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3376 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3377 		fast_path_evt->un.check_cond_evt.sense_key =
3378 			cmnd->sense_buffer[2] & 0xf;
3379 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3380 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3381 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3382 		     fcpi_parm &&
3383 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3384 			((scsi_status == SAM_STAT_GOOD) &&
3385 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
3386 		/*
3387 		 * If status is good, or the residual does not match fcpi_parm
3388 		 * while fcpi_parm is valid, then there is a read check error.
3389 		 */
3390 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3391 		if (!fast_path_evt)
3392 			return;
3393 		fast_path_evt->un.read_check_error.header.event_type =
3394 			FC_REG_FABRIC_EVENT;
3395 		fast_path_evt->un.read_check_error.header.subcategory =
3396 			LPFC_EVENT_FCPRDCHKERR;
3397 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3398 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3399 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3400 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3401 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3402 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3403 		fast_path_evt->un.read_check_error.fcpiparam =
3404 			fcpi_parm;
3405 	} else
3406 		return;
3407 
3408 	fast_path_evt->vport = vport;
3409 	spin_lock_irqsave(&phba->hbalock, flags);
3410 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3411 	spin_unlock_irqrestore(&phba->hbalock, flags);
3412 	lpfc_worker_wake_up(phba);
3413 	return;
3414 }
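
/*
 * A minimal sketch of the post-to-worker pattern used above; the event
 * payload here is hypothetical. The event is filled in, queued on
 * phba->work_list under hbalock, and delivered by the woken worker
 * thread outside of this completion context.
 */
static void example_post_scsi_event(struct lpfc_hba *phba,
				    struct lpfc_vport *vport)
{
	struct lpfc_fast_path_event *evt;
	unsigned long flags;

	evt = lpfc_alloc_fast_evt(phba);	/* may fail under pressure */
	if (!evt)
		return;
	evt->un.scsi_evt.event_type = FC_REG_SCSI_EVENT;
	evt->vport = vport;
	evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
}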
3415 
3416 /**
3417  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3418  * @phba: The HBA for which this call is being executed.
3419  * @psb: The scsi buffer which is going to be un-mapped.
3420  *
3421  * This routine does DMA un-mapping of scatter gather list of scsi command
3422  * field of @lpfc_cmd for device with SLI-3 interface spec.
3423  **/
3424 static void
3425 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3426 {
3427 	/*
3428 	 * There are only two special cases to consider.  (1) the scsi command
3429 	 * requested scatter-gather usage or (2) the scsi command allocated
3430 	 * a request buffer, but did not request use_sg.  There is a third
3431 	 * case, but it does not require resource deallocation.
3432 	 */
3433 	if (psb->seg_cnt > 0)
3434 		scsi_dma_unmap(psb->pCmd);
3435 	if (psb->prot_seg_cnt > 0)
3436 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3437 				scsi_prot_sg_count(psb->pCmd),
3438 				psb->pCmd->sc_data_direction);
3439 }
3440 
3441 /**
3442  * lpfc_handle_fcp_err - FCP response handler
3443  * @vport: The virtual port for which this call is being executed.
3444  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3445  * @rsp_iocb: The response IOCB which contains FCP error.
3446  *
3447  * This routine is called to process response IOCB with status field
3448  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3449  * based upon SCSI and FCP error.
3450  **/
3451 static void
3452 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3453 		    struct lpfc_iocbq *rsp_iocb)
3454 {
3455 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3456 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3457 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3458 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3459 	uint32_t resp_info = fcprsp->rspStatus2;
3460 	uint32_t scsi_status = fcprsp->rspStatus3;
3461 	uint32_t *lp;
3462 	uint32_t host_status = DID_OK;
3463 	uint32_t rsplen = 0;
3464 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3465 
3466 
3467 	/*
3468 	 *  If this is a task management command, there is no
3469 	 *  scsi packet associated with this lpfc_cmd.  The driver
3470 	 *  consumes it.
3471 	 */
3472 	if (fcpcmd->fcpCntl2) {
3473 		scsi_status = 0;
3474 		goto out;
3475 	}
3476 
3477 	if (resp_info & RSP_LEN_VALID) {
3478 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3479 		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3480 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3481 				 "2719 Invalid response length: "
3482 				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3483 				 cmnd->device->id,
3484 				 cmnd->device->lun, cmnd->cmnd[0],
3485 				 rsplen);
3486 			host_status = DID_ERROR;
3487 			goto out;
3488 		}
3489 		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3490 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3491 				 "2757 Protocol failure detected during "
3492 				 "processing of FCP I/O op: "
3493 				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3494 				 cmnd->device->id,
3495 				 cmnd->device->lun, cmnd->cmnd[0],
3496 				 fcprsp->rspInfo3);
3497 			host_status = DID_ERROR;
3498 			goto out;
3499 		}
3500 	}
3501 
3502 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3503 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3504 		if (snslen > SCSI_SENSE_BUFFERSIZE)
3505 			snslen = SCSI_SENSE_BUFFERSIZE;
3506 
3507 		if (resp_info & RSP_LEN_VALID)
3508 			rsplen = be32_to_cpu(fcprsp->rspRspLen);
3509 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3510 	}
3511 	lp = (uint32_t *)cmnd->sense_buffer;
3512 
3513 	/* special handling for underrun conditions */
3514 	if (!scsi_status && (resp_info & RESID_UNDER)) {
3515 		/* don't log underruns at normal FCP verbosity... */
3516 		if (vport->cfg_log_verbose & LOG_FCP)
3517 			logit = LOG_FCP_ERROR;
3518 		/* ...unless the operator asked for underrun logging */
3519 		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3520 			logit = LOG_FCP_UNDER;
3521 	}
3522 
3523 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3524 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3525 			 "Data: x%x x%x x%x x%x x%x\n",
3526 			 cmnd->cmnd[0], scsi_status,
3527 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3528 			 be32_to_cpu(fcprsp->rspResId),
3529 			 be32_to_cpu(fcprsp->rspSnsLen),
3530 			 be32_to_cpu(fcprsp->rspRspLen),
3531 			 fcprsp->rspInfo3);
3532 
3533 	scsi_set_resid(cmnd, 0);
3534 	if (resp_info & RESID_UNDER) {
3535 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3536 
3537 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3538 				 "9025 FCP Read Underrun, expected %d, "
3539 				 "residual %d Data: x%x x%x x%x\n",
3540 				 be32_to_cpu(fcpcmd->fcpDl),
3541 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3542 				 cmnd->underflow);
3543 
3544 		/*
3545 		 * On an underrun, check whether the underrun reported by the
3546 		 * storage array matches the underrun reported by the HBA.
3547 		 * If they differ, a frame was dropped.
3548 		 */
3549 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3550 			fcpi_parm &&
3551 			(scsi_get_resid(cmnd) != fcpi_parm)) {
3552 			lpfc_printf_vlog(vport, KERN_WARNING,
3553 					 LOG_FCP | LOG_FCP_ERROR,
3554 					 "9026 FCP Read Check Error "
3555 					 "and Underrun Data: x%x x%x x%x x%x\n",
3556 					 be32_to_cpu(fcpcmd->fcpDl),
3557 					 scsi_get_resid(cmnd), fcpi_parm,
3558 					 cmnd->cmnd[0]);
3559 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3560 			host_status = DID_ERROR;
3561 		}
3562 		/*
3563 		 * The cmnd->underflow is the minimum number of bytes that must
3564 		 * be transferred for this command.  Provided a sense condition
3565 		 * is not present, make sure the actual amount transferred is at
3566 		 * least the underflow value or fail.
3567 		 */
3568 		if (!(resp_info & SNS_LEN_VALID) &&
3569 		    (scsi_status == SAM_STAT_GOOD) &&
3570 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3571 		     < cmnd->underflow)) {
3572 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3573 					 "9027 FCP command x%x residual "
3574 					 "underrun converted to error "
3575 					 "Data: x%x x%x x%x\n",
3576 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
3577 					 scsi_get_resid(cmnd), cmnd->underflow);
3578 			host_status = DID_ERROR;
3579 		}
3580 	} else if (resp_info & RESID_OVER) {
3581 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3582 				 "9028 FCP command x%x residual overrun error. "
3583 				 "Data: x%x x%x\n", cmnd->cmnd[0],
3584 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3585 		host_status = DID_ERROR;
3586 
3587 	/*
3588 	 * Check SLI validation that all the transfer was actually done
3589 	 * (fcpi_parm should be zero).
3590 	 */
3591 	} else if (fcpi_parm) {
3592 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3593 				 "9029 FCP Data Transfer Check Error: "
3594 				 "x%x x%x x%x x%x x%x\n",
3595 				 be32_to_cpu(fcpcmd->fcpDl),
3596 				 be32_to_cpu(fcprsp->rspResId),
3597 				 fcpi_parm, cmnd->cmnd[0], scsi_status);
3598 		switch (scsi_status) {
3599 		case SAM_STAT_GOOD:
3600 		case SAM_STAT_CHECK_CONDITION:
3601 			/* Fabric dropped a data frame. Fail any successful
3602 			 * command in which we detected dropped frames.
3603 			 * A status of good or some check conditions could
3604 			 * be considered a successful command.
3605 			 */
3606 			host_status = DID_ERROR;
3607 			break;
3608 		}
3609 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3610 	}
3611 
3612  out:
3613 	cmnd->result = ScsiResult(host_status, scsi_status);
3614 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3615 }
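
/*
 * A minimal sketch of the cmd->result encoding produced by ScsiResult()
 * above, assuming the usual midlayer layout: host byte in bits 16-23,
 * SCSI status byte in bits 0-7. The helper name is hypothetical.
 */
static inline uint32_t example_scsi_result(uint32_t host_status,
					   uint32_t scsi_status)
{
	/* e.g. (DID_ERROR << 16) | SAM_STAT_GOOD */
	return (host_status << 16) | scsi_status;
}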
3616 
3617 /**
3618  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3619  * @phba: The Hba for which this call is being executed.
3620  * @pIocbIn: The command IOCBQ for the scsi cmnd.
3621  * @pIocbOut: The response IOCBQ for the scsi cmnd.
3622  *
3623  * This routine assigns scsi command result by looking into response IOCB
3624  * status field appropriately. This routine handles QUEUE FULL condition as
3625  * well by ramping down device queue depth.
3626  **/
3627 static void
3628 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3629 			struct lpfc_iocbq *pIocbOut)
3630 {
3631 	struct lpfc_scsi_buf *lpfc_cmd =
3632 		(struct lpfc_scsi_buf *) pIocbIn->context1;
3633 	struct lpfc_vport      *vport = pIocbIn->vport;
3634 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3635 	struct lpfc_nodelist *pnode = rdata->pnode;
3636 	struct scsi_cmnd *cmd;
3637 	int result;
3638 	struct scsi_device *tmp_sdev;
3639 	int depth;
3640 	unsigned long flags;
3641 	struct lpfc_fast_path_event *fast_path_evt;
3642 	struct Scsi_Host *shost;
3643 	uint32_t queue_depth, scsi_id;
3644 	uint32_t logit = LOG_FCP;
3645 
3646 	/* Sanity check on return of outstanding command */
3647 	if (!(lpfc_cmd->pCmd))
3648 		return;
3649 	cmd = lpfc_cmd->pCmd;
3650 	shost = cmd->device->host;
3651 
3652 	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3653 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3654 	/* pick up SLI4 exchange busy status from HBA */
3655 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3656 
3657 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3658 	if (lpfc_cmd->prot_data_type) {
3659 		struct scsi_dif_tuple *src = NULL;
3660 
3661 		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3662 		/*
3663 		 * Used to restore any changes to protection
3664 		 * data for error injection.
3665 		 */
3666 		switch (lpfc_cmd->prot_data_type) {
3667 		case LPFC_INJERR_REFTAG:
3668 			src->ref_tag =
3669 				lpfc_cmd->prot_data;
3670 			break;
3671 		case LPFC_INJERR_APPTAG:
3672 			src->app_tag =
3673 				(uint16_t)lpfc_cmd->prot_data;
3674 			break;
3675 		case LPFC_INJERR_GUARD:
3676 			src->guard_tag =
3677 				(uint16_t)lpfc_cmd->prot_data;
3678 			break;
3679 		default:
3680 			break;
3681 		}
3682 
3683 		lpfc_cmd->prot_data = 0;
3684 		lpfc_cmd->prot_data_type = 0;
3685 		lpfc_cmd->prot_data_segment = NULL;
3686 	}
3687 #endif
3688 	if (pnode && NLP_CHK_NODE_ACT(pnode))
3689 		atomic_dec(&pnode->cmd_pending);
3690 
3691 	if (lpfc_cmd->status) {
3692 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3693 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
3694 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3695 		else if (lpfc_cmd->status >= IOSTAT_CNT)
3696 			lpfc_cmd->status = IOSTAT_DEFAULT;
3697 		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3698 		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
3699 		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3700 		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3701 			logit = 0;
3702 		else
3703 			logit = LOG_FCP | LOG_FCP_UNDER;
3704 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
3705 			 "9030 FCP cmd x%x failed <%d/%d> "
3706 			 "status: x%x result: x%x "
3707 			 "sid: x%x did: x%x oxid: x%x "
3708 			 "Data: x%x x%x\n",
3709 			 cmd->cmnd[0],
3710 			 cmd->device ? cmd->device->id : 0xffff,
3711 			 cmd->device ? cmd->device->lun : 0xffff,
3712 			 lpfc_cmd->status, lpfc_cmd->result,
3713 			 vport->fc_myDID, pnode->nlp_DID,
3714 			 phba->sli_rev == LPFC_SLI_REV4 ?
3715 			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3716 			 pIocbOut->iocb.ulpContext,
3717 			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3718 
3719 		switch (lpfc_cmd->status) {
3720 		case IOSTAT_FCP_RSP_ERROR:
3721 			/* Call FCP RSP handler to determine result */
3722 			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3723 			break;
3724 		case IOSTAT_NPORT_BSY:
3725 		case IOSTAT_FABRIC_BSY:
3726 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
3727 			fast_path_evt = lpfc_alloc_fast_evt(phba);
3728 			if (!fast_path_evt)
3729 				break;
3730 			fast_path_evt->un.fabric_evt.event_type =
3731 				FC_REG_FABRIC_EVENT;
3732 			fast_path_evt->un.fabric_evt.subcategory =
3733 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3734 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3735 			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3736 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3737 					&pnode->nlp_portname,
3738 					sizeof(struct lpfc_name));
3739 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3740 					&pnode->nlp_nodename,
3741 					sizeof(struct lpfc_name));
3742 			}
3743 			fast_path_evt->vport = vport;
3744 			fast_path_evt->work_evt.evt =
3745 				LPFC_EVT_FASTPATH_MGMT_EVT;
3746 			spin_lock_irqsave(&phba->hbalock, flags);
3747 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
3748 				&phba->work_list);
3749 			spin_unlock_irqrestore(&phba->hbalock, flags);
3750 			lpfc_worker_wake_up(phba);
3751 			break;
3752 		case IOSTAT_LOCAL_REJECT:
3753 		case IOSTAT_REMOTE_STOP:
3754 			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3755 			    lpfc_cmd->result ==
3756 					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3757 			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3758 			    lpfc_cmd->result ==
3759 					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3760 				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
3761 				break;
3762 			}
3763 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3764 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
3765 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3766 			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3767 				cmd->result = ScsiResult(DID_REQUEUE, 0);
3768 				break;
3769 			}
3770 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3771 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3772 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3773 				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3774 					/*
3775 					 * This is a response for a BG enabled
3776 					 * cmd. Parse BG error
3777 					 */
3778 					lpfc_parse_bg_err(phba, lpfc_cmd,
3779 							pIocbOut);
3780 					break;
3781 				} else {
3782 					lpfc_printf_vlog(vport, KERN_WARNING,
3783 							LOG_BG,
3784 							"9031 non-zero BGSTAT "
3785 							"on unprotected cmd\n");
3786 				}
3787 			}
3788 			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3789 				&& (phba->sli_rev == LPFC_SLI_REV4)
3790 				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
3791 				/* This IO was aborted by the target; we don't
3792 				 * know the rxid, and because we did not send
3793 				 * the ABTS we cannot generate an RRQ.
3794 				 */
3795 				lpfc_set_rrq_active(phba, pnode,
3796 					lpfc_cmd->cur_iocbq.sli4_lxritag,
3797 					0, 0);
3798 			}
3799 		/* else: fall through */
3800 		default:
3801 			cmd->result = ScsiResult(DID_ERROR, 0);
3802 			break;
3803 		}
3804 
3805 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3806 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3807 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
3808 						 SAM_STAT_BUSY);
3809 	} else
3810 		cmd->result = ScsiResult(DID_OK, 0);
3811 
3812 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3813 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3814 
3815 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3816 				 "0710 Iodone <%d/%d> cmd %p, error "
3817 				 "x%x SNS x%x x%x Data: x%x x%x\n",
3818 				 cmd->device->id, cmd->device->lun, cmd,
3819 				 cmd->result, *lp, *(lp + 3), cmd->retries,
3820 				 scsi_get_resid(cmd));
3821 	}
3822 
3823 	lpfc_update_stats(phba, lpfc_cmd);
3824 	result = cmd->result;
3825 	if (vport->cfg_max_scsicmpl_time &&
3826 	   time_after(jiffies, lpfc_cmd->start_time +
3827 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
3828 		spin_lock_irqsave(shost->host_lock, flags);
3829 		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3830 			if (pnode->cmd_qdepth >
3831 				atomic_read(&pnode->cmd_pending) &&
3832 				(atomic_read(&pnode->cmd_pending) >
3833 				LPFC_MIN_TGT_QDEPTH) &&
3834 				((cmd->cmnd[0] == READ_10) ||
3835 				(cmd->cmnd[0] == WRITE_10)))
3836 				pnode->cmd_qdepth =
3837 					atomic_read(&pnode->cmd_pending);
3838 
3839 			pnode->last_change_time = jiffies;
3840 		}
3841 		spin_unlock_irqrestore(shost->host_lock, flags);
3842 	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3843 		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
3844 		   time_after(jiffies, pnode->last_change_time +
3845 			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
3846 			spin_lock_irqsave(shost->host_lock, flags);
3847 			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
3848 				/ 100;
3849 			depth = depth ? depth : 1;
3850 			pnode->cmd_qdepth += depth;
3851 			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
3852 				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
3853 			pnode->last_change_time = jiffies;
3854 			spin_unlock_irqrestore(shost->host_lock, flags);
3855 		}
3856 	}
3857 
3858 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
3859 
3860 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
3861 	queue_depth = cmd->device->queue_depth;
3862 	scsi_id = cmd->device->id;
3863 	cmd->scsi_done(cmd);
3864 
3865 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3866 		spin_lock_irqsave(&phba->hbalock, flags);
3867 		lpfc_cmd->pCmd = NULL;
3868 		spin_unlock_irqrestore(&phba->hbalock, flags);
3869 
3870 		/*
3871 		 * If there is a thread waiting for command completion
3872 		 * wake up the thread.
3873 		 */
3874 		spin_lock_irqsave(shost->host_lock, flags);
3875 		if (lpfc_cmd->waitq)
3876 			wake_up(lpfc_cmd->waitq);
3877 		spin_unlock_irqrestore(shost->host_lock, flags);
3878 		lpfc_release_scsi_buf(phba, lpfc_cmd);
3879 		return;
3880 	}
3881 
3882 	if (!result)
3883 		lpfc_rampup_queue_depth(vport, queue_depth);
3884 
3885 	/*
3886 	 * Check for queue full.  If the lun is reporting queue full, then
3887 	 * back off the lun queue depth to prevent target overloads.
3888 	 */
3889 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
3890 	    NLP_CHK_NODE_ACT(pnode)) {
3891 		shost_for_each_device(tmp_sdev, shost) {
3892 			if (tmp_sdev->id != scsi_id)
3893 				continue;
3894 			depth = scsi_track_queue_full(tmp_sdev,
3895 						      tmp_sdev->queue_depth-1);
3896 			if (depth <= 0)
3897 				continue;
3898 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3899 					 "0711 detected queue full - lun queue "
3900 					 "depth adjusted to %d.\n", depth);
3901 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
3902 							       pnode,
3903 							       tmp_sdev->lun,
3904 							       depth+1, depth);
3905 		}
3906 	}
3907 
3908 	spin_lock_irqsave(&phba->hbalock, flags);
3909 	lpfc_cmd->pCmd = NULL;
3910 	spin_unlock_irqrestore(&phba->hbalock, flags);
3911 
3912 	/*
3913 	 * If there is a thread waiting for command completion
3914 	 * wake up the thread.
3915 	 */
3916 	spin_lock_irqsave(shost->host_lock, flags);
3917 	if (lpfc_cmd->waitq)
3918 		wake_up(lpfc_cmd->waitq);
3919 	spin_unlock_irqrestore(shost->host_lock, flags);
3920 
3921 	lpfc_release_scsi_buf(phba, lpfc_cmd);
3922 }
3923 
3924 /**
3925  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
3926  * @data: A pointer to the immediate command data portion of the IOCB.
3927  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
3928  *
3929  * The routine copies the entire FCP command from @fcp_cmnd to @data while
3930  * byte swapping the data to big endian format for transmission on the wire.
3931  **/
3932 static void
3933 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3934 {
3935 	int i, j;
3936 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3937 	     i += sizeof(uint32_t), j++) {
3938 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3939 	}
3940 }
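
/*
 * A minimal sketch of the same word-by-word swap on an arbitrary buffer;
 * like the routine above, it assumes the length is a whole number of
 * 32-bit words. The helper name is hypothetical.
 */
static void example_swab_words(uint32_t *dst, const uint32_t *src,
			       size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		dst[i] = cpu_to_be32(src[i]);	/* big-endian for the wire */
}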
3941 
3942 /**
3943  * lpfc_scsi_prep_cmnd - Convert a scsi cmnd to an FCP information unit
3944  * @vport: The virtual port for which this call is being executed.
3945  * @lpfc_cmd: The scsi command which needs to be sent.
3946  * @pnode: Pointer to lpfc_nodelist.
3947  *
3948  * This routine initializes the fcp_cmnd and iocb data structures from the
3949  * scsi command to be transferred.
3950  **/
3951 static void
3952 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3953 		    struct lpfc_nodelist *pnode)
3954 {
3955 	struct lpfc_hba *phba = vport->phba;
3956 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3957 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3958 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3959 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
3960 	int datadir = scsi_cmnd->sc_data_direction;
3961 	char tag[2];
3962 	uint8_t *ptr;
3963 	bool sli4;
3964 
3965 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3966 		return;
3967 
3968 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
3969 	/* clear task management bits */
3970 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
3971 
3972 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
3973 			&lpfc_cmd->fcp_cmnd->fcp_lun);
3974 
3975 	ptr = &fcp_cmnd->fcpCdb[0];
3976 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3977 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
3978 		ptr += scsi_cmnd->cmd_len;
3979 		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
3980 	}
3981 
3982 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
3983 		switch (tag[0]) {
3984 		case HEAD_OF_QUEUE_TAG:
3985 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
3986 			break;
3987 		case ORDERED_QUEUE_TAG:
3988 			fcp_cmnd->fcpCntl1 = ORDERED_Q;
3989 			break;
3990 		default:
3991 			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3992 			break;
3993 		}
3994 	} else
3995 		fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3996 
3997 	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
3998 
3999 	/*
4000 	 * There are three possibilities here - use scatter-gather segment, use
4001 	 * the single mapping, or neither.  Start the lpfc command prep by
4002 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4003 	 * data bde entry.
4004 	 */
4005 	if (scsi_sg_count(scsi_cmnd)) {
4006 		if (datadir == DMA_TO_DEVICE) {
4007 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4008 			if (sli4)
4009 				iocb_cmd->ulpPU = PARM_READ_CHECK;
4010 			else {
4011 				iocb_cmd->un.fcpi.fcpi_parm = 0;
4012 				iocb_cmd->ulpPU = 0;
4013 			}
4014 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4015 			phba->fc4OutputRequests++;
4016 		} else {
4017 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4018 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4019 			fcp_cmnd->fcpCntl3 = READ_DATA;
4020 			phba->fc4InputRequests++;
4021 		}
4022 	} else {
4023 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4024 		iocb_cmd->un.fcpi.fcpi_parm = 0;
4025 		iocb_cmd->ulpPU = 0;
4026 		fcp_cmnd->fcpCntl3 = 0;
4027 		phba->fc4ControlRequests++;
4028 	}
4029 	if (phba->sli_rev == 3 &&
4030 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4031 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4032 	/*
4033 	 * Finish initializing those IOCB fields that are independent
4034 	 * of the scsi_cmnd request_buffer
4035 	 */
4036 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
4037 	if (sli4)
4038 		piocbq->iocb.ulpContext =
4039 		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4040 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4041 		piocbq->iocb.ulpFCP2Rcvy = 1;
4042 	else
4043 		piocbq->iocb.ulpFCP2Rcvy = 0;
4044 
4045 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4046 	piocbq->context1  = lpfc_cmd;
4047 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4048 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4049 	piocbq->vport = vport;
4050 }
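
/*
 * A minimal sketch of the copy-and-pad rule the routine above applies to
 * the CDB: commands shorter than the fixed FCP CDB length are zero-filled
 * to the full width. The helper name is hypothetical.
 */
static void example_copy_cdb(uint8_t *dst, const uint8_t *src, size_t len)
{
	memcpy(dst, src, len);
	if (len < LPFC_FCP_CDB_LEN)
		memset(dst + len, 0, LPFC_FCP_CDB_LEN - len);
}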
4051 
4052 /**
4053  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4054  * @vport: The virtual port for which this call is being executed.
4055  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4056  * @lun: Logical unit number.
4057  * @task_mgmt_cmd: SCSI task management command.
4058  *
4059  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4060  * for device with SLI-3 interface spec.
4061  *
4062  * Return codes:
4063  *   0 - Error
4064  *   1 - Success
4065  **/
4066 static int
4067 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4068 			     struct lpfc_scsi_buf *lpfc_cmd,
4069 			     unsigned int lun,
4070 			     uint8_t task_mgmt_cmd)
4071 {
4072 	struct lpfc_iocbq *piocbq;
4073 	IOCB_t *piocb;
4074 	struct fcp_cmnd *fcp_cmnd;
4075 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4076 	struct lpfc_nodelist *ndlp = rdata->pnode;
4077 
4078 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4079 	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4080 		return 0;
4081 
4082 	piocbq = &(lpfc_cmd->cur_iocbq);
4083 	piocbq->vport = vport;
4084 
4085 	piocb = &piocbq->iocb;
4086 
4087 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4088 	/* Clear out any old data in the FCP command area */
4089 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4090 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4091 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4092 	if (vport->phba->sli_rev == 3 &&
4093 	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4094 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4095 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4096 	piocb->ulpContext = ndlp->nlp_rpi;
4097 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4098 		piocb->ulpContext =
4099 		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4100 	}
4101 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4102 		piocb->ulpFCP2Rcvy = 1;
4103 	}
4104 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4105 
4106 	/* ulpTimeout is only one byte */
4107 	if (lpfc_cmd->timeout > 0xff) {
4108 		/*
4109 		 * Do not timeout the command at the firmware level.
4110 		 * The driver will provide the timeout mechanism.
4111 		 */
4112 		piocb->ulpTimeout = 0;
4113 	} else
4114 		piocb->ulpTimeout = lpfc_cmd->timeout;
4115 
4116 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
4117 		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4118 
4119 	return 1;
4120 }
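
/*
 * A minimal sketch of the one-byte timeout rule above: a timeout that
 * does not fit in ulpTimeout is forced to zero so the firmware never
 * times the command out and the driver's own timer takes over. The
 * helper name is hypothetical.
 */
static inline uint8_t example_fw_timeout(uint32_t driver_timeout)
{
	return (driver_timeout > 0xff) ? 0 : (uint8_t)driver_timeout;
}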
4121 
4122 /**
4123  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4124  * @phba: The hba struct for which this call is being executed.
4125  * @dev_grp: The HBA PCI-Device group number.
4126  *
4127  * This routine sets up the SCSI interface API function jump table in @phba
4128  * struct.
4129  * Returns: 0 - success, -ENODEV - failure.
4130  **/
4131 int
4132 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4133 {
4134 
4135 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4136 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4137 
4138 	switch (dev_grp) {
4139 	case LPFC_PCI_DEV_LP:
4140 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4141 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4142 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4143 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4144 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4145 		break;
4146 	case LPFC_PCI_DEV_OC:
4147 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4148 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4149 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4150 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4151 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4152 		break;
4153 	default:
4154 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4155 				"1418 Invalid HBA PCI-device group: 0x%x\n",
4156 				dev_grp);
4157 		return -ENODEV;
4159 	}
4160 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4161 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4162 	return 0;
4163 }
4164 
4165 /**
4166  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4167  * @phba: The Hba for which this call is being executed.
4168  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4169  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4170  *
4171  * This routine is the IOCB completion routine for device reset and target
4172  * reset. It releases the scsi buffer associated with lpfc_cmd.
4173  **/
4174 static void
4175 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4176 			struct lpfc_iocbq *cmdiocbq,
4177 			struct lpfc_iocbq *rspiocbq)
4178 {
4179 	struct lpfc_scsi_buf *lpfc_cmd =
4180 		(struct lpfc_scsi_buf *) cmdiocbq->context1;
4181 	if (lpfc_cmd)
4182 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4183 	return;
4184 }
4185 
4186 /**
4187  * lpfc_info - Info entry point of scsi_host_template data structure
4188  * @host: The scsi host for which this call is being executed.
4189  *
4190  * This routine provides module information about hba.
4191  *
4192  * Return code:
4193  *   Pointer to char - Success.
4194  **/
4195 const char *
4196 lpfc_info(struct Scsi_Host *host)
4197 {
4198 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4199 	struct lpfc_hba   *phba = vport->phba;
4200 	int len, link_speed = 0;
4201 	static char  lpfcinfobuf[384];
4202 
4203 	memset(lpfcinfobuf, 0, 384);
4204 	if (phba && phba->pcidev) {
4205 		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4206 		len = strlen(lpfcinfobuf);
4207 		snprintf(lpfcinfobuf + len,
4208 			384-len,
4209 			" on PCI bus %02x device %02x irq %d",
4210 			phba->pcidev->bus->number,
4211 			phba->pcidev->devfn,
4212 			phba->pcidev->irq);
4213 		len = strlen(lpfcinfobuf);
4214 		if (phba->Port[0]) {
4215 			snprintf(lpfcinfobuf + len,
4216 				 384-len,
4217 				 " port %s",
4218 				 phba->Port);
4219 		}
4220 		len = strlen(lpfcinfobuf);
4221 		if (phba->sli_rev <= LPFC_SLI_REV3) {
4222 			link_speed = lpfc_sli_port_speed_get(phba);
4223 		} else {
4224 			if (phba->sli4_hba.link_state.logical_speed)
4225 				link_speed =
4226 				      phba->sli4_hba.link_state.logical_speed;
4227 			else
4228 				link_speed = phba->sli4_hba.link_state.speed;
4229 		}
4230 		if (link_speed != 0)
4231 			snprintf(lpfcinfobuf + len, 384-len,
4232 				 " Logical Link Speed: %d Mbps", link_speed);
4233 	}
4234 	return lpfcinfobuf;
4235 }
4236 
4237 /**
4238  * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
4239  * @phba: The Hba for which this call is being executed.
4240  *
4241  * This routine rearms the fcp_poll_timer of @phba using cfg_poll_tmo.
4242  * The default value of cfg_poll_tmo is 10 milliseconds.
4243  **/
4244 static inline void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
4245 {
4246 	unsigned long  poll_tmo_expires =
4247 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4248 
4249 	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
4250 		mod_timer(&phba->fcp_poll_timer,
4251 			  poll_tmo_expires);
4252 }
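
/*
 * A minimal sketch of the rearm arithmetic above: the millisecond
 * tunable is converted to a jiffies deadline relative to "now" before
 * being handed to mod_timer(). The helper name is hypothetical.
 */
static inline unsigned long example_poll_expiry(unsigned int tmo_msecs)
{
	return jiffies + msecs_to_jiffies(tmo_msecs);
}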
4253 
4254 /**
4255  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4256  * @phba: The Hba for which this call is being executed.
4257  *
4258  * This routine starts the fcp_poll_timer of @phba.
4259  **/
4260 void lpfc_poll_start_timer(struct lpfc_hba *phba)
4261 {
4262 	lpfc_poll_rearm_timer(phba);
4263 }
4264 
4265 /**
4266  * lpfc_poll_timeout - Restart polling timer
4267  * @ptr: Map to lpfc_hba data structure pointer.
4268  *
4269  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4270  * and the FCP ring interrupt is disabled.
4271  **/
4272 
4273 void lpfc_poll_timeout(unsigned long ptr)
4274 {
4275 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4276 
4277 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4278 		lpfc_sli_handle_fast_ring_event(phba,
4279 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4280 
4281 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4282 			lpfc_poll_rearm_timer(phba);
4283 	}
4284 }
4285 
4286 /**
4287  * lpfc_queuecommand - scsi_host_template queuecommand entry point
4288  * @shost: Pointer to Scsi_Host data structure.
4289  * @cmnd: Pointer to scsi_cmnd data structure.
4290  *
4291  * The driver registers this routine with the scsi midlayer to submit @cmnd
4292  * for processing. It prepares an IOCB from the scsi command and hands it to
4293  * the firmware; completion is reported through cmnd->scsi_done().
4294  *
4295  * Return value :
4296  *   0 - Success
4297  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4298  **/
4299 static int
4300 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4301 {
4302 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4303 	struct lpfc_hba   *phba = vport->phba;
4304 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4305 	struct lpfc_nodelist *ndlp;
4306 	struct lpfc_scsi_buf *lpfc_cmd;
4307 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4308 	int err;
4309 
4310 	err = fc_remote_port_chkready(rport);
4311 	if (err) {
4312 		cmnd->result = err;
4313 		goto out_fail_command;
4314 	}
4315 	ndlp = rdata->pnode;
4316 
4317 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4318 		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4319 
4320 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4321 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4322 				" op:%02x str=%s without registering for"
4323 				" BlockGuard - Rejecting command\n",
4324 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4325 				dif_op_str[scsi_get_prot_op(cmnd)]);
4326 		goto out_fail_command;
4327 	}
4328 
4329 	/*
4330 	 * Catch race where our node has transitioned, but the
4331 	 * transport is still transitioning.
4332 	 */
4333 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4334 		goto out_tgt_busy;
4335 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4336 		goto out_tgt_busy;
4337 
4338 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4339 	if (lpfc_cmd == NULL) {
4340 		lpfc_rampdown_queue_depth(phba);
4341 
4342 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4343 				 "0707 driver's buffer pool is empty, "
4344 				 "IO busied\n");
4345 		goto out_host_busy;
4346 	}
4347 
4348 	/*
4349 	 * Store the midlayer's command structure for the completion phase
4350 	 * and complete the command initialization.
4351 	 */
4352 	lpfc_cmd->pCmd  = cmnd;
4353 	lpfc_cmd->rdata = rdata;
4354 	lpfc_cmd->timeout = 0;
4355 	lpfc_cmd->start_time = jiffies;
4356 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4357 
4358 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4359 		if (vport->phba->cfg_enable_bg) {
4360 			lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4361 					 "9033 BLKGRD: rcvd %s cmd:x%x "
4362 					 "sector x%llx cnt %u pt %x\n",
4363 					 dif_op_str[scsi_get_prot_op(cmnd)],
4364 					 cmnd->cmnd[0],
4365 					 (unsigned long long)scsi_get_lba(cmnd),
4366 					 blk_rq_sectors(cmnd->request),
4367 					 (cmnd->cmnd[1]>>5));
4368 		}
4369 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4370 	} else {
4371 		if (vport->phba->cfg_enable_bg) {
4372 			lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
4373 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4374 					 "x%x sector x%llx cnt %u pt %x\n",
4375 					 cmnd->cmnd[0],
4376 					 (unsigned long long)scsi_get_lba(cmnd),
4377 					 blk_rq_sectors(cmnd->request),
4378 					 (cmnd->cmnd[1]>>5));
4379 		}
4380 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4381 	}
4382 
4383 	if (err)
4384 		goto out_host_busy_free_buf;
4385 
4386 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4387 
4388 	atomic_inc(&ndlp->cmd_pending);
4389 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4390 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4391 	if (err) {
4392 		atomic_dec(&ndlp->cmd_pending);
4393 		goto out_host_busy_free_buf;
4394 	}
4395 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4396 		lpfc_sli_handle_fast_ring_event(phba,
4397 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4398 
4399 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4400 			lpfc_poll_rearm_timer(phba);
4401 	}
4402 
4403 	return 0;
4404 
4405  out_host_busy_free_buf:
4406 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4407 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4408  out_host_busy:
4409 	return SCSI_MLQUEUE_HOST_BUSY;
4410 
4411  out_tgt_busy:
4412 	return SCSI_MLQUEUE_TARGET_BUSY;
4413 
4414  out_fail_command:
4415 	cmnd->scsi_done(cmnd);
4416 	return 0;
4417 }
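
/*
 * A minimal sketch of the queuecommand return contract used above: 0
 * means the command was accepted and will complete via scsi_done(),
 * while the MLQUEUE codes ask the midlayer to retry later at host or
 * target scope. The helper name is hypothetical.
 */
static inline int example_queue_disposition(bool host_busy, bool tgt_busy)
{
	if (host_busy)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (tgt_busy)
		return SCSI_MLQUEUE_TARGET_BUSY;
	return 0;	/* accepted */
}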
4418 
4419 
4420 /**
4421  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4422  * @cmnd: Pointer to scsi_cmnd data structure.
4423  *
4424  * This routine aborts @cmnd pending in base driver.
4425  *
4426  * Return code :
4427  *   0x2003 - Error
4428  *   0x2002 - Success
4429  **/
4430 static int
4431 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4432 {
4433 	struct Scsi_Host  *shost = cmnd->device->host;
4434 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4435 	struct lpfc_hba   *phba = vport->phba;
4436 	struct lpfc_iocbq *iocb;
4437 	struct lpfc_iocbq *abtsiocb;
4438 	struct lpfc_scsi_buf *lpfc_cmd;
4439 	IOCB_t *cmd, *icmd;
4440 	int ret = SUCCESS, status = 0;
4441 	unsigned long flags;
4442 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4443 
4444 	status = fc_block_scsi_eh(cmnd);
4445 	if (status != 0 && status != SUCCESS)
4446 		return status;
4447 
4448 	spin_lock_irqsave(&phba->hbalock, flags);
4449 	/* driver queued commands are in process of being flushed */
4450 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4451 		spin_unlock_irqrestore(&phba->hbalock, flags);
4452 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4453 			"3168 SCSI Layer abort requested I/O has been "
4454 			"flushed by LLD.\n");
4455 		return FAILED;
4456 	}
4457 
4458 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4459 	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4460 		spin_unlock_irqrestore(&phba->hbalock, flags);
4461 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4462 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4463 			 "x%x ID %d LUN %d\n",
4464 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
4465 		return SUCCESS;
4466 	}
4467 
4468 	iocb = &lpfc_cmd->cur_iocbq;
4469 	/* the command is in process of being cancelled */
4470 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4471 		spin_unlock_irqrestore(&phba->hbalock, flags);
4472 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4473 			"3169 SCSI Layer abort requested I/O has been "
4474 			"cancelled by LLD.\n");
4475 		return FAILED;
4476 	}
4477 	/*
4478 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
4479 	 * points to a different SCSI command, then the driver has
4480 	 * already completed this command, but the midlayer did not
4481 	 * see the completion before the eh fired. Just return SUCCESS.
4482 	 */
4483 	if (lpfc_cmd->pCmd != cmnd) {
4484 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4485 			"3170 SCSI Layer abort requested I/O has been "
4486 			"completed by LLD.\n");
4487 		goto out_unlock;
4488 	}
4489 
4490 	BUG_ON(iocb->context1 != lpfc_cmd);
4491 
4492 	abtsiocb = __lpfc_sli_get_iocbq(phba);
4493 	if (abtsiocb == NULL) {
4494 		ret = FAILED;
4495 		goto out_unlock;
4496 	}
4497 
4498 	/*
4499 	 * The scsi command cannot be in the txq and must be in flight because
4500 	 * pCmd is still pointing at the SCSI command we have to abort. There
4501 	 * is no need to search the txcmplq. Just send an abort to the FW.
4502 	 */
4503 
4504 	cmd = &iocb->iocb;
4505 	icmd = &abtsiocb->iocb;
4506 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4507 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
4508 	if (phba->sli_rev == LPFC_SLI_REV4)
4509 		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4510 	else
4511 		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4512 
4513 	icmd->ulpLe = 1;
4514 	icmd->ulpClass = cmd->ulpClass;
4515 
4516 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
4517 	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4518 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4519 
4520 	if (lpfc_is_link_up(phba))
4521 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
4522 	else
4523 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4524 
4525 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4526 	abtsiocb->vport = vport;
4527 	/* no longer need the lock after this point */
4528 	spin_unlock_irqrestore(&phba->hbalock, flags);
4529 
4530 	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4531 	    IOCB_ERROR) {
4532 		lpfc_sli_release_iocbq(phba, abtsiocb);
4533 		ret = FAILED;
4534 		goto out;
4535 	}
4536 
4537 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4538 		lpfc_sli_handle_fast_ring_event(phba,
4539 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4540 
4541 	lpfc_cmd->waitq = &waitq;
4542 	/* Wait for abort to complete */
4543 	wait_event_timeout(waitq,
4544 			  (lpfc_cmd->pCmd != cmnd),
4545 			   (2*vport->cfg_devloss_tmo*HZ));
4546 	lpfc_cmd->waitq = NULL;
4547 
4548 	if (lpfc_cmd->pCmd == cmnd) {
4549 		ret = FAILED;
4550 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4551 				 "0748 abort handler timed out waiting "
4552 				 "for aborting I/O (xri:x%x) to complete: "
4553 				 "ret %#x, ID %d, LUN %d\n",
4554 				 iocb->sli4_xritag, ret,
4555 				 cmnd->device->id, cmnd->device->lun);
4556 	}
4557 	goto out;
4558 
4559 out_unlock:
4560 	spin_unlock_irqrestore(&phba->hbalock, flags);
4561 out:
4562 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4563 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4564 			 "LUN %d\n", ret, cmnd->device->id,
4565 			 cmnd->device->lun);
4566 	return ret;
4567 }
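
/*
 * A minimal sketch of the completion wait used by the abort handler
 * above: sleep until the completion path clears pCmd (and wakes waitq),
 * or until the bounded timeout expires. The helper name is hypothetical.
 */
static bool example_wait_for_abort(struct lpfc_scsi_buf *lpfc_cmd,
				   struct scsi_cmnd *cmnd,
				   unsigned long tmo_secs)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_cmd->waitq = &waitq;
	wait_event_timeout(waitq, lpfc_cmd->pCmd != cmnd, tmo_secs * HZ);
	lpfc_cmd->waitq = NULL;
	return lpfc_cmd->pCmd != cmnd;	/* true if the I/O completed */
}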
4568 
4569 static char *
4570 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4571 {
4572 	switch (task_mgmt_cmd) {
4573 	case FCP_ABORT_TASK_SET:
4574 		return "ABORT_TASK_SET";
4575 	case FCP_CLEAR_TASK_SET:
4576 		return "FCP_CLEAR_TASK_SET";
4577 	case FCP_BUS_RESET:
4578 		return "FCP_BUS_RESET";
4579 	case FCP_LUN_RESET:
4580 		return "FCP_LUN_RESET";
4581 	case FCP_TARGET_RESET:
4582 		return "FCP_TARGET_RESET";
4583 	case FCP_CLEAR_ACA:
4584 		return "FCP_CLEAR_ACA";
4585 	case FCP_TERMINATE_TASK:
4586 		return "FCP_TERMINATE_TASK";
4587 	default:
4588 		return "unknown";
4589 	}
4590 }
4591 
4592 /**
4593  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4594  * @vport: The virtual port for which this call is being executed.
4595  * @rdata: Pointer to remote port local data
4596  * @tgt_id: Target ID of remote device.
4597  * @lun_id: Lun number for the TMF
4598  * @task_mgmt_cmd: type of TMF to send
4599  *
4600  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4601  * a remote port.
4602  *
4603  * Return Code:
4604  *   0x2003 - Error
4605  *   0x2002 - Success.
4606  **/
4607 static int
4608 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4609 		    unsigned  tgt_id, unsigned int lun_id,
4610 		    uint8_t task_mgmt_cmd)
4611 {
4612 	struct lpfc_hba   *phba = vport->phba;
4613 	struct lpfc_scsi_buf *lpfc_cmd;
4614 	struct lpfc_iocbq *iocbq;
4615 	struct lpfc_iocbq *iocbqrsp;
4616 	struct lpfc_nodelist *pnode = rdata->pnode;
4617 	int ret;
4618 	int status;
4619 
4620 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4621 		return FAILED;
4622 
4623 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
4624 	if (lpfc_cmd == NULL)
4625 		return FAILED;
4626 	lpfc_cmd->timeout = 60;
4627 	lpfc_cmd->rdata = rdata;
4628 
4629 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4630 					   task_mgmt_cmd);
4631 	if (!status) {
4632 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4633 		return FAILED;
4634 	}
4635 
4636 	iocbq = &lpfc_cmd->cur_iocbq;
4637 	iocbqrsp = lpfc_sli_get_iocbq(phba);
4638 	if (iocbqrsp == NULL) {
4639 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4640 		return FAILED;
4641 	}
4642 
4643 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4644 			 "0702 Issue %s to TGT %d LUN %d "
4645 			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
4646 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
4647 			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
4648 			 iocbq->iocb_flag);
4649 
4650 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
4651 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
4652 	if (status != IOCB_SUCCESS) {
4653 		if (status == IOCB_TIMEDOUT) {
4654 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
4655 			ret = TIMEOUT_ERROR;
4656 		} else
4657 			ret = FAILED;
4658 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4659 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4660 			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
4661 			 "iocb_flag x%x\n",
4662 			 lpfc_taskmgmt_name(task_mgmt_cmd),
4663 			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
4664 			 iocbqrsp->iocb.un.ulpWord[4],
4665 			 iocbq->iocb_flag);
4666 	} else if (status == IOCB_BUSY)
4667 		ret = FAILED;
4668 	else
4669 		ret = SUCCESS;
4670 
4671 	lpfc_sli_release_iocbq(phba, iocbqrsp);
4672 
4673 	if (ret != TIMEOUT_ERROR)
4674 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4675 
4676 	return ret;
4677 }
4678 
4679 /**
4680  * lpfc_chk_tgt_mapped - Wait until the scsi target (rport) is mapped
4681  * @vport: The virtual port to check on
4682  * @cmnd: Pointer to scsi_cmnd data structure.
4683  *
4684  * This routine delays until the scsi target (aka rport) for the
4685  * command exists (is present and logged in) or we declare it non-existent.
4686  *
4687  * Return code :
4688  *  0x2003 - Error
4689  *  0x2002 - Success
4690  **/
4691 static int
4692 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4693 {
4694 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4695 	struct lpfc_nodelist *pnode;
4696 	unsigned long later;
4697 
4698 	if (!rdata) {
4699 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4700 			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
4701 		return FAILED;
4702 	}
4703 	pnode = rdata->pnode;
4704 	/*
4705 	 * If target is not in a MAPPED state, delay until
4706 	 * target is rediscovered or devloss timeout expires.
4707 	 */
4708 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4709 	while (time_after(later, jiffies)) {
4710 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4711 			return FAILED;
4712 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4713 			return SUCCESS;
4714 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4715 		rdata = cmnd->device->hostdata;
4716 		if (!rdata)
4717 			return FAILED;
4718 		pnode = rdata->pnode;
4719 	}
4720 	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4721 	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4722 		return FAILED;
4723 	return SUCCESS;
4724 }
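
/*
 * A minimal sketch of the bounded polling loop used above and by the
 * flush routine below: retry a condition until it holds or a deadline
 * derived from devloss_tmo passes. The callback form is hypothetical.
 */
static int example_wait_until(bool (*cond)(void *arg), void *arg,
			      unsigned int tmo_msecs)
{
	unsigned long later = jiffies + msecs_to_jiffies(tmo_msecs);

	while (time_after(later, jiffies)) {
		if (cond(arg))
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	return FAILED;
}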
4725 
4726 /**
4727  * lpfc_reset_flush_io_context - Flush I/O contexts after a reset condition
4728  * @vport: The virtual port (scsi_host) for the flush context
4729  * @tgt_id: If aborting by Target context - specifies the target id
4730  * @lun_id: If aborting by Lun context - specifies the lun id
4731  * @context: specifies the context level to flush at.
4732  *
4733  * After a reset condition via TMF, we need to flush orphaned i/o
4734  * contexts from the adapter. This routine aborts any contexts
4735  * outstanding, then waits for their completions. The wait is
4736  * bounded by devloss_tmo though.
4737  *
4738  * Return code :
4739  *  0x2003 - Error
4740  *  0x2002 - Success
4741  **/
4742 static int
4743 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
4744 			uint64_t lun_id, lpfc_ctx_cmd context)
4745 {
4746 	struct lpfc_hba   *phba = vport->phba;
4747 	unsigned long later;
4748 	int cnt;
4749 
4750 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
4751 	if (cnt)
4752 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
4753 				    tgt_id, lun_id, context);
4754 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4755 	while (time_after(later, jiffies) && cnt) {
4756 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
4757 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
4758 	}
4759 	if (cnt) {
4760 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4761 			"0724 I/O flush failure for context %s : cnt x%x\n",
4762 			((context == LPFC_CTX_LUN) ? "LUN" :
4763 			 ((context == LPFC_CTX_TGT) ? "TGT" :
4764 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
4765 			cnt);
4766 		return FAILED;
4767 	}
4768 	return SUCCESS;
4769 }
4770 
4771 /**
4772  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
4773  * @cmnd: Pointer to scsi_cmnd data structure.
4774  *
4775  * This routine does a device reset by sending a LUN_RESET task management
4776  * command.
4777  *
4778  * Return code :
4779  *  0x2003 - Error
4780  *  0x2002 - Success
4781  **/
4782 static int
4783 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4784 {
4785 	struct Scsi_Host  *shost = cmnd->device->host;
4786 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4787 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4788 	struct lpfc_nodelist *pnode;
4789 	unsigned tgt_id = cmnd->device->id;
4790 	unsigned int lun_id = cmnd->device->lun;
4791 	struct lpfc_scsi_event_header scsi_event;
4792 	int status, ret = SUCCESS;
4793 
4794 	if (!rdata) {
4795 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4796 			"0798 Device Reset rport failure: rdata x%p\n", rdata);
4797 		return FAILED;
4798 	}
4799 	pnode = rdata->pnode;
4800 	status = fc_block_scsi_eh(cmnd);
4801 	if (status != 0 && status != SUCCESS)
4802 		return status;
4803 
4804 	status = lpfc_chk_tgt_mapped(vport, cmnd);
4805 	if (status == FAILED) {
4806 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4807 			"0721 Device Reset rport failure: rdata x%p\n", rdata);
4808 		return FAILED;
4809 	}
4810 
4811 	scsi_event.event_type = FC_REG_SCSI_EVENT;
4812 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
4813 	scsi_event.lun = lun_id;
4814 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4815 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4816 
4817 	fc_host_post_vendor_event(shost, fc_get_event_number(),
4818 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4819 
4820 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4821 						FCP_LUN_RESET);
4822 
4823 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4824 			 "0713 SCSI layer issued Device Reset (%d, %d) "
4825 			 "return x%x\n", tgt_id, lun_id, status);
4826 
4827 	/*
4828 	 * We have to clean up the i/o, as it may be orphaned by the TMF;
4829 	 * or, if the TMF failed, it may be in an indeterminate state.
4830 	 * So, continue on.
4831 	 * We will report success if all the i/o aborts successfully.
4832 	 */
4833 	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4834 						LPFC_CTX_LUN);
4835 	return ret;
4836 }
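
/*
 * A minimal sketch of the reset pattern shared by this handler and the
 * target reset handler below: issue the TMF, then flush any orphaned
 * I/O at the matching context and report the flush result. Error
 * handling and event posting are omitted; the helper is hypothetical.
 */
static int example_lun_reset(struct lpfc_vport *vport,
			     struct lpfc_rport_data *rdata,
			     uint16_t tgt_id, uint64_t lun_id)
{
	(void)lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				 FCP_LUN_RESET);
	return lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					   LPFC_CTX_LUN);
}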
4837 
4838 /**
4839  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
4840  * @cmnd: Pointer to scsi_cmnd data structure.
4841  *
4842  * This routine does a target reset by sending a TARGET_RESET task management
4843  * command.
4844  *
4845  * Return code :
4846  *  0x2003 - Error
4847  *  0x2002 - Success
4848  **/
4849 static int
4850 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4851 {
4852 	struct Scsi_Host  *shost = cmnd->device->host;
4853 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4854 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4855 	struct lpfc_nodelist *pnode;
4856 	unsigned tgt_id = cmnd->device->id;
4857 	unsigned int lun_id = cmnd->device->lun;
4858 	struct lpfc_scsi_event_header scsi_event;
4859 	int status, ret = SUCCESS;
4860 
4861 	if (!rdata) {
4862 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4863 			"0799 Target Reset rport failure: rdata x%p\n", rdata);
4864 		return FAILED;
4865 	}
4866 	pnode = rdata->pnode;
4867 	status = fc_block_scsi_eh(cmnd);
4868 	if (status != 0 && status != SUCCESS)
4869 		return status;
4870 
4871 	status = lpfc_chk_tgt_mapped(vport, cmnd);
4872 	if (status == FAILED) {
4873 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4874 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
4875 		return FAILED;
4876 	}
4877 
4878 	scsi_event.event_type = FC_REG_SCSI_EVENT;
4879 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
4880 	scsi_event.lun = 0;
4881 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4882 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4883 
4884 	fc_host_post_vendor_event(shost, fc_get_event_number(),
4885 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4886 
4887 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4888 					FCP_TARGET_RESET);
4889 
4890 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4891 			 "0723 SCSI layer issued Target Reset (%d, %d) "
4892 			 "return x%x\n", tgt_id, lun_id, status);
4893 
4894 	/*
4895 	 * We have to clean up the I/O because it may have been orphaned by
4896 	 * the TMF, or, if the TMF failed, it may be in an indeterminate
4897 	 * state.  Either way, continue on; report success only if all of
4898 	 * the I/O aborts successfully.
4899 	 */
4900 	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4901 					  LPFC_CTX_TGT);
4902 	return ret;
4903 }
4904 
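
/*
 * Illustrative sketch (not in the original source): both reset handlers
 * above build an lpfc_scsi_event_header and post it with
 * fc_host_post_vendor_event().  A hypothetical helper factoring out that
 * pattern might look like this; lpfc_post_scsi_reset_event is an assumed
 * name used only for illustration.
 */
static void
lpfc_post_scsi_reset_event(struct Scsi_Host *shost, struct lpfc_nodelist *pnode,
			   uint32_t subcategory, uint32_t lun)
{
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = subcategory;
	scsi_event.lun = lun;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	/* Deliver the vendor-unique event to the FC transport class */
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
}
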
4905 /**
4906  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
4907  * @cmnd: Pointer to scsi_cmnd data structure.
4908  *
4909  * This routine does a target reset on all targets of @cmnd->device->host.
4910  * This emulates parallel SCSI bus reset semantics.
4911  *
4912  * Return code:
4913  *  0x2003 - Error
4914  *  0x2002 - Success
4915  **/
4916 static int
4917 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
4918 {
4919 	struct Scsi_Host  *shost = cmnd->device->host;
4920 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4921 	struct lpfc_nodelist *ndlp = NULL;
4922 	struct lpfc_scsi_event_header scsi_event;
4923 	int match;
4924 	int ret = SUCCESS, status, i;
4925 
4926 	scsi_event.event_type = FC_REG_SCSI_EVENT;
4927 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
4928 	scsi_event.lun = 0;
4929 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
4930 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
4931 
4932 	fc_host_post_vendor_event(shost, fc_get_event_number(),
4933 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4934 
4935 	status = fc_block_scsi_eh(cmnd);
4936 	if (status != 0 && status != SUCCESS)
4937 		return status;
4938 
4939 	/*
4940 	 * Since the driver manages a single bus device, reset all
4941 	 * targets known to the driver.  Should any target reset
4942 	 * fail, this routine returns failure to the midlayer.
4943 	 */
4944 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
4945 		/* Search for mapped node by target ID */
4946 		match = 0;
4947 		spin_lock_irq(shost->host_lock);
4948 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4949 			if (!NLP_CHK_NODE_ACT(ndlp))
4950 				continue;
4951 			if (vport->phba->cfg_fcp2_no_tgt_reset &&
4952 			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
4953 				continue;
4954 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
4955 			    ndlp->nlp_sid == i &&
4956 			    ndlp->rport) {
4957 				match = 1;
4958 				break;
4959 			}
4960 		}
4961 		spin_unlock_irq(shost->host_lock);
4962 		if (!match)
4963 			continue;
4964 
4965 		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
4966 					i, 0, FCP_TARGET_RESET);
4967 
4968 		if (status != SUCCESS) {
4969 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4970 					 "0700 Bus Reset on target %d failed\n",
4971 					 i);
4972 			ret = FAILED;
4973 		}
4974 	}
4975 	/*
4976 	 * We have to clean up the I/O because it may have been orphaned by
4977 	 * the TMFs above, or, if any of the TMFs failed, it may be in an
4978 	 * indeterminate state.  Report success only if all of the I/O
4979 	 * aborts successfully.
4980 	 */
4981 
4982 	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
4983 	if (status != SUCCESS)
4984 		ret = FAILED;
4985 
4986 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4987 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
4988 	return ret;
4989 }
4990 
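
/*
 * Illustrative sketch (not in the original source): the bus reset loop above
 * walks vport->fc_nodes under shost->host_lock looking for an active, mapped
 * node with a matching SCSI target id.  The hypothetical predicate below
 * restates that match condition; lpfc_node_matches_tgt is an assumed name.
 */
static inline int
lpfc_node_matches_tgt(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		      unsigned int tgt_id)
{
	if (!NLP_CHK_NODE_ACT(ndlp))
		return 0;
	/* FCP-2 devices may be excluded from target reset by configuration */
	if (vport->phba->cfg_fcp2_no_tgt_reset &&
	    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
		return 0;
	return (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
		ndlp->nlp_sid == tgt_id && ndlp->rport);
}
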
4991 /**
4992  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
4993  * @cmnd: Pointer to scsi_cmnd data structure.
4994  *
4995  * This routine does a host reset of the adapter port. It brings the HBA
4996  * offline, performs a board restart, and then brings the board back online.
4997  * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
4998  * rejects all outstanding SCSI commands on the host, returning the errors
4999  * back to the SCSI mid-layer. As this is the SCSI mid-layer's last resort
5000  * for error handling, this routine only returns error if resetting the
5001  * adapter is unsuccessful; in all other cases, it returns success.
5002  *
5003  * Return code:
5004  *  0x2003 - Error
5005  *  0x2002 - Success
5006  **/
5007 static int
5008 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5009 {
5010 	struct Scsi_Host *shost = cmnd->device->host;
5011 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5012 	struct lpfc_hba *phba = vport->phba;
5013 	int rc, ret = SUCCESS;
5014 
5015 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5016 	lpfc_offline(phba);
5017 	rc = lpfc_sli_brdrestart(phba);
5018 	if (rc)
5019 		ret = FAILED;
5020 	lpfc_online(phba);
5021 	lpfc_unblock_mgmt_io(phba);
5022 
5023 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
5024 			"3172 SCSI layer issued Host Reset Data: x%x\n", ret);
5025 	return ret;
5026 }
5027 
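
/*
 * Illustrative sketch (not in the original source): the host reset above is
 * a fixed offline -> board restart -> online -> unblock sequence.  Wrapped
 * as a hypothetical helper it reads as follows; lpfc_restart_board is an
 * assumed name for illustration only.
 */
static int lpfc_restart_board(struct lpfc_hba *phba)
{
	int rc;

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);	/* quiesce the port */
	lpfc_offline(phba);			/* abort outstanding I/O */
	rc = lpfc_sli_brdrestart(phba);		/* reset the board */
	lpfc_online(phba);			/* bring the port back up */
	lpfc_unblock_mgmt_io(phba);		/* re-allow management I/O */

	return rc ? FAILED : SUCCESS;
}
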
5028 /**
5029  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5030  * @sdev: Pointer to scsi_device.
5031  *
5032  * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
5033  * globally available list of scsi buffers. It also makes sure that no more
5034  * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
5035  * This list of scsi buffers exists for the lifetime of the driver.
5036  *
5037  * Return codes:
5038  *   non-0 - Error
5039  *   0 - Success
5040  **/
5041 static int
5042 lpfc_slave_alloc(struct scsi_device *sdev)
5043 {
5044 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5045 	struct lpfc_hba   *phba = vport->phba;
5046 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5047 	uint32_t total = 0;
5048 	uint32_t num_to_alloc = 0;
5049 	int num_allocated = 0;
5050 	uint32_t sdev_cnt;
5051 
5052 	if (!rport || fc_remote_port_chkready(rport))
5053 		return -ENXIO;
5054 
5055 	sdev->hostdata = rport->dd_data;
5056 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5057 
5058 	/*
5059 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5060 	 * available list of scsi buffers.  Don't allocate more than the
5061 	 * HBA limit conveyed to the midlayer via the host structure.  The
5062 	 * formula accounts for the lun_queue_depth + error handlers + 1
5063 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
5064 	 */
5065 	total = phba->total_scsi_bufs;
5066 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
5067 
5068 	/* If allocated buffers are enough do nothing */
5069 	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5070 		return 0;
5071 
5072 	/* Always keep some exchanges available to complete discovery */
5073 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5074 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5075 				 "0704 At limitation of %d preallocated "
5076 				 "command buffers\n", total);
5077 		return 0;
5078 	/* Trim the request so discovery exchanges remain available */
5079 	} else if (total + num_to_alloc >
5080 		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5081 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5082 				 "0705 Allocation request of %d "
5083 				 "command buffers will exceed max of %d.  "
5084 				 "Reducing allocation request to %d.\n",
5085 				 num_to_alloc, phba->cfg_hba_queue_depth,
5086 				 (phba->cfg_hba_queue_depth - total));
5087 		num_to_alloc = phba->cfg_hba_queue_depth - total;
5088 	}
5089 	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5090 	if (num_to_alloc != num_allocated) {
5091 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5092 				 "0708 Allocation request of %d "
5093 				 "command buffers did not succeed.  "
5094 				 "Allocated %d buffers.\n",
5095 				 num_to_alloc, num_allocated);
5096 	}
5097 	if (num_allocated > 0)
5098 		phba->total_scsi_bufs += num_allocated;
5099 	return 0;
5100 }
5101 
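
/*
 * Illustrative sketch (not in the original source): the sizing logic in
 * lpfc_slave_alloc() reduces to clamping the per-LUN request against the
 * HBA queue depth while reserving LPFC_DISC_IOCB_BUFF_COUNT exchanges for
 * discovery.  The hypothetical helper below restates that arithmetic;
 * lpfc_clamp_buf_request is an assumed name.
 */
static inline uint32_t
lpfc_clamp_buf_request(uint32_t total, uint32_t num_to_alloc,
		       uint32_t hba_queue_depth)
{
	uint32_t limit = hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT;

	if (total >= limit)			/* already at the cap */
		return 0;
	if (total + num_to_alloc > limit)	/* trim, as the driver does */
		return hba_queue_depth - total;
	return num_to_alloc;
}
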
5102 /**
5103  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5104  * @sdev: Pointer to scsi_device.
5105  *
5106  * This routine configures the following items:
5107  *   - Tag command queuing support for @sdev if supported.
5108  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5109  *
5110  * Return codes:
5111  *   0 - Success
5112  **/
5113 static int
5114 lpfc_slave_configure(struct scsi_device *sdev)
5115 {
5116 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5117 	struct lpfc_hba   *phba = vport->phba;
5118 
5119 	if (sdev->tagged_supported)
5120 		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5121 	else
5122 		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5123 
5124 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5125 		lpfc_sli_handle_fast_ring_event(phba,
5126 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5127 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5128 			lpfc_poll_rearm_timer(phba);
5129 	}
5130 
5131 	return 0;
5132 }
5133 
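
/*
 * Illustrative sketch (not in the original source): when FCP ring polling is
 * enabled, lpfc_slave_configure() services the fast ring once and, if ring
 * interrupts are disabled, re-arms the poll timer.  The hypothetical helper
 * below isolates that pattern; lpfc_poll_kick is an assumed name.
 */
static inline void lpfc_poll_kick(struct lpfc_hba *phba)
{
	if (!(phba->cfg_poll & ENABLE_FCP_RING_POLLING))
		return;
	/* Service any completions pending on the FCP fast ring */
	lpfc_sli_handle_fast_ring_event(phba,
		&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_poll_rearm_timer(phba);
}
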
5134 /**
5135  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5136  * @sdev: Pointer to scsi_device.
5137  *
5138  * This routine sets the @sdev hostdata field to NULL.
5139  **/
5140 static void
5141 lpfc_slave_destroy(struct scsi_device *sdev)
5142 {
5143 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5144 	struct lpfc_hba   *phba = vport->phba;
5145 	atomic_dec(&phba->sdev_cnt);
5146 	sdev->hostdata = NULL;
5147 	return;
5148 }
5149 
5150 
5151 struct scsi_host_template lpfc_template = {
5152 	.module			= THIS_MODULE,
5153 	.name			= LPFC_DRIVER_NAME,
5154 	.info			= lpfc_info,
5155 	.queuecommand		= lpfc_queuecommand,
5156 	.eh_abort_handler	= lpfc_abort_handler,
5157 	.eh_device_reset_handler = lpfc_device_reset_handler,
5158 	.eh_target_reset_handler = lpfc_target_reset_handler,
5159 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5160 	.eh_host_reset_handler  = lpfc_host_reset_handler,
5161 	.slave_alloc		= lpfc_slave_alloc,
5162 	.slave_configure	= lpfc_slave_configure,
5163 	.slave_destroy		= lpfc_slave_destroy,
5164 	.scan_finished		= lpfc_scan_finished,
5165 	.this_id		= -1,
5166 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5167 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5168 	.use_clustering		= ENABLE_CLUSTERING,
5169 	.shost_attrs		= lpfc_hba_attrs,
5170 	.max_sectors		= 0xFFFF,
5171 	.vendor_id		= LPFC_NL_VENDOR_ID,
5172 	.change_queue_depth	= lpfc_change_queue_depth,
5173 	.change_queue_type	= lpfc_change_queue_type,
5174 };
5175 
5176 struct scsi_host_template lpfc_vport_template = {
5177 	.module			= THIS_MODULE,
5178 	.name			= LPFC_DRIVER_NAME,
5179 	.info			= lpfc_info,
5180 	.queuecommand		= lpfc_queuecommand,
5181 	.eh_abort_handler	= lpfc_abort_handler,
5182 	.eh_device_reset_handler = lpfc_device_reset_handler,
5183 	.eh_target_reset_handler = lpfc_target_reset_handler,
5184 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5185 	.slave_alloc		= lpfc_slave_alloc,
5186 	.slave_configure	= lpfc_slave_configure,
5187 	.slave_destroy		= lpfc_slave_destroy,
5188 	.scan_finished		= lpfc_scan_finished,
5189 	.this_id		= -1,
5190 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5191 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5192 	.use_clustering		= ENABLE_CLUSTERING,
5193 	.shost_attrs		= lpfc_vport_attrs,
5194 	.max_sectors		= 0xFFFF,
5195 	.change_queue_depth	= lpfc_change_queue_depth,
5196 	.change_queue_type	= lpfc_change_queue_type,
5197 };
5198
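
/*
 * Illustrative sketch (not in the original source): elsewhere in the driver
 * (lpfc_init.c) these templates are handed to the SCSI midlayer when a port
 * is created, along the lines of the fragment below.  The helper name and
 * the simplified call are assumptions for illustration, not the driver's
 * exact code; NPIV ports would use lpfc_vport_template instead.
 */
static struct Scsi_Host *lpfc_alloc_shost_sketch(void)
{
	/* Allocate a host with room for the driver's per-port private data */
	return scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
}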