1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <asm/unaligned.h>
25 
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32 
33 #include "lpfc_version.h"
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45 
46 #define LPFC_RESET_WAIT  2
47 #define LPFC_ABORT_WAIT  2
48 
49 int _dump_buf_done;
50 
51 static char *dif_op_str[] = {
52 	"SCSI_PROT_NORMAL",
53 	"SCSI_PROT_READ_INSERT",
54 	"SCSI_PROT_WRITE_STRIP",
55 	"SCSI_PROT_READ_STRIP",
56 	"SCSI_PROT_WRITE_INSERT",
57 	"SCSI_PROT_READ_PASS",
58 	"SCSI_PROT_WRITE_PASS",
59 	"SCSI_PROT_READ_CONVERT",
60 	"SCSI_PROT_WRITE_CONVERT"
61 };
62 static void
63 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
64 
65 static void
66 lpfc_debug_save_data(struct scsi_cmnd *cmnd)
67 {
68 	void *src, *dst;
69 	struct scatterlist *sgde = scsi_sglist(cmnd);
70 
71 	if (!_dump_buf_data) {
72 		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
73 				__func__);
74 		return;
75 	}
76 
77 
78 	if (!sgde) {
79 		printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
80 		return;
81 	}
82 
83 	dst = (void *) _dump_buf_data;
84 	while (sgde) {
85 		src = sg_virt(sgde);
86 		memcpy(dst, src, sgde->length);
87 		dst += sgde->length;
88 		sgde = sg_next(sgde);
89 	}
90 }
91 
92 static void
93 lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
94 {
95 	void *src, *dst;
96 	struct scatterlist *sgde = scsi_prot_sglist(cmnd);
97 
98 	if (!_dump_buf_dif) {
99 		printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
100 				__func__);
101 		return;
102 	}
103 
104 	if (!sgde) {
105 		printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
106 		return;
107 	}
108 
109 	dst = _dump_buf_dif;
110 	while (sgde) {
111 		src = sg_virt(sgde);
112 		memcpy(dst, src, sgde->length);
113 		dst += sgde->length;
114 		sgde = sg_next(sgde);
115 	}
116 }
117 
118 /**
119  * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
120  * @phba: Pointer to HBA object.
121  * @lpfc_cmd: lpfc scsi command object pointer.
122  *
123  * This function is called from the lpfc_prep_task_mgmt_cmd function to
124  * set the last bit in the response sge entry.
125  **/
126 static void
127 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
128 				struct lpfc_scsi_buf *lpfc_cmd)
129 {
130 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
131 	if (sgl) {
132 		sgl += 1;
133 		sgl->word2 = le32_to_cpu(sgl->word2);
134 		bf_set(lpfc_sli4_sge_last, sgl, 1);
135 		sgl->word2 = cpu_to_le32(sgl->word2);
136 	}
137 }
138 
139 /**
140  * lpfc_update_stats - Update statistical data for the command completion
141  * @phba: Pointer to HBA object.
142  * @lpfc_cmd: lpfc scsi command object pointer.
143  *
144  * This function is called when there is a command completion and this
145  * function updates the statistical data for the command completion.
146  **/
147 static void
148 lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
149 {
150 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
151 	struct lpfc_nodelist *pnode = rdata->pnode;
152 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
153 	unsigned long flags;
154 	struct Scsi_Host  *shost = cmd->device->host;
155 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
156 	unsigned long latency;
157 	int i;
158 
159 	if (cmd->result)
160 		return;
161 
162 	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
163 
164 	spin_lock_irqsave(shost->host_lock, flags);
165 	if (!vport->stat_data_enabled ||
166 		vport->stat_data_blocked ||
167 		!pnode->lat_data ||
168 		(phba->bucket_type == LPFC_NO_BUCKET)) {
169 		spin_unlock_irqrestore(shost->host_lock, flags);
170 		return;
171 	}
172 
173 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
174 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
175 			phba->bucket_step;
176 		/* check array subscript bounds */
177 		if (i < 0)
178 			i = 0;
179 		else if (i >= LPFC_MAX_BUCKET_COUNT)
180 			i = LPFC_MAX_BUCKET_COUNT - 1;
181 	} else {
182 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
183 			if (latency <= (phba->bucket_base +
184 				((1<<i)*phba->bucket_step)))
185 				break;
186 	}
187 
188 	pnode->lat_data[i].cmd_count++;
189 	spin_unlock_irqrestore(shost->host_lock, flags);
190 }
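
/*
 * A worked example of the bucket math above (illustrative values only):
 * with the linear bucket type, bucket_base = 0 and bucket_step = 10 (msec),
 * a 25 msec completion maps to index (25 + 10 - 1 - 0) / 10 = 3, clamped to
 * [0, LPFC_MAX_BUCKET_COUNT - 1].  With the power-of-2 bucket type the
 * boundaries instead grow as bucket_base + (1 << i) * bucket_step, and the
 * first bucket whose boundary is >= the latency is used.
 */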
191 
192 /**
193  * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
194  * @phba: Pointer to HBA context object.
195  * @vport: Pointer to vport object.
196  * @ndlp: Pointer to FC node associated with the target.
197  * @lun: Lun number of the scsi device.
198  * @old_val: Old value of the queue depth.
199  * @new_val: New value of the queue depth.
200  *
201  * This function sends an event to the mgmt application indicating
202  * there is a change in the scsi device queue depth.
203  **/
204 static void
205 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
206 		struct lpfc_vport  *vport,
207 		struct lpfc_nodelist *ndlp,
208 		uint32_t lun,
209 		uint32_t old_val,
210 		uint32_t new_val)
211 {
212 	struct lpfc_fast_path_event *fast_path_evt;
213 	unsigned long flags;
214 
215 	fast_path_evt = lpfc_alloc_fast_evt(phba);
216 	if (!fast_path_evt)
217 		return;
218 
219 	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
220 		FC_REG_SCSI_EVENT;
221 	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
222 		LPFC_EVENT_VARQUEDEPTH;
223 
224 	/* Report all luns with change in queue depth */
225 	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
226 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
227 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
228 			&ndlp->nlp_portname, sizeof(struct lpfc_name));
229 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
230 			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
231 	}
232 
233 	fast_path_evt->un.queue_depth_evt.oldval = old_val;
234 	fast_path_evt->un.queue_depth_evt.newval = new_val;
235 	fast_path_evt->vport = vport;
236 
237 	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
238 	spin_lock_irqsave(&phba->hbalock, flags);
239 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
240 	spin_unlock_irqrestore(&phba->hbalock, flags);
241 	lpfc_worker_wake_up(phba);
242 
243 	return;
244 }
245 
246 /**
247  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
248  * @phba: The Hba for which this call is being executed.
249  *
250  * This routine is called when there is a resource error in the driver or
251  * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba and posts at
252  * most 1 event each second. This routine wakes up the worker thread of
253  * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
254  *
255  * This routine should be called with no lock held.
256  **/
257 void
258 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
259 {
260 	unsigned long flags;
261 	uint32_t evt_posted;
262 
263 	spin_lock_irqsave(&phba->hbalock, flags);
264 	atomic_inc(&phba->num_rsrc_err);
265 	phba->last_rsrc_error_time = jiffies;
266 
267 	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
268 		spin_unlock_irqrestore(&phba->hbalock, flags);
269 		return;
270 	}
271 
272 	phba->last_ramp_down_time = jiffies;
273 
274 	spin_unlock_irqrestore(&phba->hbalock, flags);
275 
276 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
277 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
278 	if (!evt_posted)
279 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
280 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
281 
282 	if (!evt_posted)
283 		lpfc_worker_wake_up(phba);
284 	return;
285 }
286 
287 /**
288  * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
289  * @vport: The virtual port for which this call is being executed.
290  * @queue_depth: The current queue depth of the scsi device.
291  *
292  * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport, at most one
293  * event every 5 minutes after last_ramp_up_time or last_rsrc_error_time,
294  * and wakes up the worker thread of the HBA to process that event.
295  *
296  * This routine should be called with no lock held.
297  **/
298 static inline void
299 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
300 			uint32_t queue_depth)
301 {
302 	unsigned long flags;
303 	struct lpfc_hba *phba = vport->phba;
304 	uint32_t evt_posted;
305 	atomic_inc(&phba->num_cmd_success);
306 
307 	if (vport->cfg_lun_queue_depth <= queue_depth)
308 		return;
309 	spin_lock_irqsave(&phba->hbalock, flags);
310 	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
311 	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
312 		spin_unlock_irqrestore(&phba->hbalock, flags);
313 		return;
314 	}
315 	phba->last_ramp_up_time = jiffies;
316 	spin_unlock_irqrestore(&phba->hbalock, flags);
317 
318 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
319 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
320 	if (!evt_posted)
321 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
322 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
323 
324 	if (!evt_posted)
325 		lpfc_worker_wake_up(phba);
326 	return;
327 }
328 
329 /**
330  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
331  * @phba: The Hba for which this call is being executed.
332  *
333  * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
334  * worker thread. This routine reduces the queue depth for all scsi devices on
335  * each vport associated with @phba.
336  **/
337 void
338 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
339 {
340 	struct lpfc_vport **vports;
341 	struct Scsi_Host  *shost;
342 	struct scsi_device *sdev;
343 	unsigned long new_queue_depth, old_queue_depth;
344 	unsigned long num_rsrc_err, num_cmd_success;
345 	int i;
346 	struct lpfc_rport_data *rdata;
347 
348 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
349 	num_cmd_success = atomic_read(&phba->num_cmd_success);
350 
351 	vports = lpfc_create_vport_work_array(phba);
352 	if (vports != NULL)
353 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
354 			shost = lpfc_shost_from_vport(vports[i]);
355 			shost_for_each_device(sdev, shost) {
356 				new_queue_depth =
357 					sdev->queue_depth * num_rsrc_err /
358 					(num_rsrc_err + num_cmd_success);
359 				if (!new_queue_depth)
360 					new_queue_depth = sdev->queue_depth - 1;
361 				else
362 					new_queue_depth = sdev->queue_depth -
363 								new_queue_depth;
364 				old_queue_depth = sdev->queue_depth;
365 				if (sdev->ordered_tags)
366 					scsi_adjust_queue_depth(sdev,
367 							MSG_ORDERED_TAG,
368 							new_queue_depth);
369 				else
370 					scsi_adjust_queue_depth(sdev,
371 							MSG_SIMPLE_TAG,
372 							new_queue_depth);
373 				rdata = sdev->hostdata;
374 				if (rdata)
375 					lpfc_send_sdev_queuedepth_change_event(
376 						phba, vports[i],
377 						rdata->pnode,
378 						sdev->lun, old_queue_depth,
379 						new_queue_depth);
380 			}
381 		}
382 	lpfc_destroy_vport_work_array(phba, vports);
383 	atomic_set(&phba->num_rsrc_err, 0);
384 	atomic_set(&phba->num_cmd_success, 0);
385 }
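
/*
 * Sketch of the ramp-down math above with illustrative numbers: for a device
 * queue depth of 32, num_rsrc_err = 8 and num_cmd_success = 24, the computed
 * reduction is 32 * 8 / (8 + 24) = 8, so the depth is lowered to 32 - 8 = 24.
 * If the integer division yields 0, the depth is simply decremented by one.
 */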
386 
387 /**
388  * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
389  * @phba: The Hba for which this call is being executed.
390  *
391  * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
392  * worker thread. This routine increases the queue depth for all scsi devices
393  * on each vport associated with @phba by 1. This routine also resets the
394  * @phba num_rsrc_err and num_cmd_success counters to zero.
395  **/
396 void
397 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
398 {
399 	struct lpfc_vport **vports;
400 	struct Scsi_Host  *shost;
401 	struct scsi_device *sdev;
402 	int i;
403 	struct lpfc_rport_data *rdata;
404 
405 	vports = lpfc_create_vport_work_array(phba);
406 	if (vports != NULL)
407 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
408 			shost = lpfc_shost_from_vport(vports[i]);
409 			shost_for_each_device(sdev, shost) {
410 				if (vports[i]->cfg_lun_queue_depth <=
411 				    sdev->queue_depth)
412 					continue;
413 				if (sdev->ordered_tags)
414 					scsi_adjust_queue_depth(sdev,
415 							MSG_ORDERED_TAG,
416 							sdev->queue_depth+1);
417 				else
418 					scsi_adjust_queue_depth(sdev,
419 							MSG_SIMPLE_TAG,
420 							sdev->queue_depth+1);
421 				rdata = sdev->hostdata;
422 				if (rdata)
423 					lpfc_send_sdev_queuedepth_change_event(
424 						phba, vports[i],
425 						rdata->pnode,
426 						sdev->lun,
427 						sdev->queue_depth - 1,
428 						sdev->queue_depth);
429 			}
430 		}
431 	lpfc_destroy_vport_work_array(phba, vports);
432 	atomic_set(&phba->num_rsrc_err, 0);
433 	atomic_set(&phba->num_cmd_success, 0);
434 }
435 
436 /**
437  * lpfc_scsi_dev_block - set all scsi hosts to block state
438  * @phba: Pointer to HBA context object.
439  *
440  * This function walks the vport list and sets each SCSI host to block state
441  * by invoking the fc_remote_port_delete() routine. This function is invoked
442  * from EEH when the device's PCI slot has been permanently disabled.
443  **/
444 void
445 lpfc_scsi_dev_block(struct lpfc_hba *phba)
446 {
447 	struct lpfc_vport **vports;
448 	struct Scsi_Host  *shost;
449 	struct scsi_device *sdev;
450 	struct fc_rport *rport;
451 	int i;
452 
453 	vports = lpfc_create_vport_work_array(phba);
454 	if (vports != NULL)
455 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
456 			shost = lpfc_shost_from_vport(vports[i]);
457 			shost_for_each_device(sdev, shost) {
458 				rport = starget_to_rport(scsi_target(sdev));
459 				fc_remote_port_delete(rport);
460 			}
461 		}
462 	lpfc_destroy_vport_work_array(phba, vports);
463 }
464 
465 /**
466  * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
467  * @vport: The virtual port for which this call is being executed.
468  * @num_to_alloc: The requested number of buffers to allocate.
469  *
470  * This routine allocates scsi buffers for a device with SLI-3 interface spec.
471  * Each scsi buffer contains all the necessary information needed to initiate
472  * a SCSI I/O. The non-DMAable buffer region contains information to build
473  * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
474  * and the initial BPL. In addition to allocating memory, the FCP CMND and
475  * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
476  *
477  * Return codes:
478  *   int - number of scsi buffers that were allocated.
479  *   0 = failure, less than num_to_alloc is a partial failure.
480  **/
481 static int
482 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
483 {
484 	struct lpfc_hba *phba = vport->phba;
485 	struct lpfc_scsi_buf *psb;
486 	struct ulp_bde64 *bpl;
487 	IOCB_t *iocb;
488 	dma_addr_t pdma_phys_fcp_cmd;
489 	dma_addr_t pdma_phys_fcp_rsp;
490 	dma_addr_t pdma_phys_bpl;
491 	uint16_t iotag;
492 	int bcnt;
493 
494 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
495 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
496 		if (!psb)
497 			break;
498 
499 		/*
500 		 * Get memory from the pci pool to map the virt space to pci
501 		 * bus space for an I/O.  The DMA buffer includes space for the
502 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
503 		 * necessary to support the sg_tablesize.
504 		 */
505 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
506 					GFP_KERNEL, &psb->dma_handle);
507 		if (!psb->data) {
508 			kfree(psb);
509 			break;
510 		}
511 
512 		/* Initialize virtual ptrs to dma_buf region. */
513 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
514 
515 		/* Allocate iotag for psb->cur_iocbq. */
516 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
517 		if (iotag == 0) {
518 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
519 					psb->data, psb->dma_handle);
520 			kfree(psb);
521 			break;
522 		}
523 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
524 
525 		psb->fcp_cmnd = psb->data;
526 		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
527 		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
528 			sizeof(struct fcp_rsp);
529 
530 		/* Initialize local short-hand pointers. */
531 		bpl = psb->fcp_bpl;
532 		pdma_phys_fcp_cmd = psb->dma_handle;
533 		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
534 		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
535 			sizeof(struct fcp_rsp);
536 
537 		/*
538 		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
539 		 * are sg list bdes.  Initialize the first two and leave the
540 		 * rest for queuecommand.
541 		 */
542 		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
543 		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
544 		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
545 		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
546 		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
547 
548 		/* Setup the physical region for the FCP RSP */
549 		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
550 		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
551 		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
552 		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
553 		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
554 
555 		/*
556 		 * Since the IOCB for the FCP I/O is built into this
557 		 * lpfc_scsi_buf, initialize it with all known data now.
558 		 */
559 		iocb = &psb->cur_iocbq.iocb;
560 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
561 		if ((phba->sli_rev == 3) &&
562 				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
563 			/* fill in immediate fcp command BDE */
564 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
565 			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
566 			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
567 					unsli3.fcp_ext.icd);
568 			iocb->un.fcpi64.bdl.addrHigh = 0;
569 			iocb->ulpBdeCount = 0;
570 			iocb->ulpLe = 0;
571 			/* fill in response BDE */
572 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
573 							BUFF_TYPE_BDE_64;
574 			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
575 				sizeof(struct fcp_rsp);
576 			iocb->unsli3.fcp_ext.rbde.addrLow =
577 				putPaddrLow(pdma_phys_fcp_rsp);
578 			iocb->unsli3.fcp_ext.rbde.addrHigh =
579 				putPaddrHigh(pdma_phys_fcp_rsp);
580 		} else {
581 			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
582 			iocb->un.fcpi64.bdl.bdeSize =
583 					(2 * sizeof(struct ulp_bde64));
584 			iocb->un.fcpi64.bdl.addrLow =
585 					putPaddrLow(pdma_phys_bpl);
586 			iocb->un.fcpi64.bdl.addrHigh =
587 					putPaddrHigh(pdma_phys_bpl);
588 			iocb->ulpBdeCount = 1;
589 			iocb->ulpLe = 1;
590 		}
591 		iocb->ulpClass = CLASS3;
592 		psb->status = IOSTAT_SUCCESS;
593 		/* Put it back into the SCSI buffer list */
594 		lpfc_release_scsi_buf_s4(phba, psb);
595 
596 	}
597 
598 	return bcnt;
599 }
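
/*
 * Rough map of the per-command DMA buffer laid out above (offsets follow the
 * pointer math in lpfc_new_scsi_buf_s3; exact sizes depend on the build):
 *
 *   offset 0                                    struct fcp_cmnd
 *   offset sizeof(fcp_cmnd)                     struct fcp_rsp
 *   offset sizeof(fcp_cmnd) + sizeof(fcp_rsp)   BPL: FCP_CMD BDE, FCP_RSP BDE,
 *                                               then data BDEs filled in later
 *                                               by queuecommand
 */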
600 
601 /**
602  * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
603  * @phba: pointer to lpfc hba data structure.
604  * @axri: pointer to the fcp xri abort wcqe structure.
605  *
606  * This routine is invoked by the worker thread to process a SLI4 fast-path
607  * FCP aborted xri.
608  **/
609 void
610 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
611 			  struct sli4_wcqe_xri_aborted *axri)
612 {
613 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
614 	struct lpfc_scsi_buf *psb, *next_psb;
615 	unsigned long iflag = 0;
616 
617 	spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
618 	list_for_each_entry_safe(psb, next_psb,
619 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
620 		if (psb->cur_iocbq.sli4_xritag == xri) {
621 			list_del(&psb->list);
622 			psb->status = IOSTAT_SUCCESS;
623 			spin_unlock_irqrestore(
624 				&phba->sli4_hba.abts_scsi_buf_list_lock,
625 				iflag);
626 			lpfc_release_scsi_buf_s4(phba, psb);
627 			return;
628 		}
629 	}
630 	spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
631 				iflag);
632 }
633 
634 /**
635  * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block
636  * @phba: pointer to lpfc hba data structure.
637  *
638  * This routine walks the list of scsi buffers that have been allocated and
639  * reposts them to the HBA by using SGL block post. This is needed after a
640  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
641  * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
642  * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
643  *
644  * Returns: 0 = success, non-zero failure.
645  **/
646 int
647 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
648 {
649 	struct lpfc_scsi_buf *psb;
650 	int index, status, bcnt = 0, rcnt = 0, rc = 0;
651 	LIST_HEAD(sblist);
652 
653 	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
654 		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
655 		if (psb) {
656 			/* Remove from SCSI buffer list */
657 			list_del(&psb->list);
658 			/* Add it to a local SCSI buffer list */
659 			list_add_tail(&psb->list, &sblist);
660 			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
661 				bcnt = rcnt;
662 				rcnt = 0;
663 			}
664 		} else
665 			/* A hole present in the XRI array, need to skip */
666 			bcnt = rcnt;
667 
668 		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
669 			/* End of XRI array for SCSI buffer, complete */
670 			bcnt = rcnt;
671 
672 		/* Continue until collect up to a nembed page worth of sgls */
673 		if (bcnt == 0)
674 			continue;
675 		/* Now, post the SCSI buffer list sgls as a block */
676 		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
677 		/* Reset SCSI buffer count for next round of posting */
678 		bcnt = 0;
679 		while (!list_empty(&sblist)) {
680 			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
681 					 list);
682 			if (status) {
683 				/* Put this back on the abort scsi list */
684 				psb->status = IOSTAT_LOCAL_REJECT;
685 				psb->result = IOERR_ABORT_REQUESTED;
686 				rc++;
687 			} else
688 				psb->status = IOSTAT_SUCCESS;
689 			/* Put it back into the SCSI buffer list */
690 			lpfc_release_scsi_buf_s4(phba, psb);
691 		}
692 	}
693 	return rc;
694 }
695 
696 /**
697  * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
698  * @vport: The virtual port for which this call is being executed.
699  * @num_to_alloc: The requested number of buffers to allocate.
700  *
701  * This routine allocates scsi buffers for a device with SLI-4 interface spec.
702  * Each scsi buffer contains all the necessary information needed to initiate
703  * a SCSI I/O.
704  *
705  * Return codes:
706  *   int - number of scsi buffers that were allocated.
707  *   0 = failure, less than num_to_alloc is a partial failure.
708  **/
709 static int
710 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
711 {
712 	struct lpfc_hba *phba = vport->phba;
713 	struct lpfc_scsi_buf *psb;
714 	struct sli4_sge *sgl;
715 	IOCB_t *iocb;
716 	dma_addr_t pdma_phys_fcp_cmd;
717 	dma_addr_t pdma_phys_fcp_rsp;
718 	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
719 	uint16_t iotag, last_xritag = NO_XRI;
720 	int status = 0, index;
721 	int bcnt;
722 	int non_sequential_xri = 0;
723 	int rc = 0;
724 	LIST_HEAD(sblist);
725 
726 	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
727 		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
728 		if (!psb)
729 			break;
730 
731 		/*
732 		 * Get memory from the pci pool to map the virt space to pci bus
733 		 * space for an I/O.  The DMA buffer includes space for the
734 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
735 		 * necessary to support the sg_tablesize.
736 		 */
737 		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
738 						GFP_KERNEL, &psb->dma_handle);
739 		if (!psb->data) {
740 			kfree(psb);
741 			break;
742 		}
743 
744 		/* Initialize virtual ptrs to dma_buf region. */
745 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
746 
747 		/* Allocate iotag for psb->cur_iocbq. */
748 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
749 		if (iotag == 0) {
750 			kfree(psb);
751 			break;
752 		}
753 
754 		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
755 		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
756 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
757 			      psb->data, psb->dma_handle);
758 			kfree(psb);
759 			break;
760 		}
761 		if (last_xritag != NO_XRI
762 			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
763 			non_sequential_xri = 1;
764 		} else
765 			list_add_tail(&psb->list, &sblist);
766 		last_xritag = psb->cur_iocbq.sli4_xritag;
767 
768 		index = phba->sli4_hba.scsi_xri_cnt++;
769 		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
770 
771 		psb->fcp_bpl = psb->data;
772 		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
773 			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
774 		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
775 					sizeof(struct fcp_cmnd));
776 
777 		/* Initialize local short-hand pointers. */
778 		sgl = (struct sli4_sge *)psb->fcp_bpl;
779 		pdma_phys_bpl = psb->dma_handle;
780 		pdma_phys_fcp_cmd =
781 			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
782 			 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
783 		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
784 
785 		/*
786 		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
787 		 * are sg list bdes.  Initialize the first two and leave the
788 		 * rest for queuecommand.
789 		 */
790 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
791 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
792 		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
793 		bf_set(lpfc_sli4_sge_last, sgl, 0);
794 		sgl->word2 = cpu_to_le32(sgl->word2);
795 		sgl->word3 = cpu_to_le32(sgl->word3);
796 		sgl++;
797 
798 		/* Setup the physical region for the FCP RSP */
799 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
800 		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
801 		bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
802 		bf_set(lpfc_sli4_sge_last, sgl, 1);
803 		sgl->word2 = cpu_to_le32(sgl->word2);
804 		sgl->word3 = cpu_to_le32(sgl->word3);
805 
806 		/*
807 		 * Since the IOCB for the FCP I/O is built into this
808 		 * lpfc_scsi_buf, initialize it with all known data now.
809 		 */
810 		iocb = &psb->cur_iocbq.iocb;
811 		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
812 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
813 		/* setting the BLP size to 2 * sizeof BDE may not be correct.
814 		 * We are setting the bpl to point to our sgl. An sgl's
815 		 * entries are 16 bytes, while a bpl's entries are 12 bytes.
816 		 */
817 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
818 		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
819 		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
820 		iocb->ulpBdeCount = 1;
821 		iocb->ulpLe = 1;
822 		iocb->ulpClass = CLASS3;
823 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
824 			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
825 		else
826 			pdma_phys_bpl1 = 0;
827 		psb->dma_phys_bpl = pdma_phys_bpl;
828 		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
829 		if (non_sequential_xri) {
830 			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
831 						pdma_phys_bpl1,
832 						psb->cur_iocbq.sli4_xritag);
833 			if (status) {
834 				/* Put this back on the abort scsi list */
835 				psb->status = IOSTAT_LOCAL_REJECT;
836 				psb->result = IOERR_ABORT_REQUESTED;
837 				rc++;
838 			} else
839 				psb->status = IOSTAT_SUCCESS;
840 			/* Put it back into the SCSI buffer list */
841 			lpfc_release_scsi_buf_s4(phba, psb);
842 			break;
843 		}
844 	}
845 	if (bcnt) {
846 		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
847 		/* Reset SCSI buffer count for next round of posting */
848 		while (!list_empty(&sblist)) {
849 			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
850 				 list);
851 			if (status) {
852 				/* Put this back on the abort scsi list */
853 				psb->status = IOSTAT_LOCAL_REJECT;
854 				psb->result = IOERR_ABORT_REQUESTED;
855 				rc++;
856 			} else
857 				psb->status = IOSTAT_SUCCESS;
858 			/* Put it back into the SCSI buffer list */
859 			lpfc_release_scsi_buf_s4(phba, psb);
860 		}
861 	}
862 
863 	return bcnt + non_sequential_xri - rc;
864 }
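
/*
 * For SLI-4 the same DMA buffer is laid out the other way around (per the
 * pointer math in lpfc_new_scsi_buf_s4): the SGL starts at offset 0, while
 * struct fcp_cmnd and struct fcp_rsp are carved out of the tail of the
 * cfg_sg_dma_buf_size region, i.e. fcp_cmnd begins at
 * cfg_sg_dma_buf_size - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)).
 */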
865 
866 /**
867  * lpfc_new_scsi_buf - Wrapper function for the scsi buffer allocator
868  * @vport: The virtual port for which this call is being executed.
869  * @num_to_alloc: The requested number of buffers to allocate.
870  *
871  * This routine wraps the actual SCSI buffer allocator function pointer from
872  * the lpfc_hba struct.
873  *
874  * Return codes:
875  *   int - number of scsi buffers that were allocated.
876  *   0 = failure, less than num_to_alloc is a partial failure.
877  **/
878 static inline int
879 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
880 {
881 	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
882 }
883 
884 /**
885  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
886  * @phba: The HBA for which this call is being executed.
887  *
888  * This routine removes a scsi buffer from the head of the @phba
889  * lpfc_scsi_buf_list list and returns it to the caller.
890  *
891  * Return codes:
892  *   NULL - Error
893  *   Pointer to lpfc_scsi_buf - Success
894  **/
895 static struct lpfc_scsi_buf*
896 lpfc_get_scsi_buf(struct lpfc_hba * phba)
897 {
898 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
899 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
900 	unsigned long iflag = 0;
901 
902 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
903 	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
904 	if (lpfc_cmd) {
905 		lpfc_cmd->seg_cnt = 0;
906 		lpfc_cmd->nonsg_phys = 0;
907 		lpfc_cmd->prot_seg_cnt = 0;
908 	}
909 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
910 	return  lpfc_cmd;
911 }
912 
913 /**
914  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
915  * @phba: The Hba for which this call is being executed.
916  * @psb: The scsi buffer which is being released.
917  *
918  * This routine releases @psb scsi buffer by adding it to tail of @phba
919  * lpfc_scsi_buf_list list.
920  **/
921 static void
922 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
923 {
924 	unsigned long iflag = 0;
925 
926 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
927 	psb->pCmd = NULL;
928 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
929 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
930 }
931 
932 /**
933  * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
934  * @phba: The Hba for which this call is being executed.
935  * @psb: The scsi buffer which is being released.
936  *
937  * This routine releases @psb scsi buffer by adding it to tail of @phba
938  * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
939  * and cannot be reused for at least RA_TOV amount of time if the buffer
940  * was aborted.
941  **/
942 static void
943 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
944 {
945 	unsigned long iflag = 0;
946 
947 	if (psb->status == IOSTAT_LOCAL_REJECT
948 		&& psb->result == IOERR_ABORT_REQUESTED) {
949 		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
950 					iflag);
951 		psb->pCmd = NULL;
952 		list_add_tail(&psb->list,
953 			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
954 		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
955 					iflag);
956 	} else {
957 
958 		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
959 		psb->pCmd = NULL;
960 		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
961 		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
962 	}
963 }
964 
965 /**
966  * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
967  * @phba: The Hba for which this call is being executed.
968  * @psb: The scsi buffer which is being released.
969  *
970  * This routine releases @psb scsi buffer by adding it to tail of @phba
971  * lpfc_scsi_buf_list list.
972  **/
973 static void
974 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
975 {
976 
977 	phba->lpfc_release_scsi_buf(phba, psb);
978 }
979 
980 /**
981  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
982  * @phba: The Hba for which this call is being executed.
983  * @lpfc_cmd: The scsi buffer which is going to be mapped.
984  *
985  * This routine does the pci dma mapping for the scatter-gather list of the
986  * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
987  * scans through the sg elements and formats the bdes. This routine also
988  * initializes all IOCB fields that depend on the scsi command request buffer.
989  *
990  * Return codes:
991  *   1 - Error
992  *   0 - Success
993  **/
994 static int
995 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
996 {
997 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
998 	struct scatterlist *sgel = NULL;
999 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1000 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1001 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1002 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
1003 	dma_addr_t physaddr;
1004 	uint32_t num_bde = 0;
1005 	int nseg, datadir = scsi_cmnd->sc_data_direction;
1006 
1007 	/*
1008 	 * There are three possibilities here - use scatter-gather segment, use
1009 	 * the single mapping, or neither.  Start the lpfc command prep by
1010 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1011 	 * data bde entry.
1012 	 */
1013 	bpl += 2;
1014 	if (scsi_sg_count(scsi_cmnd)) {
1015 		/*
1016 		 * The driver stores the segment count returned from pci_map_sg
1017 		 * because this is a count of dma-mappings used to map the use_sg
1018 		 * pages.  They are not guaranteed to be the same for those
1019 		 * architectures that implement an IOMMU.
1020 		 */
1021 
1022 		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
1023 				  scsi_sg_count(scsi_cmnd), datadir);
1024 		if (unlikely(!nseg))
1025 			return 1;
1026 
1027 		lpfc_cmd->seg_cnt = nseg;
1028 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1029 			printk(KERN_ERR "%s: Too many sg segments from "
1030 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
1031 			       __func__, phba->cfg_sg_seg_cnt,
1032 			       lpfc_cmd->seg_cnt);
1033 			scsi_dma_unmap(scsi_cmnd);
1034 			return 1;
1035 		}
1036 
1037 		/*
1038 		 * The driver established a maximum scatter-gather segment count
1039 		 * during probe that limits the number of sg elements in any
1040 		 * single scsi command.  Just run through the seg_cnt and format
1041 		 * the bde's.
1042 		 * When using SLI-3 the driver will try to fit all the BDEs into
1043 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
1044 		 * does for SLI-2 mode.
1045 		 */
1046 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1047 			physaddr = sg_dma_address(sgel);
1048 			if (phba->sli_rev == 3 &&
1049 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
1050 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
1051 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1052 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
1053 				data_bde->addrLow = putPaddrLow(physaddr);
1054 				data_bde->addrHigh = putPaddrHigh(physaddr);
1055 				data_bde++;
1056 			} else {
1057 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1058 				bpl->tus.f.bdeSize = sg_dma_len(sgel);
1059 				bpl->tus.w = le32_to_cpu(bpl->tus.w);
1060 				bpl->addrLow =
1061 					le32_to_cpu(putPaddrLow(physaddr));
1062 				bpl->addrHigh =
1063 					le32_to_cpu(putPaddrHigh(physaddr));
1064 				bpl++;
1065 			}
1066 		}
1067 	}
1068 
1069 	/*
1070 	 * Finish initializing those IOCB fields that are dependent on the
1071 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
1072 	 * explicitly reinitialized and for SLI-3 the extended bde count is
1073 	 * explicitly reinitialized since all iocb memory resources are reused.
1074 	 */
1075 	if (phba->sli_rev == 3 &&
1076 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
1077 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
1078 			/*
1079 			 * The extended IOCB format can only fit 3 BDE or a BPL.
1080 			 * This I/O has more than 3 BDE so the 1st data bde will
1081 			 * be a BPL that is filled in here.
1082 			 */
1083 			physaddr = lpfc_cmd->dma_handle;
1084 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
1085 			data_bde->tus.f.bdeSize = (num_bde *
1086 						   sizeof(struct ulp_bde64));
1087 			physaddr += (sizeof(struct fcp_cmnd) +
1088 				     sizeof(struct fcp_rsp) +
1089 				     (2 * sizeof(struct ulp_bde64)));
1090 			data_bde->addrHigh = putPaddrHigh(physaddr);
1091 			data_bde->addrLow = putPaddrLow(physaddr);
1092 			/* ebde count includes the response bde and data bpl */
1093 			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
1094 		} else {
1095 			/* ebde count includes the response bde and data bdes */
1096 			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
1097 		}
1098 	} else {
1099 		iocb_cmd->un.fcpi64.bdl.bdeSize =
1100 			((num_bde + 2) * sizeof(struct ulp_bde64));
1101 	}
1102 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1103 
1104 	/*
1105 	 * Due to difference in data length between DIF/non-DIF paths,
1106 	 * we need to set word 4 of IOCB here
1107 	 */
1108 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1109 	return 0;
1110 }
1111 
1112 /*
1113  * Given a scsi cmnd, determine the BlockGuard profile to be used
1114  * with the cmd
1115  */
1116 static int
1117 lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
1118 {
1119 	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
1120 	uint8_t ret_prof = LPFC_PROF_INVALID;
1121 
1122 	if (guard_type == SHOST_DIX_GUARD_IP) {
1123 		switch (scsi_get_prot_op(sc)) {
1124 		case SCSI_PROT_READ_INSERT:
1125 		case SCSI_PROT_WRITE_STRIP:
1126 			ret_prof = LPFC_PROF_AST2;
1127 			break;
1128 
1129 		case SCSI_PROT_READ_STRIP:
1130 		case SCSI_PROT_WRITE_INSERT:
1131 			ret_prof = LPFC_PROF_A1;
1132 			break;
1133 
1134 		case SCSI_PROT_READ_CONVERT:
1135 		case SCSI_PROT_WRITE_CONVERT:
1136 			ret_prof = LPFC_PROF_AST1;
1137 			break;
1138 
1139 		case SCSI_PROT_READ_PASS:
1140 		case SCSI_PROT_WRITE_PASS:
1141 		case SCSI_PROT_NORMAL:
1142 		default:
1143 			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
1144 					scsi_get_prot_op(sc), guard_type);
1145 			break;
1146 
1147 		}
1148 	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
1149 		switch (scsi_get_prot_op(sc)) {
1150 		case SCSI_PROT_READ_STRIP:
1151 		case SCSI_PROT_WRITE_INSERT:
1152 			ret_prof = LPFC_PROF_A1;
1153 			break;
1154 
1155 		case SCSI_PROT_READ_PASS:
1156 		case SCSI_PROT_WRITE_PASS:
1157 			ret_prof = LPFC_PROF_C1;
1158 			break;
1159 
1160 		case SCSI_PROT_READ_CONVERT:
1161 		case SCSI_PROT_WRITE_CONVERT:
1162 		case SCSI_PROT_READ_INSERT:
1163 		case SCSI_PROT_WRITE_STRIP:
1164 		case SCSI_PROT_NORMAL:
1165 		default:
1166 			printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
1167 					scsi_get_prot_op(sc), guard_type);
1168 			break;
1169 		}
1170 	} else {
1171 		/* unsupported format */
1172 		BUG();
1173 	}
1174 
1175 	return ret_prof;
1176 }
1177 
1178 struct scsi_dif_tuple {
1179 	__be16 guard_tag;       /* Checksum */
1180 	__be16 app_tag;         /* Opaque storage */
1181 	__be32 ref_tag;         /* Target LBA or indirect LBA */
1182 };
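
/*
 * Each 8-byte tuple above guards one logical block.  As an illustrative
 * sketch (values assumed, not taken from a real trace): for a DIF Type 1
 * write to LBA 0x1234 on a 512-byte-sector device, ref_tag carries the low
 * 32 bits of the LBA (0x1234), guard_tag carries the CRC or IP checksum of
 * the 512 data bytes depending on the guard type, and app_tag is opaque.
 */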
1183 
1184 static inline unsigned
1185 lpfc_cmd_blksize(struct scsi_cmnd *sc)
1186 {
1187 	return sc->device->sector_size;
1188 }
1189 
1190 /**
1191  * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
1192  * @sc:             in: SCSI command
1193  * @apptagmask:     out: app tag mask
1194  * @apptagval:      out: app tag value
1195  * @reftag:         out: ref tag (reference tag)
1196  *
1197  * Description:
1198  *   Extract DIF parameters from the command if possible.  Otherwise,
1199  *   use default parameters.
1200  *
1201  **/
1202 static inline void
1203 lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
1204 		uint16_t *apptagval, uint32_t *reftag)
1205 {
1206 	struct  scsi_dif_tuple *spt;
1207 	unsigned char op = scsi_get_prot_op(sc);
1208 	unsigned int protcnt = scsi_prot_sg_count(sc);
1209 	static int cnt;
1210 
1211 	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
1212 				op == SCSI_PROT_WRITE_PASS ||
1213 				op == SCSI_PROT_WRITE_CONVERT)) {
1214 
1215 		cnt++;
1216 		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
1217 			scsi_prot_sglist(sc)[0].offset;
1218 		*apptagmask = 0;
1219 		*apptagval = 0;
1220 		*reftag = be32_to_cpu(spt->ref_tag);
1221 
1222 	} else {
1223 		/* SBC defines ref tag to be lower 32bits of LBA */
1224 		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
1225 		*apptagmask = 0;
1226 		*apptagval = 0;
1227 	}
1228 }
1229 
1230 /*
1231  * This function sets up buffer list for protection groups of
1232  * type LPFC_PG_TYPE_NO_DIF
1233  *
1234  * This is usually used when the HBA is instructed to generate
1235  * DIFs and insert them into data stream (or strip DIF from
1236  * incoming data stream)
1237  *
1238  * The buffer list consists of just one protection group described
1239  * below:
1240  *                                +-------------------------+
1241  *   start of prot group  -->     |          PDE_1          |
1242  *                                +-------------------------+
1243  *                                |         Data BDE        |
1244  *                                +-------------------------+
1245  *                                |more Data BDE's ... (opt)|
1246  *                                +-------------------------+
1247  *
1248  * @sc: pointer to scsi command we're working on
1249  * @bpl: pointer to buffer list for protection groups
1250  * @datasegcnt: number of segments of data that have been dma mapped
1251  *
1252  * Note: Data s/g buffers have been dma mapped
1253  */
1254 static int
1255 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1256 		struct ulp_bde64 *bpl, int datasegcnt)
1257 {
1258 	struct scatterlist *sgde = NULL; /* s/g data entry */
1259 	struct lpfc_pde *pde1 = NULL;
1260 	dma_addr_t physaddr;
1261 	int i = 0, num_bde = 0;
1262 	int datadir = sc->sc_data_direction;
1263 	int prof = LPFC_PROF_INVALID;
1264 	unsigned blksize;
1265 	uint32_t reftag;
1266 	uint16_t apptagmask, apptagval;
1267 
1268 	pde1 = (struct lpfc_pde *) bpl;
1269 	prof = lpfc_sc_to_sli_prof(sc);
1270 
1271 	if (prof == LPFC_PROF_INVALID)
1272 		goto out;
1273 
1274 	/* extract some info from the scsi command for PDE1*/
1275 	blksize = lpfc_cmd_blksize(sc);
1276 	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1277 
1278 	/* setup PDE1 with what we have */
1279 	lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1280 			BG_EC_STOP_ERR);
1281 	lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1282 
1283 	num_bde++;
1284 	bpl++;
1285 
1286 	/* assumption: caller has already run dma_map_sg on command data */
1287 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1288 		physaddr = sg_dma_address(sgde);
1289 		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1290 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1291 		bpl->tus.f.bdeSize = sg_dma_len(sgde);
1292 		if (datadir == DMA_TO_DEVICE)
1293 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1294 		else
1295 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1296 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
1297 		bpl++;
1298 		num_bde++;
1299 	}
1300 
1301 out:
1302 	return num_bde;
1303 }
1304 
1305 /*
1306  * This function sets up buffer list for protection groups of
1307  * type LPFC_PG_TYPE_DIF_BUF
1308  *
1309  * This is usually used when DIFs are in their own buffers,
1310  * separate from the data. The HBA can then be instructed
1311  * to place the DIFs in the outgoing stream.  For read operations,
1312  * the HBA could extract the DIFs and place them in DIF buffers.
1313  *
1314  * The buffer list for this type consists of one or more of the
1315  * protection groups described below:
1316  *                                    +-------------------------+
1317  *   start of first prot group  -->   |          PDE_1          |
1318  *                                    +-------------------------+
1319  *                                    |      PDE_3 (Prot BDE)   |
1320  *                                    +-------------------------+
1321  *                                    |        Data BDE         |
1322  *                                    +-------------------------+
1323  *                                    |more Data BDE's ... (opt)|
1324  *                                    +-------------------------+
1325  *   start of new  prot group  -->    |          PDE_1          |
1326  *                                    +-------------------------+
1327  *                                    |          ...            |
1328  *                                    +-------------------------+
1329  *
1330  * @sc: pointer to scsi command we're working on
1331  * @bpl: pointer to buffer list for protection groups
1332  * @datacnt: number of segments of data that have been dma mapped
1333  * @protcnt: number of segments of protection data that have been dma mapped
1334  *
1335  * Note: It is assumed that both data and protection s/g buffers have been
1336  *       mapped for DMA
1337  */
1338 static int
1339 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1340 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
1341 {
1342 	struct scatterlist *sgde = NULL; /* s/g data entry */
1343 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
1344 	struct lpfc_pde *pde1 = NULL;
1345 	struct ulp_bde64 *prot_bde = NULL;
1346 	dma_addr_t dataphysaddr, protphysaddr;
1347 	unsigned short curr_data = 0, curr_prot = 0;
1348 	unsigned int split_offset, protgroup_len;
1349 	unsigned int protgrp_blks, protgrp_bytes;
1350 	unsigned int remainder, subtotal;
1351 	int prof = LPFC_PROF_INVALID;
1352 	int datadir = sc->sc_data_direction;
1353 	unsigned char pgdone = 0, alldone = 0;
1354 	unsigned blksize;
1355 	uint32_t reftag;
1356 	uint16_t apptagmask, apptagval;
1357 	int num_bde = 0;
1358 
1359 	sgpe = scsi_prot_sglist(sc);
1360 	sgde = scsi_sglist(sc);
1361 
1362 	if (!sgpe || !sgde) {
1363 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1364 				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
1365 				sgpe, sgde);
1366 		return 0;
1367 	}
1368 
1369 	prof = lpfc_sc_to_sli_prof(sc);
1370 	if (prof == LPFC_PROF_INVALID)
1371 		goto out;
1372 
1373 	/* extract some info from the scsi command for PDE1*/
1374 	blksize = lpfc_cmd_blksize(sc);
1375 	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1376 
1377 	split_offset = 0;
1378 	do {
1379 		/* setup the first PDE_1 */
1380 		pde1 = (struct lpfc_pde *) bpl;
1381 
1382 		lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1383 				BG_EC_STOP_ERR);
1384 		lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1385 
1386 		num_bde++;
1387 		bpl++;
1388 
1389 		/* setup the first BDE that points to protection buffer */
1390 		prot_bde = (struct ulp_bde64 *) bpl;
1391 		protphysaddr = sg_dma_address(sgpe);
1392 		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1393 		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1394 		protgroup_len = sg_dma_len(sgpe);
1395 
1396 
1397 		/* must be integer multiple of the DIF block length */
1398 		BUG_ON(protgroup_len % 8);
1399 
1400 		protgrp_blks = protgroup_len / 8;
1401 		protgrp_bytes = protgrp_blks * blksize;
1402 
1403 		prot_bde->tus.f.bdeSize = protgroup_len;
1404 		if (datadir == DMA_TO_DEVICE)
1405 			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1406 		else
1407 			prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1408 		prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
1409 
1410 		curr_prot++;
1411 		num_bde++;
1412 
1413 		/* setup BDE's for data blocks associated with DIF data */
1414 		pgdone = 0;
1415 		subtotal = 0; /* total bytes processed for current prot grp */
1416 		while (!pgdone) {
1417 			if (!sgde) {
1418 				printk(KERN_ERR "%s Invalid data segment\n",
1419 						__func__);
1420 				return 0;
1421 			}
1422 			bpl++;
1423 			dataphysaddr = sg_dma_address(sgde) + split_offset;
1424 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1425 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1426 
1427 			remainder = sg_dma_len(sgde) - split_offset;
1428 
1429 			if ((subtotal + remainder) <= protgrp_bytes) {
1430 				/* we can use this whole buffer */
1431 				bpl->tus.f.bdeSize = remainder;
1432 				split_offset = 0;
1433 
1434 				if ((subtotal + remainder) == protgrp_bytes)
1435 					pgdone = 1;
1436 			} else {
1437 				/* must split this buffer with next prot grp */
1438 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1439 				split_offset += bpl->tus.f.bdeSize;
1440 			}
1441 
1442 			subtotal += bpl->tus.f.bdeSize;
1443 
1444 			if (datadir == DMA_TO_DEVICE)
1445 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1446 			else
1447 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1448 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
1449 
1450 			num_bde++;
1451 			curr_data++;
1452 
1453 			if (split_offset)
1454 				break;
1455 
1456 			/* Move to the next s/g segment if possible */
1457 			sgde = sg_next(sgde);
1458 		}
1459 
1460 		/* are we done ? */
1461 		if (curr_prot == protcnt) {
1462 			alldone = 1;
1463 		} else if (curr_prot < protcnt) {
1464 			/* advance to next prot buffer */
1465 			sgpe = sg_next(sgpe);
1466 			bpl++;
1467 
1468 			/* update the reference tag */
1469 			reftag += protgrp_blks;
1470 		} else {
1471 			/* if we're here, we have a bug */
1472 			printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
1473 		}
1474 
1475 	} while (!alldone);
1476 
1477 out:
1478 
1479 
1480 	return num_bde;
1481 }
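
/*
 * A quick sanity check of the protection-group sizing used above, with
 * assumed numbers: a protection s/g entry of 4096 bytes holds 4096 / 8 = 512
 * DIF tuples, so with a 512-byte block size one protection group spans
 * 512 * 512 = 256 KiB of data; data BDEs are split whenever a data s/g entry
 * crosses that protection-group boundary.
 */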
1482 /*
1483  * Given a SCSI command that supports DIF, determine composition of protection
1484  * groups involved in setting up buffer lists
1485  *
1486  * Returns:
1487  *   LPFC_PG_TYPE_NO_DIF, LPFC_PG_TYPE_DIF_BUF or LPFC_PG_TYPE_INVALID
1488  */
1489 static int
1490 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1491 {
1492 	int ret = LPFC_PG_TYPE_INVALID;
1493 	unsigned char op = scsi_get_prot_op(sc);
1494 
1495 	switch (op) {
1496 	case SCSI_PROT_READ_STRIP:
1497 	case SCSI_PROT_WRITE_INSERT:
1498 		ret = LPFC_PG_TYPE_NO_DIF;
1499 		break;
1500 	case SCSI_PROT_READ_INSERT:
1501 	case SCSI_PROT_WRITE_STRIP:
1502 	case SCSI_PROT_READ_PASS:
1503 	case SCSI_PROT_WRITE_PASS:
1504 	case SCSI_PROT_WRITE_CONVERT:
1505 	case SCSI_PROT_READ_CONVERT:
1506 		ret = LPFC_PG_TYPE_DIF_BUF;
1507 		break;
1508 	default:
1509 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1510 				"9021 Unsupported protection op:%d\n", op);
1511 		break;
1512 	}
1513 
1514 	return ret;
1515 }
1516 
1517 /*
1518  * This is the protection/DIF aware version of
1519  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1520  * two functions eventually, but for now, it's here
1521  */
1522 static int
1523 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1524 		struct lpfc_scsi_buf *lpfc_cmd)
1525 {
1526 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1527 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1528 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1529 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1530 	uint32_t num_bde = 0;
1531 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1532 	int prot_group_type = 0;
1533 	int diflen, fcpdl;
1534 	unsigned blksize;
1535 
1536 	/*
1537 	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1538 	 *  fcp_rsp regions to the first data bde entry
1539 	 */
1540 	bpl += 2;
1541 	if (scsi_sg_count(scsi_cmnd)) {
1542 		/*
1543 		 * The driver stores the segment count returned from pci_map_sg
1544 		 * because this is a count of dma-mappings used to map the use_sg
1545 		 * pages.  They are not guaranteed to be the same for those
1546 		 * architectures that implement an IOMMU.
1547 		 */
1548 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
1549 					scsi_sglist(scsi_cmnd),
1550 					scsi_sg_count(scsi_cmnd), datadir);
1551 		if (unlikely(!datasegcnt))
1552 			return 1;
1553 
1554 		lpfc_cmd->seg_cnt = datasegcnt;
1555 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1556 			printk(KERN_ERR "%s: Too many sg segments from "
1557 					"dma_map_sg.  Config %d, seg_cnt %d\n",
1558 					__func__, phba->cfg_sg_seg_cnt,
1559 					lpfc_cmd->seg_cnt);
1560 			scsi_dma_unmap(scsi_cmnd);
1561 			return 1;
1562 		}
1563 
1564 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1565 
1566 		switch (prot_group_type) {
1567 		case LPFC_PG_TYPE_NO_DIF:
1568 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1569 					datasegcnt);
1570 			/* we should have 2 or more entries in buffer list */
1571 			if (num_bde < 2)
1572 				goto err;
1573 			break;
1574 		case LPFC_PG_TYPE_DIF_BUF:{
1575 			/*
1576 			 * This type indicates that protection buffers are
1577 			 * passed to the driver, so that needs to be prepared
1578 			 * for DMA
1579 			 */
1580 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
1581 					scsi_prot_sglist(scsi_cmnd),
1582 					scsi_prot_sg_count(scsi_cmnd), datadir);
1583 			if (unlikely(!protsegcnt)) {
1584 				scsi_dma_unmap(scsi_cmnd);
1585 				return 1;
1586 			}
1587 
1588 			lpfc_cmd->prot_seg_cnt = protsegcnt;
1589 			if (lpfc_cmd->prot_seg_cnt
1590 			    > phba->cfg_prot_sg_seg_cnt) {
1591 				printk(KERN_ERR "%s: Too many prot sg segments "
1592 						"from dma_map_sg.  Config %d, "
1593 						"prot_seg_cnt %d\n", __func__,
1594 						phba->cfg_prot_sg_seg_cnt,
1595 						lpfc_cmd->prot_seg_cnt);
1596 				dma_unmap_sg(&phba->pcidev->dev,
1597 					     scsi_prot_sglist(scsi_cmnd),
1598 					     scsi_prot_sg_count(scsi_cmnd),
1599 					     datadir);
1600 				scsi_dma_unmap(scsi_cmnd);
1601 				return 1;
1602 			}
1603 
1604 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1605 					datasegcnt, protsegcnt);
1606 			/* we should have 3 or more entries in buffer list */
1607 			if (num_bde < 3)
1608 				goto err;
1609 			break;
1610 		}
1611 		case LPFC_PG_TYPE_INVALID:
1612 		default:
1613 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1614 					"9022 Unexpected protection group %i\n",
1615 					prot_group_type);
1616 			return 1;
1617 		}
1618 	}
1619 
1620 	/*
1621 	 * Finish initializing those IOCB fields that are dependent on the
1622 	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
1623 	 * reinitialized since all iocb memory resources are used many times
1624 	 * for transmit, receive, and continuation bpl's.
1625 	 */
1626 	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1627 	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1628 	iocb_cmd->ulpBdeCount = 1;
1629 	iocb_cmd->ulpLe = 1;
1630 
1631 	fcpdl = scsi_bufflen(scsi_cmnd);
1632 
1633 	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1634 		/*
1635 		 * We are in DIF Type 1 mode
1636 		 * Every data block has an 8-byte DIF (trailer)
1637 		 * attached to it.  Must adjust the FCP data length.
1638 		 */
1639 		blksize = lpfc_cmd_blksize(scsi_cmnd);
1640 		diflen = (fcpdl / blksize) * 8;
1641 		fcpdl += diflen;
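		/* e.g. a 65536-byte transfer with 512-byte blocks spans 128
		 * blocks, so 128 * 8 = 1024 bytes of DIF are added to fcpdl.
		 */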
1642 	}
1643 	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
1644 
1645 	/*
1646 	 * Due to difference in data length between DIF/non-DIF paths,
1647 	 * we need to set word 4 of IOCB here
1648 	 */
1649 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1650 
1651 	return 0;
1652 err:
1653 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1654 			"9023 Could not setup all needed BDE's "
1655 			"prot_group_type=%d, num_bde=%d\n",
1656 			prot_group_type, num_bde);
1657 	return 1;
1658 }
1659 
1660 /*
1661  * This function checks for BlockGuard errors detected by
1662  * the HBA.  In case of errors, the ASC/ASCQ fields in the
1663  * sense buffer will be set accordingly, paired with
1664  * ILLEGAL_REQUEST to signal to the kernel that the HBA
1665  * detected corruption.
1666  *
1667  * Returns:
1668  *  0 - No error found
1669  *  1 - BlockGuard error found
1670  * -1 - Internal error (bad profile, etc.)
1671  */
1672 static int
1673 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1674 			struct lpfc_iocbq *pIocbOut)
1675 {
1676 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1677 	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1678 	int ret = 0;
1679 	uint32_t bghm = bgf->bghm;
1680 	uint32_t bgstat = bgf->bgstat;
1681 	uint64_t failing_sector = 0;
1682 
1683 	printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
1684 			"bgstat=0x%x bghm=0x%x\n",
1685 			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1686 			blk_rq_sectors(cmd->request), bgstat, bghm);
1687 
1688 	spin_lock(&_dump_buf_lock);
1689 	if (!_dump_buf_done) {
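		/* For the 10-byte CDBs handled here, cmnd[7..8] carry the
		 * transfer length in blocks.
		 */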
1690 		printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
1691 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1692 		lpfc_debug_save_data(cmd);
1693 
1694 		/* If we have a prot sgl, save the DIF buffer */
1695 		if (lpfc_prot_group_type(phba, cmd) ==
1696 				LPFC_PG_TYPE_DIF_BUF) {
1697 			printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
1698 					(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1699 			lpfc_debug_save_dif(cmd);
1700 		}
1701 
1702 		_dump_buf_done = 1;
1703 	}
1704 	spin_unlock(&_dump_buf_lock);
1705 
1706 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
1707 		cmd->result = ScsiResult(DID_ERROR, 0);
1708 		printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
1709 				bgstat);
1710 		ret = (-1);
1711 		goto out;
1712 	}
1713 
1714 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1715 		cmd->result = ScsiResult(DID_ERROR, 0);
1716 		printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1717 				bgstat);
1718 		ret = (-1);
1719 		goto out;
1720 	}
1721 
1722 	if (lpfc_bgs_get_guard_err(bgstat)) {
1723 		ret = 1;
1724 
1725 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1726 				0x10, 0x1);
1727 		cmd->result = DRIVER_SENSE << 24
1728 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1729 		phba->bg_guard_err_cnt++;
1730 		printk(KERN_ERR "BLKGRD: guard_tag error\n");
1731 	}
1732 
1733 	if (lpfc_bgs_get_reftag_err(bgstat)) {
1734 		ret = 1;
1735 
1736 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1737 				0x10, 0x3);
1738 		cmd->result = DRIVER_SENSE << 24
1739 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1740 
1741 		phba->bg_reftag_err_cnt++;
1742 		printk(KERN_ERR "BLKGRD: ref_tag error\n");
1743 	}
1744 
1745 	if (lpfc_bgs_get_apptag_err(bgstat)) {
1746 		ret = 1;
1747 
1748 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1749 				0x10, 0x2);
1750 		cmd->result = DRIVER_SENSE << 24
1751 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1752 
1753 		phba->bg_apptag_err_cnt++;
1754 		printk(KERN_ERR "BLKGRD: app_tag error\n");
1755 	}
1756 
1757 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1758 		/*
1759 		 * setup sense data descriptor 0 per SPC-4 as an information
1760 		 * field, and put the failing LBA in it
1761 		 */
1762 		cmd->sense_buffer[8] = 0;     /* Information */
1763 		cmd->sense_buffer[9] = 0xa;   /* Add. length */
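		/* bghm is a byte count; scale it to sectors before adding it
		 * to the starting LBA of the command.
		 */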
1764 		bghm /= cmd->device->sector_size;
1765 
1766 		failing_sector = scsi_get_lba(cmd);
1767 		failing_sector += bghm;
1768 
1769 		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1770 	}
1771 
1772 	if (!ret) {
1773 		/* No error was reported - problem in FW? */
1774 		cmd->result = ScsiResult(DID_ERROR, 0);
1775 		printk(KERN_ERR "BLKGRD: no errors reported!\n");
1776 	}
1777 
1778 out:
1779 	return ret;
1780 }
1781 
1782 /**
1783  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1784  * @phba: The Hba for which this call is being executed.
1785  * @lpfc_cmd: The scsi buffer which is going to be mapped.
1786  *
1787  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1788  * field of @lpfc_cmd for device with SLI-4 interface spec.
1789  *
1790  * Return codes:
1791  * 	1 - Error
1792  * 	0 - Success
1793  **/
1794 static int
1795 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1796 {
1797 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1798 	struct scatterlist *sgel = NULL;
1799 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1800 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1801 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1802 	dma_addr_t physaddr;
1803 	uint32_t num_bde = 0;
1804 	uint32_t dma_len;
1805 	uint32_t dma_offset = 0;
1806 	int nseg;
1807 
1808 	/*
1809 	 * There are three possibilities here - use scatter-gather segment, use
1810 	 * the single mapping, or neither.  Start the lpfc command prep by
1811 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1812 	 * data bde entry.
1813 	 */
1814 	if (scsi_sg_count(scsi_cmnd)) {
1815 		/*
1816 		 * The driver stores the segment count returned from pci_map_sg
1817 		 * because this is a count of DMA mappings used to map the use_sg
1818 		 * pages.  They are not guaranteed to be the same for those
1819 		 * architectures that implement an IOMMU.
1820 		 */
1821 
1822 		nseg = scsi_dma_map(scsi_cmnd);
1823 		if (unlikely(!nseg))
1824 			return 1;
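		/* The first SGE maps fcp_cmnd and the second maps fcp_rsp;
		 * step past both so the data SGEs start at the third entry.
		 */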
1825 		sgl += 1;
1826 		/* clear the last flag in the fcp_rsp map entry */
1827 		sgl->word2 = le32_to_cpu(sgl->word2);
1828 		bf_set(lpfc_sli4_sge_last, sgl, 0);
1829 		sgl->word2 = cpu_to_le32(sgl->word2);
1830 		sgl += 1;
1831 
1832 		lpfc_cmd->seg_cnt = nseg;
1833 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1834 			printk(KERN_ERR "%s: Too many sg segments from "
1835 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
1836 			       __func__, phba->cfg_sg_seg_cnt,
1837 			       lpfc_cmd->seg_cnt);
1838 			scsi_dma_unmap(scsi_cmnd);
1839 			return 1;
1840 		}
1841 
1842 		/*
1843 		 * The driver established a maximum scatter-gather segment count
1844 		 * during probe that limits the number of sg elements in any
1845 		 * single scsi command.  Just run through the seg_cnt and format
1846 		 * the sge's.
1847 		 * When using SLI-3 the driver will try to fit all the BDEs into
1848 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
1849 		 * does for SLI-2 mode.
1850 		 */
1851 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1852 			physaddr = sg_dma_address(sgel);
1853 			dma_len = sg_dma_len(sgel);
1854 			bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
1855 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1856 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1857 			if ((num_bde + 1) == nseg)
1858 				bf_set(lpfc_sli4_sge_last, sgl, 1);
1859 			else
1860 				bf_set(lpfc_sli4_sge_last, sgl, 0);
1861 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1862 			sgl->word2 = cpu_to_le32(sgl->word2);
1863 			sgl->word3 = cpu_to_le32(sgl->word3);
1864 			dma_offset += dma_len;
1865 			sgl++;
1866 		}
1867 	} else {
1868 		sgl += 1;
1869 		/* clear the last flag in the fcp_rsp map entry */
1870 		sgl->word2 = le32_to_cpu(sgl->word2);
1871 		bf_set(lpfc_sli4_sge_last, sgl, 1);
1872 		sgl->word2 = cpu_to_le32(sgl->word2);
1873 	}
1874 
1875 	/*
1876 	 * Finish initializing those IOCB fields that are dependent on the
1877 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
1878 	 * explicitly reinitialized since all iocb memory resources are
1879 	 * reused.
1880 	 */
1881 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1882 
1883 	/*
1884 	 * Due to difference in data length between DIF/non-DIF paths,
1885 	 * we need to set word 4 of IOCB here
1886 	 */
1887 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1888 	return 0;
1889 }
1890 
1891 /**
1892  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1893  * @phba: The Hba for which this call is being executed.
1894  * @lpfc_cmd: The scsi buffer which is going to be mapped.
1895  *
1896  * This routine wraps the actual DMA mapping function pointer from the
1897  * lpfc_hba struct.
1898  *
1899  * Return codes:
1900  * 	1 - Error
1901  * 	0 - Success
1902  **/
1903 static inline int
1904 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1905 {
1906 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1907 }
1908 
1909 /**
1910  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1911  * @phba: Pointer to hba context object.
1912  * @vport: Pointer to vport object.
1913  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
1914  * @rsp_iocb: Pointer to response iocb object which reported error.
1915  *
1916  * This function posts an event when there is a SCSI command reporting
1917  * error from the scsi device.
1918  **/
1919 static void
1920 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1921 		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
1922 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1923 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1924 	uint32_t resp_info = fcprsp->rspStatus2;
1925 	uint32_t scsi_status = fcprsp->rspStatus3;
1926 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1927 	struct lpfc_fast_path_event *fast_path_evt = NULL;
1928 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
1929 	unsigned long flags;
1930 
1931 	/* If there is queuefull or busy condition send a scsi event */
1932 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
1933 		(cmnd->result == SAM_STAT_BUSY)) {
1934 		fast_path_evt = lpfc_alloc_fast_evt(phba);
1935 		if (!fast_path_evt)
1936 			return;
1937 		fast_path_evt->un.scsi_evt.event_type =
1938 			FC_REG_SCSI_EVENT;
1939 		fast_path_evt->un.scsi_evt.subcategory =
1940 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
1941 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
1942 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
1943 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
1944 			&pnode->nlp_portname, sizeof(struct lpfc_name));
1945 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
1946 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
1947 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
1948 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
1949 		fast_path_evt = lpfc_alloc_fast_evt(phba);
1950 		if (!fast_path_evt)
1951 			return;
1952 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
1953 			FC_REG_SCSI_EVENT;
1954 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
1955 			LPFC_EVENT_CHECK_COND;
1956 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
1957 			cmnd->device->lun;
1958 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
1959 			&pnode->nlp_portname, sizeof(struct lpfc_name));
1960 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
1961 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
1962 		fast_path_evt->un.check_cond_evt.sense_key =
1963 			cmnd->sense_buffer[2] & 0xf;
1964 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
1965 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
1966 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1967 		     fcpi_parm &&
1968 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
1969 			((scsi_status == SAM_STAT_GOOD) &&
1970 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
1971 		/*
1972 		 * If status is good or resid does not match with fcp_param and
1973 		 * there is valid fcpi_parm, then there is a read_check error
1974 		 */
1975 		fast_path_evt = lpfc_alloc_fast_evt(phba);
1976 		if (!fast_path_evt)
1977 			return;
1978 		fast_path_evt->un.read_check_error.header.event_type =
1979 			FC_REG_FABRIC_EVENT;
1980 		fast_path_evt->un.read_check_error.header.subcategory =
1981 			LPFC_EVENT_FCPRDCHKERR;
1982 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
1983 			&pnode->nlp_portname, sizeof(struct lpfc_name));
1984 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
1985 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
1986 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
1987 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
1988 		fast_path_evt->un.read_check_error.fcpiparam =
1989 			fcpi_parm;
1990 	} else
1991 		return;
1992 
1993 	fast_path_evt->vport = vport;
1994 	spin_lock_irqsave(&phba->hbalock, flags);
1995 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
1996 	spin_unlock_irqrestore(&phba->hbalock, flags);
1997 	lpfc_worker_wake_up(phba);
1998 	return;
1999 }
2000 
2001 /**
2002  * lpfc_scsi_unprep_dma_buf - Un-map the DMA mapping of an SG-list
2003  * @phba: The HBA for which this call is being executed.
2004  * @psb: The scsi buffer which is going to be un-mapped.
2005  *
2006  * This routine does DMA un-mapping of the scatter gather list of the scsi
2007  * command held in @psb.
2008  **/
2009 static void
2010 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2011 {
2012 	/*
2013 	 * There are only two special cases to consider.  (1) the scsi command
2014 	 * requested scatter-gather usage or (2) the scsi command allocated
2015 	 * a request buffer, but did not request use_sg.  There is a third
2016 	 * case, but it does not require resource deallocation.
2017 	 */
2018 	if (psb->seg_cnt > 0)
2019 		scsi_dma_unmap(psb->pCmd);
2020 	if (psb->prot_seg_cnt > 0)
2021 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
2022 				scsi_prot_sg_count(psb->pCmd),
2023 				psb->pCmd->sc_data_direction);
2024 }
2025 
2026 /**
2027  * lpfc_handle_fcp_err - FCP response handler
2028  * @vport: The virtual port for which this call is being executed.
2029  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2030  * @rsp_iocb: The response IOCB which contains FCP error.
2031  *
2032  * This routine is called to process response IOCB with status field
2033  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
2034  * based upon SCSI and FCP error.
2035  **/
2036 static void
2037 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2038 		    struct lpfc_iocbq *rsp_iocb)
2039 {
2040 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2041 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
2042 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2043 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2044 	uint32_t resp_info = fcprsp->rspStatus2;
2045 	uint32_t scsi_status = fcprsp->rspStatus3;
2046 	uint32_t *lp;
2047 	uint32_t host_status = DID_OK;
2048 	uint32_t rsplen = 0;
2049 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
2050 
2051 
2052 	/*
2053 	 *  If this is a task management command, there is no
2054 	 *  scsi packet associated with this lpfc_cmd.  The driver
2055 	 *  consumes it.
2056 	 */
2057 	if (fcpcmd->fcpCntl2) {
2058 		scsi_status = 0;
2059 		goto out;
2060 	}
2061 
2062 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2063 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2064 		if (snslen > SCSI_SENSE_BUFFERSIZE)
2065 			snslen = SCSI_SENSE_BUFFERSIZE;
2066 
2067 		if (resp_info & RSP_LEN_VALID)
2068 			rsplen = be32_to_cpu(fcprsp->rspRspLen);
2069 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
2070 	}
2071 	lp = (uint32_t *)cmnd->sense_buffer;
2072 
2073 	if (!scsi_status && (resp_info & RESID_UNDER))
2074 		logit = LOG_FCP;
2075 
2076 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
2077 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
2078 			 "Data: x%x x%x x%x x%x x%x\n",
2079 			 cmnd->cmnd[0], scsi_status,
2080 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
2081 			 be32_to_cpu(fcprsp->rspResId),
2082 			 be32_to_cpu(fcprsp->rspSnsLen),
2083 			 be32_to_cpu(fcprsp->rspRspLen),
2084 			 fcprsp->rspInfo3);
2085 
2086 	if (resp_info & RSP_LEN_VALID) {
2087 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
2088 		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
2089 		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
2090 			host_status = DID_ERROR;
2091 			goto out;
2092 		}
2093 	}
2094 
2095 	scsi_set_resid(cmnd, 0);
2096 	if (resp_info & RESID_UNDER) {
2097 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
2098 
2099 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2100 				 "9025 FCP Read Underrun, expected %d, "
2101 				 "residual %d Data: x%x x%x x%x\n",
2102 				 be32_to_cpu(fcpcmd->fcpDl),
2103 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
2104 				 cmnd->underflow);
2105 
2106 		/*
2107 		 * If there is an underrun, check whether the underrun reported
2108 		 * by the storage array matches the underrun reported by the HBA.
2109 		 * If they do not match, a frame was dropped.
2110 		 */
2111 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2112 			fcpi_parm &&
2113 			(scsi_get_resid(cmnd) != fcpi_parm)) {
2114 			lpfc_printf_vlog(vport, KERN_WARNING,
2115 					 LOG_FCP | LOG_FCP_ERROR,
2116 					 "9026 FCP Read Check Error "
2117 					 "and Underrun Data: x%x x%x x%x x%x\n",
2118 					 be32_to_cpu(fcpcmd->fcpDl),
2119 					 scsi_get_resid(cmnd), fcpi_parm,
2120 					 cmnd->cmnd[0]);
2121 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2122 			host_status = DID_ERROR;
2123 		}
2124 		/*
2125 		 * The cmnd->underflow is the minimum number of bytes that must
2126 		 * be transferred for this command.  Provided a sense condition
2127 		 * is not present, make sure the actual amount transferred is at
2128 		 * least the underflow value or fail.
2129 		 */
2130 		if (!(resp_info & SNS_LEN_VALID) &&
2131 		    (scsi_status == SAM_STAT_GOOD) &&
2132 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
2133 		     < cmnd->underflow)) {
2134 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2135 					 "9027 FCP command x%x residual "
2136 					 "underrun converted to error "
2137 					 "Data: x%x x%x x%x\n",
2138 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
2139 					 scsi_get_resid(cmnd), cmnd->underflow);
2140 			host_status = DID_ERROR;
2141 		}
2142 	} else if (resp_info & RESID_OVER) {
2143 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2144 				 "9028 FCP command x%x residual overrun error. "
2145 				 "Data: x%x x%x\n", cmnd->cmnd[0],
2146 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
2147 		host_status = DID_ERROR;
2148 
2149 	/*
2150 	 * Check the SLI validation that the entire transfer was actually done
2151 	 * (fcpi_parm should be zero). Apply this check only to reads.
2152 	 */
2153 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
2154 			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2155 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
2156 				 "9029 FCP Read Check Error Data: "
2157 				 "x%x x%x x%x x%x\n",
2158 				 be32_to_cpu(fcpcmd->fcpDl),
2159 				 be32_to_cpu(fcprsp->rspResId),
2160 				 fcpi_parm, cmnd->cmnd[0]);
2161 		host_status = DID_ERROR;
2162 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2163 	}
2164 
2165  out:
2166 	cmnd->result = ScsiResult(host_status, scsi_status);
2167 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
2168 }
2169 
2170 /**
2171  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
2172  * @phba: The Hba for which this call is being executed.
2173  * @pIocbIn: The command IOCBQ for the scsi cmnd.
2174  * @pIocbOut: The response IOCBQ for the scsi cmnd.
2175  *
2176  * This routine assigns scsi command result by looking into response IOCB
2177  * status field appropriately. This routine handles QUEUE FULL condition as
2178  * well by ramping down device queue depth.
2179  **/
2180 static void
2181 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2182 			struct lpfc_iocbq *pIocbOut)
2183 {
2184 	struct lpfc_scsi_buf *lpfc_cmd =
2185 		(struct lpfc_scsi_buf *) pIocbIn->context1;
2186 	struct lpfc_vport      *vport = pIocbIn->vport;
2187 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2188 	struct lpfc_nodelist *pnode = rdata->pnode;
2189 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2190 	int result;
2191 	struct scsi_device *tmp_sdev;
2192 	int depth = 0;
2193 	unsigned long flags;
2194 	struct lpfc_fast_path_event *fast_path_evt;
2195 	struct Scsi_Host *shost = cmd->device->host;
2196 	uint32_t queue_depth, scsi_id;
2197 
2198 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2199 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2200 	if (pnode && NLP_CHK_NODE_ACT(pnode))
2201 		atomic_dec(&pnode->cmd_pending);
2202 
2203 	if (lpfc_cmd->status) {
2204 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2205 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
2206 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2207 		else if (lpfc_cmd->status >= IOSTAT_CNT)
2208 			lpfc_cmd->status = IOSTAT_DEFAULT;
2209 
2210 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2211 				 "9030 FCP cmd x%x failed <%d/%d> "
2212 				 "status: x%x result: x%x Data: x%x x%x\n",
2213 				 cmd->cmnd[0],
2214 				 cmd->device ? cmd->device->id : 0xffff,
2215 				 cmd->device ? cmd->device->lun : 0xffff,
2216 				 lpfc_cmd->status, lpfc_cmd->result,
2217 				 pIocbOut->iocb.ulpContext,
2218 				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
2219 
2220 		switch (lpfc_cmd->status) {
2221 		case IOSTAT_FCP_RSP_ERROR:
2222 			/* Call FCP RSP handler to determine result */
2223 			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
2224 			break;
2225 		case IOSTAT_NPORT_BSY:
2226 		case IOSTAT_FABRIC_BSY:
2227 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2228 			fast_path_evt = lpfc_alloc_fast_evt(phba);
2229 			if (!fast_path_evt)
2230 				break;
2231 			fast_path_evt->un.fabric_evt.event_type =
2232 				FC_REG_FABRIC_EVENT;
2233 			fast_path_evt->un.fabric_evt.subcategory =
2234 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
2235 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
2236 			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2237 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
2238 					&pnode->nlp_portname,
2239 					sizeof(struct lpfc_name));
2240 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
2241 					&pnode->nlp_nodename,
2242 					sizeof(struct lpfc_name));
2243 			}
2244 			fast_path_evt->vport = vport;
2245 			fast_path_evt->work_evt.evt =
2246 				LPFC_EVT_FASTPATH_MGMT_EVT;
2247 			spin_lock_irqsave(&phba->hbalock, flags);
2248 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
2249 				&phba->work_list);
2250 			spin_unlock_irqrestore(&phba->hbalock, flags);
2251 			lpfc_worker_wake_up(phba);
2252 			break;
2253 		case IOSTAT_LOCAL_REJECT:
2254 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
2255 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
2256 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
2257 				cmd->result = ScsiResult(DID_REQUEUE, 0);
2258 				break;
2259 			}
2260 
2261 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
2262 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
2263 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
2264 				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2265 					/*
2266 					 * This is a response for a BG enabled
2267 					 * cmd. Parse BG error
2268 					 */
2269 					lpfc_parse_bg_err(phba, lpfc_cmd,
2270 							pIocbOut);
2271 					break;
2272 				} else {
2273 					lpfc_printf_vlog(vport, KERN_WARNING,
2274 							LOG_BG,
2275 							"9031 non-zero BGSTAT "
2276 							"on unprotected cmd\n");
2277 				}
2278 			}
2279 
2280 		/* else: fall through */
2281 		default:
2282 			cmd->result = ScsiResult(DID_ERROR, 0);
2283 			break;
2284 		}
2285 
2286 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
2287 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
2288 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
2289 						 SAM_STAT_BUSY);
2290 	} else {
2291 		cmd->result = ScsiResult(DID_OK, 0);
2292 	}
2293 
2294 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
2295 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
2296 
2297 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2298 				 "0710 Iodone <%d/%d> cmd %p, error "
2299 				 "x%x SNS x%x x%x Data: x%x x%x\n",
2300 				 cmd->device->id, cmd->device->lun, cmd,
2301 				 cmd->result, *lp, *(lp + 3), cmd->retries,
2302 				 scsi_get_resid(cmd));
2303 	}
2304 
2305 	lpfc_update_stats(phba, lpfc_cmd);
2306 	result = cmd->result;
2307 	if (vport->cfg_max_scsicmpl_time &&
2308 	   time_after(jiffies, lpfc_cmd->start_time +
2309 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
2310 		spin_lock_irqsave(shost->host_lock, flags);
2311 		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2312 			if (pnode->cmd_qdepth >
2313 				atomic_read(&pnode->cmd_pending) &&
2314 				(atomic_read(&pnode->cmd_pending) >
2315 				LPFC_MIN_TGT_QDEPTH) &&
2316 				((cmd->cmnd[0] == READ_10) ||
2317 				(cmd->cmnd[0] == WRITE_10)))
2318 				pnode->cmd_qdepth =
2319 					atomic_read(&pnode->cmd_pending);
2320 
2321 			pnode->last_change_time = jiffies;
2322 		}
2323 		spin_unlock_irqrestore(shost->host_lock, flags);
2324 	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2325 		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
2326 		   time_after(jiffies, pnode->last_change_time +
2327 			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
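			/* Ramp the target queue depth up by
			 * LPFC_TGTQ_RAMPUP_PCENT percent, capped at
			 * LPFC_MAX_TGT_QDEPTH.
			 */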
2328 			spin_lock_irqsave(shost->host_lock, flags);
2329 			pnode->cmd_qdepth += pnode->cmd_qdepth *
2330 				LPFC_TGTQ_RAMPUP_PCENT / 100;
2331 			if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
2332 				pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
2333 			pnode->last_change_time = jiffies;
2334 			spin_unlock_irqrestore(shost->host_lock, flags);
2335 		}
2336 	}
2337 
2338 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2339 
2340 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
2341 	queue_depth = cmd->device->queue_depth;
2342 	scsi_id = cmd->device->id;
2343 	cmd->scsi_done(cmd);
2344 
2345 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2346 		/*
2347 		 * If there is a thread waiting for command completion
2348 		 * wake up the thread.
2349 		 */
2350 		spin_lock_irqsave(shost->host_lock, flags);
2351 		lpfc_cmd->pCmd = NULL;
2352 		if (lpfc_cmd->waitq)
2353 			wake_up(lpfc_cmd->waitq);
2354 		spin_unlock_irqrestore(shost->host_lock, flags);
2355 		lpfc_release_scsi_buf(phba, lpfc_cmd);
2356 		return;
2357 	}
2358 
2359 
2360 	if (!result)
2361 		lpfc_rampup_queue_depth(vport, queue_depth);
2362 
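	/* If the ramp-up and queue-full intervals have both expired and the
	 * configured lun_queue_depth has not been reached, bump each matching
	 * LUN queue depth by one.
	 */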
2363 	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
2364 	   ((jiffies - pnode->last_ramp_up_time) >
2365 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2366 	   ((jiffies - pnode->last_q_full_time) >
2367 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
2368 	   (vport->cfg_lun_queue_depth > queue_depth)) {
2369 		shost_for_each_device(tmp_sdev, shost) {
2370 			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
2371 				if (tmp_sdev->id != scsi_id)
2372 					continue;
2373 				if (tmp_sdev->ordered_tags)
2374 					scsi_adjust_queue_depth(tmp_sdev,
2375 						MSG_ORDERED_TAG,
2376 						tmp_sdev->queue_depth+1);
2377 				else
2378 					scsi_adjust_queue_depth(tmp_sdev,
2379 						MSG_SIMPLE_TAG,
2380 						tmp_sdev->queue_depth+1);
2381 
2382 				pnode->last_ramp_up_time = jiffies;
2383 			}
2384 		}
2385 		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
2386 			0xFFFFFFFF,
2387 			queue_depth , queue_depth + 1);
2388 	}
2389 
2390 	/*
2391 	 * Check for queue full.  If the lun is reporting queue full, then
2392 	 * back off the lun queue depth to prevent target overloads.
2393 	 */
2394 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2395 	    NLP_CHK_NODE_ACT(pnode)) {
2396 		pnode->last_q_full_time = jiffies;
2397 
2398 		shost_for_each_device(tmp_sdev, shost) {
2399 			if (tmp_sdev->id != scsi_id)
2400 				continue;
2401 			depth = scsi_track_queue_full(tmp_sdev,
2402 					tmp_sdev->queue_depth - 1);
2403 		}
2404 		/*
2405 		 * The queue depth cannot be lowered any more.
2406 		 * Modify the returned error code to store
2407 		 * the final depth value set by
2408 		 * scsi_track_queue_full.
2409 		 */
2410 		if (depth == -1)
2411 			depth = shost->cmd_per_lun;
2412 
2413 		if (depth) {
2414 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2415 					 "0711 detected queue full - lun queue "
2416 					 "depth adjusted to %d.\n", depth);
2417 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
2418 				pnode, 0xFFFFFFFF,
2419 				depth+1, depth);
2420 		}
2421 	}
2422 
2423 	/*
2424 	 * If there is a thread waiting for command completion
2425 	 * wake up the thread.
2426 	 */
2427 	spin_lock_irqsave(shost->host_lock, flags);
2428 	lpfc_cmd->pCmd = NULL;
2429 	if (lpfc_cmd->waitq)
2430 		wake_up(lpfc_cmd->waitq);
2431 	spin_unlock_irqrestore(shost->host_lock, flags);
2432 
2433 	lpfc_release_scsi_buf(phba, lpfc_cmd);
2434 }
2435 
2436 /**
2437  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
2438  * @data: A pointer to the immediate command data portion of the IOCB.
2439  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
2440  *
2441  * The routine copies the entire FCP command from @fcp_cmnd to @data while
2442  * byte swapping the data to big endian format for transmission on the wire.
2443  **/
2444 static void
2445 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2446 {
2447 	int i, j;
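	/* Copy the FCP_CMND payload one 32-bit word at a time, swapping each
	 * word to big-endian wire order.
	 */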
2448 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
2449 	     i += sizeof(uint32_t), j++) {
2450 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
2451 	}
2452 }
2453 
2454 /**
2455  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2456  * @vport: The virtual port for which this call is being executed.
2457  * @lpfc_cmd: The scsi command which needs to send.
2458  * @pnode: Pointer to lpfc_nodelist.
2459  *
2460  * This routine initializes fcp_cmnd and iocb data structure from scsi command
2461  * to transfer for device with SLI3 interface spec.
2462  **/
2463 static void
2464 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2465 		    struct lpfc_nodelist *pnode)
2466 {
2467 	struct lpfc_hba *phba = vport->phba;
2468 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2469 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2470 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2471 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
2472 	int datadir = scsi_cmnd->sc_data_direction;
2473 	char tag[2];
2474 
2475 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2476 		return;
2477 
2478 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
2479 	/* clear task management bits */
2480 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
2481 
2482 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
2483 			&lpfc_cmd->fcp_cmnd->fcp_lun);
2484 
2485 	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
2486 
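	/* Map the midlayer tag message onto the FCP task attribute carried
	 * in fcpCntl1.
	 */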
2487 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
2488 		switch (tag[0]) {
2489 		case HEAD_OF_QUEUE_TAG:
2490 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
2491 			break;
2492 		case ORDERED_QUEUE_TAG:
2493 			fcp_cmnd->fcpCntl1 = ORDERED_Q;
2494 			break;
2495 		default:
2496 			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2497 			break;
2498 		}
2499 	} else
2500 		fcp_cmnd->fcpCntl1 = 0;
2501 
2502 	/*
2503 	 * There are three possibilities here - use scatter-gather segment, use
2504 	 * the single mapping, or neither.  Start the lpfc command prep by
2505 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2506 	 * data bde entry.
2507 	 */
2508 	if (scsi_sg_count(scsi_cmnd)) {
2509 		if (datadir == DMA_TO_DEVICE) {
2510 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2511 			if (phba->sli_rev < LPFC_SLI_REV4) {
2512 				iocb_cmd->un.fcpi.fcpi_parm = 0;
2513 				iocb_cmd->ulpPU = 0;
2514 			} else
2515 				iocb_cmd->ulpPU = PARM_READ_CHECK;
2516 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
2517 			phba->fc4OutputRequests++;
2518 		} else {
2519 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2520 			iocb_cmd->ulpPU = PARM_READ_CHECK;
2521 			fcp_cmnd->fcpCntl3 = READ_DATA;
2522 			phba->fc4InputRequests++;
2523 		}
2524 	} else {
2525 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2526 		iocb_cmd->un.fcpi.fcpi_parm = 0;
2527 		iocb_cmd->ulpPU = 0;
2528 		fcp_cmnd->fcpCntl3 = 0;
2529 		phba->fc4ControlRequests++;
2530 	}
2531 	if (phba->sli_rev == 3 &&
2532 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2533 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2534 	/*
2535 	 * Finish initializing those IOCB fields that are independent
2536 	 * of the scsi_cmnd request_buffer
2537 	 */
2538 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
2539 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2540 		piocbq->iocb.ulpFCP2Rcvy = 1;
2541 	else
2542 		piocbq->iocb.ulpFCP2Rcvy = 0;
2543 
2544 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2545 	piocbq->context1  = lpfc_cmd;
2546 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2547 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2548 	piocbq->vport = vport;
2549 }
2550 
2551 /**
2552  * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2553  * @vport: The virtual port for which this call is being executed.
2554  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2555  * @lun: Logical unit number.
2556  * @task_mgmt_cmd: SCSI task management command.
2557  *
2558  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2559  * for device with SLI-3 interface spec.
2560  *
2561  * Return codes:
2562  *   0 - Error
2563  *   1 - Success
2564  **/
2565 static int
2566 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2567 			     struct lpfc_scsi_buf *lpfc_cmd,
2568 			     unsigned int lun,
2569 			     uint8_t task_mgmt_cmd)
2570 {
2571 	struct lpfc_iocbq *piocbq;
2572 	IOCB_t *piocb;
2573 	struct fcp_cmnd *fcp_cmnd;
2574 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2575 	struct lpfc_nodelist *ndlp = rdata->pnode;
2576 
2577 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2578 	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2579 		return 0;
2580 
2581 	piocbq = &(lpfc_cmd->cur_iocbq);
2582 	piocbq->vport = vport;
2583 
2584 	piocb = &piocbq->iocb;
2585 
2586 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
2587 	/* Clear out any old data in the FCP command area */
2588 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2589 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2590 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2591 	if (vport->phba->sli_rev == 3 &&
2592 	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2593 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2594 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2595 	piocb->ulpContext = ndlp->nlp_rpi;
2596 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2597 		piocb->ulpFCP2Rcvy = 1;
2598 	}
2599 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2600 
2601 	/* ulpTimeout is only one byte */
2602 	if (lpfc_cmd->timeout > 0xff) {
2603 		/*
2604 		 * Do not timeout the command at the firmware level.
2605 		 * The driver will provide the timeout mechanism.
2606 		 */
2607 		piocb->ulpTimeout = 0;
2608 	} else
2609 		piocb->ulpTimeout = lpfc_cmd->timeout;
2610 
2611 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
2612 		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2613 
2614 	return 1;
2615 }
2616 
2617 /**
2618  * lpfc_scsi_api_table_setup - Set up the scsi api function jump table
2619  * @phba: The hba struct for which this call is being executed.
2620  * @dev_grp: The HBA PCI-Device group number.
2621  *
2622  * This routine sets up the SCSI interface API function jump table in @phba
2623  * struct.
2624  * Returns: 0 - success, -ENODEV - failure.
2625  **/
2626 int
2627 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2628 {
2629 
2630 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2631 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2632 	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2633 
2634 	switch (dev_grp) {
2635 	case LPFC_PCI_DEV_LP:
2636 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2637 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2638 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2639 		break;
2640 	case LPFC_PCI_DEV_OC:
2641 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2642 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2643 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2644 		break;
2645 	default:
2646 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2647 				"1418 Invalid HBA PCI-device group: 0x%x\n",
2648 				dev_grp);
2649 		return -ENODEV;
2650 		break;
2651 	}
2652 	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2653 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2654 	return 0;
2655 }
2656 
2657 /**
2658  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2659  * @phba: The Hba for which this call is being executed.
2660  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2661  * @rspiocbq: Pointer to lpfc_iocbq data structure.
2662  *
2663  * This routine is the IOCB completion routine for device reset and target
2664  * reset. It releases the scsi buffer associated with the command.
2665  **/
2666 static void
2667 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2668 			struct lpfc_iocbq *cmdiocbq,
2669 			struct lpfc_iocbq *rspiocbq)
2670 {
2671 	struct lpfc_scsi_buf *lpfc_cmd =
2672 		(struct lpfc_scsi_buf *) cmdiocbq->context1;
2673 	if (lpfc_cmd)
2674 		lpfc_release_scsi_buf(phba, lpfc_cmd);
2675 	return;
2676 }
2677 
2678 /**
2679  * lpfc_info - Info entry point of scsi_host_template data structure
2680  * @host: The scsi host for which this call is being executed.
2681  *
2682  * This routine provides module information about hba.
2683  *
2684  * Return code:
2685  *   Pointer to char - Success.
2686  **/
2687 const char *
2688 lpfc_info(struct Scsi_Host *host)
2689 {
2690 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2691 	struct lpfc_hba   *phba = vport->phba;
2692 	int len;
2693 	static char  lpfcinfobuf[384];
2694 
2695 	memset(lpfcinfobuf, 0, 384);
2696 	if (phba && phba->pcidev) {
2697 		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2698 		len = strlen(lpfcinfobuf);
2699 		snprintf(lpfcinfobuf + len,
2700 			384-len,
2701 			" on PCI bus %02x device %02x irq %d",
2702 			phba->pcidev->bus->number,
2703 			phba->pcidev->devfn,
2704 			phba->pcidev->irq);
2705 		len = strlen(lpfcinfobuf);
2706 		if (phba->Port[0]) {
2707 			snprintf(lpfcinfobuf + len,
2708 				 384-len,
2709 				 " port %s",
2710 				 phba->Port);
2711 		}
2712 	}
2713 	return lpfcinfobuf;
2714 }
2715 
2716 /**
2717  * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
2718  * @phba: The Hba for which this call is being executed.
2719  *
2720  * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
2721  * The default value of cfg_poll_tmo is 10 milliseconds.
2722  **/
2723 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2724 {
2725 	unsigned long  poll_tmo_expires =
2726 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2727 
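	/* Only rearm the poll timer while commands are outstanding on the
	 * FCP ring (txcmplq is not empty).
	 */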
2728 	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2729 		mod_timer(&phba->fcp_poll_timer,
2730 			  poll_tmo_expires);
2731 }
2732 
2733 /**
2734  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2735  * @phba: The Hba for which this call is being executed.
2736  *
2737  * This routine starts the fcp_poll_timer of @phba.
2738  **/
2739 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2740 {
2741 	lpfc_poll_rearm_timer(phba);
2742 }
2743 
2744 /**
2745  * lpfc_poll_timeout - Restart polling timer
2746  * @ptr: lpfc_hba data structure pointer, passed as an unsigned long.
2747  *
2748  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2749  * and the FCP ring interrupt is disabled.
2750  **/
2751 
2752 void lpfc_poll_timeout(unsigned long ptr)
2753 {
2754 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2755 
2756 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2757 		lpfc_sli_poll_fcp_ring (phba);
2758 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2759 			lpfc_poll_rearm_timer(phba);
2760 	}
2761 }
2762 
2763 /**
2764  * lpfc_queuecommand - scsi_host_template queuecommand entry point
2765  * @cmnd: Pointer to scsi_cmnd data structure.
2766  * @done: Pointer to done routine.
2767  *
2768  * The driver registers this routine with the scsi midlayer to submit a @cmnd.
2769  * This routine prepares an IOCB from the scsi command and provides it to the
2770  * firmware. The @done callback is invoked after the driver finishes the command.
2771  *
2772  * Return value :
2773  *   0 - Success
2774  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2775  **/
2776 static int
2777 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2778 {
2779 	struct Scsi_Host  *shost = cmnd->device->host;
2780 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2781 	struct lpfc_hba   *phba = vport->phba;
2782 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2783 	struct lpfc_nodelist *ndlp = rdata->pnode;
2784 	struct lpfc_scsi_buf *lpfc_cmd;
2785 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2786 	int err;
2787 
2788 	err = fc_remote_port_chkready(rport);
2789 	if (err) {
2790 		cmnd->result = err;
2791 		goto out_fail_command;
2792 	}
2793 
2794 	if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2795 		scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2796 
2797 		printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2798 				"str=%s without registering for BlockGuard - "
2799 				"Rejecting command\n",
2800 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2801 				dif_op_str[scsi_get_prot_op(cmnd)]);
2802 		goto out_fail_command;
2803 	}
2804 
2805 	/*
2806 	 * Catch race where our node has transitioned, but the
2807 	 * transport is still transitioning.
2808 	 */
2809 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2810 		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2811 		goto out_fail_command;
2812 	}
2813 	if (vport->cfg_max_scsicmpl_time &&
2814 		(atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2815 		goto out_host_busy;
2816 
2817 	lpfc_cmd = lpfc_get_scsi_buf(phba);
2818 	if (lpfc_cmd == NULL) {
2819 		lpfc_rampdown_queue_depth(phba);
2820 
2821 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2822 				 "0707 driver's buffer pool is empty, "
2823 				 "IO busied\n");
2824 		goto out_host_busy;
2825 	}
2826 
2827 	/*
2828 	 * Store the midlayer's command structure for the completion phase
2829 	 * and complete the command initialization.
2830 	 */
2831 	lpfc_cmd->pCmd  = cmnd;
2832 	lpfc_cmd->rdata = rdata;
2833 	lpfc_cmd->timeout = 0;
2834 	lpfc_cmd->start_time = jiffies;
2835 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2836 	cmnd->scsi_done = done;
2837 
2838 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2839 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2840 				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2841 				"str=%s\n",
2842 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2843 				dif_op_str[scsi_get_prot_op(cmnd)]);
2844 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2845 				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2846 				"%02x %02x %02x %02x %02x\n",
2847 				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2848 				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2849 				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2850 				cmnd->cmnd[9]);
2851 		if (cmnd->cmnd[0] == READ_10)
2852 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2853 					"9035 BLKGRD: READ @ sector %llu, "
2854 					"count %u\n",
2855 					(unsigned long long)scsi_get_lba(cmnd),
2856 					blk_rq_sectors(cmnd->request));
2857 		else if (cmnd->cmnd[0] == WRITE_10)
2858 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2859 					"9036 BLKGRD: WRITE @ sector %llu, "
2860 					"count %u cmd=%p\n",
2861 					(unsigned long long)scsi_get_lba(cmnd),
2862 					blk_rq_sectors(cmnd->request),
2863 					cmnd);
2864 
2865 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2866 	} else {
2867 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2868 				"9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2869 				" str=%s\n",
2870 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2871 				dif_op_str[scsi_get_prot_op(cmnd)]);
2872 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2873 				 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2874 				 "%02x %02x %02x %02x %02x\n",
2875 				 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2876 				 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2877 				 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2878 				 cmnd->cmnd[9]);
2879 		if (cmnd->cmnd[0] == READ_10)
2880 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2881 					 "9040 dbg: READ @ sector %llu, "
2882 					 "count %u\n",
2883 					 (unsigned long long)scsi_get_lba(cmnd),
2884 					 blk_rq_sectors(cmnd->request));
2885 		else if (cmnd->cmnd[0] == WRITE_10)
2886 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2887 					 "9041 dbg: WRITE @ sector %llu, "
2888 					 "count %u cmd=%p\n",
2889 					 (unsigned long long)scsi_get_lba(cmnd),
2890 					 blk_rq_sectors(cmnd->request), cmnd);
2891 		else
2892 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2893 					 "9042 dbg: parser not implemented\n");
2894 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2895 	}
2896 
2897 	if (err)
2898 		goto out_host_busy_free_buf;
2899 
2900 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2901 
2902 	atomic_inc(&ndlp->cmd_pending);
2903 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
2904 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2905 	if (err) {
2906 		atomic_dec(&ndlp->cmd_pending);
2907 		goto out_host_busy_free_buf;
2908 	}
2909 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2910 		lpfc_sli_poll_fcp_ring(phba);
2911 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2912 			lpfc_poll_rearm_timer(phba);
2913 	}
2914 
2915 	return 0;
2916 
2917  out_host_busy_free_buf:
2918 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2919 	lpfc_release_scsi_buf(phba, lpfc_cmd);
2920  out_host_busy:
2921 	return SCSI_MLQUEUE_HOST_BUSY;
2922 
2923  out_fail_command:
2924 	done(cmnd);
2925 	return 0;
2926 }
2927 
2928 /**
2929  * lpfc_block_error_handler - Routine to block the error handler
2930  * @cmnd: Pointer to scsi_cmnd data structure.
2931  *
2932  *  This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2933  **/
2934 static void
2935 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2936 {
2937 	struct Scsi_Host *shost = cmnd->device->host;
2938 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2939 
2940 	spin_lock_irq(shost->host_lock);
2941 	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2942 		spin_unlock_irq(shost->host_lock);
2943 		msleep(1000);
2944 		spin_lock_irq(shost->host_lock);
2945 	}
2946 	spin_unlock_irq(shost->host_lock);
2947 	return;
2948 }
2949 
2950 /**
2951  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2952  * @cmnd: Pointer to scsi_cmnd data structure.
2953  *
2954  * This routine aborts @cmnd pending in base driver.
2955  *
2956  * Return code :
2957  *   0x2003 - Error
2958  *   0x2002 - Success
2959  **/
2960 static int
2961 lpfc_abort_handler(struct scsi_cmnd *cmnd)
2962 {
2963 	struct Scsi_Host  *shost = cmnd->device->host;
2964 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2965 	struct lpfc_hba   *phba = vport->phba;
2966 	struct lpfc_iocbq *iocb;
2967 	struct lpfc_iocbq *abtsiocb;
2968 	struct lpfc_scsi_buf *lpfc_cmd;
2969 	IOCB_t *cmd, *icmd;
2970 	int ret = SUCCESS;
2971 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2972 
2973 	lpfc_block_error_handler(cmnd);
2974 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2975 	BUG_ON(!lpfc_cmd);
2976 
2977 	/*
2978 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
2979 	 * points to a different SCSI command, then the driver has
2980 	 * already completed this command, but the midlayer did not
2981 	 * see the completion before the eh fired.  Just return
2982 	 * SUCCESS.
2983 	 */
2984 	iocb = &lpfc_cmd->cur_iocbq;
2985 	if (lpfc_cmd->pCmd != cmnd)
2986 		goto out;
2987 
2988 	BUG_ON(iocb->context1 != lpfc_cmd);
2989 
2990 	abtsiocb = lpfc_sli_get_iocbq(phba);
2991 	if (abtsiocb == NULL) {
2992 		ret = FAILED;
2993 		goto out;
2994 	}
2995 
2996 	/*
2997 	 * The scsi command cannot be in the txq; it is in flight because
2998 	 * pCmd is still pointing at the SCSI command we have to abort. There
2999 	 * is no need to search the txcmplq. Just send an abort to the FW.
3000 	 */
3001 
3002 	cmd = &iocb->iocb;
3003 	icmd = &abtsiocb->iocb;
3004 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
3005 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
3006 	if (phba->sli_rev == LPFC_SLI_REV4)
3007 		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3008 	else
3009 		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
3010 
3011 	icmd->ulpLe = 1;
3012 	icmd->ulpClass = cmd->ulpClass;
3013 	if (lpfc_is_link_up(phba))
3014 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
3015 	else
3016 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
3017 
3018 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3019 	abtsiocb->vport = vport;
3020 	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3021 	    IOCB_ERROR) {
3022 		lpfc_sli_release_iocbq(phba, abtsiocb);
3023 		ret = FAILED;
3024 		goto out;
3025 	}
3026 
3027 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3028 		lpfc_sli_poll_fcp_ring (phba);
3029 
3030 	lpfc_cmd->waitq = &waitq;
3031 	/* Wait for the abort to complete, bounded by twice the devloss timeout */
3032 	wait_event_timeout(waitq,
3033 			  (lpfc_cmd->pCmd != cmnd),
3034 			   (2*vport->cfg_devloss_tmo*HZ));
3035 
3036 	spin_lock_irq(shost->host_lock);
3037 	lpfc_cmd->waitq = NULL;
3038 	spin_unlock_irq(shost->host_lock);
3039 
3040 	if (lpfc_cmd->pCmd == cmnd) {
3041 		ret = FAILED;
3042 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3043 				 "0748 abort handler timed out waiting "
3044 				 "for abort to complete: ret %#x, ID %d, "
3045 				 "LUN %d, snum %#lx\n",
3046 				 ret, cmnd->device->id, cmnd->device->lun,
3047 				 cmnd->serial_number);
3048 	}
3049 
3050  out:
3051 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3052 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3053 			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3054 			 cmnd->device->lun, cmnd->serial_number);
3055 	return ret;
3056 }
3057 
3058 static char *
3059 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3060 {
3061 	switch (task_mgmt_cmd) {
3062 	case FCP_ABORT_TASK_SET:
3063 		return "ABORT_TASK_SET";
3064 	case FCP_CLEAR_TASK_SET:
3065 		return "FCP_CLEAR_TASK_SET";
3066 	case FCP_BUS_RESET:
3067 		return "FCP_BUS_RESET";
3068 	case FCP_LUN_RESET:
3069 		return "FCP_LUN_RESET";
3070 	case FCP_TARGET_RESET:
3071 		return "FCP_TARGET_RESET";
3072 	case FCP_CLEAR_ACA:
3073 		return "FCP_CLEAR_ACA";
3074 	case FCP_TERMINATE_TASK:
3075 		return "FCP_TERMINATE_TASK";
3076 	default:
3077 		return "unknown";
3078 	}
3079 }
3080 
3081 /**
3082  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3083  * @vport: The virtual port for which this call is being executed.
3084  * @rdata: Pointer to remote port local data
3085  * @tgt_id: Target ID of remote device.
3086  * @lun_id: Lun number for the TMF
3087  * @task_mgmt_cmd: type of TMF to send
3088  *
3089  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
3090  * a remote port.
3091  *
3092  * Return Code:
3093  *   0x2003 - Error
3094  *   0x2002 - Success.
3095  **/
3096 static int
3097 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3098 		    unsigned  tgt_id, unsigned int lun_id,
3099 		    uint8_t task_mgmt_cmd)
3100 {
3101 	struct lpfc_hba   *phba = vport->phba;
3102 	struct lpfc_scsi_buf *lpfc_cmd;
3103 	struct lpfc_iocbq *iocbq;
3104 	struct lpfc_iocbq *iocbqrsp;
3105 	int ret;
3106 	int status;
3107 
3108 	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
3109 		return FAILED;
3110 
3111 	lpfc_cmd = lpfc_get_scsi_buf(phba);
3112 	if (lpfc_cmd == NULL)
3113 		return FAILED;
3114 	lpfc_cmd->timeout = 60;
3115 	lpfc_cmd->rdata = rdata;
3116 
3117 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3118 					   task_mgmt_cmd);
3119 	if (!status) {
3120 		lpfc_release_scsi_buf(phba, lpfc_cmd);
3121 		return FAILED;
3122 	}
3123 
3124 	iocbq = &lpfc_cmd->cur_iocbq;
3125 	iocbqrsp = lpfc_sli_get_iocbq(phba);
3126 	if (iocbqrsp == NULL) {
3127 		lpfc_release_scsi_buf(phba, lpfc_cmd);
3128 		return FAILED;
3129 	}
3130 
3131 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3132 			 "0702 Issue %s to TGT %d LUN %d "
3133 			 "rpi x%x nlp_flag x%x\n",
3134 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3135 			 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
3136 
3137 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3138 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
3139 	if (status != IOCB_SUCCESS) {
3140 		if (status == IOCB_TIMEDOUT) {
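			/* Let lpfc_tskmgmt_def_cmpl release the scsi buffer
			 * when the delayed completion finally arrives.
			 */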
3141 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3142 			ret = TIMEOUT_ERROR;
3143 		} else
3144 			ret = FAILED;
3145 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3146 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3147 			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3148 			 lpfc_taskmgmt_name(task_mgmt_cmd),
3149 			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3150 			 iocbqrsp->iocb.un.ulpWord[4]);
3151 	} else
3152 		ret = SUCCESS;
3153 
3154 	lpfc_sli_release_iocbq(phba, iocbqrsp);
3155 
3156 	if (ret != TIMEOUT_ERROR)
3157 		lpfc_release_scsi_buf(phba, lpfc_cmd);
3158 
3159 	return ret;
3160 }
3161 
3162 /**
3163  * lpfc_chk_tgt_mapped - Wait for the scsi target to reach the MAPPED state
3164  * @vport: The virtual port to check on
3165  * @cmnd: Pointer to scsi_cmnd data structure.
3166  *
3167  * This routine delays until the scsi target (aka rport) for the
3168  * command exists (is present and logged in) or we declare it non-existent.
3169  *
3170  * Return code :
3171  *  0x2003 - Error
3172  *  0x2002 - Success
3173  **/
3174 static int
3175 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3176 {
3177 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3178 	struct lpfc_nodelist *pnode = rdata->pnode;
3179 	unsigned long later;
3180 
3181 	/*
3182 	 * If target is not in a MAPPED state, delay until
3183 	 * target is rediscovered or devloss timeout expires.
3184 	 */
3185 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
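	/* Poll every 500ms until the node reaches the MAPPED state or the
	 * 2 * devloss_tmo window expires.
	 */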
3186 	while (time_after(later, jiffies)) {
3187 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3188 			return FAILED;
3189 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3190 			return SUCCESS;
3191 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3192 		rdata = cmnd->device->hostdata;
3193 		if (!rdata)
3194 			return FAILED;
3195 		pnode = rdata->pnode;
3196 	}
3197 	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3198 	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3199 		return FAILED;
3200 	return SUCCESS;
3201 }
3202 
3203 /**
3204  * lpfc_reset_flush_io_context - Flush outstanding I/O after a reset TMF
3205  * @vport: The virtual port (scsi_host) for the flush context
3206  * @tgt_id: If aborting by Target context - specifies the target id
3207  * @lun_id: If aborting by Lun context - specifies the lun id
3208  * @context: specifies the context level to flush at.
3209  *
3210  * After a reset condition via TMF, we need to flush orphaned i/o
3211  * contexts from the adapter. This routine aborts any contexts
3212  * outstanding, then waits for their completions. The wait is
3213  * bounded by devloss_tmo though.
3214  *
3215  * Return code :
3216  *  0x2003 - Error
3217  *  0x2002 - Success
3218  **/
3219 static int
3220 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3221 			uint64_t lun_id, lpfc_ctx_cmd context)
3222 {
3223 	struct lpfc_hba   *phba = vport->phba;
3224 	unsigned long later;
3225 	int cnt;
3226 
3227 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3228 	if (cnt)
3229 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
3230 				    tgt_id, lun_id, context);
3231 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
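	/* Poll every 20ms for the aborted I/Os to drain, bounded by twice
	 * the devloss timeout.
	 */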
3232 	while (time_after(later, jiffies) && cnt) {
3233 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3234 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3235 	}
3236 	if (cnt) {
3237 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3238 			"0724 I/O flush failure for context %s : cnt x%x\n",
3239 			((context == LPFC_CTX_LUN) ? "LUN" :
3240 			 ((context == LPFC_CTX_TGT) ? "TGT" :
3241 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3242 			cnt);
3243 		return FAILED;
3244 	}
3245 	return SUCCESS;
3246 }
3247 
3248 /**
3249  * lpfc_device_reset_handler - scsi_host_template eh_device_reset_handler entry point
3250  * @cmnd: Pointer to scsi_cmnd data structure.
3251  *
3252  * This routine does a device reset by sending a LUN_RESET task management
3253  * command.
3254  *
3255  * Return code :
3256  *  0x2003 - Error
3257  *  0x2002 - Success
3258  **/
3259 static int
3260 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3261 {
3262 	struct Scsi_Host  *shost = cmnd->device->host;
3263 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3264 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3265 	struct lpfc_nodelist *pnode = rdata->pnode;
3266 	unsigned tgt_id = cmnd->device->id;
3267 	unsigned int lun_id = cmnd->device->lun;
3268 	struct lpfc_scsi_event_header scsi_event;
3269 	int status;
3270 
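	/* Wait for the rport to come out of the blocked state before resetting */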
3271 	lpfc_block_error_handler(cmnd);
3272 
3273 	status = lpfc_chk_tgt_mapped(vport, cmnd);
3274 	if (status == FAILED) {
3275 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3276 			"0721 Device Reset rport failure: rdata x%p\n", rdata);
3277 		return FAILED;
3278 	}
3279 
3280 	scsi_event.event_type = FC_REG_SCSI_EVENT;
3281 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3282 	scsi_event.lun = lun_id;
3283 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3284 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3285 
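	/* Notify user space of the LUN reset via an FC vendor-unique event */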
3286 	fc_host_post_vendor_event(shost, fc_get_event_number(),
3287 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3288 
3289 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3290 						FCP_LUN_RESET);
3291 
3292 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3293 			 "0713 SCSI layer issued Device Reset (%d, %d) "
3294 			 "return x%x\n", tgt_id, lun_id, status);
3295 
3296 	/*
3297 	 * We have to clean up the outstanding i/o here: it may have been
3298 	 * orphaned by the TMF, or, if the TMF failed, it may be in an
3299 	 * indeterminate state.  So continue on regardless.
3300 	 * We report success only if all of the i/o aborts complete.
3301 	 */
3302 	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3303 						LPFC_CTX_LUN);
3304 	return status;
3305 }
3306 
3307 /**
3308  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3309  * @cmnd: Pointer to scsi_cmnd data structure.
3310  *
3311  * This routine does a target reset by sending a TARGET_RESET task management
3312  * command.
3313  *
3314  * Return code :
3315  *  0x2003 - Error
3316  *  0x2002 - Success
3317  **/
3318 static int
3319 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3320 {
3321 	struct Scsi_Host  *shost = cmnd->device->host;
3322 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3323 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3324 	struct lpfc_nodelist *pnode = rdata->pnode;
3325 	unsigned tgt_id = cmnd->device->id;
3326 	unsigned int lun_id = cmnd->device->lun;
3327 	struct lpfc_scsi_event_header scsi_event;
3328 	int status;
3329 
3330 	lpfc_block_error_handler(cmnd);
3331 
3332 	status = lpfc_chk_tgt_mapped(vport, cmnd);
3333 	if (status == FAILED) {
3334 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3335 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
3336 		return FAILED;
3337 	}
3338 
3339 	scsi_event.event_type = FC_REG_SCSI_EVENT;
3340 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3341 	scsi_event.lun = 0;
3342 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3343 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3344 
3345 	fc_host_post_vendor_event(shost, fc_get_event_number(),
3346 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3347 
3348 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3349 					FCP_TARGET_RESET);
3350 
3351 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 			 "0723 SCSI layer issued Target Reset (%d, %d) "
3353 			 "return x%x\n", tgt_id, lun_id, status);
3354 
3355 	/*
3356 	 * We have to clean up the outstanding i/o here: it may have been
3357 	 * orphaned by the TMF, or, if the TMF failed, it may be in an
3358 	 * indeterminate state.  So continue on regardless.
3359 	 * We report success only if all of the i/o aborts complete.
3360 	 */
3361 	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3362 					LPFC_CTX_TGT);
3363 	return status;
3364 }
3365 
3366 /**
3367  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
3368  * @cmnd: Pointer to scsi_cmnd data structure.
3369  *
3370  * This routine issues a target reset to all targets on @cmnd->device->host.
3371  * This emulates Parallel SCSI Bus Reset Semantics.
3372  *
3373  * Return code :
3374  *  0x2003 - Error
3375  *  0x2002 - Success
3376  **/
3377 static int
3378 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3379 {
3380 	struct Scsi_Host  *shost = cmnd->device->host;
3381 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3382 	struct lpfc_nodelist *ndlp = NULL;
3383 	struct lpfc_scsi_event_header scsi_event;
3384 	int match;
3385 	int ret = SUCCESS, status, i;
3386 
3387 	scsi_event.event_type = FC_REG_SCSI_EVENT;
3388 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
3389 	scsi_event.lun = 0;
3390 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
3391 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
3392 
3393 	fc_host_post_vendor_event(shost, fc_get_event_number(),
3394 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3395 
3396 	lpfc_block_error_handler(cmnd);
3397 
3398 	/*
3399 	 * Since the driver manages a single bus device, reset all
3400 	 * targets known to the driver.  Should any target reset
3401 	 * fail, this routine returns failure to the midlayer.
3402 	 */
3403 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
3404 		/* Search for mapped node by target ID */
3405 		match = 0;
3406 		spin_lock_irq(shost->host_lock);
3407 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3408 			if (!NLP_CHK_NODE_ACT(ndlp))
3409 				continue;
3410 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
3411 			    ndlp->nlp_sid == i &&
3412 			    ndlp->rport) {
3413 				match = 1;
3414 				break;
3415 			}
3416 		}
3417 		spin_unlock_irq(shost->host_lock);
3418 		if (!match)
3419 			continue;
3420 
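		/* The rport's dd_data is the lpfc_rport_data for this target */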
3421 		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
3422 					i, 0, FCP_TARGET_RESET);
3423 
3424 		if (status != SUCCESS) {
3425 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3426 					 "0700 Bus Reset on target %d failed\n",
3427 					 i);
3428 			ret = FAILED;
3429 		}
3430 	}
3431 	/*
3432 	 * We have to clean up the outstanding i/o here: it may have been
3433 	 * orphaned by the TMFs above, or, if any of the TMFs failed, it may
3434 	 * be in an indeterminate state.
3435 	 * We report success only if all of the i/o aborts complete.
3436 	 */
3437 
3438 	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
3439 	if (status != SUCCESS)
3440 		ret = FAILED;
3441 
3442 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3443 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
3444 	return ret;
3445 }
3446 
3447 /**
3448  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
3449  * @sdev: Pointer to scsi_device.
3450  *
3451  * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
3452  * available list of scsi buffers.  It also makes sure no more scsi buffers
3453  * are allocated than the HBA limit conveyed to the midlayer.  This list of
3454  * scsi buffers exists for the lifetime of the driver.
3455  *
3456  * Return codes:
3457  *   non-0 - Error
3458  *   0 - Success
3459  **/
3460 static int
3461 lpfc_slave_alloc(struct scsi_device *sdev)
3462 {
3463 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3464 	struct lpfc_hba   *phba = vport->phba;
3465 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3466 	uint32_t total = 0;
3467 	uint32_t num_to_alloc = 0;
3468 	int num_allocated = 0;
3469 
3470 	if (!rport || fc_remote_port_chkready(rport))
3471 		return -ENXIO;
3472 
3473 	sdev->hostdata = rport->dd_data;
3474 
3475 	/*
3476 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
3477 	 * available list of scsi buffers.  Don't allocate more than the
3478 	 * HBA limit conveyed to the midlayer via the host structure.  The
3479 	 * formula accounts for the lun_queue_depth + error handlers + 1
3480 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
3481 	 */
3482 	total = phba->total_scsi_bufs;
3483 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
3484 
3485 	/* Allow some exchanges to be available always to complete discovery */
3486 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
3487 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3488 				 "0704 At limitation of %d preallocated "
3489 				 "command buffers\n", total);
3490 		return 0;
3492 	} else if (total + num_to_alloc >
3493 		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
3494 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3495 				 "0705 Allocation request of %d "
3496 				 "command buffers will exceed max of %d.  "
3497 				 "Reducing allocation request to %d.\n",
3498 				 num_to_alloc, phba->cfg_hba_queue_depth,
3499 				 (phba->cfg_hba_queue_depth - total));
3500 		num_to_alloc = phba->cfg_hba_queue_depth - total;
3501 	}
3502 	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
3503 	if (num_to_alloc != num_allocated) {
3504 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3505 				 "0708 Allocation request of %d "
3506 				 "command buffers did not succeed.  "
3507 				 "Allocated %d buffers.\n",
3508 				 num_to_alloc, num_allocated);
3509 	}
3510 	return 0;
3511 }
3512 
3513 /**
3514  * lpfc_slave_configure - scsi_host_template slave_configure entry point
3515  * @sdev: Pointer to scsi_device.
3516  *
3517  * This routine configures the following items:
3518  *   - Tag command queuing support for @sdev, if supported.
3519  *   - The dev_loss_tmo value of the fc_rport.
3520  *   - SLI polling for the fcp ring if the ENABLE_FCP_RING_POLLING flag is set.
3521  *
3522  * Return codes:
3523  *   0 - Success
3524  **/
3525 static int
3526 lpfc_slave_configure(struct scsi_device *sdev)
3527 {
3528 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3529 	struct lpfc_hba   *phba = vport->phba;
3530 	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);
3531 
3532 	if (sdev->tagged_supported)
3533 		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
3534 	else
3535 		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
3536 
3537 	/*
3538 	 * Initialize the fc transport attributes for the target
3539 	 * containing this scsi device.  Also note that the driver's
3540 	 * target pointer is stored in the starget_data for the
3541 	 * driver's sysfs entry point functions.
3542 	 */
3543 	rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3544 
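	/*
	 * In polled mode, service the FCP ring now and re-arm the poll timer
	 * when ring interrupts are disabled.
	 */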
3545 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3546 		lpfc_sli_poll_fcp_ring(phba);
3547 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3548 			lpfc_poll_rearm_timer(phba);
3549 	}
3550 
3551 	return 0;
3552 }
3553 
3554 /**
3555  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
3556  * @sdev: Pointer to scsi_device.
3557  *
3558  * This routine sets the @sdev hostdata field to null.
3559  **/
3560 static void
3561 lpfc_slave_destroy(struct scsi_device *sdev)
3562 {
3563 	sdev->hostdata = NULL;
3564 	return;
3565 }
3566 
3567 
3568 struct scsi_host_template lpfc_template = {
3569 	.module			= THIS_MODULE,
3570 	.name			= LPFC_DRIVER_NAME,
3571 	.info			= lpfc_info,
3572 	.queuecommand		= lpfc_queuecommand,
3573 	.eh_abort_handler	= lpfc_abort_handler,
3574 	.eh_device_reset_handler = lpfc_device_reset_handler,
3575 	.eh_target_reset_handler = lpfc_target_reset_handler,
3576 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
3577 	.slave_alloc		= lpfc_slave_alloc,
3578 	.slave_configure	= lpfc_slave_configure,
3579 	.slave_destroy		= lpfc_slave_destroy,
3580 	.scan_finished		= lpfc_scan_finished,
3581 	.this_id		= -1,
3582 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
3583 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
3584 	.use_clustering		= ENABLE_CLUSTERING,
3585 	.shost_attrs		= lpfc_hba_attrs,
3586 	.max_sectors		= 0xFFFF,
3587 	.vendor_id		= LPFC_NL_VENDOR_ID,
3588 };
3589 
3590 struct scsi_host_template lpfc_vport_template = {
3591 	.module			= THIS_MODULE,
3592 	.name			= LPFC_DRIVER_NAME,
3593 	.info			= lpfc_info,
3594 	.queuecommand		= lpfc_queuecommand,
3595 	.eh_abort_handler	= lpfc_abort_handler,
3596 	.eh_device_reset_handler = lpfc_device_reset_handler,
3597 	.eh_target_reset_handler = lpfc_target_reset_handler,
3598 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
3599 	.slave_alloc		= lpfc_slave_alloc,
3600 	.slave_configure	= lpfc_slave_configure,
3601 	.slave_destroy		= lpfc_slave_destroy,
3602 	.scan_finished		= lpfc_scan_finished,
3603 	.this_id		= -1,
3604 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
3605 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
3606 	.use_clustering		= ENABLE_CLUSTERING,
3607 	.shost_attrs		= lpfc_vport_attrs,
3608 	.max_sectors		= 0xFFFF,
3609 };
3610