xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_bsg.c (revision 2f190ac2)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  *                                                                 *
10  * This program is free software; you can redistribute it and/or   *
11  * modify it under the terms of version 2 of the GNU General       *
12  * Public License as published by the Free Software Foundation.    *
13  * This program is distributed in the hope that it will be useful. *
14  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
15  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
16  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
17  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
19  * more details, a copy of which can be found in the file COPYING  *
20  * included with this package.                                     *
21  *******************************************************************/
22 
23 #include <linux/interrupt.h>
24 #include <linux/mempool.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/list.h>
29 #include <linux/bsg-lib.h>
30 #include <linux/vmalloc.h>
31 
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_transport_fc.h>
35 #include <scsi/scsi_bsg_fc.h>
36 #include <scsi/fc/fc_fs.h>
37 
38 #include "lpfc_hw4.h"
39 #include "lpfc_hw.h"
40 #include "lpfc_sli.h"
41 #include "lpfc_sli4.h"
42 #include "lpfc_nl.h"
43 #include "lpfc_bsg.h"
44 #include "lpfc_disc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc.h"
47 #include "lpfc_logmsg.h"
48 #include "lpfc_crtn.h"
49 #include "lpfc_debugfs.h"
50 #include "lpfc_vport.h"
51 #include "lpfc_version.h"
52 
53 struct lpfc_bsg_event {
54 	struct list_head node;
55 	struct kref kref;
56 	wait_queue_head_t wq;
57 
58 	/* Event type and waiter identifiers */
59 	uint32_t type_mask;
60 	uint32_t req_id;
61 	uint32_t reg_id;
62 
63 	/* next two flags are here for the auto-delete logic */
64 	unsigned long wait_time_stamp;
65 	int waiting;
66 
67 	/* seen and not seen events */
68 	struct list_head events_to_get;
69 	struct list_head events_to_see;
70 
71 	/* driver data associated with the job */
72 	void *dd_data;
73 };
74 
75 struct lpfc_bsg_iocb {
76 	struct lpfc_iocbq *cmdiocbq;
77 	struct lpfc_dmabuf *rmp;
78 	struct lpfc_nodelist *ndlp;
79 };
80 
81 struct lpfc_bsg_mbox {
82 	LPFC_MBOXQ_t *pmboxq;
83 	MAILBOX_t *mb;
84 	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85 	uint8_t *ext; /* extended mailbox data */
86 	uint32_t mbOffset; /* from app */
87 	uint32_t inExtWLen; /* from app */
88 	uint32_t outExtWLen; /* from app */
89 };
90 
91 #define TYPE_EVT 	1
92 #define TYPE_IOCB	2
93 #define TYPE_MBOX	3
94 struct bsg_job_data {
95 	uint32_t type;
96 	struct bsg_job *set_job; /* job waiting for this iocb to finish */
97 	union {
98 		struct lpfc_bsg_event *evt;
99 		struct lpfc_bsg_iocb iocb;
100 		struct lpfc_bsg_mbox mbox;
101 	} context_un;
102 };
103 
104 struct event_data {
105 	struct list_head node;
106 	uint32_t type;
107 	uint32_t immed_dat;
108 	void *data;
109 	uint32_t len;
110 };
111 
112 #define BUF_SZ_4K 4096
113 #define SLI_CT_ELX_LOOPBACK 0x10
114 
115 enum ELX_LOOPBACK_CMD {
116 	ELX_LOOPBACK_XRI_SETUP,
117 	ELX_LOOPBACK_DATA,
118 };
119 
120 #define ELX_LOOPBACK_HEADER_SZ \
121 	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
122 
123 struct lpfc_dmabufext {
124 	struct lpfc_dmabuf dma;
125 	uint32_t size;
126 	uint32_t flag;
127 };
128 
129 static void
130 lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
131 {
132 	struct lpfc_dmabuf *mlast, *next_mlast;
133 
134 	if (mlist) {
135 		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
136 					 list) {
137 			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
138 			list_del(&mlast->list);
139 			kfree(mlast);
140 		}
141 		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
142 		kfree(mlist);
143 	}
144 	return;
145 }
146 
147 static struct lpfc_dmabuf *
148 lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
149 		       int outbound_buffers, struct ulp_bde64 *bpl,
150 		       int *bpl_entries)
151 {
152 	struct lpfc_dmabuf *mlist = NULL;
153 	struct lpfc_dmabuf *mp;
154 	unsigned int bytes_left = size;
155 
156 	/* Verify we can support the size specified */
157 	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
158 		return NULL;
159 
160 	/* Determine the number of dma buffers to allocate */
161 	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
162 			size/LPFC_BPL_SIZE);
163 
164 	/* Allocate dma buffer and place in BPL passed */
165 	while (bytes_left) {
166 		/* Allocate dma buffer  */
167 		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
168 		if (!mp) {
169 			if (mlist)
170 				lpfc_free_bsg_buffers(phba, mlist);
171 			return NULL;
172 		}
173 
174 		INIT_LIST_HEAD(&mp->list);
175 		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
176 
177 		if (!mp->virt) {
178 			kfree(mp);
179 			if (mlist)
180 				lpfc_free_bsg_buffers(phba, mlist);
181 			return NULL;
182 		}
183 
184 		/* Queue it to a linked list */
185 		if (!mlist)
186 			mlist = mp;
187 		else
188 			list_add_tail(&mp->list, &mlist->list);
189 
190 		/* Add buffer to buffer pointer list */
191 		if (outbound_buffers)
192 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
193 		else
194 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
195 		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
196 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
197 		bpl->tus.f.bdeSize = (uint16_t)
198 			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
199 			 bytes_left);
200 		bytes_left -= bpl->tus.f.bdeSize;
201 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
202 		bpl++;
203 	}
204 	return mlist;
205 }
206 
207 static unsigned int
208 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
209 		   struct bsg_buffer *bsg_buffers,
210 		   unsigned int bytes_to_transfer, int to_buffers)
211 {
212 
213 	struct lpfc_dmabuf *mp;
214 	unsigned int transfer_bytes, bytes_copied = 0;
215 	unsigned int sg_offset, dma_offset;
216 	unsigned char *dma_address, *sg_address;
217 	LIST_HEAD(temp_list);
218 	struct sg_mapping_iter miter;
219 	unsigned long flags;
220 	unsigned int sg_flags = SG_MITER_ATOMIC;
221 	bool sg_valid;
222 
223 	list_splice_init(&dma_buffers->list, &temp_list);
224 	list_add(&dma_buffers->list, &temp_list);
225 	sg_offset = 0;
226 	if (to_buffers)
227 		sg_flags |= SG_MITER_FROM_SG;
228 	else
229 		sg_flags |= SG_MITER_TO_SG;
230 	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
231 		       sg_flags);
232 	local_irq_save(flags);
233 	sg_valid = sg_miter_next(&miter);
234 	list_for_each_entry(mp, &temp_list, list) {
235 		dma_offset = 0;
236 		while (bytes_to_transfer && sg_valid &&
237 		       (dma_offset < LPFC_BPL_SIZE)) {
238 			dma_address = mp->virt + dma_offset;
239 			if (sg_offset) {
240 				/* Continue previous partial transfer of sg */
241 				sg_address = miter.addr + sg_offset;
242 				transfer_bytes = miter.length - sg_offset;
243 			} else {
244 				sg_address = miter.addr;
245 				transfer_bytes = miter.length;
246 			}
247 			if (bytes_to_transfer < transfer_bytes)
248 				transfer_bytes = bytes_to_transfer;
249 			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
250 				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
251 			if (to_buffers)
252 				memcpy(dma_address, sg_address, transfer_bytes);
253 			else
254 				memcpy(sg_address, dma_address, transfer_bytes);
255 			dma_offset += transfer_bytes;
256 			sg_offset += transfer_bytes;
257 			bytes_to_transfer -= transfer_bytes;
258 			bytes_copied += transfer_bytes;
259 			if (sg_offset >= miter.length) {
260 				sg_offset = 0;
261 				sg_valid = sg_miter_next(&miter);
262 			}
263 		}
264 	}
265 	sg_miter_stop(&miter);
266 	local_irq_restore(flags);
267 	list_del_init(&dma_buffers->list);
268 	list_splice(&temp_list, &dma_buffers->list);
269 	return bytes_copied;
270 }
271 
272 /**
273  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
274  * @phba: Pointer to HBA context object.
275  * @cmdiocbq: Pointer to command iocb.
276  * @rspiocbq: Pointer to response iocb.
277  *
278  * This function is the completion handler for iocbs issued using
279  * lpfc_bsg_send_mgmt_cmd function. This function is called by the
280  * ring event handler function without any lock held. This function
281  * can be called from both worker thread context and interrupt
282  * context. This function also can be called from another thread which
283  * cleans up the SLI layer objects.
284  * This function copies the contents of the response iocb to the
285  * response iocb memory object provided by the caller of
286  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
287  * sleeps for the iocb completion.
288  **/
289 static void
290 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
291 			struct lpfc_iocbq *cmdiocbq,
292 			struct lpfc_iocbq *rspiocbq)
293 {
294 	struct bsg_job_data *dd_data;
295 	struct bsg_job *job;
296 	struct fc_bsg_reply *bsg_reply;
297 	struct lpfc_dmabuf *bmp, *cmp, *rmp;
298 	struct lpfc_nodelist *ndlp;
299 	struct lpfc_bsg_iocb *iocb;
300 	unsigned long flags;
301 	int rc = 0;
302 	u32 ulp_status, ulp_word4, total_data_placed;
303 
304 	dd_data = cmdiocbq->context_un.dd_data;
305 
306 	/* Determine if job has been aborted */
307 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
308 	job = dd_data->set_job;
309 	if (job) {
310 		bsg_reply = job->reply;
311 		/* Prevent timeout handling from trying to abort job */
312 		job->dd_data = NULL;
313 	}
314 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
315 
316 	/* Close the timeout handler abort window */
317 	spin_lock_irqsave(&phba->hbalock, flags);
318 	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
319 	spin_unlock_irqrestore(&phba->hbalock, flags);
320 
321 	iocb = &dd_data->context_un.iocb;
322 	ndlp = iocb->cmdiocbq->ndlp;
323 	rmp = iocb->rmp;
324 	cmp = cmdiocbq->cmd_dmabuf;
325 	bmp = cmdiocbq->bpl_dmabuf;
326 	ulp_status = get_job_ulpstatus(phba, rspiocbq);
327 	ulp_word4 = get_job_word4(phba, rspiocbq);
328 	total_data_placed = get_job_data_placed(phba, rspiocbq);
329 
330 	/* Copy the completed data or set the error status */
331 
332 	if (job) {
333 		if (ulp_status) {
334 			if (ulp_status == IOSTAT_LOCAL_REJECT) {
335 				switch (ulp_word4 & IOERR_PARAM_MASK) {
336 				case IOERR_SEQUENCE_TIMEOUT:
337 					rc = -ETIMEDOUT;
338 					break;
339 				case IOERR_INVALID_RPI:
340 					rc = -EFAULT;
341 					break;
342 				default:
343 					rc = -EACCES;
344 					break;
345 				}
346 			} else {
347 				rc = -EACCES;
348 			}
349 		} else {
350 			bsg_reply->reply_payload_rcv_len =
351 				lpfc_bsg_copy_data(rmp, &job->reply_payload,
352 						   total_data_placed, 0);
353 		}
354 	}
355 
356 	lpfc_free_bsg_buffers(phba, cmp);
357 	lpfc_free_bsg_buffers(phba, rmp);
358 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
359 	kfree(bmp);
360 	lpfc_nlp_put(ndlp);
361 	lpfc_sli_release_iocbq(phba, cmdiocbq);
362 	kfree(dd_data);
363 
364 	/* Complete the job if the job is still active */
365 
366 	if (job) {
367 		bsg_reply->result = rc;
368 		bsg_job_done(job, bsg_reply->result,
369 			       bsg_reply->reply_payload_rcv_len);
370 	}
371 	return;
372 }
373 
374 /**
375  * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
376  * @job: fc_bsg_job to handle
377  **/
378 static int
379 lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
380 {
381 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
382 	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
383 	struct lpfc_hba *phba = vport->phba;
384 	struct lpfc_nodelist *ndlp = rdata->pnode;
385 	struct fc_bsg_reply *bsg_reply = job->reply;
386 	struct ulp_bde64 *bpl = NULL;
387 	struct lpfc_iocbq *cmdiocbq = NULL;
388 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
389 	int request_nseg, reply_nseg;
390 	u32 num_entry;
391 	struct bsg_job_data *dd_data;
392 	unsigned long flags;
393 	uint32_t creg_val;
394 	int rc = 0;
395 	int iocb_stat;
396 	u16 ulp_context;
397 
398 	/* in case no data is transferred */
399 	bsg_reply->reply_payload_rcv_len = 0;
400 
401 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
402 		return -ENODEV;
403 
404 	/* allocate our bsg tracking structure */
405 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
406 	if (!dd_data) {
407 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
408 				"2733 Failed allocation of dd_data\n");
409 		rc = -ENOMEM;
410 		goto no_dd_data;
411 	}
412 
413 	cmdiocbq = lpfc_sli_get_iocbq(phba);
414 	if (!cmdiocbq) {
415 		rc = -ENOMEM;
416 		goto free_dd;
417 	}
418 
419 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
420 	if (!bmp) {
421 		rc = -ENOMEM;
422 		goto free_cmdiocbq;
423 	}
424 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
425 	if (!bmp->virt) {
426 		rc = -ENOMEM;
427 		goto free_bmp;
428 	}
429 
430 	INIT_LIST_HEAD(&bmp->list);
431 
432 	bpl = (struct ulp_bde64 *) bmp->virt;
433 	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
434 	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
435 				     1, bpl, &request_nseg);
436 	if (!cmp) {
437 		rc = -ENOMEM;
438 		goto free_bmp;
439 	}
440 	lpfc_bsg_copy_data(cmp, &job->request_payload,
441 			   job->request_payload.payload_len, 1);
442 
443 	bpl += request_nseg;
444 	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
445 	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
446 				     bpl, &reply_nseg);
447 	if (!rmp) {
448 		rc = -ENOMEM;
449 		goto free_cmp;
450 	}
451 
452 	num_entry = request_nseg + reply_nseg;
453 
454 	if (phba->sli_rev == LPFC_SLI_REV4)
455 		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
456 	else
457 		ulp_context = ndlp->nlp_rpi;
458 
459 	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
460 			      phba->fc_ratov * 2);
461 
462 	cmdiocbq->num_bdes = num_entry;
463 	cmdiocbq->vport = phba->pport;
464 	cmdiocbq->cmd_dmabuf = cmp;
465 	cmdiocbq->bpl_dmabuf = bmp;
466 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
467 
468 	cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
469 	cmdiocbq->context_un.dd_data = dd_data;
470 
471 	dd_data->type = TYPE_IOCB;
472 	dd_data->set_job = job;
473 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
474 	dd_data->context_un.iocb.rmp = rmp;
475 	job->dd_data = dd_data;
476 
477 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
478 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
479 			rc = -EIO ;
480 			goto free_rmp;
481 		}
482 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
483 		writel(creg_val, phba->HCregaddr);
484 		readl(phba->HCregaddr); /* flush */
485 	}
486 
487 	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
488 	if (!cmdiocbq->ndlp) {
489 		rc = -ENODEV;
490 		goto free_rmp;
491 	}
492 
493 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
494 	if (iocb_stat == IOCB_SUCCESS) {
495 		spin_lock_irqsave(&phba->hbalock, flags);
496 		/* make sure the I/O had not been completed yet */
497 		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
498 			/* open up abort window to timeout handler */
499 			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
500 		}
501 		spin_unlock_irqrestore(&phba->hbalock, flags);
502 		return 0; /* done for now */
503 	} else if (iocb_stat == IOCB_BUSY) {
504 		rc = -EAGAIN;
505 	} else {
506 		rc = -EIO;
507 	}
508 
509 	/* iocb failed so cleanup */
510 	lpfc_nlp_put(ndlp);
511 
512 free_rmp:
513 	lpfc_free_bsg_buffers(phba, rmp);
514 free_cmp:
515 	lpfc_free_bsg_buffers(phba, cmp);
516 free_bmp:
517 	if (bmp->virt)
518 		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
519 	kfree(bmp);
520 free_cmdiocbq:
521 	lpfc_sli_release_iocbq(phba, cmdiocbq);
522 free_dd:
523 	kfree(dd_data);
524 no_dd_data:
525 	/* make error code available to userspace */
526 	bsg_reply->result = rc;
527 	job->dd_data = NULL;
528 	return rc;
529 }
530 
531 /**
532  * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
533  * @phba: Pointer to HBA context object.
534  * @cmdiocbq: Pointer to command iocb.
535  * @rspiocbq: Pointer to response iocb.
536  *
537  * This function is the completion handler for iocbs issued using
538  * lpfc_bsg_rport_els_cmp function. This function is called by the
539  * ring event handler function without any lock held. This function
540  * can be called from both worker thread context and interrupt
541  * context. This function also can be called from other thread which
542  * cleans up the SLI layer objects.
543  * This function copies the contents of the response iocb to the
544  * response iocb memory object provided by the caller of
545  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
546  * sleeps for the iocb completion.
547  **/
548 static void
549 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
550 			struct lpfc_iocbq *cmdiocbq,
551 			struct lpfc_iocbq *rspiocbq)
552 {
553 	struct bsg_job_data *dd_data;
554 	struct bsg_job *job;
555 	struct fc_bsg_reply *bsg_reply;
556 	struct lpfc_nodelist *ndlp;
557 	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
558 	struct fc_bsg_ctels_reply *els_reply;
559 	uint8_t *rjt_data;
560 	unsigned long flags;
561 	unsigned int rsp_size;
562 	int rc = 0;
563 	u32 ulp_status, ulp_word4, total_data_placed;
564 
565 	dd_data = cmdiocbq->context_un.dd_data;
566 	ndlp = dd_data->context_un.iocb.ndlp;
567 	cmdiocbq->ndlp = ndlp;
568 
569 	/* Determine if job has been aborted */
570 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
571 	job = dd_data->set_job;
572 	if (job) {
573 		bsg_reply = job->reply;
574 		/* Prevent timeout handling from trying to abort job  */
575 		job->dd_data = NULL;
576 	}
577 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
578 
579 	/* Close the timeout handler abort window */
580 	spin_lock_irqsave(&phba->hbalock, flags);
581 	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
582 	spin_unlock_irqrestore(&phba->hbalock, flags);
583 
584 	ulp_status = get_job_ulpstatus(phba, rspiocbq);
585 	ulp_word4 = get_job_word4(phba, rspiocbq);
586 	total_data_placed = get_job_data_placed(phba, rspiocbq);
587 	pcmd = cmdiocbq->cmd_dmabuf;
588 	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
589 
590 	/* Copy the completed job data or determine the job status if job is
591 	 * still active
592 	 */
593 
594 	if (job) {
595 		if (ulp_status == IOSTAT_SUCCESS) {
596 			rsp_size = total_data_placed;
597 			bsg_reply->reply_payload_rcv_len =
598 				sg_copy_from_buffer(job->reply_payload.sg_list,
599 						    job->reply_payload.sg_cnt,
600 						    prsp->virt,
601 						    rsp_size);
602 		} else if (ulp_status == IOSTAT_LS_RJT) {
603 			bsg_reply->reply_payload_rcv_len =
604 				sizeof(struct fc_bsg_ctels_reply);
605 			/* LS_RJT data returned in word 4 */
606 			rjt_data = (uint8_t *)&ulp_word4;
607 			els_reply = &bsg_reply->reply_data.ctels_reply;
608 			els_reply->status = FC_CTELS_STATUS_REJECT;
609 			els_reply->rjt_data.action = rjt_data[3];
610 			els_reply->rjt_data.reason_code = rjt_data[2];
611 			els_reply->rjt_data.reason_explanation = rjt_data[1];
612 			els_reply->rjt_data.vendor_unique = rjt_data[0];
613 		} else if (ulp_status == IOSTAT_LOCAL_REJECT &&
614 			   (ulp_word4 & IOERR_PARAM_MASK) ==
615 			   IOERR_SEQUENCE_TIMEOUT) {
616 			rc = -ETIMEDOUT;
617 		} else {
618 			rc = -EIO;
619 		}
620 	}
621 
622 	lpfc_els_free_iocb(phba, cmdiocbq);
623 
624 	lpfc_nlp_put(ndlp);
625 	kfree(dd_data);
626 
627 	/* Complete the job if the job is still active */
628 
629 	if (job) {
630 		bsg_reply->result = rc;
631 		bsg_job_done(job, bsg_reply->result,
632 			       bsg_reply->reply_payload_rcv_len);
633 	}
634 	return;
635 }
636 
637 /**
638  * lpfc_bsg_rport_els - send an ELS command from a bsg request
639  * @job: fc_bsg_job to handle
640  **/
641 static int
642 lpfc_bsg_rport_els(struct bsg_job *job)
643 {
644 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
645 	struct lpfc_hba *phba = vport->phba;
646 	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
647 	struct lpfc_nodelist *ndlp = rdata->pnode;
648 	struct fc_bsg_request *bsg_request = job->request;
649 	struct fc_bsg_reply *bsg_reply = job->reply;
650 	uint32_t elscmd;
651 	uint32_t cmdsize;
652 	struct lpfc_iocbq *cmdiocbq;
653 	uint16_t rpi = 0;
654 	struct bsg_job_data *dd_data;
655 	unsigned long flags;
656 	uint32_t creg_val;
657 	int rc = 0;
658 
659 	/* in case no data is transferred */
660 	bsg_reply->reply_payload_rcv_len = 0;
661 
662 	/* verify the els command is not greater than the
663 	 * maximum ELS transfer size.
664 	 */
665 
666 	if (job->request_payload.payload_len > FCELSSIZE) {
667 		rc = -EINVAL;
668 		goto no_dd_data;
669 	}
670 
671 	/* allocate our bsg tracking structure */
672 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
673 	if (!dd_data) {
674 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
675 				"2735 Failed allocation of dd_data\n");
676 		rc = -ENOMEM;
677 		goto no_dd_data;
678 	}
679 
680 	elscmd = bsg_request->rqst_data.r_els.els_code;
681 	cmdsize = job->request_payload.payload_len;
682 
683 	if (!lpfc_nlp_get(ndlp)) {
684 		rc = -ENODEV;
685 		goto free_dd_data;
686 	}
687 
688 	/* We will use the allocated dma buffers by prep els iocb for command
689 	 * and response to ensure if the job times out and the request is freed,
690 	 * we won't be dma into memory that is no longer allocated to for the
691 	 * request.
692 	 */
693 	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
694 				      ndlp->nlp_DID, elscmd);
695 	if (!cmdiocbq) {
696 		rc = -EIO;
697 		goto release_ndlp;
698 	}
699 
700 	/* Transfer the request payload to allocated command dma buffer */
701 	sg_copy_to_buffer(job->request_payload.sg_list,
702 			  job->request_payload.sg_cnt,
703 			  cmdiocbq->cmd_dmabuf->virt,
704 			  cmdsize);
705 
706 	rpi = ndlp->nlp_rpi;
707 
708 	if (phba->sli_rev == LPFC_SLI_REV4)
709 		bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
710 		       phba->sli4_hba.rpi_ids[rpi]);
711 	else
712 		cmdiocbq->iocb.ulpContext = rpi;
713 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
714 	cmdiocbq->context_un.dd_data = dd_data;
715 	cmdiocbq->ndlp = ndlp;
716 	cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
717 	dd_data->type = TYPE_IOCB;
718 	dd_data->set_job = job;
719 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
720 	dd_data->context_un.iocb.ndlp = ndlp;
721 	dd_data->context_un.iocb.rmp = NULL;
722 	job->dd_data = dd_data;
723 
724 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
725 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
726 			rc = -EIO;
727 			goto linkdown_err;
728 		}
729 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
730 		writel(creg_val, phba->HCregaddr);
731 		readl(phba->HCregaddr); /* flush */
732 	}
733 
734 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
735 	if (rc == IOCB_SUCCESS) {
736 		spin_lock_irqsave(&phba->hbalock, flags);
737 		/* make sure the I/O had not been completed/released */
738 		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
739 			/* open up abort window to timeout handler */
740 			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
741 		}
742 		spin_unlock_irqrestore(&phba->hbalock, flags);
743 		return 0; /* done for now */
744 	} else if (rc == IOCB_BUSY) {
745 		rc = -EAGAIN;
746 	} else {
747 		rc = -EIO;
748 	}
749 
750 	/* I/O issue failed.  Cleanup resources. */
751 
752 linkdown_err:
753 	lpfc_els_free_iocb(phba, cmdiocbq);
754 
755 release_ndlp:
756 	lpfc_nlp_put(ndlp);
757 
758 free_dd_data:
759 	kfree(dd_data);
760 
761 no_dd_data:
762 	/* make error code available to userspace */
763 	bsg_reply->result = rc;
764 	job->dd_data = NULL;
765 	return rc;
766 }
767 
768 /**
769  * lpfc_bsg_event_free - frees an allocated event structure
770  * @kref: Pointer to a kref.
771  *
772  * Called from kref_put. Back cast the kref into an event structure address.
773  * Free any events to get, delete associated nodes, free any events to see,
774  * free any data then free the event itself.
775  **/
776 static void
777 lpfc_bsg_event_free(struct kref *kref)
778 {
779 	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
780 						  kref);
781 	struct event_data *ed;
782 
783 	list_del(&evt->node);
784 
785 	while (!list_empty(&evt->events_to_get)) {
786 		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
787 		list_del(&ed->node);
788 		kfree(ed->data);
789 		kfree(ed);
790 	}
791 
792 	while (!list_empty(&evt->events_to_see)) {
793 		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
794 		list_del(&ed->node);
795 		kfree(ed->data);
796 		kfree(ed);
797 	}
798 
799 	kfree(evt->dd_data);
800 	kfree(evt);
801 }
802 
803 /**
804  * lpfc_bsg_event_ref - increments the kref for an event
805  * @evt: Pointer to an event structure.
806  **/
807 static inline void
808 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
809 {
810 	kref_get(&evt->kref);
811 }
812 
813 /**
814  * lpfc_bsg_event_unref - Uses kref_put to free an event structure
815  * @evt: Pointer to an event structure.
816  **/
817 static inline void
818 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
819 {
820 	kref_put(&evt->kref, lpfc_bsg_event_free);
821 }
822 
823 /**
824  * lpfc_bsg_event_new - allocate and initialize a event structure
825  * @ev_mask: Mask of events.
826  * @ev_reg_id: Event reg id.
827  * @ev_req_id: Event request id.
828  **/
829 static struct lpfc_bsg_event *
830 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
831 {
832 	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
833 
834 	if (!evt)
835 		return NULL;
836 
837 	INIT_LIST_HEAD(&evt->events_to_get);
838 	INIT_LIST_HEAD(&evt->events_to_see);
839 	evt->type_mask = ev_mask;
840 	evt->req_id = ev_req_id;
841 	evt->reg_id = ev_reg_id;
842 	evt->wait_time_stamp = jiffies;
843 	evt->dd_data = NULL;
844 	init_waitqueue_head(&evt->wq);
845 	kref_init(&evt->kref);
846 	return evt;
847 }
848 
849 /**
850  * diag_cmd_data_free - Frees an lpfc dma buffer extension
851  * @phba: Pointer to HBA context object.
852  * @mlist: Pointer to an lpfc dma buffer extension.
853  **/
854 static int
855 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
856 {
857 	struct lpfc_dmabufext *mlast;
858 	struct pci_dev *pcidev;
859 	struct list_head head, *curr, *next;
860 
861 	if ((!mlist) || (!lpfc_is_link_up(phba) &&
862 		(phba->link_flag & LS_LOOPBACK_MODE))) {
863 		return 0;
864 	}
865 
866 	pcidev = phba->pcidev;
867 	list_add_tail(&head, &mlist->dma.list);
868 
869 	list_for_each_safe(curr, next, &head) {
870 		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
871 		if (mlast->dma.virt)
872 			dma_free_coherent(&pcidev->dev,
873 					  mlast->size,
874 					  mlast->dma.virt,
875 					  mlast->dma.phys);
876 		kfree(mlast);
877 	}
878 	return 0;
879 }
880 
881 /*
882  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
883  *
884  * This function is called when an unsolicited CT command is received.  It
885  * forwards the event to any processes registered to receive CT events.
886  **/
887 int
888 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
889 			struct lpfc_iocbq *piocbq)
890 {
891 	uint32_t evt_req_id = 0;
892 	uint32_t cmd;
893 	struct lpfc_dmabuf *dmabuf = NULL;
894 	struct lpfc_bsg_event *evt;
895 	struct event_data *evt_dat = NULL;
896 	struct lpfc_iocbq *iocbq;
897 	IOCB_t *iocb = NULL;
898 	size_t offset = 0;
899 	struct list_head head;
900 	struct ulp_bde64 *bde;
901 	dma_addr_t dma_addr;
902 	int i;
903 	struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
904 	struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
905 	struct lpfc_sli_ct_request *ct_req;
906 	struct bsg_job *job = NULL;
907 	struct fc_bsg_reply *bsg_reply;
908 	struct bsg_job_data *dd_data = NULL;
909 	unsigned long flags;
910 	int size = 0;
911 	u32 bde_count = 0;
912 
913 	INIT_LIST_HEAD(&head);
914 	list_add_tail(&head, &piocbq->list);
915 
916 	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
917 	evt_req_id = ct_req->FsType;
918 	cmd = ct_req->CommandResponse.bits.CmdRsp;
919 
920 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
921 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
922 		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
923 			evt->req_id != evt_req_id)
924 			continue;
925 
926 		lpfc_bsg_event_ref(evt);
927 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
928 		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
929 		if (evt_dat == NULL) {
930 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
931 			lpfc_bsg_event_unref(evt);
932 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
933 					"2614 Memory allocation failed for "
934 					"CT event\n");
935 			break;
936 		}
937 
938 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
939 			/* take accumulated byte count from the last iocbq */
940 			iocbq = list_entry(head.prev, typeof(*iocbq), list);
941 			if (phba->sli_rev == LPFC_SLI_REV4)
942 				evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
943 			else
944 				evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
945 		} else {
946 			list_for_each_entry(iocbq, &head, list) {
947 				iocb = &iocbq->iocb;
948 				for (i = 0; i < iocb->ulpBdeCount;
949 				     i++)
950 					evt_dat->len +=
951 					iocb->un.cont64[i].tus.f.bdeSize;
952 			}
953 		}
954 
955 		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
956 		if (evt_dat->data == NULL) {
957 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
958 					"2615 Memory allocation failed for "
959 					"CT event data, size %d\n",
960 					evt_dat->len);
961 			kfree(evt_dat);
962 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
963 			lpfc_bsg_event_unref(evt);
964 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
965 			goto error_ct_unsol_exit;
966 		}
967 
968 		list_for_each_entry(iocbq, &head, list) {
969 			size = 0;
970 			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
971 				bdeBuf1 = iocbq->cmd_dmabuf;
972 				bdeBuf2 = iocbq->bpl_dmabuf;
973 			}
974 			if (phba->sli_rev == LPFC_SLI_REV4)
975 				bde_count = iocbq->wcqe_cmpl.word3;
976 			else
977 				bde_count = iocbq->iocb.ulpBdeCount;
978 			for (i = 0; i < bde_count; i++) {
979 				if (phba->sli3_options &
980 				    LPFC_SLI3_HBQ_ENABLED) {
981 					if (i == 0) {
982 						size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
983 						dmabuf = bdeBuf1;
984 					} else if (i == 1) {
985 						size = iocbq->unsol_rcv_len;
986 						dmabuf = bdeBuf2;
987 					}
988 					if ((offset + size) > evt_dat->len)
989 						size = evt_dat->len - offset;
990 				} else {
991 					size = iocbq->iocb.un.cont64[i].
992 						tus.f.bdeSize;
993 					bde = &iocbq->iocb.un.cont64[i];
994 					dma_addr = getPaddr(bde->addrHigh,
995 							    bde->addrLow);
996 					dmabuf = lpfc_sli_ringpostbuf_get(phba,
997 							pring, dma_addr);
998 				}
999 				if (!dmabuf) {
1000 					lpfc_printf_log(phba, KERN_ERR,
1001 						LOG_LIBDFC, "2616 No dmabuf "
1002 						"found for iocbq x%px\n",
1003 						iocbq);
1004 					kfree(evt_dat->data);
1005 					kfree(evt_dat);
1006 					spin_lock_irqsave(&phba->ct_ev_lock,
1007 						flags);
1008 					lpfc_bsg_event_unref(evt);
1009 					spin_unlock_irqrestore(
1010 						&phba->ct_ev_lock, flags);
1011 					goto error_ct_unsol_exit;
1012 				}
1013 				memcpy((char *)(evt_dat->data) + offset,
1014 				       dmabuf->virt, size);
1015 				offset += size;
1016 				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
1017 				    !(phba->sli3_options &
1018 				      LPFC_SLI3_HBQ_ENABLED)) {
1019 					lpfc_sli_ringpostbuf_put(phba, pring,
1020 								 dmabuf);
1021 				} else {
1022 					switch (cmd) {
1023 					case ELX_LOOPBACK_DATA:
1024 						if (phba->sli_rev <
1025 						    LPFC_SLI_REV4)
1026 							diag_cmd_data_free(phba,
1027 							(struct lpfc_dmabufext
1028 							 *)dmabuf);
1029 						break;
1030 					case ELX_LOOPBACK_XRI_SETUP:
1031 						if ((phba->sli_rev ==
1032 							LPFC_SLI_REV2) ||
1033 							(phba->sli3_options &
1034 							LPFC_SLI3_HBQ_ENABLED
1035 							)) {
1036 							lpfc_in_buf_free(phba,
1037 									dmabuf);
1038 						} else {
1039 							lpfc_sli3_post_buffer(phba,
1040 									      pring,
1041 									      1);
1042 						}
1043 						break;
1044 					default:
1045 						if (!(phba->sli3_options &
1046 						      LPFC_SLI3_HBQ_ENABLED))
1047 							lpfc_sli3_post_buffer(phba,
1048 									      pring,
1049 									      1);
1050 						break;
1051 					}
1052 				}
1053 			}
1054 		}
1055 
1056 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1057 		if (phba->sli_rev == LPFC_SLI_REV4) {
1058 			evt_dat->immed_dat = phba->ctx_idx;
1059 			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
1060 			/* Provide warning for over-run of the ct_ctx array */
1061 			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
1062 			    UNSOL_VALID)
1063 				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1064 						"2717 CT context array entry "
1065 						"[%d] over-run: oxid:x%x, "
1066 						"sid:x%x\n", phba->ctx_idx,
1067 						phba->ct_ctx[
1068 						    evt_dat->immed_dat].oxid,
1069 						phba->ct_ctx[
1070 						    evt_dat->immed_dat].SID);
1071 			phba->ct_ctx[evt_dat->immed_dat].rxid =
1072 				get_job_ulpcontext(phba, piocbq);
1073 			phba->ct_ctx[evt_dat->immed_dat].oxid =
1074 				get_job_rcvoxid(phba, piocbq);
1075 			phba->ct_ctx[evt_dat->immed_dat].SID =
1076 				bf_get(wqe_els_did,
1077 				       &piocbq->wqe.xmit_els_rsp.wqe_dest);
1078 			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
1079 		} else
1080 			evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
1081 
1082 		evt_dat->type = FC_REG_CT_EVENT;
1083 		list_add(&evt_dat->node, &evt->events_to_see);
1084 		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
1085 			wake_up_interruptible(&evt->wq);
1086 			lpfc_bsg_event_unref(evt);
1087 			break;
1088 		}
1089 
1090 		list_move(evt->events_to_see.prev, &evt->events_to_get);
1091 
1092 		dd_data = (struct bsg_job_data *)evt->dd_data;
1093 		job = dd_data->set_job;
1094 		dd_data->set_job = NULL;
1095 		lpfc_bsg_event_unref(evt);
1096 		if (job) {
1097 			bsg_reply = job->reply;
1098 			bsg_reply->reply_payload_rcv_len = size;
1099 			/* make error code available to userspace */
1100 			bsg_reply->result = 0;
1101 			job->dd_data = NULL;
1102 			/* complete the job back to userspace */
1103 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1104 			bsg_job_done(job, bsg_reply->result,
1105 				       bsg_reply->reply_payload_rcv_len);
1106 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
1107 		}
1108 	}
1109 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1110 
1111 error_ct_unsol_exit:
1112 	if (!list_empty(&head))
1113 		list_del(&head);
1114 	if ((phba->sli_rev < LPFC_SLI_REV4) &&
1115 	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
1116 		return 0;
1117 	return 1;
1118 }
1119 
1120 /**
1121  * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1122  * @phba: Pointer to HBA context object.
1123  * @dmabuf: pointer to a dmabuf that describes the FC sequence
1124  *
1125  * This function handles abort to the CT command toward management plane
1126  * for SLI4 port.
1127  *
1128  * If the pending context of a CT command to management plane present, clears
1129  * such context and returns 1 for handled; otherwise, it returns 0 indicating
1130  * no context exists.
1131  **/
1132 int
1133 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1134 {
1135 	struct fc_frame_header fc_hdr;
1136 	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1137 	int ctx_idx, handled = 0;
1138 	uint16_t oxid, rxid;
1139 	uint32_t sid;
1140 
1141 	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1142 	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1143 	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1144 	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1145 
1146 	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1147 		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1148 			continue;
1149 		if (phba->ct_ctx[ctx_idx].rxid != rxid)
1150 			continue;
1151 		if (phba->ct_ctx[ctx_idx].oxid != oxid)
1152 			continue;
1153 		if (phba->ct_ctx[ctx_idx].SID != sid)
1154 			continue;
1155 		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1156 		handled = 1;
1157 	}
1158 	return handled;
1159 }
1160 
1161 /**
1162  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1163  * @job: SET_EVENT fc_bsg_job
1164  **/
1165 static int
1166 lpfc_bsg_hba_set_event(struct bsg_job *job)
1167 {
1168 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1169 	struct lpfc_hba *phba = vport->phba;
1170 	struct fc_bsg_request *bsg_request = job->request;
1171 	struct set_ct_event *event_req;
1172 	struct lpfc_bsg_event *evt;
1173 	int rc = 0;
1174 	struct bsg_job_data *dd_data = NULL;
1175 	uint32_t ev_mask;
1176 	unsigned long flags;
1177 
1178 	if (job->request_len <
1179 	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1180 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1181 				"2612 Received SET_CT_EVENT below minimum "
1182 				"size\n");
1183 		rc = -EINVAL;
1184 		goto job_error;
1185 	}
1186 
1187 	event_req = (struct set_ct_event *)
1188 		bsg_request->rqst_data.h_vendor.vendor_cmd;
1189 	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1190 				FC_REG_EVENT_MASK);
1191 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1192 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1193 		if (evt->reg_id == event_req->ev_reg_id) {
1194 			lpfc_bsg_event_ref(evt);
1195 			evt->wait_time_stamp = jiffies;
1196 			dd_data = (struct bsg_job_data *)evt->dd_data;
1197 			break;
1198 		}
1199 	}
1200 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1201 
1202 	if (&evt->node == &phba->ct_ev_waiters) {
1203 		/* no event waiting struct yet - first call */
1204 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1205 		if (dd_data == NULL) {
1206 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1207 					"2734 Failed allocation of dd_data\n");
1208 			rc = -ENOMEM;
1209 			goto job_error;
1210 		}
1211 		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1212 					event_req->ev_req_id);
1213 		if (!evt) {
1214 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1215 					"2617 Failed allocation of event "
1216 					"waiter\n");
1217 			rc = -ENOMEM;
1218 			goto job_error;
1219 		}
1220 		dd_data->type = TYPE_EVT;
1221 		dd_data->set_job = NULL;
1222 		dd_data->context_un.evt = evt;
1223 		evt->dd_data = (void *)dd_data;
1224 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1225 		list_add(&evt->node, &phba->ct_ev_waiters);
1226 		lpfc_bsg_event_ref(evt);
1227 		evt->wait_time_stamp = jiffies;
1228 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1229 	}
1230 
1231 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1232 	evt->waiting = 1;
1233 	dd_data->set_job = job; /* for unsolicited command */
1234 	job->dd_data = dd_data; /* for fc transport timeout callback*/
1235 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1236 	return 0; /* call job done later */
1237 
1238 job_error:
1239 	kfree(dd_data);
1240 	job->dd_data = NULL;
1241 	return rc;
1242 }
1243 
1244 /**
1245  * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1246  * @job: GET_EVENT fc_bsg_job
1247  **/
1248 static int
1249 lpfc_bsg_hba_get_event(struct bsg_job *job)
1250 {
1251 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1252 	struct lpfc_hba *phba = vport->phba;
1253 	struct fc_bsg_request *bsg_request = job->request;
1254 	struct fc_bsg_reply *bsg_reply = job->reply;
1255 	struct get_ct_event *event_req;
1256 	struct get_ct_event_reply *event_reply;
1257 	struct lpfc_bsg_event *evt, *evt_next;
1258 	struct event_data *evt_dat = NULL;
1259 	unsigned long flags;
1260 	uint32_t rc = 0;
1261 
1262 	if (job->request_len <
1263 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1264 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1265 				"2613 Received GET_CT_EVENT request below "
1266 				"minimum size\n");
1267 		rc = -EINVAL;
1268 		goto job_error;
1269 	}
1270 
1271 	event_req = (struct get_ct_event *)
1272 		bsg_request->rqst_data.h_vendor.vendor_cmd;
1273 
1274 	event_reply = (struct get_ct_event_reply *)
1275 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
1276 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1277 	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1278 		if (evt->reg_id == event_req->ev_reg_id) {
1279 			if (list_empty(&evt->events_to_get))
1280 				break;
1281 			lpfc_bsg_event_ref(evt);
1282 			evt->wait_time_stamp = jiffies;
1283 			evt_dat = list_entry(evt->events_to_get.prev,
1284 					     struct event_data, node);
1285 			list_del(&evt_dat->node);
1286 			break;
1287 		}
1288 	}
1289 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1290 
1291 	/* The app may continue to ask for event data until it gets
1292 	 * an error indicating that there isn't anymore
1293 	 */
1294 	if (evt_dat == NULL) {
1295 		bsg_reply->reply_payload_rcv_len = 0;
1296 		rc = -ENOENT;
1297 		goto job_error;
1298 	}
1299 
1300 	if (evt_dat->len > job->request_payload.payload_len) {
1301 		evt_dat->len = job->request_payload.payload_len;
1302 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1303 				"2618 Truncated event data at %d "
1304 				"bytes\n",
1305 				job->request_payload.payload_len);
1306 	}
1307 
1308 	event_reply->type = evt_dat->type;
1309 	event_reply->immed_data = evt_dat->immed_dat;
1310 	if (evt_dat->len > 0)
1311 		bsg_reply->reply_payload_rcv_len =
1312 			sg_copy_from_buffer(job->request_payload.sg_list,
1313 					    job->request_payload.sg_cnt,
1314 					    evt_dat->data, evt_dat->len);
1315 	else
1316 		bsg_reply->reply_payload_rcv_len = 0;
1317 
1318 	if (evt_dat) {
1319 		kfree(evt_dat->data);
1320 		kfree(evt_dat);
1321 	}
1322 
1323 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1324 	lpfc_bsg_event_unref(evt);
1325 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1326 	job->dd_data = NULL;
1327 	bsg_reply->result = 0;
1328 	bsg_job_done(job, bsg_reply->result,
1329 		       bsg_reply->reply_payload_rcv_len);
1330 	return 0;
1331 
1332 job_error:
1333 	job->dd_data = NULL;
1334 	bsg_reply->result = rc;
1335 	return rc;
1336 }
1337 
1338 /**
1339  * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1340  * @phba: Pointer to HBA context object.
1341  * @cmdiocbq: Pointer to command iocb.
1342  * @rspiocbq: Pointer to response iocb.
1343  *
1344  * This function is the completion handler for iocbs issued using
1345  * lpfc_issue_ct_rsp_cmp function. This function is called by the
1346  * ring event handler function without any lock held. This function
1347  * can be called from both worker thread context and interrupt
1348  * context. This function also can be called from other thread which
1349  * cleans up the SLI layer objects.
1350  * This function copy the contents of the response iocb to the
1351  * response iocb memory object provided by the caller of
1352  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1353  * sleeps for the iocb completion.
1354  **/
1355 static void
1356 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1357 			struct lpfc_iocbq *cmdiocbq,
1358 			struct lpfc_iocbq *rspiocbq)
1359 {
1360 	struct bsg_job_data *dd_data;
1361 	struct bsg_job *job;
1362 	struct fc_bsg_reply *bsg_reply;
1363 	struct lpfc_dmabuf *bmp, *cmp;
1364 	struct lpfc_nodelist *ndlp;
1365 	unsigned long flags;
1366 	int rc = 0;
1367 	u32 ulp_status, ulp_word4;
1368 
1369 	dd_data = cmdiocbq->context_un.dd_data;
1370 
1371 	/* Determine if job has been aborted */
1372 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1373 	job = dd_data->set_job;
1374 	if (job) {
1375 		/* Prevent timeout handling from trying to abort job  */
1376 		job->dd_data = NULL;
1377 	}
1378 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1379 
1380 	/* Close the timeout handler abort window */
1381 	spin_lock_irqsave(&phba->hbalock, flags);
1382 	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1383 	spin_unlock_irqrestore(&phba->hbalock, flags);
1384 
1385 	ndlp = dd_data->context_un.iocb.ndlp;
1386 	cmp = cmdiocbq->cmd_dmabuf;
1387 	bmp = cmdiocbq->bpl_dmabuf;
1388 
1389 	ulp_status = get_job_ulpstatus(phba, rspiocbq);
1390 	ulp_word4 = get_job_word4(phba, rspiocbq);
1391 
1392 	/* Copy the completed job data or set the error status */
1393 
1394 	if (job) {
1395 		bsg_reply = job->reply;
1396 		if (ulp_status) {
1397 			if (ulp_status == IOSTAT_LOCAL_REJECT) {
1398 				switch (ulp_word4 & IOERR_PARAM_MASK) {
1399 				case IOERR_SEQUENCE_TIMEOUT:
1400 					rc = -ETIMEDOUT;
1401 					break;
1402 				case IOERR_INVALID_RPI:
1403 					rc = -EFAULT;
1404 					break;
1405 				default:
1406 					rc = -EACCES;
1407 					break;
1408 				}
1409 			} else {
1410 				rc = -EACCES;
1411 			}
1412 		} else {
1413 			bsg_reply->reply_payload_rcv_len = 0;
1414 		}
1415 	}
1416 
1417 	lpfc_free_bsg_buffers(phba, cmp);
1418 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1419 	kfree(bmp);
1420 	lpfc_sli_release_iocbq(phba, cmdiocbq);
1421 	lpfc_nlp_put(ndlp);
1422 	kfree(dd_data);
1423 
1424 	/* Complete the job if the job is still active */
1425 
1426 	if (job) {
1427 		bsg_reply->result = rc;
1428 		bsg_job_done(job, bsg_reply->result,
1429 			       bsg_reply->reply_payload_rcv_len);
1430 	}
1431 	return;
1432 }
1433 
1434 /**
1435  * lpfc_issue_ct_rsp - issue a ct response
1436  * @phba: Pointer to HBA context object.
1437  * @job: Pointer to the job object.
1438  * @tag: tag index value into the ports context exchange array.
1439  * @cmp: Pointer to a cmp dma buffer descriptor.
1440  * @bmp: Pointer to a bmp dma buffer descriptor.
1441  * @num_entry: Number of enties in the bde.
1442  **/
1443 static int
1444 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
1445 		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1446 		  int num_entry)
1447 {
1448 	struct lpfc_iocbq *ctiocb = NULL;
1449 	int rc = 0;
1450 	struct lpfc_nodelist *ndlp = NULL;
1451 	struct bsg_job_data *dd_data;
1452 	unsigned long flags;
1453 	uint32_t creg_val;
1454 	u16 ulp_context, iotag;
1455 
1456 	ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1457 	if (!ndlp) {
1458 		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1459 				"2721 ndlp null for oxid %x SID %x\n",
1460 				phba->ct_ctx[tag].rxid,
1461 				phba->ct_ctx[tag].SID);
1462 		return IOCB_ERROR;
1463 	}
1464 
1465 	/* allocate our bsg tracking structure */
1466 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1467 	if (!dd_data) {
1468 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1469 				"2736 Failed allocation of dd_data\n");
1470 		rc = -ENOMEM;
1471 		goto no_dd_data;
1472 	}
1473 
1474 	/* Allocate buffer for  command iocb */
1475 	ctiocb = lpfc_sli_get_iocbq(phba);
1476 	if (!ctiocb) {
1477 		rc = -ENOMEM;
1478 		goto no_ctiocb;
1479 	}
1480 
1481 	if (phba->sli_rev == LPFC_SLI_REV4) {
1482 		/* Do not issue unsol response if oxid not marked as valid */
1483 		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1484 			rc = IOCB_ERROR;
1485 			goto issue_ct_rsp_exit;
1486 		}
1487 
1488 		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
1489 					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
1490 					 phba->ct_ctx[tag].oxid, num_entry,
1491 					 FC_RCTL_DD_SOL_CTL, 1,
1492 					 CMD_XMIT_SEQUENCE64_WQE);
1493 
1494 		/* The exchange is done, mark the entry as invalid */
1495 		phba->ct_ctx[tag].valid = UNSOL_INVALID;
1496 		iotag = get_wqe_reqtag(ctiocb);
1497 	} else {
1498 		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
1499 					 FC_RCTL_DD_SOL_CTL, 1,
1500 					 CMD_XMIT_SEQUENCE64_CX);
1501 		ctiocb->num_bdes = num_entry;
1502 		iotag = ctiocb->iocb.ulpIoTag;
1503 	}
1504 
1505 	ulp_context = get_job_ulpcontext(phba, ctiocb);
1506 
1507 	/* Xmit CT response on exchange <xid> */
1508 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1509 			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1510 			ulp_context, iotag, tag, phba->link_state);
1511 
1512 	ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
1513 	ctiocb->vport = phba->pport;
1514 	ctiocb->context_un.dd_data = dd_data;
1515 	ctiocb->cmd_dmabuf = cmp;
1516 	ctiocb->bpl_dmabuf = bmp;
1517 	ctiocb->ndlp = ndlp;
1518 	ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
1519 
1520 	dd_data->type = TYPE_IOCB;
1521 	dd_data->set_job = job;
1522 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
1523 	dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
1524 	if (!dd_data->context_un.iocb.ndlp) {
1525 		rc = -IOCB_ERROR;
1526 		goto issue_ct_rsp_exit;
1527 	}
1528 	dd_data->context_un.iocb.rmp = NULL;
1529 	job->dd_data = dd_data;
1530 
1531 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1532 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1533 			rc = -IOCB_ERROR;
1534 			goto issue_ct_rsp_exit;
1535 		}
1536 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1537 		writel(creg_val, phba->HCregaddr);
1538 		readl(phba->HCregaddr); /* flush */
1539 	}
1540 
1541 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1542 	if (rc == IOCB_SUCCESS) {
1543 		spin_lock_irqsave(&phba->hbalock, flags);
1544 		/* make sure the I/O had not been completed/released */
1545 		if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
1546 			/* open up abort window to timeout handler */
1547 			ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
1548 		}
1549 		spin_unlock_irqrestore(&phba->hbalock, flags);
1550 		return 0; /* done for now */
1551 	}
1552 
1553 	/* iocb failed so cleanup */
1554 	job->dd_data = NULL;
1555 	lpfc_nlp_put(ndlp);
1556 
1557 issue_ct_rsp_exit:
1558 	lpfc_sli_release_iocbq(phba, ctiocb);
1559 no_ctiocb:
1560 	kfree(dd_data);
1561 no_dd_data:
1562 	return rc;
1563 }
1564 
1565 /**
1566  * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1567  * @job: SEND_MGMT_RESP fc_bsg_job
1568  **/
1569 static int
1570 lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
1571 {
1572 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1573 	struct lpfc_hba *phba = vport->phba;
1574 	struct fc_bsg_request *bsg_request = job->request;
1575 	struct fc_bsg_reply *bsg_reply = job->reply;
1576 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1577 		bsg_request->rqst_data.h_vendor.vendor_cmd;
1578 	struct ulp_bde64 *bpl;
1579 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1580 	int bpl_entries;
1581 	uint32_t tag = mgmt_resp->tag;
1582 	unsigned long reqbfrcnt =
1583 			(unsigned long)job->request_payload.payload_len;
1584 	int rc = 0;
1585 
1586 	/* in case no data is transferred */
1587 	bsg_reply->reply_payload_rcv_len = 0;
1588 
1589 	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1590 		rc = -ERANGE;
1591 		goto send_mgmt_rsp_exit;
1592 	}
1593 
1594 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1595 	if (!bmp) {
1596 		rc = -ENOMEM;
1597 		goto send_mgmt_rsp_exit;
1598 	}
1599 
1600 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1601 	if (!bmp->virt) {
1602 		rc = -ENOMEM;
1603 		goto send_mgmt_rsp_free_bmp;
1604 	}
1605 
1606 	INIT_LIST_HEAD(&bmp->list);
1607 	bpl = (struct ulp_bde64 *) bmp->virt;
1608 	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1609 	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1610 				     1, bpl, &bpl_entries);
1611 	if (!cmp) {
1612 		rc = -ENOMEM;
1613 		goto send_mgmt_rsp_free_bmp;
1614 	}
1615 	lpfc_bsg_copy_data(cmp, &job->request_payload,
1616 			   job->request_payload.payload_len, 1);
1617 
1618 	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1619 
1620 	if (rc == IOCB_SUCCESS)
1621 		return 0; /* done for now */
1622 
1623 	rc = -EACCES;
1624 
1625 	lpfc_free_bsg_buffers(phba, cmp);
1626 
1627 send_mgmt_rsp_free_bmp:
1628 	if (bmp->virt)
1629 		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1630 	kfree(bmp);
1631 send_mgmt_rsp_exit:
1632 	/* make error code available to userspace */
1633 	bsg_reply->result = rc;
1634 	job->dd_data = NULL;
1635 	return rc;
1636 }
1637 
1638 /**
1639  * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1640  * @phba: Pointer to HBA context object.
1641  *
1642  * This function is responsible for preparing driver for diag loopback
1643  * on device.
1644  */
1645 static int
1646 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1647 {
1648 	struct lpfc_vport **vports;
1649 	struct Scsi_Host *shost;
1650 	struct lpfc_sli *psli;
1651 	struct lpfc_queue *qp = NULL;
1652 	struct lpfc_sli_ring *pring;
1653 	int i = 0;
1654 
1655 	psli = &phba->sli;
1656 	if (!psli)
1657 		return -ENODEV;
1658 
1659 
1660 	if ((phba->link_state == LPFC_HBA_ERROR) ||
1661 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1662 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1663 		return -EACCES;
1664 
1665 	vports = lpfc_create_vport_work_array(phba);
1666 	if (vports) {
1667 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1668 			shost = lpfc_shost_from_vport(vports[i]);
1669 			scsi_block_requests(shost);
1670 		}
1671 		lpfc_destroy_vport_work_array(phba, vports);
1672 	} else {
1673 		shost = lpfc_shost_from_vport(phba->pport);
1674 		scsi_block_requests(shost);
1675 	}
1676 
1677 	if (phba->sli_rev != LPFC_SLI_REV4) {
1678 		pring = &psli->sli3_ring[LPFC_FCP_RING];
1679 		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
1680 		return 0;
1681 	}
1682 	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1683 		pring = qp->pring;
1684 		if (!pring || (pring->ringno != LPFC_FCP_RING))
1685 			continue;
1686 		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1687 				      &pring->ring_lock))
1688 			break;
1689 	}
1690 	return 0;
1691 }
1692 
1693 /**
1694  * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1695  * @phba: Pointer to HBA context object.
1696  *
1697  * This function is responsible for driver exit processing of setting up
1698  * diag loopback mode on device.
1699  */
1700 static void
1701 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1702 {
1703 	struct Scsi_Host *shost;
1704 	struct lpfc_vport **vports;
1705 	int i;
1706 
1707 	vports = lpfc_create_vport_work_array(phba);
1708 	if (vports) {
1709 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1710 			shost = lpfc_shost_from_vport(vports[i]);
1711 			scsi_unblock_requests(shost);
1712 		}
1713 		lpfc_destroy_vport_work_array(phba, vports);
1714 	} else {
1715 		shost = lpfc_shost_from_vport(phba->pport);
1716 		scsi_unblock_requests(shost);
1717 	}
1718 	return;
1719 }
1720 
1721 /**
1722  * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1723  * @phba: Pointer to HBA context object.
1724  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1725  *
1726  * This function is responsible for placing an sli3  port into diagnostic
1727  * loopback mode in order to perform a diagnostic loopback test.
1728  * All new scsi requests are blocked, a small delay is used to allow the
1729  * scsi requests to complete then the link is brought down. If the link is
1730  * is placed in loopback mode then scsi requests are again allowed
1731  * so the scsi mid-layer doesn't give up on the port.
1732  * All of this is done in-line.
1733  */
1734 static int
1735 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
1736 {
1737 	struct fc_bsg_request *bsg_request = job->request;
1738 	struct fc_bsg_reply *bsg_reply = job->reply;
1739 	struct diag_mode_set *loopback_mode;
1740 	uint32_t link_flags;
1741 	uint32_t timeout;
1742 	LPFC_MBOXQ_t *pmboxq  = NULL;
1743 	int mbxstatus = MBX_SUCCESS;
1744 	int i = 0;
1745 	int rc = 0;
1746 
1747 	/* no data to return just the return code */
1748 	bsg_reply->reply_payload_rcv_len = 0;
1749 
1750 	if (job->request_len < sizeof(struct fc_bsg_request) +
1751 	    sizeof(struct diag_mode_set)) {
1752 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1753 				"2738 Received DIAG MODE request size:%d "
1754 				"below the minimum size:%d\n",
1755 				job->request_len,
1756 				(int)(sizeof(struct fc_bsg_request) +
1757 				sizeof(struct diag_mode_set)));
1758 		rc = -EINVAL;
1759 		goto job_error;
1760 	}
1761 
1762 	rc = lpfc_bsg_diag_mode_enter(phba);
1763 	if (rc)
1764 		goto job_error;
1765 
1766 	/* bring the link to diagnostic mode */
1767 	loopback_mode = (struct diag_mode_set *)
1768 		bsg_request->rqst_data.h_vendor.vendor_cmd;
1769 	link_flags = loopback_mode->type;
1770 	timeout = loopback_mode->timeout * 100;
1771 
1772 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1773 	if (!pmboxq) {
1774 		rc = -ENOMEM;
1775 		goto loopback_mode_exit;
1776 	}
1777 	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1778 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1779 	pmboxq->u.mb.mbxOwner = OWN_HOST;
1780 
1781 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1782 
1783 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1784 		/* wait for link down before proceeding */
1785 		i = 0;
1786 		while (phba->link_state != LPFC_LINK_DOWN) {
1787 			if (i++ > timeout) {
1788 				rc = -ETIMEDOUT;
1789 				goto loopback_mode_exit;
1790 			}
1791 			msleep(10);
1792 		}
1793 
1794 		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1795 		if (link_flags == INTERNAL_LOOP_BACK)
1796 			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1797 		else
1798 			pmboxq->u.mb.un.varInitLnk.link_flags =
1799 				FLAGS_TOPOLOGY_MODE_LOOP;
1800 
1801 		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1802 		pmboxq->u.mb.mbxOwner = OWN_HOST;
1803 
1804 		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1805 						     LPFC_MBOX_TMO);
1806 
1807 		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1808 			rc = -ENODEV;
1809 		else {
1810 			spin_lock_irq(&phba->hbalock);
1811 			phba->link_flag |= LS_LOOPBACK_MODE;
1812 			spin_unlock_irq(&phba->hbalock);
1813 			/* wait for the link attention interrupt */
1814 			msleep(100);
1815 
1816 			i = 0;
1817 			while (phba->link_state != LPFC_HBA_READY) {
1818 				if (i++ > timeout) {
1819 					rc = -ETIMEDOUT;
1820 					break;
1821 				}
1822 
1823 				msleep(10);
1824 			}
1825 		}
1826 
1827 	} else
1828 		rc = -ENODEV;
1829 
1830 loopback_mode_exit:
1831 	lpfc_bsg_diag_mode_exit(phba);
1832 
1833 	/*
1834 	 * Let SLI layer release mboxq if mbox command completed after timeout.
1835 	 */
1836 	if (pmboxq && mbxstatus != MBX_TIMEOUT)
1837 		mempool_free(pmboxq, phba->mbox_mem_pool);
1838 
1839 job_error:
1840 	/* make error code available to userspace */
1841 	bsg_reply->result = rc;
1842 	/* complete the job back to userspace if no error */
1843 	if (rc == 0)
1844 		bsg_job_done(job, bsg_reply->result,
1845 			       bsg_reply->reply_payload_rcv_len);
1846 	return rc;
1847 }
1848 
1849 /**
1850  * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1851  * @phba: Pointer to HBA context object.
1852  * @diag: Flag for set link to diag or nomral operation state.
1853  *
1854  * This function is responsible for issuing a sli4 mailbox command for setting
1855  * link to either diag state or normal operation state.
1856  */
1857 static int
1858 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1859 {
1860 	LPFC_MBOXQ_t *pmboxq;
1861 	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1862 	uint32_t req_len, alloc_len;
1863 	int mbxstatus = MBX_SUCCESS, rc;
1864 
1865 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1866 	if (!pmboxq)
1867 		return -ENOMEM;
1868 
1869 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1870 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1871 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1872 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1873 				req_len, LPFC_SLI4_MBX_EMBED);
1874 	if (alloc_len != req_len) {
1875 		rc = -ENOMEM;
1876 		goto link_diag_state_set_out;
1877 	}
1878 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1879 			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1880 			diag, phba->sli4_hba.lnk_info.lnk_tp,
1881 			phba->sli4_hba.lnk_info.lnk_no);
1882 
1883 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1884 	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1885 	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1886 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1887 	       phba->sli4_hba.lnk_info.lnk_no);
1888 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1889 	       phba->sli4_hba.lnk_info.lnk_tp);
1890 	if (diag)
1891 		bf_set(lpfc_mbx_set_diag_state_diag,
1892 		       &link_diag_state->u.req, 1);
1893 	else
1894 		bf_set(lpfc_mbx_set_diag_state_diag,
1895 		       &link_diag_state->u.req, 0);
1896 
1897 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1898 
1899 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1900 		rc = 0;
1901 	else
1902 		rc = -ENODEV;
1903 
1904 link_diag_state_set_out:
1905 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1906 		mempool_free(pmboxq, phba->mbox_mem_pool);
1907 
1908 	return rc;
1909 }
1910 
1911 /**
1912  * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
1913  * @phba: Pointer to HBA context object.
1914  * @mode: loopback mode to set
1915  * @link_no: link number for loopback mode to set
1916  *
1917  * This function is responsible for issuing a sli4 mailbox command for setting
1918  * up loopback diagnostic for a link.
1919  */
1920 static int
1921 lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
1922 				uint32_t link_no)
1923 {
1924 	LPFC_MBOXQ_t *pmboxq;
1925 	uint32_t req_len, alloc_len;
1926 	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1927 	int mbxstatus = MBX_SUCCESS, rc = 0;
1928 
1929 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1930 	if (!pmboxq)
1931 		return -ENOMEM;
1932 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1933 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1934 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1935 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1936 				req_len, LPFC_SLI4_MBX_EMBED);
1937 	if (alloc_len != req_len) {
1938 		mempool_free(pmboxq, phba->mbox_mem_pool);
1939 		return -ENOMEM;
1940 	}
1941 	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1942 	bf_set(lpfc_mbx_set_diag_state_link_num,
1943 	       &link_diag_loopback->u.req, link_no);
1944 
1945 	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
1946 		bf_set(lpfc_mbx_set_diag_state_link_type,
1947 		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
1948 	} else {
1949 		bf_set(lpfc_mbx_set_diag_state_link_type,
1950 		       &link_diag_loopback->u.req,
1951 		       phba->sli4_hba.lnk_info.lnk_tp);
1952 	}
1953 
1954 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1955 	       mode);
1956 
1957 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1958 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1959 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1960 				"3127 Failed setup loopback mode mailbox "
1961 				"command, rc:x%x, status:x%x\n", mbxstatus,
1962 				pmboxq->u.mb.mbxStatus);
1963 		rc = -ENODEV;
1964 	}
1965 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1966 		mempool_free(pmboxq, phba->mbox_mem_pool);
1967 	return rc;
1968 }
1969 
1970 /**
1971  * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1972  * @phba: Pointer to HBA context object.
1973  *
1974  * This function set up SLI4 FC port registrations for diagnostic run, which
1975  * includes all the rpis, vfi, and also vpi.
1976  */
1977 static int
1978 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1979 {
1980 	int rc;
1981 
1982 	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1983 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1984 				"3136 Port still had vfi registered: "
1985 				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1986 				phba->pport->fc_myDID, phba->fcf.fcfi,
1987 				phba->sli4_hba.vfi_ids[phba->pport->vfi],
1988 				phba->vpi_ids[phba->pport->vpi]);
1989 		return -EINVAL;
1990 	}
1991 	rc = lpfc_issue_reg_vfi(phba->pport);
1992 	return rc;
1993 }
1994 
1995 /**
1996  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1997  * @phba: Pointer to HBA context object.
1998  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1999  *
2000  * This function is responsible for placing an sli4 port into diagnostic
2001  * loopback mode in order to perform a diagnostic loopback test.
2002  */
2003 static int
2004 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2005 {
2006 	struct fc_bsg_request *bsg_request = job->request;
2007 	struct fc_bsg_reply *bsg_reply = job->reply;
2008 	struct diag_mode_set *loopback_mode;
2009 	uint32_t link_flags, timeout, link_no;
2010 	int i, rc = 0;
2011 
2012 	/* no data to return just the return code */
2013 	bsg_reply->reply_payload_rcv_len = 0;
2014 
2015 	if (job->request_len < sizeof(struct fc_bsg_request) +
2016 	    sizeof(struct diag_mode_set)) {
2017 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2018 				"3011 Received DIAG MODE request size:%d "
2019 				"below the minimum size:%d\n",
2020 				job->request_len,
2021 				(int)(sizeof(struct fc_bsg_request) +
2022 				sizeof(struct diag_mode_set)));
2023 		rc = -EINVAL;
2024 		goto job_done;
2025 	}
2026 
2027 	loopback_mode = (struct diag_mode_set *)
2028 		bsg_request->rqst_data.h_vendor.vendor_cmd;
2029 	link_flags = loopback_mode->type;
2030 	timeout = loopback_mode->timeout * 100;
2031 
2032 	if (loopback_mode->physical_link == -1)
2033 		link_no = phba->sli4_hba.lnk_info.lnk_no;
2034 	else
2035 		link_no = loopback_mode->physical_link;
2036 
2037 	if (link_flags == DISABLE_LOOP_BACK) {
2038 		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2039 					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2040 					link_no);
2041 		if (!rc) {
2042 			/* Unset the need disable bit */
2043 			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2044 		}
2045 		goto job_done;
2046 	} else {
2047 		/* Check if we need to disable the loopback state */
2048 		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2049 			rc = -EPERM;
2050 			goto job_done;
2051 		}
2052 	}
2053 
2054 	rc = lpfc_bsg_diag_mode_enter(phba);
2055 	if (rc)
2056 		goto job_done;
2057 
2058 	/* indicate we are in loobpack diagnostic mode */
2059 	spin_lock_irq(&phba->hbalock);
2060 	phba->link_flag |= LS_LOOPBACK_MODE;
2061 	spin_unlock_irq(&phba->hbalock);
2062 
2063 	/* reset port to start frome scratch */
2064 	rc = lpfc_selective_reset(phba);
2065 	if (rc)
2066 		goto job_done;
2067 
2068 	/* bring the link to diagnostic mode */
2069 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2070 			"3129 Bring link to diagnostic state.\n");
2071 
2072 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2073 	if (rc) {
2074 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2075 				"3130 Failed to bring link to diagnostic "
2076 				"state, rc:x%x\n", rc);
2077 		goto loopback_mode_exit;
2078 	}
2079 
2080 	/* wait for link down before proceeding */
2081 	i = 0;
2082 	while (phba->link_state != LPFC_LINK_DOWN) {
2083 		if (i++ > timeout) {
2084 			rc = -ETIMEDOUT;
2085 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2086 					"3131 Timeout waiting for link to "
2087 					"diagnostic mode, timeout:%d ms\n",
2088 					timeout * 10);
2089 			goto loopback_mode_exit;
2090 		}
2091 		msleep(10);
2092 	}
2093 
2094 	/* set up loopback mode */
2095 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2096 			"3132 Set up loopback mode:x%x\n", link_flags);
2097 
2098 	switch (link_flags) {
2099 	case INTERNAL_LOOP_BACK:
2100 		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2101 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2102 					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2103 					link_no);
2104 		} else {
2105 			/* Trunk is configured, but link is not in this trunk */
2106 			if (phba->sli4_hba.conf_trunk) {
2107 				rc = -ELNRNG;
2108 				goto loopback_mode_exit;
2109 			}
2110 
2111 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2112 					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2113 					link_no);
2114 		}
2115 
2116 		if (!rc) {
2117 			/* Set the need disable bit */
2118 			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2119 		}
2120 
2121 		break;
2122 	case EXTERNAL_LOOP_BACK:
2123 		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2124 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2125 				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2126 				link_no);
2127 		} else {
2128 			/* Trunk is configured, but link is not in this trunk */
2129 			if (phba->sli4_hba.conf_trunk) {
2130 				rc = -ELNRNG;
2131 				goto loopback_mode_exit;
2132 			}
2133 
2134 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2135 						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2136 						link_no);
2137 		}
2138 
2139 		if (!rc) {
2140 			/* Set the need disable bit */
2141 			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2142 		}
2143 
2144 		break;
2145 	default:
2146 		rc = -EINVAL;
2147 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2148 				"3141 Loopback mode:x%x not supported\n",
2149 				link_flags);
2150 		goto loopback_mode_exit;
2151 	}
2152 
2153 	if (!rc) {
2154 		/* wait for the link attention interrupt */
2155 		msleep(100);
2156 		i = 0;
2157 		while (phba->link_state < LPFC_LINK_UP) {
2158 			if (i++ > timeout) {
2159 				rc = -ETIMEDOUT;
2160 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2161 					"3137 Timeout waiting for link up "
2162 					"in loopback mode, timeout:%d ms\n",
2163 					timeout * 10);
2164 				break;
2165 			}
2166 			msleep(10);
2167 		}
2168 	}
2169 
2170 	/* port resource registration setup for loopback diagnostic */
2171 	if (!rc) {
2172 		/* set up a none zero myDID for loopback test */
2173 		phba->pport->fc_myDID = 1;
2174 		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2175 	} else
2176 		goto loopback_mode_exit;
2177 
2178 	if (!rc) {
2179 		/* wait for the port ready */
2180 		msleep(100);
2181 		i = 0;
2182 		while (phba->link_state != LPFC_HBA_READY) {
2183 			if (i++ > timeout) {
2184 				rc = -ETIMEDOUT;
2185 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2186 					"3133 Timeout waiting for port "
2187 					"loopback mode ready, timeout:%d ms\n",
2188 					timeout * 10);
2189 				break;
2190 			}
2191 			msleep(10);
2192 		}
2193 	}
2194 
2195 loopback_mode_exit:
2196 	/* clear loopback diagnostic mode */
2197 	if (rc) {
2198 		spin_lock_irq(&phba->hbalock);
2199 		phba->link_flag &= ~LS_LOOPBACK_MODE;
2200 		spin_unlock_irq(&phba->hbalock);
2201 	}
2202 	lpfc_bsg_diag_mode_exit(phba);
2203 
2204 job_done:
2205 	/* make error code available to userspace */
2206 	bsg_reply->result = rc;
2207 	/* complete the job back to userspace if no error */
2208 	if (rc == 0)
2209 		bsg_job_done(job, bsg_reply->result,
2210 			       bsg_reply->reply_payload_rcv_len);
2211 	return rc;
2212 }
2213 
2214 /**
2215  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2216  * @job: LPFC_BSG_VENDOR_DIAG_MODE
2217  *
2218  * This function is responsible for responding to check and dispatch bsg diag
2219  * command from the user to proper driver action routines.
2220  */
2221 static int
2222 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2223 {
2224 	struct Scsi_Host *shost;
2225 	struct lpfc_vport *vport;
2226 	struct lpfc_hba *phba;
2227 	int rc;
2228 
2229 	shost = fc_bsg_to_shost(job);
2230 	if (!shost)
2231 		return -ENODEV;
2232 	vport = shost_priv(shost);
2233 	if (!vport)
2234 		return -ENODEV;
2235 	phba = vport->phba;
2236 	if (!phba)
2237 		return -ENODEV;
2238 
2239 	if (phba->sli_rev < LPFC_SLI_REV4)
2240 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2241 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2242 		 LPFC_SLI_INTF_IF_TYPE_2)
2243 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2244 	else
2245 		rc = -ENODEV;
2246 
2247 	return rc;
2248 }
2249 
2250 /**
2251  * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2252  * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2253  *
2254  * This function is responsible for responding to check and dispatch bsg diag
2255  * command from the user to proper driver action routines.
2256  */
2257 static int
2258 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2259 {
2260 	struct fc_bsg_request *bsg_request = job->request;
2261 	struct fc_bsg_reply *bsg_reply = job->reply;
2262 	struct Scsi_Host *shost;
2263 	struct lpfc_vport *vport;
2264 	struct lpfc_hba *phba;
2265 	struct diag_mode_set *loopback_mode_end_cmd;
2266 	uint32_t timeout;
2267 	int rc, i;
2268 
2269 	shost = fc_bsg_to_shost(job);
2270 	if (!shost)
2271 		return -ENODEV;
2272 	vport = shost_priv(shost);
2273 	if (!vport)
2274 		return -ENODEV;
2275 	phba = vport->phba;
2276 	if (!phba)
2277 		return -ENODEV;
2278 
2279 	if (phba->sli_rev < LPFC_SLI_REV4)
2280 		return -ENODEV;
2281 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2282 	    LPFC_SLI_INTF_IF_TYPE_2)
2283 		return -ENODEV;
2284 
2285 	/* clear loopback diagnostic mode */
2286 	spin_lock_irq(&phba->hbalock);
2287 	phba->link_flag &= ~LS_LOOPBACK_MODE;
2288 	spin_unlock_irq(&phba->hbalock);
2289 	loopback_mode_end_cmd = (struct diag_mode_set *)
2290 			bsg_request->rqst_data.h_vendor.vendor_cmd;
2291 	timeout = loopback_mode_end_cmd->timeout * 100;
2292 
2293 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2294 	if (rc) {
2295 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2296 				"3139 Failed to bring link to diagnostic "
2297 				"state, rc:x%x\n", rc);
2298 		goto loopback_mode_end_exit;
2299 	}
2300 
2301 	/* wait for link down before proceeding */
2302 	i = 0;
2303 	while (phba->link_state != LPFC_LINK_DOWN) {
2304 		if (i++ > timeout) {
2305 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2306 					"3140 Timeout waiting for link to "
2307 					"diagnostic mode_end, timeout:%d ms\n",
2308 					timeout * 10);
2309 			/* there is nothing much we can do here */
2310 			break;
2311 		}
2312 		msleep(10);
2313 	}
2314 
2315 	/* reset port resource registrations */
2316 	rc = lpfc_selective_reset(phba);
2317 	phba->pport->fc_myDID = 0;
2318 
2319 loopback_mode_end_exit:
2320 	/* make return code available to userspace */
2321 	bsg_reply->result = rc;
2322 	/* complete the job back to userspace if no error */
2323 	if (rc == 0)
2324 		bsg_job_done(job, bsg_reply->result,
2325 			       bsg_reply->reply_payload_rcv_len);
2326 	return rc;
2327 }
2328 
2329 /**
2330  * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2331  * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2332  *
2333  * This function is to perform SLI4 diag link test request from the user
2334  * applicaiton.
2335  */
2336 static int
2337 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2338 {
2339 	struct fc_bsg_request *bsg_request = job->request;
2340 	struct fc_bsg_reply *bsg_reply = job->reply;
2341 	struct Scsi_Host *shost;
2342 	struct lpfc_vport *vport;
2343 	struct lpfc_hba *phba;
2344 	LPFC_MBOXQ_t *pmboxq;
2345 	struct sli4_link_diag *link_diag_test_cmd;
2346 	uint32_t req_len, alloc_len;
2347 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2348 	union lpfc_sli4_cfg_shdr *shdr;
2349 	uint32_t shdr_status, shdr_add_status;
2350 	struct diag_status *diag_status_reply;
2351 	int mbxstatus, rc = -ENODEV, rc1 = 0;
2352 
2353 	shost = fc_bsg_to_shost(job);
2354 	if (!shost)
2355 		goto job_error;
2356 
2357 	vport = shost_priv(shost);
2358 	if (!vport)
2359 		goto job_error;
2360 
2361 	phba = vport->phba;
2362 	if (!phba)
2363 		goto job_error;
2364 
2365 
2366 	if (phba->sli_rev < LPFC_SLI_REV4)
2367 		goto job_error;
2368 
2369 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2370 	    LPFC_SLI_INTF_IF_TYPE_2)
2371 		goto job_error;
2372 
2373 	if (job->request_len < sizeof(struct fc_bsg_request) +
2374 	    sizeof(struct sli4_link_diag)) {
2375 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2376 				"3013 Received LINK DIAG TEST request "
2377 				" size:%d below the minimum size:%d\n",
2378 				job->request_len,
2379 				(int)(sizeof(struct fc_bsg_request) +
2380 				sizeof(struct sli4_link_diag)));
2381 		rc = -EINVAL;
2382 		goto job_error;
2383 	}
2384 
2385 	rc = lpfc_bsg_diag_mode_enter(phba);
2386 	if (rc)
2387 		goto job_error;
2388 
2389 	link_diag_test_cmd = (struct sli4_link_diag *)
2390 			 bsg_request->rqst_data.h_vendor.vendor_cmd;
2391 
2392 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2393 
2394 	if (rc)
2395 		goto job_error;
2396 
2397 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2398 	if (!pmboxq) {
2399 		rc = -ENOMEM;
2400 		goto link_diag_test_exit;
2401 	}
2402 
2403 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2404 		   sizeof(struct lpfc_sli4_cfg_mhdr));
2405 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2406 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2407 				     req_len, LPFC_SLI4_MBX_EMBED);
2408 	if (alloc_len != req_len) {
2409 		rc = -ENOMEM;
2410 		goto link_diag_test_exit;
2411 	}
2412 
2413 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2414 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2415 	       phba->sli4_hba.lnk_info.lnk_no);
2416 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2417 	       phba->sli4_hba.lnk_info.lnk_tp);
2418 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2419 	       link_diag_test_cmd->test_id);
2420 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2421 	       link_diag_test_cmd->loops);
2422 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2423 	       link_diag_test_cmd->test_version);
2424 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2425 	       link_diag_test_cmd->error_action);
2426 
2427 	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2428 
2429 	shdr = (union lpfc_sli4_cfg_shdr *)
2430 		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2431 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2432 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2433 	if (shdr_status || shdr_add_status || mbxstatus) {
2434 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2435 				"3010 Run link diag test mailbox failed with "
2436 				"mbx_status x%x status x%x, add_status x%x\n",
2437 				mbxstatus, shdr_status, shdr_add_status);
2438 	}
2439 
2440 	diag_status_reply = (struct diag_status *)
2441 			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
2442 
2443 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2444 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2445 				"3012 Received Run link diag test reply "
2446 				"below minimum size (%d): reply_len:%d\n",
2447 				(int)(sizeof(*bsg_reply) +
2448 				sizeof(*diag_status_reply)),
2449 				job->reply_len);
2450 		rc = -EINVAL;
2451 		goto job_error;
2452 	}
2453 
2454 	diag_status_reply->mbox_status = mbxstatus;
2455 	diag_status_reply->shdr_status = shdr_status;
2456 	diag_status_reply->shdr_add_status = shdr_add_status;
2457 
2458 link_diag_test_exit:
2459 	rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2460 
2461 	if (pmboxq)
2462 		mempool_free(pmboxq, phba->mbox_mem_pool);
2463 
2464 	lpfc_bsg_diag_mode_exit(phba);
2465 
2466 job_error:
2467 	/* make error code available to userspace */
2468 	if (rc1 && !rc)
2469 		rc = rc1;
2470 	bsg_reply->result = rc;
2471 	/* complete the job back to userspace if no error */
2472 	if (rc == 0)
2473 		bsg_job_done(job, bsg_reply->result,
2474 			       bsg_reply->reply_payload_rcv_len);
2475 	return rc;
2476 }
2477 
2478 /**
2479  * lpfcdiag_loop_self_reg - obtains a remote port login id
2480  * @phba: Pointer to HBA context object
2481  * @rpi: Pointer to a remote port login id
2482  *
2483  * This function obtains a remote port login id so the diag loopback test
2484  * can send and receive its own unsolicited CT command.
2485  **/
2486 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2487 {
2488 	LPFC_MBOXQ_t *mbox;
2489 	struct lpfc_dmabuf *dmabuff;
2490 	int status;
2491 
2492 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2493 	if (!mbox)
2494 		return -ENOMEM;
2495 
2496 	if (phba->sli_rev < LPFC_SLI_REV4)
2497 		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2498 				(uint8_t *)&phba->pport->fc_sparam,
2499 				mbox, *rpi);
2500 	else {
2501 		*rpi = lpfc_sli4_alloc_rpi(phba);
2502 		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2503 			mempool_free(mbox, phba->mbox_mem_pool);
2504 			return -EBUSY;
2505 		}
2506 		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2507 				phba->pport->fc_myDID,
2508 				(uint8_t *)&phba->pport->fc_sparam,
2509 				mbox, *rpi);
2510 	}
2511 
2512 	if (status) {
2513 		mempool_free(mbox, phba->mbox_mem_pool);
2514 		if (phba->sli_rev == LPFC_SLI_REV4)
2515 			lpfc_sli4_free_rpi(phba, *rpi);
2516 		return -ENOMEM;
2517 	}
2518 
2519 	dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2520 	mbox->ctx_buf = NULL;
2521 	mbox->ctx_ndlp = NULL;
2522 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2523 
2524 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2525 		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2526 		kfree(dmabuff);
2527 		if (status != MBX_TIMEOUT)
2528 			mempool_free(mbox, phba->mbox_mem_pool);
2529 		if (phba->sli_rev == LPFC_SLI_REV4)
2530 			lpfc_sli4_free_rpi(phba, *rpi);
2531 		return -ENODEV;
2532 	}
2533 
2534 	if (phba->sli_rev < LPFC_SLI_REV4)
2535 		*rpi = mbox->u.mb.un.varWords[0];
2536 
2537 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2538 	kfree(dmabuff);
2539 	mempool_free(mbox, phba->mbox_mem_pool);
2540 	return 0;
2541 }
2542 
2543 /**
2544  * lpfcdiag_loop_self_unreg - unregs from the rpi
2545  * @phba: Pointer to HBA context object
2546  * @rpi: Remote port login id
2547  *
2548  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2549  **/
2550 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2551 {
2552 	LPFC_MBOXQ_t *mbox;
2553 	int status;
2554 
2555 	/* Allocate mboxq structure */
2556 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2557 	if (mbox == NULL)
2558 		return -ENOMEM;
2559 
2560 	if (phba->sli_rev < LPFC_SLI_REV4)
2561 		lpfc_unreg_login(phba, 0, rpi, mbox);
2562 	else
2563 		lpfc_unreg_login(phba, phba->pport->vpi,
2564 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2565 
2566 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2567 
2568 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2569 		if (status != MBX_TIMEOUT)
2570 			mempool_free(mbox, phba->mbox_mem_pool);
2571 		return -EIO;
2572 	}
2573 	mempool_free(mbox, phba->mbox_mem_pool);
2574 	if (phba->sli_rev == LPFC_SLI_REV4)
2575 		lpfc_sli4_free_rpi(phba, rpi);
2576 	return 0;
2577 }
2578 
2579 /**
2580  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2581  * @phba: Pointer to HBA context object
2582  * @rpi: Remote port login id
2583  * @txxri: Pointer to transmit exchange id
2584  * @rxxri: Pointer to response exchabge id
2585  *
2586  * This function obtains the transmit and receive ids required to send
2587  * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2588  * flags are used to the unsolicted response handler is able to process
2589  * the ct command sent on the same port.
2590  **/
2591 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2592 			 uint16_t *txxri, uint16_t * rxxri)
2593 {
2594 	struct lpfc_bsg_event *evt;
2595 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2596 	struct lpfc_dmabuf *dmabuf;
2597 	struct ulp_bde64 *bpl = NULL;
2598 	struct lpfc_sli_ct_request *ctreq = NULL;
2599 	int ret_val = 0;
2600 	int time_left;
2601 	int iocb_stat = IOCB_SUCCESS;
2602 	unsigned long flags;
2603 	u32 status;
2604 
2605 	*txxri = 0;
2606 	*rxxri = 0;
2607 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2608 				SLI_CT_ELX_LOOPBACK);
2609 	if (!evt)
2610 		return -ENOMEM;
2611 
2612 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2613 	list_add(&evt->node, &phba->ct_ev_waiters);
2614 	lpfc_bsg_event_ref(evt);
2615 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2616 
2617 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2618 	rspiocbq = lpfc_sli_get_iocbq(phba);
2619 
2620 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2621 	if (dmabuf) {
2622 		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2623 		if (dmabuf->virt) {
2624 			INIT_LIST_HEAD(&dmabuf->list);
2625 			bpl = (struct ulp_bde64 *) dmabuf->virt;
2626 			memset(bpl, 0, sizeof(*bpl));
2627 			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2628 			bpl->addrHigh =
2629 				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2630 					sizeof(*bpl)));
2631 			bpl->addrLow =
2632 				le32_to_cpu(putPaddrLow(dmabuf->phys +
2633 					sizeof(*bpl)));
2634 			bpl->tus.f.bdeFlags = 0;
2635 			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2636 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2637 		}
2638 	}
2639 
2640 	if (cmdiocbq == NULL || rspiocbq == NULL ||
2641 	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2642 		dmabuf->virt == NULL) {
2643 		ret_val = -ENOMEM;
2644 		goto err_get_xri_exit;
2645 	}
2646 
2647 	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2648 
2649 	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2650 	ctreq->RevisionId.bits.InId = 0;
2651 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2652 	ctreq->FsSubType = 0;
2653 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2654 	ctreq->CommandResponse.bits.Size = 0;
2655 
2656 	cmdiocbq->bpl_dmabuf = dmabuf;
2657 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
2658 	cmdiocbq->vport = phba->pport;
2659 	cmdiocbq->cmd_cmpl = NULL;
2660 
2661 	lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
2662 				 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
2663 
2664 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2665 					     rspiocbq, (phba->fc_ratov * 2)
2666 					     + LPFC_DRVR_TIMEOUT);
2667 
2668 	status = get_job_ulpstatus(phba, rspiocbq);
2669 	if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
2670 		ret_val = -EIO;
2671 		goto err_get_xri_exit;
2672 	}
2673 	*txxri = get_job_ulpcontext(phba, rspiocbq);
2674 
2675 	evt->waiting = 1;
2676 	evt->wait_time_stamp = jiffies;
2677 	time_left = wait_event_interruptible_timeout(
2678 		evt->wq, !list_empty(&evt->events_to_see),
2679 		msecs_to_jiffies(1000 *
2680 			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2681 	if (list_empty(&evt->events_to_see))
2682 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2683 	else {
2684 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2685 		list_move(evt->events_to_see.prev, &evt->events_to_get);
2686 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2687 		*rxxri = (list_entry(evt->events_to_get.prev,
2688 				     typeof(struct event_data),
2689 				     node))->immed_dat;
2690 	}
2691 	evt->waiting = 0;
2692 
2693 err_get_xri_exit:
2694 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2695 	lpfc_bsg_event_unref(evt); /* release ref */
2696 	lpfc_bsg_event_unref(evt); /* delete */
2697 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2698 
2699 	if (dmabuf) {
2700 		if (dmabuf->virt)
2701 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2702 		kfree(dmabuf);
2703 	}
2704 
2705 	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2706 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2707 	if (rspiocbq)
2708 		lpfc_sli_release_iocbq(phba, rspiocbq);
2709 	return ret_val;
2710 }
2711 
2712 /**
2713  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2714  * @phba: Pointer to HBA context object
2715  *
2716  * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
2717  * returns the pointer to the buffer.
2718  **/
2719 static struct lpfc_dmabuf *
2720 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2721 {
2722 	struct lpfc_dmabuf *dmabuf;
2723 	struct pci_dev *pcidev = phba->pcidev;
2724 
2725 	/* allocate dma buffer struct */
2726 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2727 	if (!dmabuf)
2728 		return NULL;
2729 
2730 	INIT_LIST_HEAD(&dmabuf->list);
2731 
2732 	/* now, allocate dma buffer */
2733 	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2734 					  &(dmabuf->phys), GFP_KERNEL);
2735 
2736 	if (!dmabuf->virt) {
2737 		kfree(dmabuf);
2738 		return NULL;
2739 	}
2740 
2741 	return dmabuf;
2742 }
2743 
2744 /**
2745  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2746  * @phba: Pointer to HBA context object.
2747  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2748  *
2749  * This routine just simply frees a dma buffer and its associated buffer
2750  * descriptor referred by @dmabuf.
2751  **/
2752 static void
2753 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2754 {
2755 	struct pci_dev *pcidev = phba->pcidev;
2756 
2757 	if (!dmabuf)
2758 		return;
2759 
2760 	if (dmabuf->virt)
2761 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2762 				  dmabuf->virt, dmabuf->phys);
2763 	kfree(dmabuf);
2764 	return;
2765 }
2766 
2767 /**
2768  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2769  * @phba: Pointer to HBA context object.
2770  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2771  *
2772  * This routine just simply frees all dma buffers and their associated buffer
2773  * descriptors referred by @dmabuf_list.
2774  **/
2775 static void
2776 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2777 			    struct list_head *dmabuf_list)
2778 {
2779 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2780 
2781 	if (list_empty(dmabuf_list))
2782 		return;
2783 
2784 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2785 		list_del_init(&dmabuf->list);
2786 		lpfc_bsg_dma_page_free(phba, dmabuf);
2787 	}
2788 	return;
2789 }
2790 
2791 /**
2792  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2793  * @phba: Pointer to HBA context object
2794  * @bpl: Pointer to 64 bit bde structure
2795  * @size: Number of bytes to process
2796  * @nocopydata: Flag to copy user data into the allocated buffer
2797  *
2798  * This function allocates page size buffers and populates an lpfc_dmabufext.
2799  * If allowed the user data pointed to with indataptr is copied into the kernel
2800  * memory. The chained list of page size buffers is returned.
2801  **/
2802 static struct lpfc_dmabufext *
2803 diag_cmd_data_alloc(struct lpfc_hba *phba,
2804 		   struct ulp_bde64 *bpl, uint32_t size,
2805 		   int nocopydata)
2806 {
2807 	struct lpfc_dmabufext *mlist = NULL;
2808 	struct lpfc_dmabufext *dmp;
2809 	int cnt, offset = 0, i = 0;
2810 	struct pci_dev *pcidev;
2811 
2812 	pcidev = phba->pcidev;
2813 
2814 	while (size) {
2815 		/* We get chunks of 4K */
2816 		if (size > BUF_SZ_4K)
2817 			cnt = BUF_SZ_4K;
2818 		else
2819 			cnt = size;
2820 
2821 		/* allocate struct lpfc_dmabufext buffer header */
2822 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2823 		if (!dmp)
2824 			goto out;
2825 
2826 		INIT_LIST_HEAD(&dmp->dma.list);
2827 
2828 		/* Queue it to a linked list */
2829 		if (mlist)
2830 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2831 		else
2832 			mlist = dmp;
2833 
2834 		/* allocate buffer */
2835 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2836 						   cnt,
2837 						   &(dmp->dma.phys),
2838 						   GFP_KERNEL);
2839 
2840 		if (!dmp->dma.virt)
2841 			goto out;
2842 
2843 		dmp->size = cnt;
2844 
2845 		if (nocopydata) {
2846 			bpl->tus.f.bdeFlags = 0;
2847 		} else {
2848 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2849 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2850 		}
2851 
2852 		/* build buffer ptr list for IOCB */
2853 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2854 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2855 		bpl->tus.f.bdeSize = (ushort) cnt;
2856 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2857 		bpl++;
2858 
2859 		i++;
2860 		offset += cnt;
2861 		size -= cnt;
2862 	}
2863 
2864 	if (mlist) {
2865 		mlist->flag = i;
2866 		return mlist;
2867 	}
2868 out:
2869 	diag_cmd_data_free(phba, mlist);
2870 	return NULL;
2871 }
2872 
2873 /**
2874  * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2875  * @phba: Pointer to HBA context object
2876  * @rxxri: Receive exchange id
2877  * @len: Number of data bytes
2878  *
2879  * This function allocates and posts a data buffer of sufficient size to receive
2880  * an unsolicted CT command.
2881  **/
2882 static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2883 					  size_t len)
2884 {
2885 	struct lpfc_sli_ring *pring;
2886 	struct lpfc_iocbq *cmdiocbq;
2887 	IOCB_t *cmd = NULL;
2888 	struct list_head head, *curr, *next;
2889 	struct lpfc_dmabuf *rxbmp;
2890 	struct lpfc_dmabuf *dmp;
2891 	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2892 	struct ulp_bde64 *rxbpl = NULL;
2893 	uint32_t num_bde;
2894 	struct lpfc_dmabufext *rxbuffer = NULL;
2895 	int ret_val = 0;
2896 	int iocb_stat;
2897 	int i = 0;
2898 
2899 	pring = lpfc_phba_elsring(phba);
2900 
2901 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2902 	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2903 	if (rxbmp != NULL) {
2904 		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2905 		if (rxbmp->virt) {
2906 			INIT_LIST_HEAD(&rxbmp->list);
2907 			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2908 			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2909 		}
2910 	}
2911 
2912 	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2913 		ret_val = -ENOMEM;
2914 		goto err_post_rxbufs_exit;
2915 	}
2916 
2917 	/* Queue buffers for the receive exchange */
2918 	num_bde = (uint32_t)rxbuffer->flag;
2919 	dmp = &rxbuffer->dma;
2920 	cmd = &cmdiocbq->iocb;
2921 	i = 0;
2922 
2923 	INIT_LIST_HEAD(&head);
2924 	list_add_tail(&head, &dmp->list);
2925 	list_for_each_safe(curr, next, &head) {
2926 		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2927 		list_del(curr);
2928 
2929 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2930 			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2931 			cmd->un.quexri64cx.buff.bde.addrHigh =
2932 				putPaddrHigh(mp[i]->phys);
2933 			cmd->un.quexri64cx.buff.bde.addrLow =
2934 				putPaddrLow(mp[i]->phys);
2935 			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2936 				((struct lpfc_dmabufext *)mp[i])->size;
2937 			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2938 			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2939 			cmd->ulpPU = 0;
2940 			cmd->ulpLe = 1;
2941 			cmd->ulpBdeCount = 1;
2942 			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2943 
2944 		} else {
2945 			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2946 			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2947 			cmd->un.cont64[i].tus.f.bdeSize =
2948 				((struct lpfc_dmabufext *)mp[i])->size;
2949 			cmd->ulpBdeCount = ++i;
2950 
2951 			if ((--num_bde > 0) && (i < 2))
2952 				continue;
2953 
2954 			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2955 			cmd->ulpLe = 1;
2956 		}
2957 
2958 		cmd->ulpClass = CLASS3;
2959 		cmd->ulpContext = rxxri;
2960 
2961 		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2962 						0);
2963 		if (iocb_stat == IOCB_ERROR) {
2964 			diag_cmd_data_free(phba,
2965 				(struct lpfc_dmabufext *)mp[0]);
2966 			if (mp[1])
2967 				diag_cmd_data_free(phba,
2968 					  (struct lpfc_dmabufext *)mp[1]);
2969 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2970 			ret_val = -EIO;
2971 			goto err_post_rxbufs_exit;
2972 		}
2973 
2974 		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2975 		if (mp[1]) {
2976 			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2977 			mp[1] = NULL;
2978 		}
2979 
2980 		/* The iocb was freed by lpfc_sli_issue_iocb */
2981 		cmdiocbq = lpfc_sli_get_iocbq(phba);
2982 		if (!cmdiocbq) {
2983 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2984 			ret_val = -EIO;
2985 			goto err_post_rxbufs_exit;
2986 		}
2987 		cmd = &cmdiocbq->iocb;
2988 		i = 0;
2989 	}
2990 	list_del(&head);
2991 
2992 err_post_rxbufs_exit:
2993 
2994 	if (rxbmp) {
2995 		if (rxbmp->virt)
2996 			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2997 		kfree(rxbmp);
2998 	}
2999 
3000 	if (cmdiocbq)
3001 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3002 	return ret_val;
3003 }
3004 
3005 /**
3006  * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
3007  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3008  *
3009  * This function receives a user data buffer to be transmitted and received on
3010  * the same port, the link must be up and in loopback mode prior
3011  * to being called.
3012  * 1. A kernel buffer is allocated to copy the user data into.
3013  * 2. The port registers with "itself".
3014  * 3. The transmit and receive exchange ids are obtained.
3015  * 4. The receive exchange id is posted.
3016  * 5. A new els loopback event is created.
3017  * 6. The command and response iocbs are allocated.
3018  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
3019  *
3020  * This function is meant to be called n times while the port is in loopback
3021  * so it is the apps responsibility to issue a reset to take the port out
3022  * of loopback mode.
3023  **/
3024 static int
3025 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3026 {
3027 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3028 	struct fc_bsg_reply *bsg_reply = job->reply;
3029 	struct lpfc_hba *phba = vport->phba;
3030 	struct lpfc_bsg_event *evt;
3031 	struct event_data *evdat;
3032 	struct lpfc_sli *psli = &phba->sli;
3033 	uint32_t size;
3034 	uint32_t full_size;
3035 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3036 	uint16_t rpi = 0;
3037 	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3038 	union lpfc_wqe128 *cmdwqe, *rspwqe;
3039 	struct lpfc_sli_ct_request *ctreq;
3040 	struct lpfc_dmabuf *txbmp;
3041 	struct ulp_bde64 *txbpl = NULL;
3042 	struct lpfc_dmabufext *txbuffer = NULL;
3043 	struct list_head head;
3044 	struct lpfc_dmabuf  *curr;
3045 	uint16_t txxri = 0, rxxri;
3046 	uint32_t num_bde;
3047 	uint8_t *ptr = NULL, *rx_databuf = NULL;
3048 	int rc = 0;
3049 	int time_left;
3050 	int iocb_stat = IOCB_SUCCESS;
3051 	unsigned long flags;
3052 	void *dataout = NULL;
3053 	uint32_t total_mem;
3054 
3055 	/* in case no data is returned return just the return code */
3056 	bsg_reply->reply_payload_rcv_len = 0;
3057 
3058 	if (job->request_len <
3059 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3060 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3061 				"2739 Received DIAG TEST request below minimum "
3062 				"size\n");
3063 		rc = -EINVAL;
3064 		goto loopback_test_exit;
3065 	}
3066 
3067 	if (job->request_payload.payload_len !=
3068 		job->reply_payload.payload_len) {
3069 		rc = -EINVAL;
3070 		goto loopback_test_exit;
3071 	}
3072 
3073 	if ((phba->link_state == LPFC_HBA_ERROR) ||
3074 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3075 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3076 		rc = -EACCES;
3077 		goto loopback_test_exit;
3078 	}
3079 
3080 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3081 		rc = -EACCES;
3082 		goto loopback_test_exit;
3083 	}
3084 
3085 	size = job->request_payload.payload_len;
3086 	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3087 
3088 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3089 		rc = -ERANGE;
3090 		goto loopback_test_exit;
3091 	}
3092 
3093 	if (full_size >= BUF_SZ_4K) {
3094 		/*
3095 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3096 		 * then we allocate 64k and re-use that buffer over and over to
3097 		 * xfer the whole block. This is because Linux kernel has a
3098 		 * problem allocating more than 120k of kernel space memory. Saw
3099 		 * problem with GET_FCPTARGETMAPPING...
3100 		 */
3101 		if (size <= (64 * 1024))
3102 			total_mem = full_size;
3103 		else
3104 			total_mem = 64 * 1024;
3105 	} else
3106 		/* Allocate memory for ioctl data */
3107 		total_mem = BUF_SZ_4K;
3108 
3109 	dataout = kmalloc(total_mem, GFP_KERNEL);
3110 	if (dataout == NULL) {
3111 		rc = -ENOMEM;
3112 		goto loopback_test_exit;
3113 	}
3114 
3115 	ptr = dataout;
3116 	ptr += ELX_LOOPBACK_HEADER_SZ;
3117 	sg_copy_to_buffer(job->request_payload.sg_list,
3118 				job->request_payload.sg_cnt,
3119 				ptr, size);
3120 	rc = lpfcdiag_loop_self_reg(phba, &rpi);
3121 	if (rc)
3122 		goto loopback_test_exit;
3123 
3124 	if (phba->sli_rev < LPFC_SLI_REV4) {
3125 		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3126 		if (rc) {
3127 			lpfcdiag_loop_self_unreg(phba, rpi);
3128 			goto loopback_test_exit;
3129 		}
3130 
3131 		rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
3132 		if (rc) {
3133 			lpfcdiag_loop_self_unreg(phba, rpi);
3134 			goto loopback_test_exit;
3135 		}
3136 	}
3137 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3138 				SLI_CT_ELX_LOOPBACK);
3139 	if (!evt) {
3140 		lpfcdiag_loop_self_unreg(phba, rpi);
3141 		rc = -ENOMEM;
3142 		goto loopback_test_exit;
3143 	}
3144 
3145 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3146 	list_add(&evt->node, &phba->ct_ev_waiters);
3147 	lpfc_bsg_event_ref(evt);
3148 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3149 
3150 	cmdiocbq = lpfc_sli_get_iocbq(phba);
3151 	if (phba->sli_rev < LPFC_SLI_REV4)
3152 		rspiocbq = lpfc_sli_get_iocbq(phba);
3153 	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3154 
3155 	if (txbmp) {
3156 		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3157 		if (txbmp->virt) {
3158 			INIT_LIST_HEAD(&txbmp->list);
3159 			txbpl = (struct ulp_bde64 *) txbmp->virt;
3160 			txbuffer = diag_cmd_data_alloc(phba,
3161 							txbpl, full_size, 0);
3162 		}
3163 	}
3164 
3165 	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3166 		rc = -ENOMEM;
3167 		goto err_loopback_test_exit;
3168 	}
3169 	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3170 		rc = -ENOMEM;
3171 		goto err_loopback_test_exit;
3172 	}
3173 
3174 	cmdwqe = &cmdiocbq->wqe;
3175 	memset(cmdwqe, 0, sizeof(union lpfc_wqe));
3176 	if (phba->sli_rev < LPFC_SLI_REV4) {
3177 		rspwqe = &rspiocbq->wqe;
3178 		memset(rspwqe, 0, sizeof(union lpfc_wqe));
3179 	}
3180 
3181 	INIT_LIST_HEAD(&head);
3182 	list_add_tail(&head, &txbuffer->dma.list);
3183 	list_for_each_entry(curr, &head, list) {
3184 		segment_len = ((struct lpfc_dmabufext *)curr)->size;
3185 		if (current_offset == 0) {
3186 			ctreq = curr->virt;
3187 			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3188 			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3189 			ctreq->RevisionId.bits.InId = 0;
3190 			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3191 			ctreq->FsSubType = 0;
3192 			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3193 			ctreq->CommandResponse.bits.Size   = size;
3194 			segment_offset = ELX_LOOPBACK_HEADER_SZ;
3195 		} else
3196 			segment_offset = 0;
3197 
3198 		BUG_ON(segment_offset >= segment_len);
3199 		memcpy(curr->virt + segment_offset,
3200 			ptr + current_offset,
3201 			segment_len - segment_offset);
3202 
3203 		current_offset += segment_len - segment_offset;
3204 		BUG_ON(current_offset > size);
3205 	}
3206 	list_del(&head);
3207 
3208 	/* Build the XMIT_SEQUENCE iocb */
3209 	num_bde = (uint32_t)txbuffer->flag;
3210 
3211 	cmdiocbq->num_bdes = num_bde;
3212 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
3213 	cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
3214 	cmdiocbq->vport = phba->pport;
3215 	cmdiocbq->cmd_cmpl = NULL;
3216 	cmdiocbq->bpl_dmabuf = txbmp;
3217 
3218 	if (phba->sli_rev < LPFC_SLI_REV4) {
3219 		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
3220 					 num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
3221 					 CMD_XMIT_SEQUENCE64_CX);
3222 
3223 	} else {
3224 		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
3225 					 phba->sli4_hba.rpi_ids[rpi], 0xffff,
3226 					 full_size, FC_RCTL_DD_UNSOL_CTL, 1,
3227 					 CMD_XMIT_SEQUENCE64_WQE);
3228 		cmdiocbq->sli4_xritag = NO_XRI;
3229 	}
3230 
3231 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3232 					     rspiocbq, (phba->fc_ratov * 2) +
3233 					     LPFC_DRVR_TIMEOUT);
3234 	if (iocb_stat != IOCB_SUCCESS ||
3235 	    (phba->sli_rev < LPFC_SLI_REV4 &&
3236 	     (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
3237 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3238 				"3126 Failed loopback test issue iocb: "
3239 				"iocb_stat:x%x\n", iocb_stat);
3240 		rc = -EIO;
3241 		goto err_loopback_test_exit;
3242 	}
3243 
3244 	evt->waiting = 1;
3245 	time_left = wait_event_interruptible_timeout(
3246 		evt->wq, !list_empty(&evt->events_to_see),
3247 		msecs_to_jiffies(1000 *
3248 			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3249 	evt->waiting = 0;
3250 	if (list_empty(&evt->events_to_see)) {
3251 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3252 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3253 				"3125 Not receiving unsolicited event, "
3254 				"rc:x%x\n", rc);
3255 	} else {
3256 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3257 		list_move(evt->events_to_see.prev, &evt->events_to_get);
3258 		evdat = list_entry(evt->events_to_get.prev,
3259 				   typeof(*evdat), node);
3260 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3261 		rx_databuf = evdat->data;
3262 		if (evdat->len != full_size) {
3263 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3264 				"1603 Loopback test did not receive expected "
3265 				"data length. actual length 0x%x expected "
3266 				"length 0x%x\n",
3267 				evdat->len, full_size);
3268 			rc = -EIO;
3269 		} else if (rx_databuf == NULL)
3270 			rc = -EIO;
3271 		else {
3272 			rc = IOCB_SUCCESS;
3273 			/* skip over elx loopback header */
3274 			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3275 			bsg_reply->reply_payload_rcv_len =
3276 				sg_copy_from_buffer(job->reply_payload.sg_list,
3277 						    job->reply_payload.sg_cnt,
3278 						    rx_databuf, size);
3279 			bsg_reply->reply_payload_rcv_len = size;
3280 		}
3281 	}
3282 
3283 err_loopback_test_exit:
3284 	lpfcdiag_loop_self_unreg(phba, rpi);
3285 
3286 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3287 	lpfc_bsg_event_unref(evt); /* release ref */
3288 	lpfc_bsg_event_unref(evt); /* delete */
3289 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3290 
3291 	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3292 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3293 
3294 	if (rspiocbq != NULL)
3295 		lpfc_sli_release_iocbq(phba, rspiocbq);
3296 
3297 	if (txbmp != NULL) {
3298 		if (txbpl != NULL) {
3299 			if (txbuffer != NULL)
3300 				diag_cmd_data_free(phba, txbuffer);
3301 			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3302 		}
3303 		kfree(txbmp);
3304 	}
3305 
3306 loopback_test_exit:
3307 	kfree(dataout);
3308 	/* make error code available to userspace */
3309 	bsg_reply->result = rc;
3310 	job->dd_data = NULL;
3311 	/* complete the job back to userspace if no error */
3312 	if (rc == IOCB_SUCCESS)
3313 		bsg_job_done(job, bsg_reply->result,
3314 			       bsg_reply->reply_payload_rcv_len);
3315 	return rc;
3316 }
3317 
3318 /**
3319  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3320  * @job: GET_DFC_REV fc_bsg_job
3321  **/
3322 static int
3323 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3324 {
3325 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3326 	struct fc_bsg_reply *bsg_reply = job->reply;
3327 	struct lpfc_hba *phba = vport->phba;
3328 	struct get_mgmt_rev_reply *event_reply;
3329 	int rc = 0;
3330 
3331 	if (job->request_len <
3332 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3333 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3334 				"2740 Received GET_DFC_REV request below "
3335 				"minimum size\n");
3336 		rc = -EINVAL;
3337 		goto job_error;
3338 	}
3339 
3340 	event_reply = (struct get_mgmt_rev_reply *)
3341 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
3342 
3343 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3344 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3345 				"2741 Received GET_DFC_REV reply below "
3346 				"minimum size\n");
3347 		rc = -EINVAL;
3348 		goto job_error;
3349 	}
3350 
3351 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3352 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3353 job_error:
3354 	bsg_reply->result = rc;
3355 	if (rc == 0)
3356 		bsg_job_done(job, bsg_reply->result,
3357 			       bsg_reply->reply_payload_rcv_len);
3358 	return rc;
3359 }
3360 
3361 /**
3362  * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3363  * @phba: Pointer to HBA context object.
3364  * @pmboxq: Pointer to mailbox command.
3365  *
3366  * This is completion handler function for mailbox commands issued from
3367  * lpfc_bsg_issue_mbox function. This function is called by the
3368  * mailbox event handler function with no lock held. This function
3369  * will wake up thread waiting on the wait queue pointed by dd_data
3370  * of the mailbox.
3371  **/
3372 static void
3373 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3374 {
3375 	struct bsg_job_data *dd_data;
3376 	struct fc_bsg_reply *bsg_reply;
3377 	struct bsg_job *job;
3378 	uint32_t size;
3379 	unsigned long flags;
3380 	uint8_t *pmb, *pmb_buf;
3381 
3382 	dd_data = pmboxq->ctx_ndlp;
3383 
3384 	/*
3385 	 * The outgoing buffer is readily referred from the dma buffer,
3386 	 * just need to get header part from mailboxq structure.
3387 	 */
3388 	pmb = (uint8_t *)&pmboxq->u.mb;
3389 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3390 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3391 
3392 	/* Determine if job has been aborted */
3393 
3394 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3395 	job = dd_data->set_job;
3396 	if (job) {
3397 		/* Prevent timeout handling from trying to abort job  */
3398 		job->dd_data = NULL;
3399 	}
3400 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3401 
3402 	/* Copy the mailbox data to the job if it is still active */
3403 
3404 	if (job) {
3405 		bsg_reply = job->reply;
3406 		size = job->reply_payload.payload_len;
3407 		bsg_reply->reply_payload_rcv_len =
3408 			sg_copy_from_buffer(job->reply_payload.sg_list,
3409 					    job->reply_payload.sg_cnt,
3410 					    pmb_buf, size);
3411 	}
3412 
3413 	dd_data->set_job = NULL;
3414 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3415 	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3416 	kfree(dd_data);
3417 
3418 	/* Complete the job if the job is still active */
3419 
3420 	if (job) {
3421 		bsg_reply->result = 0;
3422 		bsg_job_done(job, bsg_reply->result,
3423 			       bsg_reply->reply_payload_rcv_len);
3424 	}
3425 	return;
3426 }
3427 
3428 /**
3429  * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3430  * @phba: Pointer to HBA context object.
3431  * @mb: Pointer to a mailbox object.
3432  * @vport: Pointer to a vport object.
3433  *
3434  * Some commands require the port to be offline, some may not be called from
3435  * the application.
3436  **/
3437 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3438 	MAILBOX_t *mb, struct lpfc_vport *vport)
3439 {
3440 	/* return negative error values for bsg job */
3441 	switch (mb->mbxCommand) {
3442 	/* Offline only */
3443 	case MBX_INIT_LINK:
3444 	case MBX_DOWN_LINK:
3445 	case MBX_CONFIG_LINK:
3446 	case MBX_CONFIG_RING:
3447 	case MBX_RESET_RING:
3448 	case MBX_UNREG_LOGIN:
3449 	case MBX_CLEAR_LA:
3450 	case MBX_DUMP_CONTEXT:
3451 	case MBX_RUN_DIAGS:
3452 	case MBX_RESTART:
3453 	case MBX_SET_MASK:
3454 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3455 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3456 				"2743 Command 0x%x is illegal in on-line "
3457 				"state\n",
3458 				mb->mbxCommand);
3459 			return -EPERM;
3460 		}
3461 		break;
3462 	case MBX_WRITE_NV:
3463 	case MBX_WRITE_VPARMS:
3464 	case MBX_LOAD_SM:
3465 	case MBX_READ_NV:
3466 	case MBX_READ_CONFIG:
3467 	case MBX_READ_RCONFIG:
3468 	case MBX_READ_STATUS:
3469 	case MBX_READ_XRI:
3470 	case MBX_READ_REV:
3471 	case MBX_READ_LNK_STAT:
3472 	case MBX_DUMP_MEMORY:
3473 	case MBX_DOWN_LOAD:
3474 	case MBX_UPDATE_CFG:
3475 	case MBX_KILL_BOARD:
3476 	case MBX_READ_TOPOLOGY:
3477 	case MBX_LOAD_AREA:
3478 	case MBX_LOAD_EXP_ROM:
3479 	case MBX_BEACON:
3480 	case MBX_DEL_LD_ENTRY:
3481 	case MBX_SET_DEBUG:
3482 	case MBX_WRITE_WWN:
3483 	case MBX_SLI4_CONFIG:
3484 	case MBX_READ_EVENT_LOG:
3485 	case MBX_READ_EVENT_LOG_STATUS:
3486 	case MBX_WRITE_EVENT_LOG:
3487 	case MBX_PORT_CAPABILITIES:
3488 	case MBX_PORT_IOV_CONTROL:
3489 	case MBX_RUN_BIU_DIAG64:
3490 		break;
3491 	case MBX_SET_VARIABLE:
3492 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3493 			"1226 mbox: set_variable 0x%x, 0x%x\n",
3494 			mb->un.varWords[0],
3495 			mb->un.varWords[1]);
3496 		break;
3497 	case MBX_READ_SPARM64:
3498 	case MBX_REG_LOGIN:
3499 	case MBX_REG_LOGIN64:
3500 	case MBX_CONFIG_PORT:
3501 	case MBX_RUN_BIU_DIAG:
3502 	default:
3503 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3504 			"2742 Unknown Command 0x%x\n",
3505 			mb->mbxCommand);
3506 		return -EPERM;
3507 	}
3508 
3509 	return 0; /* ok */
3510 }
3511 
3512 /**
3513  * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3514  * @phba: Pointer to HBA context object.
3515  *
3516  * This is routine clean up and reset BSG handling of multi-buffer mbox
3517  * command session.
3518  **/
3519 static void
3520 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3521 {
3522 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3523 		return;
3524 
3525 	/* free all memory, including dma buffers */
3526 	lpfc_bsg_dma_page_list_free(phba,
3527 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3528 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3529 	/* multi-buffer write mailbox command pass-through complete */
3530 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3531 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3532 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3533 
3534 	return;
3535 }
3536 
3537 /**
3538  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3539  * @phba: Pointer to HBA context object.
3540  * @pmboxq: Pointer to mailbox command.
3541  *
3542  * This is routine handles BSG job for mailbox commands completions with
3543  * multiple external buffers.
3544  **/
3545 static struct bsg_job *
3546 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3547 {
3548 	struct bsg_job_data *dd_data;
3549 	struct bsg_job *job;
3550 	struct fc_bsg_reply *bsg_reply;
3551 	uint8_t *pmb, *pmb_buf;
3552 	unsigned long flags;
3553 	uint32_t size;
3554 	int rc = 0;
3555 	struct lpfc_dmabuf *dmabuf;
3556 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3557 	uint8_t *pmbx;
3558 
3559 	dd_data = pmboxq->ctx_buf;
3560 
3561 	/* Determine if job has been aborted */
3562 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3563 	job = dd_data->set_job;
3564 	if (job) {
3565 		bsg_reply = job->reply;
3566 		/* Prevent timeout handling from trying to abort job  */
3567 		job->dd_data = NULL;
3568 	}
3569 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3570 
3571 	/*
3572 	 * The outgoing buffer is readily referred from the dma buffer,
3573 	 * just need to get header part from mailboxq structure.
3574 	 */
3575 
3576 	pmb = (uint8_t *)&pmboxq->u.mb;
3577 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3578 	/* Copy the byte swapped response mailbox back to the user */
3579 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3580 	/* if there is any non-embedded extended data copy that too */
3581 	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3582 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3583 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3584 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3585 		pmbx = (uint8_t *)dmabuf->virt;
3586 		/* byte swap the extended data following the mailbox command */
3587 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3588 			&pmbx[sizeof(MAILBOX_t)],
3589 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3590 	}
3591 
3592 	/* Complete the job if the job is still active */
3593 
3594 	if (job) {
3595 		size = job->reply_payload.payload_len;
3596 		bsg_reply->reply_payload_rcv_len =
3597 			sg_copy_from_buffer(job->reply_payload.sg_list,
3598 					    job->reply_payload.sg_cnt,
3599 					    pmb_buf, size);
3600 
3601 		/* result for successful */
3602 		bsg_reply->result = 0;
3603 
3604 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3605 				"2937 SLI_CONFIG ext-buffer mailbox command "
3606 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3607 				phba->mbox_ext_buf_ctx.nembType,
3608 				phba->mbox_ext_buf_ctx.mboxType, size);
3609 		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3610 					phba->mbox_ext_buf_ctx.nembType,
3611 					phba->mbox_ext_buf_ctx.mboxType,
3612 					dma_ebuf, sta_pos_addr,
3613 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3614 	} else {
3615 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3616 				"2938 SLI_CONFIG ext-buffer mailbox "
3617 				"command (x%x/x%x) failure, rc:x%x\n",
3618 				phba->mbox_ext_buf_ctx.nembType,
3619 				phba->mbox_ext_buf_ctx.mboxType, rc);
3620 	}
3621 
3622 
3623 	/* state change */
3624 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3625 	kfree(dd_data);
3626 	return job;
3627 }
3628 
3629 /**
3630  * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3631  * @phba: Pointer to HBA context object.
3632  * @pmboxq: Pointer to mailbox command.
3633  *
3634  * This is completion handler function for mailbox read commands with multiple
3635  * external buffers.
3636  **/
3637 static void
3638 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3639 {
3640 	struct bsg_job *job;
3641 	struct fc_bsg_reply *bsg_reply;
3642 
3643 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3644 
3645 	/* handle the BSG job with mailbox command */
3646 	if (!job)
3647 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3648 
3649 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3650 			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3651 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3652 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3653 
3654 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3655 		lpfc_bsg_mbox_ext_session_reset(phba);
3656 
3657 	/* free base driver mailbox structure memory */
3658 	mempool_free(pmboxq, phba->mbox_mem_pool);
3659 
3660 	/* if the job is still active, call job done */
3661 	if (job) {
3662 		bsg_reply = job->reply;
3663 		bsg_job_done(job, bsg_reply->result,
3664 			       bsg_reply->reply_payload_rcv_len);
3665 	}
3666 	return;
3667 }
3668 
3669 /**
3670  * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3671  * @phba: Pointer to HBA context object.
3672  * @pmboxq: Pointer to mailbox command.
3673  *
3674  * This is completion handler function for mailbox write commands with multiple
3675  * external buffers.
3676  **/
3677 static void
3678 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3679 {
3680 	struct bsg_job *job;
3681 	struct fc_bsg_reply *bsg_reply;
3682 
3683 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3684 
3685 	/* handle the BSG job with the mailbox command */
3686 	if (!job)
3687 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3688 
3689 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3690 			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3691 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3692 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3693 
3694 	/* free all memory, including dma buffers */
3695 	mempool_free(pmboxq, phba->mbox_mem_pool);
3696 	lpfc_bsg_mbox_ext_session_reset(phba);
3697 
3698 	/* if the job is still active, call job done */
3699 	if (job) {
3700 		bsg_reply = job->reply;
3701 		bsg_job_done(job, bsg_reply->result,
3702 			       bsg_reply->reply_payload_rcv_len);
3703 	}
3704 
3705 	return;
3706 }
3707 
3708 static void
3709 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3710 				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3711 				struct lpfc_dmabuf *ext_dmabuf)
3712 {
3713 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3714 
3715 	/* pointer to the start of mailbox command */
3716 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3717 
3718 	if (nemb_tp == nemb_mse) {
3719 		if (index == 0) {
3720 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3721 				mse[index].pa_hi =
3722 				putPaddrHigh(mbx_dmabuf->phys +
3723 					     sizeof(MAILBOX_t));
3724 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3725 				mse[index].pa_lo =
3726 				putPaddrLow(mbx_dmabuf->phys +
3727 					    sizeof(MAILBOX_t));
3728 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3729 					"2943 SLI_CONFIG(mse)[%d], "
3730 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3731 					index,
3732 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3733 					mse[index].buf_len,
3734 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3735 					mse[index].pa_hi,
3736 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3737 					mse[index].pa_lo);
3738 		} else {
3739 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3740 				mse[index].pa_hi =
3741 				putPaddrHigh(ext_dmabuf->phys);
3742 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3743 				mse[index].pa_lo =
3744 				putPaddrLow(ext_dmabuf->phys);
3745 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3746 					"2944 SLI_CONFIG(mse)[%d], "
3747 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3748 					index,
3749 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3750 					mse[index].buf_len,
3751 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3752 					mse[index].pa_hi,
3753 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3754 					mse[index].pa_lo);
3755 		}
3756 	} else {
3757 		if (index == 0) {
3758 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3759 				hbd[index].pa_hi =
3760 				putPaddrHigh(mbx_dmabuf->phys +
3761 					     sizeof(MAILBOX_t));
3762 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3763 				hbd[index].pa_lo =
3764 				putPaddrLow(mbx_dmabuf->phys +
3765 					    sizeof(MAILBOX_t));
3766 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3767 					"3007 SLI_CONFIG(hbd)[%d], "
3768 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3769 				index,
3770 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3771 				&sli_cfg_mbx->un.
3772 				sli_config_emb1_subsys.hbd[index]),
3773 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3774 				hbd[index].pa_hi,
3775 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3776 				hbd[index].pa_lo);
3777 
3778 		} else {
3779 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3780 				hbd[index].pa_hi =
3781 				putPaddrHigh(ext_dmabuf->phys);
3782 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3783 				hbd[index].pa_lo =
3784 				putPaddrLow(ext_dmabuf->phys);
3785 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3786 					"3008 SLI_CONFIG(hbd)[%d], "
3787 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3788 				index,
3789 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3790 				&sli_cfg_mbx->un.
3791 				sli_config_emb1_subsys.hbd[index]),
3792 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3793 				hbd[index].pa_hi,
3794 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3795 				hbd[index].pa_lo);
3796 		}
3797 	}
3798 	return;
3799 }
3800 
3801 /**
3802  * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3803  * @phba: Pointer to HBA context object.
3804  * @job: Pointer to the job object.
3805  * @nemb_tp: Enumerate of non-embedded mailbox command type.
3806  * @dmabuf: Pointer to a DMA buffer descriptor.
3807  *
3808  * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3809  * non-embedded external buffers.
3810  **/
3811 static int
3812 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3813 			      enum nemb_type nemb_tp,
3814 			      struct lpfc_dmabuf *dmabuf)
3815 {
3816 	struct fc_bsg_request *bsg_request = job->request;
3817 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3818 	struct dfc_mbox_req *mbox_req;
3819 	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3820 	uint32_t ext_buf_cnt, ext_buf_index;
3821 	struct lpfc_dmabuf *ext_dmabuf = NULL;
3822 	struct bsg_job_data *dd_data = NULL;
3823 	LPFC_MBOXQ_t *pmboxq = NULL;
3824 	MAILBOX_t *pmb;
3825 	uint8_t *pmbx;
3826 	int rc, i;
3827 
3828 	mbox_req =
3829 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3830 
3831 	/* pointer to the start of mailbox command */
3832 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3833 
3834 	if (nemb_tp == nemb_mse) {
3835 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3836 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3837 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3838 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3839 					"2945 Handled SLI_CONFIG(mse) rd, "
3840 					"ext_buf_cnt(%d) out of range(%d)\n",
3841 					ext_buf_cnt,
3842 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3843 			rc = -ERANGE;
3844 			goto job_error;
3845 		}
3846 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3847 				"2941 Handled SLI_CONFIG(mse) rd, "
3848 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3849 	} else {
3850 		/* sanity check on interface type for support */
3851 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3852 		    LPFC_SLI_INTF_IF_TYPE_2) {
3853 			rc = -ENODEV;
3854 			goto job_error;
3855 		}
3856 		/* nemb_tp == nemb_hbd */
3857 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3858 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3859 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3860 					"2946 Handled SLI_CONFIG(hbd) rd, "
3861 					"ext_buf_cnt(%d) out of range(%d)\n",
3862 					ext_buf_cnt,
3863 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3864 			rc = -ERANGE;
3865 			goto job_error;
3866 		}
3867 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3868 				"2942 Handled SLI_CONFIG(hbd) rd, "
3869 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3870 	}
3871 
3872 	/* before dma descriptor setup */
3873 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3874 					sta_pre_addr, dmabuf, ext_buf_cnt);
3875 
3876 	/* reject non-embedded mailbox command with none external buffer */
3877 	if (ext_buf_cnt == 0) {
3878 		rc = -EPERM;
3879 		goto job_error;
3880 	} else if (ext_buf_cnt > 1) {
3881 		/* additional external read buffers */
3882 		for (i = 1; i < ext_buf_cnt; i++) {
3883 			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3884 			if (!ext_dmabuf) {
3885 				rc = -ENOMEM;
3886 				goto job_error;
3887 			}
3888 			list_add_tail(&ext_dmabuf->list,
3889 				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3890 		}
3891 	}
3892 
3893 	/* bsg tracking structure */
3894 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3895 	if (!dd_data) {
3896 		rc = -ENOMEM;
3897 		goto job_error;
3898 	}
3899 
3900 	/* mailbox command structure for base driver */
3901 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3902 	if (!pmboxq) {
3903 		rc = -ENOMEM;
3904 		goto job_error;
3905 	}
3906 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3907 
3908 	/* for the first external buffer */
3909 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3910 
3911 	/* for the rest of external buffer descriptors if any */
3912 	if (ext_buf_cnt > 1) {
3913 		ext_buf_index = 1;
3914 		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3915 				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3916 			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3917 						ext_buf_index, dmabuf,
3918 						curr_dmabuf);
3919 			ext_buf_index++;
3920 		}
3921 	}
3922 
3923 	/* after dma descriptor setup */
3924 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3925 					sta_pos_addr, dmabuf, ext_buf_cnt);
3926 
3927 	/* construct base driver mbox command */
3928 	pmb = &pmboxq->u.mb;
3929 	pmbx = (uint8_t *)dmabuf->virt;
3930 	memcpy(pmb, pmbx, sizeof(*pmb));
3931 	pmb->mbxOwner = OWN_HOST;
3932 	pmboxq->vport = phba->pport;
3933 
3934 	/* multi-buffer handling context */
3935 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3936 	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3937 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3938 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3939 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3940 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3941 
3942 	/* callback for multi-buffer read mailbox command */
3943 	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3944 
3945 	/* context fields to callback function */
3946 	pmboxq->ctx_buf = dd_data;
3947 	dd_data->type = TYPE_MBOX;
3948 	dd_data->set_job = job;
3949 	dd_data->context_un.mbox.pmboxq = pmboxq;
3950 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3951 	job->dd_data = dd_data;
3952 
3953 	/* state change */
3954 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3955 
3956 	/*
3957 	 * Non-embedded mailbox subcommand data gets byte swapped here because
3958 	 * the lower level driver code only does the first 64 mailbox words.
3959 	 */
3960 	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3961 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3962 		(nemb_tp == nemb_mse))
3963 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3964 			&pmbx[sizeof(MAILBOX_t)],
3965 				sli_cfg_mbx->un.sli_config_emb0_subsys.
3966 					mse[0].buf_len);
3967 
3968 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3969 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3970 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3971 				"2947 Issued SLI_CONFIG ext-buffer "
3972 				"mailbox command, rc:x%x\n", rc);
3973 		return SLI_CONFIG_HANDLED;
3974 	}
3975 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3976 			"2948 Failed to issue SLI_CONFIG ext-buffer "
3977 			"mailbox command, rc:x%x\n", rc);
3978 	rc = -EPIPE;
3979 
3980 job_error:
3981 	if (pmboxq)
3982 		mempool_free(pmboxq, phba->mbox_mem_pool);
3983 	lpfc_bsg_dma_page_list_free(phba,
3984 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3985 	kfree(dd_data);
3986 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3987 	return rc;
3988 }
3989 
3990 /**
3991  * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3992  * @phba: Pointer to HBA context object.
3993  * @job: Pointer to the job object.
3994  * @nemb_tp: Enumerate of non-embedded mailbox command type.
3995  * @dmabuf: Pointer to a DMA buffer descriptor.
3996  *
3997  * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3998  * non-embedded external buffers.
3999  **/
4000 static int
4001 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4002 			       enum nemb_type nemb_tp,
4003 			       struct lpfc_dmabuf *dmabuf)
4004 {
4005 	struct fc_bsg_request *bsg_request = job->request;
4006 	struct fc_bsg_reply *bsg_reply = job->reply;
4007 	struct dfc_mbox_req *mbox_req;
4008 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4009 	uint32_t ext_buf_cnt;
4010 	struct bsg_job_data *dd_data = NULL;
4011 	LPFC_MBOXQ_t *pmboxq = NULL;
4012 	MAILBOX_t *pmb;
4013 	uint8_t *mbx;
4014 	int rc = SLI_CONFIG_NOT_HANDLED, i;
4015 
4016 	mbox_req =
4017 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4018 
4019 	/* pointer to the start of mailbox command */
4020 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4021 
4022 	if (nemb_tp == nemb_mse) {
4023 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4024 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4025 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4026 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4027 					"2953 Failed SLI_CONFIG(mse) wr, "
4028 					"ext_buf_cnt(%d) out of range(%d)\n",
4029 					ext_buf_cnt,
4030 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
4031 			return -ERANGE;
4032 		}
4033 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4034 				"2949 Handled SLI_CONFIG(mse) wr, "
4035 				"ext_buf_cnt:%d\n", ext_buf_cnt);
4036 	} else {
4037 		/* sanity check on interface type for support */
4038 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4039 		    LPFC_SLI_INTF_IF_TYPE_2)
4040 			return -ENODEV;
4041 		/* nemb_tp == nemb_hbd */
4042 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4043 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4044 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4045 					"2954 Failed SLI_CONFIG(hbd) wr, "
4046 					"ext_buf_cnt(%d) out of range(%d)\n",
4047 					ext_buf_cnt,
4048 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
4049 			return -ERANGE;
4050 		}
4051 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4052 				"2950 Handled SLI_CONFIG(hbd) wr, "
4053 				"ext_buf_cnt:%d\n", ext_buf_cnt);
4054 	}
4055 
4056 	/* before dma buffer descriptor setup */
4057 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4058 					sta_pre_addr, dmabuf, ext_buf_cnt);
4059 
4060 	if (ext_buf_cnt == 0)
4061 		return -EPERM;
4062 
4063 	/* for the first external buffer */
4064 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4065 
4066 	/* after dma descriptor setup */
4067 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4068 					sta_pos_addr, dmabuf, ext_buf_cnt);
4069 
4070 	/* log for looking forward */
4071 	for (i = 1; i < ext_buf_cnt; i++) {
4072 		if (nemb_tp == nemb_mse)
4073 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4074 				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4075 				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4076 				mse[i].buf_len);
4077 		else
4078 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4079 				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4080 				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4081 				&sli_cfg_mbx->un.sli_config_emb1_subsys.
4082 				hbd[i]));
4083 	}
4084 
4085 	/* multi-buffer handling context */
4086 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4087 	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4088 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4089 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4090 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4091 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4092 
4093 	if (ext_buf_cnt == 1) {
4094 		/* bsg tracking structure */
4095 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4096 		if (!dd_data) {
4097 			rc = -ENOMEM;
4098 			goto job_error;
4099 		}
4100 
4101 		/* mailbox command structure for base driver */
4102 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4103 		if (!pmboxq) {
4104 			rc = -ENOMEM;
4105 			goto job_error;
4106 		}
4107 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4108 		pmb = &pmboxq->u.mb;
4109 		mbx = (uint8_t *)dmabuf->virt;
4110 		memcpy(pmb, mbx, sizeof(*pmb));
4111 		pmb->mbxOwner = OWN_HOST;
4112 		pmboxq->vport = phba->pport;
4113 
4114 		/* callback for multi-buffer read mailbox command */
4115 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4116 
4117 		/* context fields to callback function */
4118 		pmboxq->ctx_buf = dd_data;
4119 		dd_data->type = TYPE_MBOX;
4120 		dd_data->set_job = job;
4121 		dd_data->context_un.mbox.pmboxq = pmboxq;
4122 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4123 		job->dd_data = dd_data;
4124 
4125 		/* state change */
4126 
4127 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4128 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4129 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4130 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4131 					"2955 Issued SLI_CONFIG ext-buffer "
4132 					"mailbox command, rc:x%x\n", rc);
4133 			return SLI_CONFIG_HANDLED;
4134 		}
4135 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4136 				"2956 Failed to issue SLI_CONFIG ext-buffer "
4137 				"mailbox command, rc:x%x\n", rc);
4138 		rc = -EPIPE;
4139 		goto job_error;
4140 	}
4141 
4142 	/* wait for additional external buffers */
4143 
4144 	bsg_reply->result = 0;
4145 	bsg_job_done(job, bsg_reply->result,
4146 		       bsg_reply->reply_payload_rcv_len);
4147 	return SLI_CONFIG_HANDLED;
4148 
4149 job_error:
4150 	if (pmboxq)
4151 		mempool_free(pmboxq, phba->mbox_mem_pool);
4152 	kfree(dd_data);
4153 
4154 	return rc;
4155 }
4156 
4157 /**
4158  * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4159  * @phba: Pointer to HBA context object.
4160  * @job: Pointer to the job object.
4161  * @dmabuf: Pointer to a DMA buffer descriptor.
4162  *
4163  * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4164  * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4165  * with embedded subsystem 0x1 and opcodes with external HBDs.
4166  **/
4167 static int
4168 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4169 			     struct lpfc_dmabuf *dmabuf)
4170 {
4171 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4172 	uint32_t subsys;
4173 	uint32_t opcode;
4174 	int rc = SLI_CONFIG_NOT_HANDLED;
4175 
4176 	/* state change on new multi-buffer pass-through mailbox command */
4177 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4178 
4179 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4180 
4181 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4182 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4183 		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4184 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4185 		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4186 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4187 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4188 			switch (opcode) {
4189 			case FCOE_OPCODE_READ_FCF:
4190 			case FCOE_OPCODE_GET_DPORT_RESULTS:
4191 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4192 						"2957 Handled SLI_CONFIG "
4193 						"subsys_fcoe, opcode:x%x\n",
4194 						opcode);
4195 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4196 							nemb_mse, dmabuf);
4197 				break;
4198 			case FCOE_OPCODE_ADD_FCF:
4199 			case FCOE_OPCODE_SET_DPORT_MODE:
4200 			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4201 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4202 						"2958 Handled SLI_CONFIG "
4203 						"subsys_fcoe, opcode:x%x\n",
4204 						opcode);
4205 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4206 							nemb_mse, dmabuf);
4207 				break;
4208 			default:
4209 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4210 						"2959 Reject SLI_CONFIG "
4211 						"subsys_fcoe, opcode:x%x\n",
4212 						opcode);
4213 				rc = -EPERM;
4214 				break;
4215 			}
4216 		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4217 			switch (opcode) {
4218 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4219 			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4220 			case COMN_OPCODE_GET_PROFILE_CONFIG:
4221 			case COMN_OPCODE_SET_FEATURES:
4222 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4223 						"3106 Handled SLI_CONFIG "
4224 						"subsys_comn, opcode:x%x\n",
4225 						opcode);
4226 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4227 							nemb_mse, dmabuf);
4228 				break;
4229 			default:
4230 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4231 						"3107 Reject SLI_CONFIG "
4232 						"subsys_comn, opcode:x%x\n",
4233 						opcode);
4234 				rc = -EPERM;
4235 				break;
4236 			}
4237 		} else {
4238 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4239 					"2977 Reject SLI_CONFIG "
4240 					"subsys:x%d, opcode:x%x\n",
4241 					subsys, opcode);
4242 			rc = -EPERM;
4243 		}
4244 	} else {
4245 		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4246 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4247 		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4248 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4249 		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4250 			switch (opcode) {
4251 			case COMN_OPCODE_READ_OBJECT:
4252 			case COMN_OPCODE_READ_OBJECT_LIST:
4253 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4254 						"2960 Handled SLI_CONFIG "
4255 						"subsys_comn, opcode:x%x\n",
4256 						opcode);
4257 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4258 							nemb_hbd, dmabuf);
4259 				break;
4260 			case COMN_OPCODE_WRITE_OBJECT:
4261 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4262 						"2961 Handled SLI_CONFIG "
4263 						"subsys_comn, opcode:x%x\n",
4264 						opcode);
4265 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4266 							nemb_hbd, dmabuf);
4267 				break;
4268 			default:
4269 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4270 						"2962 Not handled SLI_CONFIG "
4271 						"subsys_comn, opcode:x%x\n",
4272 						opcode);
4273 				rc = SLI_CONFIG_NOT_HANDLED;
4274 				break;
4275 			}
4276 		} else {
4277 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4278 					"2978 Not handled SLI_CONFIG "
4279 					"subsys:x%d, opcode:x%x\n",
4280 					subsys, opcode);
4281 			rc = SLI_CONFIG_NOT_HANDLED;
4282 		}
4283 	}
4284 
4285 	/* state reset on not handled new multi-buffer mailbox command */
4286 	if (rc != SLI_CONFIG_HANDLED)
4287 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4288 
4289 	return rc;
4290 }
4291 
4292 /**
4293  * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4294  * @phba: Pointer to HBA context object.
4295  *
4296  * This routine is for requesting to abort a pass-through mailbox command with
4297  * multiple external buffers due to error condition.
4298  **/
4299 static void
4300 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4301 {
4302 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4303 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4304 	else
4305 		lpfc_bsg_mbox_ext_session_reset(phba);
4306 	return;
4307 }
4308 
4309 /**
4310  * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4311  * @phba: Pointer to HBA context object.
4312  * @job: Pointer to the job object.
4313  *
4314  * This routine extracts the next mailbox read external buffer back to
4315  * user space through BSG.
4316  **/
4317 static int
4318 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4319 {
4320 	struct fc_bsg_reply *bsg_reply = job->reply;
4321 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4322 	struct lpfc_dmabuf *dmabuf;
4323 	uint8_t *pbuf;
4324 	uint32_t size;
4325 	uint32_t index;
4326 
4327 	index = phba->mbox_ext_buf_ctx.seqNum;
4328 	phba->mbox_ext_buf_ctx.seqNum++;
4329 
4330 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4331 			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4332 
4333 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4334 		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4335 			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4336 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4337 				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4338 				"buffer[%d], size:%d\n", index, size);
4339 	} else {
4340 		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4341 			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4342 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4343 				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4344 				"buffer[%d], size:%d\n", index, size);
4345 	}
4346 	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4347 		return -EPIPE;
4348 	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4349 				  struct lpfc_dmabuf, list);
4350 	list_del_init(&dmabuf->list);
4351 
4352 	/* after dma buffer descriptor setup */
4353 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4354 					mbox_rd, dma_ebuf, sta_pos_addr,
4355 					dmabuf, index);
4356 
4357 	pbuf = (uint8_t *)dmabuf->virt;
4358 	bsg_reply->reply_payload_rcv_len =
4359 		sg_copy_from_buffer(job->reply_payload.sg_list,
4360 				    job->reply_payload.sg_cnt,
4361 				    pbuf, size);
4362 
4363 	lpfc_bsg_dma_page_free(phba, dmabuf);
4364 
4365 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4366 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4367 				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4368 				"command session done\n");
4369 		lpfc_bsg_mbox_ext_session_reset(phba);
4370 	}
4371 
4372 	bsg_reply->result = 0;
4373 	bsg_job_done(job, bsg_reply->result,
4374 		       bsg_reply->reply_payload_rcv_len);
4375 
4376 	return SLI_CONFIG_HANDLED;
4377 }
4378 
4379 /**
4380  * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4381  * @phba: Pointer to HBA context object.
4382  * @job: Pointer to the job object.
4383  * @dmabuf: Pointer to a DMA buffer descriptor.
4384  *
4385  * This routine sets up the next mailbox read external buffer obtained
4386  * from user space through BSG.
4387  **/
4388 static int
4389 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4390 			struct lpfc_dmabuf *dmabuf)
4391 {
4392 	struct fc_bsg_reply *bsg_reply = job->reply;
4393 	struct bsg_job_data *dd_data = NULL;
4394 	LPFC_MBOXQ_t *pmboxq = NULL;
4395 	MAILBOX_t *pmb;
4396 	enum nemb_type nemb_tp;
4397 	uint8_t *pbuf;
4398 	uint32_t size;
4399 	uint32_t index;
4400 	int rc;
4401 
4402 	index = phba->mbox_ext_buf_ctx.seqNum;
4403 	phba->mbox_ext_buf_ctx.seqNum++;
4404 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4405 
4406 	pbuf = (uint8_t *)dmabuf->virt;
4407 	size = job->request_payload.payload_len;
4408 	sg_copy_to_buffer(job->request_payload.sg_list,
4409 			  job->request_payload.sg_cnt,
4410 			  pbuf, size);
4411 
4412 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4413 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4414 				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4415 				"buffer[%d], size:%d\n",
4416 				phba->mbox_ext_buf_ctx.seqNum, size);
4417 
4418 	} else {
4419 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4420 				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4421 				"buffer[%d], size:%d\n",
4422 				phba->mbox_ext_buf_ctx.seqNum, size);
4423 
4424 	}
4425 
4426 	/* set up external buffer descriptor and add to external buffer list */
4427 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4428 					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4429 					dmabuf);
4430 	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4431 
4432 	/* after write dma buffer */
4433 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4434 					mbox_wr, dma_ebuf, sta_pos_addr,
4435 					dmabuf, index);
4436 
4437 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4438 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4439 				"2968 SLI_CONFIG ext-buffer wr all %d "
4440 				"ebuffers received\n",
4441 				phba->mbox_ext_buf_ctx.numBuf);
4442 
4443 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4444 		if (!dd_data) {
4445 			rc = -ENOMEM;
4446 			goto job_error;
4447 		}
4448 
4449 		/* mailbox command structure for base driver */
4450 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4451 		if (!pmboxq) {
4452 			rc = -ENOMEM;
4453 			goto job_error;
4454 		}
4455 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4456 		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4457 		pmb = &pmboxq->u.mb;
4458 		memcpy(pmb, pbuf, sizeof(*pmb));
4459 		pmb->mbxOwner = OWN_HOST;
4460 		pmboxq->vport = phba->pport;
4461 
4462 		/* callback for multi-buffer write mailbox command */
4463 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4464 
4465 		/* context fields to callback function */
4466 		pmboxq->ctx_buf = dd_data;
4467 		dd_data->type = TYPE_MBOX;
4468 		dd_data->set_job = job;
4469 		dd_data->context_un.mbox.pmboxq = pmboxq;
4470 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4471 		job->dd_data = dd_data;
4472 
4473 		/* state change */
4474 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4475 
4476 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4477 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4478 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4479 					"2969 Issued SLI_CONFIG ext-buffer "
4480 					"mailbox command, rc:x%x\n", rc);
4481 			return SLI_CONFIG_HANDLED;
4482 		}
4483 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4484 				"2970 Failed to issue SLI_CONFIG ext-buffer "
4485 				"mailbox command, rc:x%x\n", rc);
4486 		rc = -EPIPE;
4487 		goto job_error;
4488 	}
4489 
4490 	/* wait for additional external buffers */
4491 	bsg_reply->result = 0;
4492 	bsg_job_done(job, bsg_reply->result,
4493 		       bsg_reply->reply_payload_rcv_len);
4494 	return SLI_CONFIG_HANDLED;
4495 
4496 job_error:
4497 	if (pmboxq)
4498 		mempool_free(pmboxq, phba->mbox_mem_pool);
4499 	lpfc_bsg_dma_page_free(phba, dmabuf);
4500 	kfree(dd_data);
4501 
4502 	return rc;
4503 }
4504 
4505 /**
4506  * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4507  * @phba: Pointer to HBA context object.
4508  * @job: Pointer to the job object.
4509  * @dmabuf: Pointer to a DMA buffer descriptor.
4510  *
4511  * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4512  * command with multiple non-embedded external buffers.
4513  **/
4514 static int
4515 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4516 			     struct lpfc_dmabuf *dmabuf)
4517 {
4518 	int rc;
4519 
4520 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4521 			"2971 SLI_CONFIG buffer (type:x%x)\n",
4522 			phba->mbox_ext_buf_ctx.mboxType);
4523 
4524 	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4525 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4526 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4527 					"2972 SLI_CONFIG rd buffer state "
4528 					"mismatch:x%x\n",
4529 					phba->mbox_ext_buf_ctx.state);
4530 			lpfc_bsg_mbox_ext_abort(phba);
4531 			return -EPIPE;
4532 		}
4533 		rc = lpfc_bsg_read_ebuf_get(phba, job);
4534 		if (rc == SLI_CONFIG_HANDLED)
4535 			lpfc_bsg_dma_page_free(phba, dmabuf);
4536 	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4537 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4538 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4539 					"2973 SLI_CONFIG wr buffer state "
4540 					"mismatch:x%x\n",
4541 					phba->mbox_ext_buf_ctx.state);
4542 			lpfc_bsg_mbox_ext_abort(phba);
4543 			return -EPIPE;
4544 		}
4545 		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4546 	}
4547 	return rc;
4548 }
4549 
4550 /**
4551  * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4552  * @phba: Pointer to HBA context object.
4553  * @job: Pointer to the job object.
4554  * @dmabuf: Pointer to a DMA buffer descriptor.
4555  *
4556  * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4557  * (0x9B) mailbox commands and external buffers.
4558  **/
4559 static int
4560 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4561 			    struct lpfc_dmabuf *dmabuf)
4562 {
4563 	struct fc_bsg_request *bsg_request = job->request;
4564 	struct dfc_mbox_req *mbox_req;
4565 	int rc = SLI_CONFIG_NOT_HANDLED;
4566 
4567 	mbox_req =
4568 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4569 
4570 	/* mbox command with/without single external buffer */
4571 	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4572 		return rc;
4573 
4574 	/* mbox command and first external buffer */
4575 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4576 		if (mbox_req->extSeqNum == 1) {
4577 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4578 					"2974 SLI_CONFIG mailbox: tag:%d, "
4579 					"seq:%d\n", mbox_req->extMboxTag,
4580 					mbox_req->extSeqNum);
4581 			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4582 			return rc;
4583 		} else
4584 			goto sli_cfg_ext_error;
4585 	}
4586 
4587 	/*
4588 	 * handle additional external buffers
4589 	 */
4590 
4591 	/* check broken pipe conditions */
4592 	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4593 		goto sli_cfg_ext_error;
4594 	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4595 		goto sli_cfg_ext_error;
4596 	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4597 		goto sli_cfg_ext_error;
4598 
4599 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4600 			"2975 SLI_CONFIG mailbox external buffer: "
4601 			"extSta:x%x, tag:%d, seq:%d\n",
4602 			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4603 			mbox_req->extSeqNum);
4604 	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4605 	return rc;
4606 
4607 sli_cfg_ext_error:
4608 	/* all other cases, broken pipe */
4609 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4610 			"2976 SLI_CONFIG mailbox broken pipe: "
4611 			"ctxSta:x%x, ctxNumBuf:%d "
4612 			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4613 			phba->mbox_ext_buf_ctx.state,
4614 			phba->mbox_ext_buf_ctx.numBuf,
4615 			phba->mbox_ext_buf_ctx.mbxTag,
4616 			phba->mbox_ext_buf_ctx.seqNum,
4617 			mbox_req->extMboxTag, mbox_req->extSeqNum);
4618 
4619 	lpfc_bsg_mbox_ext_session_reset(phba);
4620 
4621 	return -EPIPE;
4622 }
4623 
4624 /**
4625  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4626  * @phba: Pointer to HBA context object.
4627  * @job: Pointer to the job object.
4628  * @vport: Pointer to a vport object.
4629  *
4630  * Allocate a tracking object, mailbox command memory, get a mailbox
4631  * from the mailbox pool, copy the caller mailbox command.
4632  *
4633  * If offline and the sli is active we need to poll for the command (port is
4634  * being reset) and complete the job, otherwise issue the mailbox command and
4635  * let our completion handler finish the command.
4636  **/
4637 static int
4638 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4639 	struct lpfc_vport *vport)
4640 {
4641 	struct fc_bsg_request *bsg_request = job->request;
4642 	struct fc_bsg_reply *bsg_reply = job->reply;
4643 	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4644 	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4645 	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4646 	uint8_t *pmbx = NULL;
4647 	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4648 	struct lpfc_dmabuf *dmabuf = NULL;
4649 	struct dfc_mbox_req *mbox_req;
4650 	struct READ_EVENT_LOG_VAR *rdEventLog;
4651 	uint32_t transmit_length, receive_length, mode;
4652 	struct lpfc_mbx_sli4_config *sli4_config;
4653 	struct lpfc_mbx_nembed_cmd *nembed_sge;
4654 	struct ulp_bde64 *bde;
4655 	uint8_t *ext = NULL;
4656 	int rc = 0;
4657 	uint8_t *from;
4658 	uint32_t size;
4659 
4660 	/* in case no data is transferred */
4661 	bsg_reply->reply_payload_rcv_len = 0;
4662 
4663 	/* sanity check to protect driver */
4664 	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4665 	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4666 		rc = -ERANGE;
4667 		goto job_done;
4668 	}
4669 
4670 	/*
4671 	 * Don't allow mailbox commands to be sent when blocked or when in
4672 	 * the middle of discovery
4673 	 */
4674 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4675 		rc = -EAGAIN;
4676 		goto job_done;
4677 	}
4678 
4679 	mbox_req =
4680 	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4681 
4682 	/* check if requested extended data lengths are valid */
4683 	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4684 	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4685 		rc = -ERANGE;
4686 		goto job_done;
4687 	}
4688 
4689 	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4690 	if (!dmabuf || !dmabuf->virt) {
4691 		rc = -ENOMEM;
4692 		goto job_done;
4693 	}
4694 
4695 	/* Get the mailbox command or external buffer from BSG */
4696 	pmbx = (uint8_t *)dmabuf->virt;
4697 	size = job->request_payload.payload_len;
4698 	sg_copy_to_buffer(job->request_payload.sg_list,
4699 			  job->request_payload.sg_cnt, pmbx, size);
4700 
4701 	/* Handle possible SLI_CONFIG with non-embedded payloads */
4702 	if (phba->sli_rev == LPFC_SLI_REV4) {
4703 		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4704 		if (rc == SLI_CONFIG_HANDLED)
4705 			goto job_cont;
4706 		if (rc)
4707 			goto job_done;
4708 		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4709 	}
4710 
4711 	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4712 	if (rc != 0)
4713 		goto job_done; /* must be negative */
4714 
4715 	/* allocate our bsg tracking structure */
4716 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4717 	if (!dd_data) {
4718 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4719 				"2727 Failed allocation of dd_data\n");
4720 		rc = -ENOMEM;
4721 		goto job_done;
4722 	}
4723 
4724 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4725 	if (!pmboxq) {
4726 		rc = -ENOMEM;
4727 		goto job_done;
4728 	}
4729 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4730 
4731 	pmb = &pmboxq->u.mb;
4732 	memcpy(pmb, pmbx, sizeof(*pmb));
4733 	pmb->mbxOwner = OWN_HOST;
4734 	pmboxq->vport = vport;
4735 
4736 	/* If HBA encountered an error attention, allow only DUMP
4737 	 * or RESTART mailbox commands until the HBA is restarted.
4738 	 */
4739 	if (phba->pport->stopped &&
4740 	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4741 	    pmb->mbxCommand != MBX_RESTART &&
4742 	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4743 	    pmb->mbxCommand != MBX_WRITE_WWN)
4744 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4745 				"2797 mbox: Issued mailbox cmd "
4746 				"0x%x while in stopped state.\n",
4747 				pmb->mbxCommand);
4748 
4749 	/* extended mailbox commands will need an extended buffer */
4750 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4751 		from = pmbx;
4752 		ext = from + sizeof(MAILBOX_t);
4753 		pmboxq->ctx_buf = ext;
4754 		pmboxq->in_ext_byte_len =
4755 			mbox_req->inExtWLen * sizeof(uint32_t);
4756 		pmboxq->out_ext_byte_len =
4757 			mbox_req->outExtWLen * sizeof(uint32_t);
4758 		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4759 	}
4760 
4761 	/* biu diag will need a kernel buffer to transfer the data
4762 	 * allocate our own buffer and setup the mailbox command to
4763 	 * use ours
4764 	 */
4765 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4766 		transmit_length = pmb->un.varWords[1];
4767 		receive_length = pmb->un.varWords[4];
4768 		/* transmit length cannot be greater than receive length or
4769 		 * mailbox extension size
4770 		 */
4771 		if ((transmit_length > receive_length) ||
4772 			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4773 			rc = -ERANGE;
4774 			goto job_done;
4775 		}
4776 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4777 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4778 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4779 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4780 
4781 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4782 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4783 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4784 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4785 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4786 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4787 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4788 		rdEventLog = &pmb->un.varRdEventLog;
4789 		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4790 		mode = bf_get(lpfc_event_log, rdEventLog);
4791 
4792 		/* receive length cannot be greater than mailbox
4793 		 * extension size
4794 		 */
4795 		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4796 			rc = -ERANGE;
4797 			goto job_done;
4798 		}
4799 
4800 		/* mode zero uses a bde like biu diags command */
4801 		if (mode == 0) {
4802 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4803 							+ sizeof(MAILBOX_t));
4804 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4805 							+ sizeof(MAILBOX_t));
4806 		}
4807 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4808 		/* Let type 4 (well known data) through because the data is
4809 		 * returned in varwords[4-8]
4810 		 * otherwise check the recieve length and fetch the buffer addr
4811 		 */
4812 		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4813 			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4814 			/* rebuild the command for sli4 using our own buffers
4815 			* like we do for biu diags
4816 			*/
4817 			receive_length = pmb->un.varWords[2];
4818 			/* receive length cannot be greater than mailbox
4819 			 * extension size
4820 			 */
4821 			if (receive_length == 0) {
4822 				rc = -ERANGE;
4823 				goto job_done;
4824 			}
4825 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4826 						+ sizeof(MAILBOX_t));
4827 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4828 						+ sizeof(MAILBOX_t));
4829 		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4830 			pmb->un.varUpdateCfg.co) {
4831 			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4832 
4833 			/* bde size cannot be greater than mailbox ext size */
4834 			if (bde->tus.f.bdeSize >
4835 			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4836 				rc = -ERANGE;
4837 				goto job_done;
4838 			}
4839 			bde->addrHigh = putPaddrHigh(dmabuf->phys
4840 						+ sizeof(MAILBOX_t));
4841 			bde->addrLow = putPaddrLow(dmabuf->phys
4842 						+ sizeof(MAILBOX_t));
4843 		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4844 			/* Handling non-embedded SLI_CONFIG mailbox command */
4845 			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4846 			if (!bf_get(lpfc_mbox_hdr_emb,
4847 			    &sli4_config->header.cfg_mhdr)) {
4848 				/* rebuild the command for sli4 using our
4849 				 * own buffers like we do for biu diags
4850 				 */
4851 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4852 						&pmb->un.varWords[0];
4853 				receive_length = nembed_sge->sge[0].length;
4854 
4855 				/* receive length cannot be greater than
4856 				 * mailbox extension size
4857 				 */
4858 				if ((receive_length == 0) ||
4859 				    (receive_length >
4860 				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4861 					rc = -ERANGE;
4862 					goto job_done;
4863 				}
4864 
4865 				nembed_sge->sge[0].pa_hi =
4866 						putPaddrHigh(dmabuf->phys
4867 						   + sizeof(MAILBOX_t));
4868 				nembed_sge->sge[0].pa_lo =
4869 						putPaddrLow(dmabuf->phys
4870 						   + sizeof(MAILBOX_t));
4871 			}
4872 		}
4873 	}
4874 
4875 	dd_data->context_un.mbox.dmabuffers = dmabuf;
4876 
4877 	/* setup wake call as IOCB callback */
4878 	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4879 
4880 	/* setup context field to pass wait_queue pointer to wake function */
4881 	pmboxq->ctx_ndlp = dd_data;
4882 	dd_data->type = TYPE_MBOX;
4883 	dd_data->set_job = job;
4884 	dd_data->context_un.mbox.pmboxq = pmboxq;
4885 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4886 	dd_data->context_un.mbox.ext = ext;
4887 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4888 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4889 	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4890 	job->dd_data = dd_data;
4891 
4892 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4893 	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4894 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4895 		if (rc != MBX_SUCCESS) {
4896 			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4897 			goto job_done;
4898 		}
4899 
4900 		/* job finished, copy the data */
4901 		memcpy(pmbx, pmb, sizeof(*pmb));
4902 		bsg_reply->reply_payload_rcv_len =
4903 			sg_copy_from_buffer(job->reply_payload.sg_list,
4904 					    job->reply_payload.sg_cnt,
4905 					    pmbx, size);
4906 		/* not waiting mbox already done */
4907 		rc = 0;
4908 		goto job_done;
4909 	}
4910 
4911 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4912 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4913 		return 1; /* job started */
4914 
4915 job_done:
4916 	/* common exit for error or job completed inline */
4917 	if (pmboxq)
4918 		mempool_free(pmboxq, phba->mbox_mem_pool);
4919 	lpfc_bsg_dma_page_free(phba, dmabuf);
4920 	kfree(dd_data);
4921 
4922 job_cont:
4923 	return rc;
4924 }
4925 
4926 /**
4927  * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4928  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4929  **/
4930 static int
4931 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4932 {
4933 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4934 	struct fc_bsg_request *bsg_request = job->request;
4935 	struct fc_bsg_reply *bsg_reply = job->reply;
4936 	struct lpfc_hba *phba = vport->phba;
4937 	struct dfc_mbox_req *mbox_req;
4938 	int rc = 0;
4939 
4940 	/* mix-and-match backward compatibility */
4941 	bsg_reply->reply_payload_rcv_len = 0;
4942 	if (job->request_len <
4943 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4944 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4945 				"2737 Mix-and-match backward compatibility "
4946 				"between MBOX_REQ old size:%d and "
4947 				"new request size:%d\n",
4948 				(int)(job->request_len -
4949 				      sizeof(struct fc_bsg_request)),
4950 				(int)sizeof(struct dfc_mbox_req));
4951 		mbox_req = (struct dfc_mbox_req *)
4952 				bsg_request->rqst_data.h_vendor.vendor_cmd;
4953 		mbox_req->extMboxTag = 0;
4954 		mbox_req->extSeqNum = 0;
4955 	}
4956 
4957 	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4958 
4959 	if (rc == 0) {
4960 		/* job done */
4961 		bsg_reply->result = 0;
4962 		job->dd_data = NULL;
4963 		bsg_job_done(job, bsg_reply->result,
4964 			       bsg_reply->reply_payload_rcv_len);
4965 	} else if (rc == 1)
4966 		/* job submitted, will complete later*/
4967 		rc = 0; /* return zero, no error */
4968 	else {
4969 		/* some error occurred */
4970 		bsg_reply->result = rc;
4971 		job->dd_data = NULL;
4972 	}
4973 
4974 	return rc;
4975 }
4976 
4977 static int
4978 lpfc_forced_link_speed(struct bsg_job *job)
4979 {
4980 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
4981 	struct lpfc_vport *vport = shost_priv(shost);
4982 	struct lpfc_hba *phba = vport->phba;
4983 	struct fc_bsg_reply *bsg_reply = job->reply;
4984 	struct forced_link_speed_support_reply *forced_reply;
4985 	int rc = 0;
4986 
4987 	if (job->request_len <
4988 	    sizeof(struct fc_bsg_request) +
4989 	    sizeof(struct get_forced_link_speed_support)) {
4990 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4991 				"0048 Received FORCED_LINK_SPEED request "
4992 				"below minimum size\n");
4993 		rc = -EINVAL;
4994 		goto job_error;
4995 	}
4996 
4997 	forced_reply = (struct forced_link_speed_support_reply *)
4998 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
4999 
5000 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5001 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5002 				"0049 Received FORCED_LINK_SPEED reply below "
5003 				"minimum size\n");
5004 		rc = -EINVAL;
5005 		goto job_error;
5006 	}
5007 
5008 	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5009 				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5010 				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5011 job_error:
5012 	bsg_reply->result = rc;
5013 	if (rc == 0)
5014 		bsg_job_done(job, bsg_reply->result,
5015 			       bsg_reply->reply_payload_rcv_len);
5016 	return rc;
5017 }
5018 
5019 /**
5020  * lpfc_check_fwlog_support: Check FW log support on the adapter
5021  * @phba: Pointer to HBA context object.
5022  *
5023  * Check if FW Logging support by the adapter
5024  **/
5025 int
5026 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5027 {
5028 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5029 
5030 	ras_fwlog = &phba->ras_fwlog;
5031 
5032 	if (!ras_fwlog->ras_hwsupport)
5033 		return -EACCES;
5034 	else if (!ras_fwlog->ras_enabled)
5035 		return -EPERM;
5036 	else
5037 		return 0;
5038 }
5039 
5040 /**
5041  * lpfc_bsg_get_ras_config: Get RAS configuration settings
5042  * @job: fc_bsg_job to handle
5043  *
5044  * Get RAS configuration values set.
5045  **/
5046 static int
5047 lpfc_bsg_get_ras_config(struct bsg_job *job)
5048 {
5049 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5050 	struct lpfc_vport *vport = shost_priv(shost);
5051 	struct fc_bsg_reply *bsg_reply = job->reply;
5052 	struct lpfc_hba *phba = vport->phba;
5053 	struct lpfc_bsg_get_ras_config_reply *ras_reply;
5054 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5055 	int rc = 0;
5056 
5057 	if (job->request_len <
5058 	    sizeof(struct fc_bsg_request) +
5059 	    sizeof(struct lpfc_bsg_ras_req)) {
5060 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5061 				"6192 FW_LOG request received "
5062 				"below minimum size\n");
5063 		rc = -EINVAL;
5064 		goto ras_job_error;
5065 	}
5066 
5067 	/* Check FW log status */
5068 	rc = lpfc_check_fwlog_support(phba);
5069 	if (rc)
5070 		goto ras_job_error;
5071 
5072 	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5073 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5074 
5075 	/* Current logging state */
5076 	spin_lock_irq(&phba->hbalock);
5077 	if (ras_fwlog->state == ACTIVE)
5078 		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5079 	else
5080 		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5081 	spin_unlock_irq(&phba->hbalock);
5082 
5083 	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5084 	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5085 
5086 ras_job_error:
5087 	/* make error code available to userspace */
5088 	bsg_reply->result = rc;
5089 
5090 	/* complete the job back to userspace */
5091 	if (!rc)
5092 		bsg_job_done(job, bsg_reply->result,
5093 			     bsg_reply->reply_payload_rcv_len);
5094 	return rc;
5095 }
5096 
5097 /**
5098  * lpfc_bsg_set_ras_config: Set FW logging parameters
5099  * @job: fc_bsg_job to handle
5100  *
5101  * Set log-level parameters for FW-logging in host memory
5102  **/
5103 static int
5104 lpfc_bsg_set_ras_config(struct bsg_job *job)
5105 {
5106 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5107 	struct lpfc_vport *vport = shost_priv(shost);
5108 	struct lpfc_hba *phba = vport->phba;
5109 	struct lpfc_bsg_set_ras_config_req *ras_req;
5110 	struct fc_bsg_request *bsg_request = job->request;
5111 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5112 	struct fc_bsg_reply *bsg_reply = job->reply;
5113 	uint8_t action = 0, log_level = 0;
5114 	int rc = 0, action_status = 0;
5115 
5116 	if (job->request_len <
5117 	    sizeof(struct fc_bsg_request) +
5118 	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
5119 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5120 				"6182 Received RAS_LOG request "
5121 				"below minimum size\n");
5122 		rc = -EINVAL;
5123 		goto ras_job_error;
5124 	}
5125 
5126 	/* Check FW log status */
5127 	rc = lpfc_check_fwlog_support(phba);
5128 	if (rc)
5129 		goto ras_job_error;
5130 
5131 	ras_req = (struct lpfc_bsg_set_ras_config_req *)
5132 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5133 	action = ras_req->action;
5134 	log_level = ras_req->log_level;
5135 
5136 	if (action == LPFC_RASACTION_STOP_LOGGING) {
5137 		/* Check if already disabled */
5138 		spin_lock_irq(&phba->hbalock);
5139 		if (ras_fwlog->state != ACTIVE) {
5140 			spin_unlock_irq(&phba->hbalock);
5141 			rc = -ESRCH;
5142 			goto ras_job_error;
5143 		}
5144 		spin_unlock_irq(&phba->hbalock);
5145 
5146 		/* Disable logging */
5147 		lpfc_ras_stop_fwlog(phba);
5148 	} else {
5149 		/*action = LPFC_RASACTION_START_LOGGING*/
5150 
5151 		/* Even though FW-logging is active re-initialize
5152 		 * FW-logging with new log-level. Return status
5153 		 * "Logging already Running" to caller.
5154 		 **/
5155 		spin_lock_irq(&phba->hbalock);
5156 		if (ras_fwlog->state != INACTIVE)
5157 			action_status = -EINPROGRESS;
5158 		spin_unlock_irq(&phba->hbalock);
5159 
5160 		/* Enable logging */
5161 		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5162 					      LPFC_RAS_ENABLE_LOGGING);
5163 		if (rc) {
5164 			rc = -EINVAL;
5165 			goto ras_job_error;
5166 		}
5167 
5168 		/* Check if FW-logging is re-initialized */
5169 		if (action_status == -EINPROGRESS)
5170 			rc = action_status;
5171 	}
5172 ras_job_error:
5173 	/* make error code available to userspace */
5174 	bsg_reply->result = rc;
5175 
5176 	/* complete the job back to userspace */
5177 	if (!rc)
5178 		bsg_job_done(job, bsg_reply->result,
5179 			     bsg_reply->reply_payload_rcv_len);
5180 
5181 	return rc;
5182 }
5183 
5184 /**
5185  * lpfc_bsg_get_ras_lwpd: Get log write position data
5186  * @job: fc_bsg_job to handle
5187  *
5188  * Get Offset/Wrap count of the log message written
5189  * in host memory
5190  **/
5191 static int
5192 lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5193 {
5194 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5195 	struct lpfc_vport *vport = shost_priv(shost);
5196 	struct lpfc_bsg_get_ras_lwpd *ras_reply;
5197 	struct lpfc_hba *phba = vport->phba;
5198 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5199 	struct fc_bsg_reply *bsg_reply = job->reply;
5200 	u32 *lwpd_ptr = NULL;
5201 	int rc = 0;
5202 
5203 	rc = lpfc_check_fwlog_support(phba);
5204 	if (rc)
5205 		goto ras_job_error;
5206 
5207 	if (job->request_len <
5208 	    sizeof(struct fc_bsg_request) +
5209 	    sizeof(struct lpfc_bsg_ras_req)) {
5210 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5211 				"6183 Received RAS_LOG request "
5212 				"below minimum size\n");
5213 		rc = -EINVAL;
5214 		goto ras_job_error;
5215 	}
5216 
5217 	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5218 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5219 
5220 	if (!ras_fwlog->lwpd.virt) {
5221 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5222 				"6193 Restart FW Logging\n");
5223 		rc = -EINVAL;
5224 		goto ras_job_error;
5225 	}
5226 
5227 	/* Get lwpd offset */
5228 	lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5229 	ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5230 
5231 	/* Get wrap count */
5232 	ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5233 
5234 ras_job_error:
5235 	/* make error code available to userspace */
5236 	bsg_reply->result = rc;
5237 
5238 	/* complete the job back to userspace */
5239 	if (!rc)
5240 		bsg_job_done(job, bsg_reply->result,
5241 			     bsg_reply->reply_payload_rcv_len);
5242 
5243 	return rc;
5244 }
5245 
5246 /**
5247  * lpfc_bsg_get_ras_fwlog: Read FW log
5248  * @job: fc_bsg_job to handle
5249  *
5250  * Copy the FW log into the passed buffer.
5251  **/
5252 static int
5253 lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5254 {
5255 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5256 	struct lpfc_vport *vport = shost_priv(shost);
5257 	struct lpfc_hba *phba = vport->phba;
5258 	struct fc_bsg_request *bsg_request = job->request;
5259 	struct fc_bsg_reply *bsg_reply = job->reply;
5260 	struct lpfc_bsg_get_fwlog_req *ras_req;
5261 	u32 rd_offset, rd_index, offset;
5262 	void *src, *fwlog_buff;
5263 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5264 	struct lpfc_dmabuf *dmabuf, *next;
5265 	int rc = 0;
5266 
5267 	ras_fwlog = &phba->ras_fwlog;
5268 
5269 	rc = lpfc_check_fwlog_support(phba);
5270 	if (rc)
5271 		goto ras_job_error;
5272 
5273 	/* Logging to be stopped before reading */
5274 	spin_lock_irq(&phba->hbalock);
5275 	if (ras_fwlog->state == ACTIVE) {
5276 		spin_unlock_irq(&phba->hbalock);
5277 		rc = -EINPROGRESS;
5278 		goto ras_job_error;
5279 	}
5280 	spin_unlock_irq(&phba->hbalock);
5281 
5282 	if (job->request_len <
5283 	    sizeof(struct fc_bsg_request) +
5284 	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
5285 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5286 				"6184 Received RAS_LOG request "
5287 				"below minimum size\n");
5288 		rc = -EINVAL;
5289 		goto ras_job_error;
5290 	}
5291 
5292 	ras_req = (struct lpfc_bsg_get_fwlog_req *)
5293 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5294 	rd_offset = ras_req->read_offset;
5295 
5296 	/* Allocate memory to read fw log*/
5297 	fwlog_buff = vmalloc(ras_req->read_size);
5298 	if (!fwlog_buff) {
5299 		rc = -ENOMEM;
5300 		goto ras_job_error;
5301 	}
5302 
5303 	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5304 	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5305 
5306 	list_for_each_entry_safe(dmabuf, next,
5307 			      &ras_fwlog->fwlog_buff_list, list) {
5308 
5309 		if (dmabuf->buffer_tag < rd_index)
5310 			continue;
5311 
5312 		src = dmabuf->virt + offset;
5313 		memcpy(fwlog_buff, src, ras_req->read_size);
5314 		break;
5315 	}
5316 
5317 	bsg_reply->reply_payload_rcv_len =
5318 		sg_copy_from_buffer(job->reply_payload.sg_list,
5319 				    job->reply_payload.sg_cnt,
5320 				    fwlog_buff, ras_req->read_size);
5321 
5322 	vfree(fwlog_buff);
5323 
5324 ras_job_error:
5325 	bsg_reply->result = rc;
5326 	if (!rc)
5327 		bsg_job_done(job, bsg_reply->result,
5328 			     bsg_reply->reply_payload_rcv_len);
5329 
5330 	return rc;
5331 }
5332 
5333 static int
5334 lpfc_get_trunk_info(struct bsg_job *job)
5335 {
5336 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5337 	struct lpfc_hba *phba = vport->phba;
5338 	struct fc_bsg_reply *bsg_reply = job->reply;
5339 	struct lpfc_trunk_info *event_reply;
5340 	int rc = 0;
5341 
5342 	if (job->request_len <
5343 	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5344 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5345 				"2744 Received GET TRUNK _INFO request below "
5346 				"minimum size\n");
5347 		rc = -EINVAL;
5348 		goto job_error;
5349 	}
5350 
5351 	event_reply = (struct lpfc_trunk_info *)
5352 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5353 
5354 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5355 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5356 				"2728 Received GET TRUNK _INFO reply below "
5357 				"minimum size\n");
5358 		rc = -EINVAL;
5359 		goto job_error;
5360 	}
5361 	if (event_reply == NULL) {
5362 		rc = -EINVAL;
5363 		goto job_error;
5364 	}
5365 
5366 	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5367 		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5368 
5369 	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5370 		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5371 
5372 	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5373 		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5374 
5375 	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5376 		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5377 
5378 	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5379 		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5380 
5381 	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5382 		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5383 
5384 	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5385 		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5386 
5387 	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5388 		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5389 
5390 	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5391 		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5392 
5393 	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5394 	event_reply->logical_speed =
5395 				phba->sli4_hba.link_state.logical_speed / 1000;
5396 job_error:
5397 	bsg_reply->result = rc;
5398 	if (!rc)
5399 		bsg_job_done(job, bsg_reply->result,
5400 			     bsg_reply->reply_payload_rcv_len);
5401 	return rc;
5402 
5403 }
5404 
5405 static int
5406 lpfc_get_cgnbuf_info(struct bsg_job *job)
5407 {
5408 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5409 	struct lpfc_hba *phba = vport->phba;
5410 	struct fc_bsg_request *bsg_request = job->request;
5411 	struct fc_bsg_reply *bsg_reply = job->reply;
5412 	struct get_cgnbuf_info_req *cgnbuf_req;
5413 	struct lpfc_cgn_info *cp;
5414 	uint8_t *cgn_buff;
5415 	int size, cinfosz;
5416 	int  rc = 0;
5417 
5418 	if (job->request_len < sizeof(struct fc_bsg_request) +
5419 	    sizeof(struct get_cgnbuf_info_req)) {
5420 		rc = -ENOMEM;
5421 		goto job_exit;
5422 	}
5423 
5424 	if (!phba->sli4_hba.pc_sli4_params.cmf) {
5425 		rc = -ENOENT;
5426 		goto job_exit;
5427 	}
5428 
5429 	if (!phba->cgn_i || !phba->cgn_i->virt) {
5430 		rc = -ENOENT;
5431 		goto job_exit;
5432 	}
5433 
5434 	cp = phba->cgn_i->virt;
5435 	if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
5436 		rc = -EPERM;
5437 		goto job_exit;
5438 	}
5439 
5440 	cgnbuf_req = (struct get_cgnbuf_info_req *)
5441 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5442 
5443 	/* For reset or size == 0 */
5444 	bsg_reply->reply_payload_rcv_len = 0;
5445 
5446 	if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
5447 		lpfc_init_congestion_stat(phba);
5448 		goto job_exit;
5449 	}
5450 
5451 	/* We don't want to include the CRC at the end */
5452 	cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
5453 
5454 	size = cgnbuf_req->read_size;
5455 	if (!size)
5456 		goto job_exit;
5457 
5458 	if (size < cinfosz) {
5459 		/* Just copy back what we can */
5460 		cinfosz = size;
5461 		rc = -E2BIG;
5462 	}
5463 
5464 	/* Allocate memory to read congestion info */
5465 	cgn_buff = vmalloc(cinfosz);
5466 	if (!cgn_buff) {
5467 		rc = -ENOMEM;
5468 		goto job_exit;
5469 	}
5470 
5471 	memcpy(cgn_buff, cp, cinfosz);
5472 
5473 	bsg_reply->reply_payload_rcv_len =
5474 		sg_copy_from_buffer(job->reply_payload.sg_list,
5475 				    job->reply_payload.sg_cnt,
5476 				    cgn_buff, cinfosz);
5477 
5478 	vfree(cgn_buff);
5479 
5480 job_exit:
5481 	bsg_reply->result = rc;
5482 	if (!rc)
5483 		bsg_job_done(job, bsg_reply->result,
5484 			     bsg_reply->reply_payload_rcv_len);
5485 	else
5486 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5487 				"2724 GET CGNBUF error: %d\n", rc);
5488 	return rc;
5489 }
5490 
5491 /**
5492  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5493  * @job: fc_bsg_job to handle
5494  **/
5495 static int
5496 lpfc_bsg_hst_vendor(struct bsg_job *job)
5497 {
5498 	struct fc_bsg_request *bsg_request = job->request;
5499 	struct fc_bsg_reply *bsg_reply = job->reply;
5500 	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5501 	int rc;
5502 
5503 	switch (command) {
5504 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5505 		rc = lpfc_bsg_hba_set_event(job);
5506 		break;
5507 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5508 		rc = lpfc_bsg_hba_get_event(job);
5509 		break;
5510 	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5511 		rc = lpfc_bsg_send_mgmt_rsp(job);
5512 		break;
5513 	case LPFC_BSG_VENDOR_DIAG_MODE:
5514 		rc = lpfc_bsg_diag_loopback_mode(job);
5515 		break;
5516 	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5517 		rc = lpfc_sli4_bsg_diag_mode_end(job);
5518 		break;
5519 	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5520 		rc = lpfc_bsg_diag_loopback_run(job);
5521 		break;
5522 	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5523 		rc = lpfc_sli4_bsg_link_diag_test(job);
5524 		break;
5525 	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5526 		rc = lpfc_bsg_get_dfc_rev(job);
5527 		break;
5528 	case LPFC_BSG_VENDOR_MBOX:
5529 		rc = lpfc_bsg_mbox_cmd(job);
5530 		break;
5531 	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5532 		rc = lpfc_forced_link_speed(job);
5533 		break;
5534 	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5535 		rc = lpfc_bsg_get_ras_lwpd(job);
5536 		break;
5537 	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5538 		rc = lpfc_bsg_get_ras_fwlog(job);
5539 		break;
5540 	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5541 		rc = lpfc_bsg_get_ras_config(job);
5542 		break;
5543 	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5544 		rc = lpfc_bsg_set_ras_config(job);
5545 		break;
5546 	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5547 		rc = lpfc_get_trunk_info(job);
5548 		break;
5549 	case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
5550 		rc = lpfc_get_cgnbuf_info(job);
5551 		break;
5552 	default:
5553 		rc = -EINVAL;
5554 		bsg_reply->reply_payload_rcv_len = 0;
5555 		/* make error code available to userspace */
5556 		bsg_reply->result = rc;
5557 		break;
5558 	}
5559 
5560 	return rc;
5561 }
5562 
5563 /**
5564  * lpfc_bsg_request - handle a bsg request from the FC transport
5565  * @job: bsg_job to handle
5566  **/
5567 int
5568 lpfc_bsg_request(struct bsg_job *job)
5569 {
5570 	struct fc_bsg_request *bsg_request = job->request;
5571 	struct fc_bsg_reply *bsg_reply = job->reply;
5572 	uint32_t msgcode;
5573 	int rc;
5574 
5575 	msgcode = bsg_request->msgcode;
5576 	switch (msgcode) {
5577 	case FC_BSG_HST_VENDOR:
5578 		rc = lpfc_bsg_hst_vendor(job);
5579 		break;
5580 	case FC_BSG_RPT_ELS:
5581 		rc = lpfc_bsg_rport_els(job);
5582 		break;
5583 	case FC_BSG_RPT_CT:
5584 		rc = lpfc_bsg_send_mgmt_cmd(job);
5585 		break;
5586 	default:
5587 		rc = -EINVAL;
5588 		bsg_reply->reply_payload_rcv_len = 0;
5589 		/* make error code available to userspace */
5590 		bsg_reply->result = rc;
5591 		break;
5592 	}
5593 
5594 	return rc;
5595 }
5596 
5597 /**
5598  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5599  * @job: bsg_job that has timed out
5600  *
5601  * This function just aborts the job's IOCB.  The aborted IOCB will return to
5602  * the waiting function which will handle passing the error back to userspace
5603  **/
5604 int
5605 lpfc_bsg_timeout(struct bsg_job *job)
5606 {
5607 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5608 	struct lpfc_hba *phba = vport->phba;
5609 	struct lpfc_iocbq *cmdiocb;
5610 	struct lpfc_sli_ring *pring;
5611 	struct bsg_job_data *dd_data;
5612 	unsigned long flags;
5613 	int rc = 0;
5614 	LIST_HEAD(completions);
5615 	struct lpfc_iocbq *check_iocb, *next_iocb;
5616 
5617 	pring = lpfc_phba_elsring(phba);
5618 	if (unlikely(!pring))
5619 		return -EIO;
5620 
5621 	/* if job's driver data is NULL, the command completed or is in the
5622 	 * the process of completing.  In this case, return status to request
5623 	 * so the timeout is retried.  This avoids double completion issues
5624 	 * and the request will be pulled off the timer queue when the
5625 	 * command's completion handler executes.  Otherwise, prevent the
5626 	 * command's completion handler from executing the job done callback
5627 	 * and continue processing to abort the outstanding the command.
5628 	 */
5629 
5630 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5631 	dd_data = (struct bsg_job_data *)job->dd_data;
5632 	if (dd_data) {
5633 		dd_data->set_job = NULL;
5634 		job->dd_data = NULL;
5635 	} else {
5636 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5637 		return -EAGAIN;
5638 	}
5639 
5640 	switch (dd_data->type) {
5641 	case TYPE_IOCB:
5642 		/* Check to see if IOCB was issued to the port or not. If not,
5643 		 * remove it from the txq queue and call cancel iocbs.
5644 		 * Otherwise, call abort iotag
5645 		 */
5646 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5647 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5648 
5649 		spin_lock_irqsave(&phba->hbalock, flags);
5650 		/* make sure the I/O abort window is still open */
5651 		if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
5652 			spin_unlock_irqrestore(&phba->hbalock, flags);
5653 			return -EAGAIN;
5654 		}
5655 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5656 					 list) {
5657 			if (check_iocb == cmdiocb) {
5658 				list_move_tail(&check_iocb->list, &completions);
5659 				break;
5660 			}
5661 		}
5662 		if (list_empty(&completions))
5663 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
5664 		spin_unlock_irqrestore(&phba->hbalock, flags);
5665 		if (!list_empty(&completions)) {
5666 			lpfc_sli_cancel_iocbs(phba, &completions,
5667 					      IOSTAT_LOCAL_REJECT,
5668 					      IOERR_SLI_ABORTED);
5669 		}
5670 		break;
5671 
5672 	case TYPE_EVT:
5673 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5674 		break;
5675 
5676 	case TYPE_MBOX:
5677 		/* Update the ext buf ctx state if needed */
5678 
5679 		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5680 			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5681 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5682 		break;
5683 	default:
5684 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5685 		break;
5686 	}
5687 
5688 	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5689 	 * otherwise an error message will be displayed on the console
5690 	 * so always return success (zero)
5691 	 */
5692 	return rc;
5693 }
5694