/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
#include <linux/vmalloc.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};
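
/*
 * Editor's sketch (illustrative, not driver code): completion and
 * timeout paths recover their per-type state from context_un by
 * switching on dd_data->type, roughly as below. Guarded with #if 0 so
 * it never compiles into the driver; names such as cmdiocb, pmboxq and
 * evt are placeholders assumed to be declared by the caller.
 */
#if 0
	struct bsg_job_data *dd_data = job->dd_data;

	switch (dd_data->type) {
	case TYPE_IOCB:
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		break;
	case TYPE_MBOX:
		pmboxq = dd_data->context_un.mbox.pmboxq;
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		break;
	}
#endif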

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
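
/*
 * Editor's note (illustrative): the expression above is the classic
 * NULL-pointer spelling of offsetof(); with <linux/stddef.h> in scope
 * it is equivalent to:
 *
 *	offsetof(struct lpfc_sli_ct_request, un)
 */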

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
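
/*
 * Usage sketch (illustrative only; mirrors lpfc_bsg_send_mgmt_cmd
 * below): the BPL lives in a single mbuf, the caller passes in the
 * number of free BDE slots, and on success *bpl_entries is updated to
 * the number of slots consumed. The helper name is hypothetical and
 * the block is guarded out of the build.
 */
#if 0
static struct lpfc_dmabuf *
example_setup_cmd_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
			  unsigned int payload_len)
{
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)bmp->virt;
	int entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
	struct lpfc_dmabuf *cmp;

	/* outbound_buffers = 1: these BDEs carry data toward the wire */
	cmp = lpfc_alloc_bsg_buffers(phba, payload_len, 1, bpl, &entries);
	if (!cmp)
		return NULL;
	/* release later with lpfc_free_bsg_buffers(phba, cmp) */
	return cmp;
}
#endif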

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
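
/*
 * Direction sketch (illustrative): to_buffers selects the copy
 * direction; both calls below are taken from the request/reply
 * handling later in this file.
 */
#if 0
	/* stage the request payload into the DMA chain (SG list -> DMA) */
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	/* on completion, pull the reply back out (DMA -> SG list) */
	bsg_reply->reply_payload_rcv_len =
		lpfc_bsg_copy_data(rmp, &job->reply_payload,
				   total_data_placed, 0);
#endif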

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued by
 * lpfc_bsg_send_mgmt_cmd. It is called by the ring event handler
 * function without any lock held, and may run in worker thread
 * context, in interrupt context, or from another thread that cleans
 * up the SLI layer objects.
 * It copies any received data into the bsg job's reply payload,
 * releases the resources held for the request, and completes the bsg
 * job if the job is still active.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;
	u32 ulp_status, ulp_word4, total_data_placed;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->cmdiocbq->context_un.ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);
	total_data_placed = get_job_data_placed(phba, rspiocbq);

	/* Copy the completed data or set the error status */

	if (job) {
		if (ulp_status) {
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				switch (ulp_word4 & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   total_data_placed, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_nlp_put(ndlp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg, reply_nseg;
	u32 num_entry;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;
	u16 ulp_context;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
		return -ENODEV;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	num_entry = request_nseg + reply_nseg;

	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
			      phba->fc_ratov * 2);

	cmdiocbq->num_bdes = num_entry;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->context_un.ndlp) {
		rc = -ENODEV;
		goto free_rmp;
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	lpfc_nlp_put(ndlp);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
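
/*
 * Editor's sketch (illustrative): the submit path above and the
 * completion handler coordinate with the bsg timeout handler through
 * two pieces of state. job->dd_data, cleared under ct_ev_lock, tells
 * the timeout path the job is already completing; the
 * LPFC_IO_CMD_OUTSTANDING bit, toggled under hbalock, bounds the
 * window in which a timeout-driven abort may be issued.
 */
#if 0
	/* completion side: withdraw the job from the timeout handler */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job)
		job->dd_data = NULL;
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* then close the abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);
#endif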

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued by
 * lpfc_bsg_rport_els. It is called by the ring event handler
 * function without any lock held, and may run in worker thread
 * context, in interrupt context, or from another thread that cleans
 * up the SLI layer objects.
 * It copies the ELS response (or the LS_RJT data) into the bsg job's
 * reply, releases the resources held for the request, and completes
 * the bsg job if the job is still active.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;
	u32 ulp_status, ulp_word4, total_data_placed;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);
	total_data_placed = get_job_data_placed(phba, rspiocbq);
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (ulp_status == IOSTAT_SUCCESS) {
			rsp_size = total_data_placed;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (ulp_status == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&ulp_word4;
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else if (ulp_status == IOSTAT_LOCAL_REJECT &&
			   (ulp_word4 & IOERR_PARAM_MASK) ==
			   IOERR_SEQUENCE_TIMEOUT) {
			rc = -ETIMEDOUT;
		} else {
			rc = -EIO;
		}
	}

	lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* Use the DMA buffers allocated by lpfc_prep_els_iocb for the
	 * command and response so that, if the job times out and the
	 * request is freed, we do not DMA into memory that is no longer
	 * allocated to the request.
	 */
	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	/* Transfer the request payload to allocated command dma buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  job->request_payload.payload_len);

	rpi = ndlp->nlp_rpi;

	if (phba->sli_rev == LPFC_SLI_REV4)
		bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
		       phba->sli4_hba.rpi_ids[rpi]);
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	cmdiocbq->context1 = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->context1) {
		rc = -EIO;
		goto linkdown_err;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* I/O issue failed.  Cleanup resources. */

linkdown_err:
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
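
/*
 * Lifecycle sketch (illustrative): events are reference counted, so
 * each waiter takes a reference under ct_ev_lock and drops it when
 * done; the final lpfc_bsg_event_unref() frees the event through
 * lpfc_bsg_event_free(). This mirrors lpfc_bsg_hba_set_event() below;
 * reg_id and req_id are placeholder values.
 */
#if 0
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);	/* reference held by the waiter */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* ... wait for and consume events ... */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);	/* last put frees the event */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
#endif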

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the SLI ring on which the command was received.
 * @piocbq: Pointer to the iocb describing the unsolicited sequence.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	IOCB_t *iocb = NULL;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;
	u32 bde_count = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
			else
				evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				iocb = &iocbq->iocb;
				for (i = 0; i < iocb->ulpBdeCount; i++)
					evt_dat->len +=
					iocb->un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			if (phba->sli_rev == LPFC_SLI_REV4)
				bde_count = iocbq->wcqe_cmpl.word3;
			else
				bde_count = iocbq->iocb.ulpBdeCount;
			for (i = 0; i < bde_count; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						size = iocbq->unsol_rcv_len;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq x%px\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_sli3_post_buffer(phba,
									      pring,
									      1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_sli3_post_buffer(phba,
									      pring,
									      1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				get_job_ulpcontext(phba, piocbq);
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				get_job_rcvoxid(phba, piocbq);
			phba->ct_ctx[evt_dat->immed_dat].SID =
				bf_get(wqe_els_did,
				       &piocbq->wqe.xmit_els_rsp.wqe_dest);
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				       bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle a CT abort toward the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command toward the management
 * plane for an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * it clears that context and returns 1 for handled; otherwise, it returns 0
 * indicating no context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there aren't any more events
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued by
 * lpfc_issue_ct_rsp. It is called by the ring event handler
 * function without any lock held, and may run in worker thread
 * context, in interrupt context, or from another thread that cleans
 * up the SLI layer objects.
 * It sets the job's error status from the completion, releases the
 * resources held for the request, and completes the bsg job if the
 * job is still active.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;
	u32 ulp_status, ulp_word4;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;

	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (ulp_status) {
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				switch (ulp_word4 & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a bmp dma buffer descriptor.
 * @num_entry: Number of entries in the BDE list.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	u16 ulp_context, iotag;

	ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"2721 ndlp null for oxid %x SID %x\n",
				phba->ct_ctx[tag].rxid,
				phba->ct_ctx[tag].SID);
		return IOCB_ERROR;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 phba->ct_ctx[tag].oxid, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
		iotag = get_wqe_reqtag(ctiocb);
	} else {
		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
		ctiocb->num_bdes = num_entry;
		iotag = ctiocb->iocb.ulpIoTag;
	}

	ulp_context = get_job_ulpcontext(phba, ctiocb);

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
			ulp_context, iotag, tag, phba->link_state);

	ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
	if (!dd_data->context_un.iocb.ndlp) {
		rc = -IOCB_ERROR;
		goto issue_ct_rsp_exit;
	}
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;
	lpfc_nlp_put(ndlp);

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit the device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing after
 * diag loopback mode has been set up on the device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}
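
/*
 * Pairing sketch (illustrative): diag-mode request handlers bracket
 * their work with the two helpers above, as
 * lpfc_sli3_bsg_diag_loopback_mode() does below.
 */
#if 0
	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* ... bring the link down and (re)initialize it in loopback ... */

	lpfc_bsg_diag_mode_exit(phba);
#endif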

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. Once the link
 * is placed in loopback mode, scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
1866 
1867 /**
1868  * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1869  * @phba: Pointer to HBA context object.
1870  * @diag: Flag to set the link to diag or normal operation state.
1871  *
1872  * This function is responsible for issuing an sli4 mailbox command to set
1873  * the link to either the diag state or the normal operation state.
1874  */
1875 static int
1876 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1877 {
1878 	LPFC_MBOXQ_t *pmboxq;
1879 	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1880 	uint32_t req_len, alloc_len;
1881 	int mbxstatus = MBX_SUCCESS, rc;
1882 
1883 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1884 	if (!pmboxq)
1885 		return -ENOMEM;
1886 
1887 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1888 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1889 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1890 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1891 				req_len, LPFC_SLI4_MBX_EMBED);
1892 	if (alloc_len != req_len) {
1893 		rc = -ENOMEM;
1894 		goto link_diag_state_set_out;
1895 	}
1896 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1897 			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1898 			diag, phba->sli4_hba.lnk_info.lnk_tp,
1899 			phba->sli4_hba.lnk_info.lnk_no);
1900 
1901 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1902 	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1903 	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1904 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1905 	       phba->sli4_hba.lnk_info.lnk_no);
1906 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1907 	       phba->sli4_hba.lnk_info.lnk_tp);
1908 	if (diag)
1909 		bf_set(lpfc_mbx_set_diag_state_diag,
1910 		       &link_diag_state->u.req, 1);
1911 	else
1912 		bf_set(lpfc_mbx_set_diag_state_diag,
1913 		       &link_diag_state->u.req, 0);
1914 
1915 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1916 
1917 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1918 		rc = 0;
1919 	else
1920 		rc = -ENODEV;
1921 
1922 link_diag_state_set_out:
1923 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1924 		mempool_free(pmboxq, phba->mbox_mem_pool);
1925 
1926 	return rc;
1927 }
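
/*
 * Illustrative call pattern (a sketch, not upstream code): callers in
 * this file bracket a diagnostic run by toggling the diag state bit:
 *
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
 *	if (rc)
 *		goto out;
 *	... run the SLI4 link diagnostic ...
 *	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
 */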
1928 
1929 /**
1930  * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
1931  * @phba: Pointer to HBA context object.
1932  * @mode: loopback mode to set
1933  * @link_no: link number for loopback mode to set
1934  *
1935  * This function is responsible for issuing an sli4 mailbox command to set
1936  * up loopback diagnostic mode for a link.
1937  */
1938 static int
1939 lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
1940 				uint32_t link_no)
1941 {
1942 	LPFC_MBOXQ_t *pmboxq;
1943 	uint32_t req_len, alloc_len;
1944 	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1945 	int mbxstatus = MBX_SUCCESS, rc = 0;
1946 
1947 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1948 	if (!pmboxq)
1949 		return -ENOMEM;
1950 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1951 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1952 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1953 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1954 				req_len, LPFC_SLI4_MBX_EMBED);
1955 	if (alloc_len != req_len) {
1956 		mempool_free(pmboxq, phba->mbox_mem_pool);
1957 		return -ENOMEM;
1958 	}
1959 	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1960 	bf_set(lpfc_mbx_set_diag_state_link_num,
1961 	       &link_diag_loopback->u.req, link_no);
1962 
1963 	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
1964 		bf_set(lpfc_mbx_set_diag_state_link_type,
1965 		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
1966 	} else {
1967 		bf_set(lpfc_mbx_set_diag_state_link_type,
1968 		       &link_diag_loopback->u.req,
1969 		       phba->sli4_hba.lnk_info.lnk_tp);
1970 	}
1971 
1972 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1973 	       mode);
1974 
1975 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1976 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1977 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1978 				"3127 Failed setup loopback mode mailbox "
1979 				"command, rc:x%x, status:x%x\n", mbxstatus,
1980 				pmboxq->u.mb.mbxStatus);
1981 		rc = -ENODEV;
1982 	}
1983 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1984 		mempool_free(pmboxq, phba->mbox_mem_pool);
1985 	return rc;
1986 }
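
/*
 * Note on the conf_trunk bit usage above (as inferred from this file,
 * not a documented ABI): the low nibble marks which links belong to a
 * configured trunk, and the same bit shifted left by 4 records that
 * loopback was enabled on that link and still needs an explicit
 * disable:
 *
 *	phba->sli4_hba.conf_trunk |=   (1 << link_no) << 4;
 *	phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
 *	trunked = phba->sli4_hba.conf_trunk & (1 << link_no);
 */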
1987 
1988 /**
1989  * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1990  * @phba: Pointer to HBA context object.
1991  *
1992  * This function sets up SLI4 FC port registrations for a diagnostic run,
1993  * which includes the rpis, the vfi, and the vpi.
1994  */
1995 static int
1996 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1997 {
1998 	int rc;
1999 
2000 	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
2001 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2002 				"3136 Port still had vfi registered: "
2003 				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2004 				phba->pport->fc_myDID, phba->fcf.fcfi,
2005 				phba->sli4_hba.vfi_ids[phba->pport->vfi],
2006 				phba->vpi_ids[phba->pport->vpi]);
2007 		return -EINVAL;
2008 	}
2009 	rc = lpfc_issue_reg_vfi(phba->pport);
2010 	return rc;
2011 }
2012 
2013 /**
2014  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2015  * @phba: Pointer to HBA context object.
2016  * @job: LPFC_BSG_VENDOR_DIAG_MODE
2017  *
2018  * This function is responsible for placing an sli4 port into diagnostic
2019  * loopback mode in order to perform a diagnostic loopback test.
2020  */
2021 static int
2022 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2023 {
2024 	struct fc_bsg_request *bsg_request = job->request;
2025 	struct fc_bsg_reply *bsg_reply = job->reply;
2026 	struct diag_mode_set *loopback_mode;
2027 	uint32_t link_flags, timeout, link_no;
2028 	int i, rc = 0;
2029 
2030 	/* no data to return just the return code */
2031 	bsg_reply->reply_payload_rcv_len = 0;
2032 
2033 	if (job->request_len < sizeof(struct fc_bsg_request) +
2034 	    sizeof(struct diag_mode_set)) {
2035 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2036 				"3011 Received DIAG MODE request size:%d "
2037 				"below the minimum size:%d\n",
2038 				job->request_len,
2039 				(int)(sizeof(struct fc_bsg_request) +
2040 				sizeof(struct diag_mode_set)));
2041 		rc = -EINVAL;
2042 		goto job_done;
2043 	}
2044 
2045 	loopback_mode = (struct diag_mode_set *)
2046 		bsg_request->rqst_data.h_vendor.vendor_cmd;
2047 	link_flags = loopback_mode->type;
2048 	timeout = loopback_mode->timeout * 100;
2049 
2050 	if (loopback_mode->physical_link == -1)
2051 		link_no = phba->sli4_hba.lnk_info.lnk_no;
2052 	else
2053 		link_no = loopback_mode->physical_link;
2054 
2055 	if (link_flags == DISABLE_LOOP_BACK) {
2056 		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2057 					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2058 					link_no);
2059 		if (!rc) {
2060 			/* Unset the need disable bit */
2061 			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2062 		}
2063 		goto job_done;
2064 	} else {
2065 		/* Check if we need to disable the loopback state */
2066 		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2067 			rc = -EPERM;
2068 			goto job_done;
2069 		}
2070 	}
2071 
2072 	rc = lpfc_bsg_diag_mode_enter(phba);
2073 	if (rc)
2074 		goto job_done;
2075 
2076 	/* indicate we are in loopback diagnostic mode */
2077 	spin_lock_irq(&phba->hbalock);
2078 	phba->link_flag |= LS_LOOPBACK_MODE;
2079 	spin_unlock_irq(&phba->hbalock);
2080 
2081 	/* reset port to start from scratch */
2082 	rc = lpfc_selective_reset(phba);
2083 	if (rc)
2084 		goto job_done;
2085 
2086 	/* bring the link to diagnostic mode */
2087 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2088 			"3129 Bring link to diagnostic state.\n");
2089 
2090 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2091 	if (rc) {
2092 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2093 				"3130 Failed to bring link to diagnostic "
2094 				"state, rc:x%x\n", rc);
2095 		goto loopback_mode_exit;
2096 	}
2097 
2098 	/* wait for link down before proceeding */
2099 	i = 0;
2100 	while (phba->link_state != LPFC_LINK_DOWN) {
2101 		if (i++ > timeout) {
2102 			rc = -ETIMEDOUT;
2103 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2104 					"3131 Timeout waiting for link to "
2105 					"diagnostic mode, timeout:%d ms\n",
2106 					timeout * 10);
2107 			goto loopback_mode_exit;
2108 		}
2109 		msleep(10);
2110 	}
2111 
2112 	/* set up loopback mode */
2113 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2114 			"3132 Set up loopback mode:x%x\n", link_flags);
2115 
2116 	switch (link_flags) {
2117 	case INTERNAL_LOOP_BACK:
2118 		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2119 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2120 					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2121 					link_no);
2122 		} else {
2123 			/* Trunk is configured, but link is not in this trunk */
2124 			if (phba->sli4_hba.conf_trunk) {
2125 				rc = -ELNRNG;
2126 				goto loopback_mode_exit;
2127 			}
2128 
2129 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2130 					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2131 					link_no);
2132 		}
2133 
2134 		if (!rc) {
2135 			/* Set the need disable bit */
2136 			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2137 		}
2138 
2139 		break;
2140 	case EXTERNAL_LOOP_BACK:
2141 		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2142 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2143 				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2144 				link_no);
2145 		} else {
2146 			/* Trunk is configured, but link is not in this trunk */
2147 			if (phba->sli4_hba.conf_trunk) {
2148 				rc = -ELNRNG;
2149 				goto loopback_mode_exit;
2150 			}
2151 
2152 			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2153 						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2154 						link_no);
2155 		}
2156 
2157 		if (!rc) {
2158 			/* Set the need disable bit */
2159 			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2160 		}
2161 
2162 		break;
2163 	default:
2164 		rc = -EINVAL;
2165 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2166 				"3141 Loopback mode:x%x not supported\n",
2167 				link_flags);
2168 		goto loopback_mode_exit;
2169 	}
2170 
2171 	if (!rc) {
2172 		/* wait for the link attention interrupt */
2173 		msleep(100);
2174 		i = 0;
2175 		while (phba->link_state < LPFC_LINK_UP) {
2176 			if (i++ > timeout) {
2177 				rc = -ETIMEDOUT;
2178 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2179 					"3137 Timeout waiting for link up "
2180 					"in loopback mode, timeout:%d ms\n",
2181 					timeout * 10);
2182 				break;
2183 			}
2184 			msleep(10);
2185 		}
2186 	}
2187 
2188 	/* port resource registration setup for loopback diagnostic */
2189 	if (!rc) {
2190 		/* set up a non-zero myDID for loopback test */
2191 		phba->pport->fc_myDID = 1;
2192 		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2193 	} else
2194 		goto loopback_mode_exit;
2195 
2196 	if (!rc) {
2197 		/* wait for the port ready */
2198 		msleep(100);
2199 		i = 0;
2200 		while (phba->link_state != LPFC_HBA_READY) {
2201 			if (i++ > timeout) {
2202 				rc = -ETIMEDOUT;
2203 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2204 					"3133 Timeout waiting for port "
2205 					"loopback mode ready, timeout:%d ms\n",
2206 					timeout * 10);
2207 				break;
2208 			}
2209 			msleep(10);
2210 		}
2211 	}
2212 
2213 loopback_mode_exit:
2214 	/* clear loopback diagnostic mode */
2215 	if (rc) {
2216 		spin_lock_irq(&phba->hbalock);
2217 		phba->link_flag &= ~LS_LOOPBACK_MODE;
2218 		spin_unlock_irq(&phba->hbalock);
2219 	}
2220 	lpfc_bsg_diag_mode_exit(phba);
2221 
2222 job_done:
2223 	/* make error code available to userspace */
2224 	bsg_reply->result = rc;
2225 	/* complete the job back to userspace if no error */
2226 	if (rc == 0)
2227 		bsg_job_done(job, bsg_reply->result,
2228 			       bsg_reply->reply_payload_rcv_len);
2229 	return rc;
2230 }
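
/*
 * Worked example of the timeout arithmetic above (illustrative): the
 * application supplies diag_mode_set.timeout in seconds; the driver
 * keeps timeout = seconds * 100 and polls in msleep(10) steps, so the
 * total wait is timeout * 10 ms. A user value of 60 thus allows 6000
 * iterations, i.e. roughly 60000 ms:
 *
 *	timeout = loopback_mode->timeout * 100;
 *	if (i++ > timeout)
 *		rc = -ETIMEDOUT;
 *	msleep(10);
 */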
2231 
2232 /**
2233  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2234  * @job: LPFC_BSG_VENDOR_DIAG_MODE
2235  *
2236  * This function is responsible for checking and dispatching the bsg diag
2237  * command from the user to the proper driver action routine.
2238  */
2239 static int
2240 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2241 {
2242 	struct Scsi_Host *shost;
2243 	struct lpfc_vport *vport;
2244 	struct lpfc_hba *phba;
2245 	int rc;
2246 
2247 	shost = fc_bsg_to_shost(job);
2248 	if (!shost)
2249 		return -ENODEV;
2250 	vport = shost_priv(shost);
2251 	if (!vport)
2252 		return -ENODEV;
2253 	phba = vport->phba;
2254 	if (!phba)
2255 		return -ENODEV;
2256 
2257 	if (phba->sli_rev < LPFC_SLI_REV4)
2258 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2259 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2260 		 LPFC_SLI_INTF_IF_TYPE_2)
2261 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2262 	else
2263 		rc = -ENODEV;
2264 
2265 	return rc;
2266 }
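
/*
 * Request layout sketch (hypothetical values, illustrative only): the
 * application builds the vendor command payload as a struct
 * diag_mode_set, e.g.
 *
 *	struct diag_mode_set mode = {
 *		.type          = INTERNAL_LOOP_BACK,
 *		.timeout       = 60,
 *		.physical_link = -1,
 *	};
 *
 * A physical_link of -1 selects phba->sli4_hba.lnk_info.lnk_no, as
 * handled in lpfc_sli4_bsg_diag_loopback_mode() above.
 */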
2267 
2268 /**
2269  * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2270  * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2271  *
2272  * This function is responsible for checking and dispatching the bsg diag
2273  * command from the user to the proper driver action routine.
2274  */
2275 static int
2276 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2277 {
2278 	struct fc_bsg_request *bsg_request = job->request;
2279 	struct fc_bsg_reply *bsg_reply = job->reply;
2280 	struct Scsi_Host *shost;
2281 	struct lpfc_vport *vport;
2282 	struct lpfc_hba *phba;
2283 	struct diag_mode_set *loopback_mode_end_cmd;
2284 	uint32_t timeout;
2285 	int rc, i;
2286 
2287 	shost = fc_bsg_to_shost(job);
2288 	if (!shost)
2289 		return -ENODEV;
2290 	vport = shost_priv(shost);
2291 	if (!vport)
2292 		return -ENODEV;
2293 	phba = vport->phba;
2294 	if (!phba)
2295 		return -ENODEV;
2296 
2297 	if (phba->sli_rev < LPFC_SLI_REV4)
2298 		return -ENODEV;
2299 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2300 	    LPFC_SLI_INTF_IF_TYPE_2)
2301 		return -ENODEV;
2302 
2303 	/* clear loopback diagnostic mode */
2304 	spin_lock_irq(&phba->hbalock);
2305 	phba->link_flag &= ~LS_LOOPBACK_MODE;
2306 	spin_unlock_irq(&phba->hbalock);
2307 	loopback_mode_end_cmd = (struct diag_mode_set *)
2308 			bsg_request->rqst_data.h_vendor.vendor_cmd;
2309 	timeout = loopback_mode_end_cmd->timeout * 100;
2310 
2311 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2312 	if (rc) {
2313 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2314 				"3139 Failed to bring link to diagnostic "
2315 				"state, rc:x%x\n", rc);
2316 		goto loopback_mode_end_exit;
2317 	}
2318 
2319 	/* wait for link down before proceeding */
2320 	i = 0;
2321 	while (phba->link_state != LPFC_LINK_DOWN) {
2322 		if (i++ > timeout) {
2323 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2324 					"3140 Timeout waiting for link to "
2325 					"diagnostic mode_end, timeout:%d ms\n",
2326 					timeout * 10);
2327 			/* there is nothing much we can do here */
2328 			break;
2329 		}
2330 		msleep(10);
2331 	}
2332 
2333 	/* reset port resource registrations */
2334 	rc = lpfc_selective_reset(phba);
2335 	phba->pport->fc_myDID = 0;
2336 
2337 loopback_mode_end_exit:
2338 	/* make return code available to userspace */
2339 	bsg_reply->result = rc;
2340 	/* complete the job back to userspace if no error */
2341 	if (rc == 0)
2342 		bsg_job_done(job, bsg_reply->result,
2343 			       bsg_reply->reply_payload_rcv_len);
2344 	return rc;
2345 }
2346 
2347 /**
2348  * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2349  * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2350  *
2351  * This function performs an SLI4 diag link test request from the user
2352  * application.
2353  */
2354 static int
2355 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2356 {
2357 	struct fc_bsg_request *bsg_request = job->request;
2358 	struct fc_bsg_reply *bsg_reply = job->reply;
2359 	struct Scsi_Host *shost;
2360 	struct lpfc_vport *vport;
2361 	struct lpfc_hba *phba;
2362 	LPFC_MBOXQ_t *pmboxq;
2363 	struct sli4_link_diag *link_diag_test_cmd;
2364 	uint32_t req_len, alloc_len;
2365 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2366 	union lpfc_sli4_cfg_shdr *shdr;
2367 	uint32_t shdr_status, shdr_add_status;
2368 	struct diag_status *diag_status_reply;
2369 	int mbxstatus, rc = -ENODEV, rc1 = 0;
2370 
2371 	shost = fc_bsg_to_shost(job);
2372 	if (!shost)
2373 		goto job_error;
2374 
2375 	vport = shost_priv(shost);
2376 	if (!vport)
2377 		goto job_error;
2378 
2379 	phba = vport->phba;
2380 	if (!phba)
2381 		goto job_error;
2382 
2384 	if (phba->sli_rev < LPFC_SLI_REV4)
2385 		goto job_error;
2386 
2387 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2388 	    LPFC_SLI_INTF_IF_TYPE_2)
2389 		goto job_error;
2390 
2391 	if (job->request_len < sizeof(struct fc_bsg_request) +
2392 	    sizeof(struct sli4_link_diag)) {
2393 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2394 				"3013 Received LINK DIAG TEST request "
2395 				"size:%d below the minimum size:%d\n",
2396 				job->request_len,
2397 				(int)(sizeof(struct fc_bsg_request) +
2398 				sizeof(struct sli4_link_diag)));
2399 		rc = -EINVAL;
2400 		goto job_error;
2401 	}
2402 
2403 	rc = lpfc_bsg_diag_mode_enter(phba);
2404 	if (rc)
2405 		goto job_error;
2406 
2407 	link_diag_test_cmd = (struct sli4_link_diag *)
2408 			 bsg_request->rqst_data.h_vendor.vendor_cmd;
2409 
2410 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2411 
2412 	if (rc)
2413 		goto job_error;
2414 
2415 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2416 	if (!pmboxq) {
2417 		rc = -ENOMEM;
2418 		goto link_diag_test_exit;
2419 	}
2420 
2421 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2422 		   sizeof(struct lpfc_sli4_cfg_mhdr));
2423 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2424 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2425 				     req_len, LPFC_SLI4_MBX_EMBED);
2426 	if (alloc_len != req_len) {
2427 		rc = -ENOMEM;
2428 		goto link_diag_test_exit;
2429 	}
2430 
2431 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2432 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2433 	       phba->sli4_hba.lnk_info.lnk_no);
2434 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2435 	       phba->sli4_hba.lnk_info.lnk_tp);
2436 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2437 	       link_diag_test_cmd->test_id);
2438 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2439 	       link_diag_test_cmd->loops);
2440 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2441 	       link_diag_test_cmd->test_version);
2442 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2443 	       link_diag_test_cmd->error_action);
2444 
2445 	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2446 
2447 	shdr = (union lpfc_sli4_cfg_shdr *)
2448 		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2449 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2450 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2451 	if (shdr_status || shdr_add_status || mbxstatus) {
2452 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2453 				"3010 Run link diag test mailbox failed with "
2454 				"mbx_status x%x status x%x, add_status x%x\n",
2455 				mbxstatus, shdr_status, shdr_add_status);
2456 	}
2457 
2458 	diag_status_reply = (struct diag_status *)
2459 			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
2460 
2461 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2462 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2463 				"3012 Received Run link diag test reply "
2464 				"below minimum size (%d): reply_len:%d\n",
2465 				(int)(sizeof(*bsg_reply) +
2466 				sizeof(*diag_status_reply)),
2467 				job->reply_len);
2468 		rc = -EINVAL;
2469 		goto job_error;
2470 	}
2471 
2472 	diag_status_reply->mbox_status = mbxstatus;
2473 	diag_status_reply->shdr_status = shdr_status;
2474 	diag_status_reply->shdr_add_status = shdr_add_status;
2475 
2476 link_diag_test_exit:
2477 	rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2478 
2479 	if (pmboxq)
2480 		mempool_free(pmboxq, phba->mbox_mem_pool);
2481 
2482 	lpfc_bsg_diag_mode_exit(phba);
2483 
2484 job_error:
2485 	/* make error code available to userspace */
2486 	if (rc1 && !rc)
2487 		rc = rc1;
2488 	bsg_reply->result = rc;
2489 	/* complete the job back to userspace if no error */
2490 	if (rc == 0)
2491 		bsg_job_done(job, bsg_reply->result,
2492 			       bsg_reply->reply_payload_rcv_len);
2493 	return rc;
2494 }
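
/*
 * Reply consumption sketch (illustrative only): on completion the
 * application reads back the three status words filled in above:
 *
 *	struct diag_status *ds = (struct diag_status *)
 *		bsg_reply->reply_data.vendor_reply.vendor_rsp;
 *	if (ds->mbox_status || ds->shdr_status || ds->shdr_add_status)
 *		... the link diag test failed ...
 */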
2495 
2496 /**
2497  * lpfcdiag_loop_self_reg - obtains a remote port login id
2498  * @phba: Pointer to HBA context object
2499  * @rpi: Pointer to a remote port login id
2500  *
2501  * This function obtains a remote port login id so the diag loopback test
2502  * can send and receive its own unsolicited CT command.
2503  **/
2504 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2505 {
2506 	LPFC_MBOXQ_t *mbox;
2507 	struct lpfc_dmabuf *dmabuff;
2508 	int status;
2509 
2510 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2511 	if (!mbox)
2512 		return -ENOMEM;
2513 
2514 	if (phba->sli_rev < LPFC_SLI_REV4)
2515 		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2516 				(uint8_t *)&phba->pport->fc_sparam,
2517 				mbox, *rpi);
2518 	else {
2519 		*rpi = lpfc_sli4_alloc_rpi(phba);
2520 		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2521 			mempool_free(mbox, phba->mbox_mem_pool);
2522 			return -EBUSY;
2523 		}
2524 		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2525 				phba->pport->fc_myDID,
2526 				(uint8_t *)&phba->pport->fc_sparam,
2527 				mbox, *rpi);
2528 	}
2529 
2530 	if (status) {
2531 		mempool_free(mbox, phba->mbox_mem_pool);
2532 		if (phba->sli_rev == LPFC_SLI_REV4)
2533 			lpfc_sli4_free_rpi(phba, *rpi);
2534 		return -ENOMEM;
2535 	}
2536 
2537 	dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2538 	mbox->ctx_buf = NULL;
2539 	mbox->ctx_ndlp = NULL;
2540 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2541 
2542 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2543 		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2544 		kfree(dmabuff);
2545 		if (status != MBX_TIMEOUT)
2546 			mempool_free(mbox, phba->mbox_mem_pool);
2547 		if (phba->sli_rev == LPFC_SLI_REV4)
2548 			lpfc_sli4_free_rpi(phba, *rpi);
2549 		return -ENODEV;
2550 	}
2551 
2552 	if (phba->sli_rev < LPFC_SLI_REV4)
2553 		*rpi = mbox->u.mb.un.varWords[0];
2554 
2555 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2556 	kfree(dmabuff);
2557 	mempool_free(mbox, phba->mbox_mem_pool);
2558 	return 0;
2559 }
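
/*
 * Pairing sketch (illustrative only): a successful self-registration
 * must be undone once the loopback traffic is finished, exactly as
 * lpfc_bsg_diag_loopback_run() does further below:
 *
 *	uint16_t rpi = 0;
 *
 *	rc = lpfcdiag_loop_self_reg(phba, &rpi);
 *	if (rc)
 *		return rc;
 *	... send/receive the loopback CT command ...
 *	lpfcdiag_loop_self_unreg(phba, rpi);
 */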
2560 
2561 /**
2562  * lpfcdiag_loop_self_unreg - unregister the rpi
2563  * @phba: Pointer to HBA context object
2564  * @rpi: Remote port login id
2565  *
2566  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2567  **/
2568 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2569 {
2570 	LPFC_MBOXQ_t *mbox;
2571 	int status;
2572 
2573 	/* Allocate mboxq structure */
2574 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2575 	if (mbox == NULL)
2576 		return -ENOMEM;
2577 
2578 	if (phba->sli_rev < LPFC_SLI_REV4)
2579 		lpfc_unreg_login(phba, 0, rpi, mbox);
2580 	else
2581 		lpfc_unreg_login(phba, phba->pport->vpi,
2582 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2583 
2584 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2585 
2586 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2587 		if (status != MBX_TIMEOUT)
2588 			mempool_free(mbox, phba->mbox_mem_pool);
2589 		return -EIO;
2590 	}
2591 	mempool_free(mbox, phba->mbox_mem_pool);
2592 	if (phba->sli_rev == LPFC_SLI_REV4)
2593 		lpfc_sli4_free_rpi(phba, rpi);
2594 	return 0;
2595 }
2596 
2597 /**
2598  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2599  * @phba: Pointer to HBA context object
2600  * @rpi: Remote port login id
2601  * @txxri: Pointer to transmit exchange id
2602  * @rxxri: Pointer to response exchange id
2603  *
2604  * This function obtains the transmit and receive ids required to send
2605  * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
2606  * flags are used so the unsolicited response handler is able to process
2607  * the ct command sent on the same port.
2608  **/
2609 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2610 			 uint16_t *txxri, uint16_t * rxxri)
2611 {
2612 	struct lpfc_bsg_event *evt;
2613 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2614 	struct lpfc_dmabuf *dmabuf;
2615 	struct ulp_bde64 *bpl = NULL;
2616 	struct lpfc_sli_ct_request *ctreq = NULL;
2617 	int ret_val = 0;
2618 	int time_left;
2619 	int iocb_stat = IOCB_SUCCESS;
2620 	unsigned long flags;
2621 	u32 status;
2622 
2623 	*txxri = 0;
2624 	*rxxri = 0;
2625 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2626 				SLI_CT_ELX_LOOPBACK);
2627 	if (!evt)
2628 		return -ENOMEM;
2629 
2630 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2631 	list_add(&evt->node, &phba->ct_ev_waiters);
2632 	lpfc_bsg_event_ref(evt);
2633 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2634 
2635 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2636 	rspiocbq = lpfc_sli_get_iocbq(phba);
2637 
2638 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2639 	if (dmabuf) {
2640 		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2641 		if (dmabuf->virt) {
2642 			INIT_LIST_HEAD(&dmabuf->list);
2643 			bpl = (struct ulp_bde64 *) dmabuf->virt;
2644 			memset(bpl, 0, sizeof(*bpl));
2645 			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2646 			bpl->addrHigh =
2647 				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2648 					sizeof(*bpl)));
2649 			bpl->addrLow =
2650 				le32_to_cpu(putPaddrLow(dmabuf->phys +
2651 					sizeof(*bpl)));
2652 			bpl->tus.f.bdeFlags = 0;
2653 			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2654 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2655 		}
2656 	}
2657 
2658 	if (cmdiocbq == NULL || rspiocbq == NULL ||
2659 	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2660 		dmabuf->virt == NULL) {
2661 		ret_val = -ENOMEM;
2662 		goto err_get_xri_exit;
2663 	}
2664 
2665 	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2666 
2667 	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2668 	ctreq->RevisionId.bits.InId = 0;
2669 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2670 	ctreq->FsSubType = 0;
2671 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2672 	ctreq->CommandResponse.bits.Size = 0;
2673 
2674 	cmdiocbq->context3 = dmabuf;
2675 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
2676 	cmdiocbq->vport = phba->pport;
2677 	cmdiocbq->cmd_cmpl = NULL;
2678 
2679 	lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
2680 				 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
2681 
2682 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2683 					     rspiocbq, (phba->fc_ratov * 2)
2684 					     + LPFC_DRVR_TIMEOUT);
2685 
2686 	status = get_job_ulpstatus(phba, rspiocbq);
2687 	if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
2688 		ret_val = -EIO;
2689 		goto err_get_xri_exit;
2690 	}
2691 	*txxri = get_job_ulpcontext(phba, rspiocbq);
2692 
2693 	evt->waiting = 1;
2694 	evt->wait_time_stamp = jiffies;
2695 	time_left = wait_event_interruptible_timeout(
2696 		evt->wq, !list_empty(&evt->events_to_see),
2697 		msecs_to_jiffies(1000 *
2698 			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2699 	if (list_empty(&evt->events_to_see))
2700 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2701 	else {
2702 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2703 		list_move(evt->events_to_see.prev, &evt->events_to_get);
2704 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2705 		*rxxri = (list_entry(evt->events_to_get.prev,
2706 				     typeof(struct event_data),
2707 				     node))->immed_dat;
2708 	}
2709 	evt->waiting = 0;
2710 
2711 err_get_xri_exit:
2712 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2713 	lpfc_bsg_event_unref(evt); /* release ref */
2714 	lpfc_bsg_event_unref(evt); /* delete */
2715 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2716 
2717 	if (dmabuf) {
2718 		if (dmabuf->virt)
2719 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2720 		kfree(dmabuf);
2721 	}
2722 
2723 	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2724 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2725 	if (rspiocbq)
2726 		lpfc_sli_release_iocbq(phba, rspiocbq);
2727 	return ret_val;
2728 }
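
/*
 * Note on the wait above (a reading aid, not new logic):
 * wait_event_interruptible_timeout() returns 0 on timeout and a
 * negative value when interrupted by a signal, so with an empty event
 * list the result decodes as:
 *
 *	if (list_empty(&evt->events_to_see))
 *		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
 *
 * i.e. a non-zero time_left with no event means the sleeper was woken
 * early by a signal, not that the CT response ever arrived.
 */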
2729 
2730 /**
2731  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2732  * @phba: Pointer to HBA context object
2733  *
2734  * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
2735  * returns a pointer to the buffer.
2736  **/
2737 static struct lpfc_dmabuf *
2738 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2739 {
2740 	struct lpfc_dmabuf *dmabuf;
2741 	struct pci_dev *pcidev = phba->pcidev;
2742 
2743 	/* allocate dma buffer struct */
2744 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2745 	if (!dmabuf)
2746 		return NULL;
2747 
2748 	INIT_LIST_HEAD(&dmabuf->list);
2749 
2750 	/* now, allocate dma buffer */
2751 	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2752 					  &(dmabuf->phys), GFP_KERNEL);
2753 
2754 	if (!dmabuf->virt) {
2755 		kfree(dmabuf);
2756 		return NULL;
2757 	}
2758 
2759 	return dmabuf;
2760 }
2761 
2762 /**
2763  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2764  * @phba: Pointer to HBA context object.
2765  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2766  *
2767  * This routine simply frees a dma buffer and its associated buffer
2768  * descriptor referred to by @dmabuf.
2769  **/
2770 static void
2771 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2772 {
2773 	struct pci_dev *pcidev = phba->pcidev;
2774 
2775 	if (!dmabuf)
2776 		return;
2777 
2778 	if (dmabuf->virt)
2779 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2780 				  dmabuf->virt, dmabuf->phys);
2781 	kfree(dmabuf);
2782 	return;
2783 }
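
/*
 * Usage sketch (illustrative only): the two helpers above form an
 * alloc/free pair around dma_alloc_coherent()/dma_free_coherent():
 *
 *	struct lpfc_dmabuf *buf = lpfc_bsg_dma_page_alloc(phba);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... stage up to BSG_MBOX_SIZE bytes via buf->virt / buf->phys ...
 *	lpfc_bsg_dma_page_free(phba, buf);
 */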
2784 
2785 /**
2786  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2787  * @phba: Pointer to HBA context object.
2788  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2789  *
2790  * This routine simply frees all dma buffers and their associated buffer
2791  * descriptors referred to by @dmabuf_list.
2792  **/
2793 static void
2794 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2795 			    struct list_head *dmabuf_list)
2796 {
2797 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2798 
2799 	if (list_empty(dmabuf_list))
2800 		return;
2801 
2802 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2803 		list_del_init(&dmabuf->list);
2804 		lpfc_bsg_dma_page_free(phba, dmabuf);
2805 	}
2806 	return;
2807 }
2808 
2809 /**
2810  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2811  * @phba: Pointer to HBA context object
2812  * @bpl: Pointer to 64 bit bde structure
2813  * @size: Number of bytes to process
2814  * @nocopydata: Flag indicating user data is not to be copied into the buffers
2815  *
2816  * This function allocates page sized buffers and populates an lpfc_dmabufext.
2817  * Unless @nocopydata is set, each buffer is zeroed so that user data can
2818  * later be copied into it. The chained list of page size buffers is returned.
2819  **/
2820 static struct lpfc_dmabufext *
2821 diag_cmd_data_alloc(struct lpfc_hba *phba,
2822 		   struct ulp_bde64 *bpl, uint32_t size,
2823 		   int nocopydata)
2824 {
2825 	struct lpfc_dmabufext *mlist = NULL;
2826 	struct lpfc_dmabufext *dmp;
2827 	int cnt, offset = 0, i = 0;
2828 	struct pci_dev *pcidev;
2829 
2830 	pcidev = phba->pcidev;
2831 
2832 	while (size) {
2833 		/* We get chunks of 4K */
2834 		if (size > BUF_SZ_4K)
2835 			cnt = BUF_SZ_4K;
2836 		else
2837 			cnt = size;
2838 
2839 		/* allocate struct lpfc_dmabufext buffer header */
2840 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2841 		if (!dmp)
2842 			goto out;
2843 
2844 		INIT_LIST_HEAD(&dmp->dma.list);
2845 
2846 		/* Queue it to a linked list */
2847 		if (mlist)
2848 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2849 		else
2850 			mlist = dmp;
2851 
2852 		/* allocate buffer */
2853 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2854 						   cnt,
2855 						   &(dmp->dma.phys),
2856 						   GFP_KERNEL);
2857 
2858 		if (!dmp->dma.virt)
2859 			goto out;
2860 
2861 		dmp->size = cnt;
2862 
2863 		if (nocopydata) {
2864 			bpl->tus.f.bdeFlags = 0;
2865 		} else {
2866 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2867 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2868 		}
2869 
2870 		/* build buffer ptr list for IOCB */
2871 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2872 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2873 		bpl->tus.f.bdeSize = (ushort) cnt;
2874 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2875 		bpl++;
2876 
2877 		i++;
2878 		offset += cnt;
2879 		size -= cnt;
2880 	}
2881 
2882 	if (mlist) {
2883 		mlist->flag = i;
2884 		return mlist;
2885 	}
2886 out:
2887 	diag_cmd_data_free(phba, mlist);
2888 	return NULL;
2889 }
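
/*
 * Worked example (assuming BUF_SZ_4K is 4096): a 10000 byte request is
 * carved into 4096 + 4096 + 1808 byte coherent buffers, one BDE each,
 * and mlist->flag records the BDE count that the caller later programs
 * into its XMIT_SEQUENCE request:
 *
 *	txbuffer = diag_cmd_data_alloc(phba, txbpl, 10000, 0);
 *	num_bde  = (uint32_t)txbuffer->flag;
 */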
2890 
2891 /**
2892  * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2893  * @phba: Pointer to HBA context object
2894  * @rxxri: Receive exchange id
2895  * @len: Number of data bytes
2896  *
2897  * This function allocates and posts a data buffer of sufficient size to
2898  * receive an unsolicited CT command.
2899  **/
2900 static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2901 					  size_t len)
2902 {
2903 	struct lpfc_sli_ring *pring;
2904 	struct lpfc_iocbq *cmdiocbq;
2905 	IOCB_t *cmd = NULL;
2906 	struct list_head head, *curr, *next;
2907 	struct lpfc_dmabuf *rxbmp;
2908 	struct lpfc_dmabuf *dmp;
2909 	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2910 	struct ulp_bde64 *rxbpl = NULL;
2911 	uint32_t num_bde;
2912 	struct lpfc_dmabufext *rxbuffer = NULL;
2913 	int ret_val = 0;
2914 	int iocb_stat;
2915 	int i = 0;
2916 
2917 	pring = lpfc_phba_elsring(phba);
2918 
2919 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2920 	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2921 	if (rxbmp != NULL) {
2922 		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2923 		if (rxbmp->virt) {
2924 			INIT_LIST_HEAD(&rxbmp->list);
2925 			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2926 			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2927 		}
2928 	}
2929 
2930 	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2931 		ret_val = -ENOMEM;
2932 		goto err_post_rxbufs_exit;
2933 	}
2934 
2935 	/* Queue buffers for the receive exchange */
2936 	num_bde = (uint32_t)rxbuffer->flag;
2937 	dmp = &rxbuffer->dma;
2938 	cmd = &cmdiocbq->iocb;
2939 	i = 0;
2940 
2941 	INIT_LIST_HEAD(&head);
2942 	list_add_tail(&head, &dmp->list);
2943 	list_for_each_safe(curr, next, &head) {
2944 		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2945 		list_del(curr);
2946 
2947 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2948 			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2949 			cmd->un.quexri64cx.buff.bde.addrHigh =
2950 				putPaddrHigh(mp[i]->phys);
2951 			cmd->un.quexri64cx.buff.bde.addrLow =
2952 				putPaddrLow(mp[i]->phys);
2953 			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2954 				((struct lpfc_dmabufext *)mp[i])->size;
2955 			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2956 			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2957 			cmd->ulpPU = 0;
2958 			cmd->ulpLe = 1;
2959 			cmd->ulpBdeCount = 1;
2960 			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2961 
2962 		} else {
2963 			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2964 			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2965 			cmd->un.cont64[i].tus.f.bdeSize =
2966 				((struct lpfc_dmabufext *)mp[i])->size;
2967 			cmd->ulpBdeCount = ++i;
2968 
2969 			if ((--num_bde > 0) && (i < 2))
2970 				continue;
2971 
2972 			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2973 			cmd->ulpLe = 1;
2974 		}
2975 
2976 		cmd->ulpClass = CLASS3;
2977 		cmd->ulpContext = rxxri;
2978 
2979 		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2980 						0);
2981 		if (iocb_stat == IOCB_ERROR) {
2982 			diag_cmd_data_free(phba,
2983 				(struct lpfc_dmabufext *)mp[0]);
2984 			if (mp[1])
2985 				diag_cmd_data_free(phba,
2986 					  (struct lpfc_dmabufext *)mp[1]);
2987 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2988 			ret_val = -EIO;
2989 			goto err_post_rxbufs_exit;
2990 		}
2991 
2992 		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2993 		if (mp[1]) {
2994 			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2995 			mp[1] = NULL;
2996 		}
2997 
2998 		/* The iocb was freed by lpfc_sli_issue_iocb */
2999 		cmdiocbq = lpfc_sli_get_iocbq(phba);
3000 		if (!cmdiocbq) {
3001 			dmp = list_entry(next, struct lpfc_dmabuf, list);
3002 			ret_val = -EIO;
3003 			goto err_post_rxbufs_exit;
3004 		}
3005 		cmd = &cmdiocbq->iocb;
3006 		i = 0;
3007 	}
3008 	list_del(&head);
3009 
3010 err_post_rxbufs_exit:
3011 
3012 	if (rxbmp) {
3013 		if (rxbmp->virt)
3014 			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3015 		kfree(rxbmp);
3016 	}
3017 
3018 	if (cmdiocbq)
3019 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3020 	return ret_val;
3021 }
3022 
3023 /**
3024  * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
3025  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3026  *
3027  * This function receives a user data buffer to be transmitted and received on
3028  * the same port; the link must be up and in loopback mode prior
3029  * to being called.
3030  * 1. A kernel buffer is allocated to copy the user data into.
3031  * 2. The port registers with "itself".
3032  * 3. The transmit and receive exchange ids are obtained.
3033  * 4. The receive exchange id is posted.
3034  * 5. A new els loopback event is created.
3035  * 6. The command and response iocbs are allocated.
3036  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
3037  *
3038  * This function is meant to be called n times while the port is in loopback,
3039  * so it is the app's responsibility to issue a reset to take the port out
3040  * of loopback mode.
3041  **/
3042 static int
3043 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3044 {
3045 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3046 	struct fc_bsg_reply *bsg_reply = job->reply;
3047 	struct lpfc_hba *phba = vport->phba;
3048 	struct lpfc_bsg_event *evt;
3049 	struct event_data *evdat;
3050 	struct lpfc_sli *psli = &phba->sli;
3051 	uint32_t size;
3052 	uint32_t full_size;
3053 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3054 	uint16_t rpi = 0;
3055 	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3056 	union lpfc_wqe128 *cmdwqe, *rspwqe;
3057 	struct lpfc_sli_ct_request *ctreq;
3058 	struct lpfc_dmabuf *txbmp;
3059 	struct ulp_bde64 *txbpl = NULL;
3060 	struct lpfc_dmabufext *txbuffer = NULL;
3061 	struct list_head head;
3062 	struct lpfc_dmabuf  *curr;
3063 	uint16_t txxri = 0, rxxri;
3064 	uint32_t num_bde;
3065 	uint8_t *ptr = NULL, *rx_databuf = NULL;
3066 	int rc = 0;
3067 	int time_left;
3068 	int iocb_stat = IOCB_SUCCESS;
3069 	unsigned long flags;
3070 	void *dataout = NULL;
3071 	uint32_t total_mem;
3072 
3073 	/* in case no data is returned, return just the return code */
3074 	bsg_reply->reply_payload_rcv_len = 0;
3075 
3076 	if (job->request_len <
3077 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3078 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3079 				"2739 Received DIAG TEST request below minimum "
3080 				"size\n");
3081 		rc = -EINVAL;
3082 		goto loopback_test_exit;
3083 	}
3084 
3085 	if (job->request_payload.payload_len !=
3086 		job->reply_payload.payload_len) {
3087 		rc = -EINVAL;
3088 		goto loopback_test_exit;
3089 	}
3090 
3091 	if ((phba->link_state == LPFC_HBA_ERROR) ||
3092 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3093 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3094 		rc = -EACCES;
3095 		goto loopback_test_exit;
3096 	}
3097 
3098 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3099 		rc = -EACCES;
3100 		goto loopback_test_exit;
3101 	}
3102 
3103 	size = job->request_payload.payload_len;
3104 	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3105 
3106 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3107 		rc = -ERANGE;
3108 		goto loopback_test_exit;
3109 	}
3110 
3111 	if (full_size >= BUF_SZ_4K) {
3112 		/*
3113 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3114 		 * then we allocate 64k and re-use that buffer over and over to
3115 		 * xfer the whole block. This is because the Linux kernel has a
3116 		 * problem allocating more than 120k of kernel space memory. This
3117 		 * was seen with GET_FCPTARGETMAPPING...
3118 		 */
3119 		if (size <= (64 * 1024))
3120 			total_mem = full_size;
3121 		else
3122 			total_mem = 64 * 1024;
3123 	} else
3124 		/* Allocate memory for ioctl data */
3125 		total_mem = BUF_SZ_4K;
3126 
3127 	dataout = kmalloc(total_mem, GFP_KERNEL);
3128 	if (dataout == NULL) {
3129 		rc = -ENOMEM;
3130 		goto loopback_test_exit;
3131 	}
3132 
3133 	ptr = dataout;
3134 	ptr += ELX_LOOPBACK_HEADER_SZ;
3135 	sg_copy_to_buffer(job->request_payload.sg_list,
3136 				job->request_payload.sg_cnt,
3137 				ptr, size);
3138 	rc = lpfcdiag_loop_self_reg(phba, &rpi);
3139 	if (rc)
3140 		goto loopback_test_exit;
3141 
3142 	if (phba->sli_rev < LPFC_SLI_REV4) {
3143 		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3144 		if (rc) {
3145 			lpfcdiag_loop_self_unreg(phba, rpi);
3146 			goto loopback_test_exit;
3147 		}
3148 
3149 		rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
3150 		if (rc) {
3151 			lpfcdiag_loop_self_unreg(phba, rpi);
3152 			goto loopback_test_exit;
3153 		}
3154 	}
3155 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3156 				SLI_CT_ELX_LOOPBACK);
3157 	if (!evt) {
3158 		lpfcdiag_loop_self_unreg(phba, rpi);
3159 		rc = -ENOMEM;
3160 		goto loopback_test_exit;
3161 	}
3162 
3163 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3164 	list_add(&evt->node, &phba->ct_ev_waiters);
3165 	lpfc_bsg_event_ref(evt);
3166 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3167 
3168 	cmdiocbq = lpfc_sli_get_iocbq(phba);
3169 	if (phba->sli_rev < LPFC_SLI_REV4)
3170 		rspiocbq = lpfc_sli_get_iocbq(phba);
3171 	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3172 
3173 	if (txbmp) {
3174 		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3175 		if (txbmp->virt) {
3176 			INIT_LIST_HEAD(&txbmp->list);
3177 			txbpl = (struct ulp_bde64 *) txbmp->virt;
3178 			txbuffer = diag_cmd_data_alloc(phba,
3179 							txbpl, full_size, 0);
3180 		}
3181 	}
3182 
3183 	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3184 		rc = -ENOMEM;
3185 		goto err_loopback_test_exit;
3186 	}
3187 	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3188 		rc = -ENOMEM;
3189 		goto err_loopback_test_exit;
3190 	}
3191 
3192 	cmdwqe = &cmdiocbq->wqe;
3193 	memset(cmdwqe, 0, sizeof(union lpfc_wqe));
3194 	if (phba->sli_rev < LPFC_SLI_REV4) {
3195 		rspwqe = &rspiocbq->wqe;
3196 		memset(rspwqe, 0, sizeof(union lpfc_wqe));
3197 	}
3198 
3199 	INIT_LIST_HEAD(&head);
3200 	list_add_tail(&head, &txbuffer->dma.list);
3201 	list_for_each_entry(curr, &head, list) {
3202 		segment_len = ((struct lpfc_dmabufext *)curr)->size;
3203 		if (current_offset == 0) {
3204 			ctreq = curr->virt;
3205 			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3206 			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3207 			ctreq->RevisionId.bits.InId = 0;
3208 			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3209 			ctreq->FsSubType = 0;
3210 			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3211 			ctreq->CommandResponse.bits.Size   = size;
3212 			segment_offset = ELX_LOOPBACK_HEADER_SZ;
3213 		} else
3214 			segment_offset = 0;
3215 
3216 		BUG_ON(segment_offset >= segment_len);
3217 		memcpy(curr->virt + segment_offset,
3218 			ptr + current_offset,
3219 			segment_len - segment_offset);
3220 
3221 		current_offset += segment_len - segment_offset;
3222 		BUG_ON(current_offset > size);
3223 	}
3224 	list_del(&head);
3225 
3226 	/* Build the XMIT_SEQUENCE iocb */
3227 	num_bde = (uint32_t)txbuffer->flag;
3228 
3229 	cmdiocbq->num_bdes = num_bde;
3230 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
3231 	cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
3232 	cmdiocbq->vport = phba->pport;
3233 	cmdiocbq->cmd_cmpl = NULL;
3234 	cmdiocbq->context3 = txbmp;
3235 
3236 	if (phba->sli_rev < LPFC_SLI_REV4) {
3237 		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
3238 					 num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
3239 					 CMD_XMIT_SEQUENCE64_CX);
3240 
3241 	} else {
3242 		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
3243 					 phba->sli4_hba.rpi_ids[rpi], 0xffff,
3244 					 full_size, FC_RCTL_DD_UNSOL_CTL, 1,
3245 					 CMD_XMIT_SEQUENCE64_WQE);
3246 		cmdiocbq->sli4_xritag = NO_XRI;
3247 	}
3248 
3249 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3250 					     rspiocbq, (phba->fc_ratov * 2) +
3251 					     LPFC_DRVR_TIMEOUT);
3252 	if (iocb_stat != IOCB_SUCCESS ||
3253 	    (phba->sli_rev < LPFC_SLI_REV4 &&
3254 	     (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
3255 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3256 				"3126 Failed loopback test issue iocb: "
3257 				"iocb_stat:x%x\n", iocb_stat);
3258 		rc = -EIO;
3259 		goto err_loopback_test_exit;
3260 	}
3261 
3262 	evt->waiting = 1;
3263 	time_left = wait_event_interruptible_timeout(
3264 		evt->wq, !list_empty(&evt->events_to_see),
3265 		msecs_to_jiffies(1000 *
3266 			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3267 	evt->waiting = 0;
3268 	if (list_empty(&evt->events_to_see)) {
3269 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3270 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3271 				"3125 Not receiving unsolicited event, "
3272 				"rc:x%x\n", rc);
3273 	} else {
3274 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3275 		list_move(evt->events_to_see.prev, &evt->events_to_get);
3276 		evdat = list_entry(evt->events_to_get.prev,
3277 				   typeof(*evdat), node);
3278 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3279 		rx_databuf = evdat->data;
3280 		if (evdat->len != full_size) {
3281 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3282 				"1603 Loopback test did not receive expected "
3283 				"data length. actual length 0x%x expected "
3284 				"length 0x%x\n",
3285 				evdat->len, full_size);
3286 			rc = -EIO;
3287 		} else if (rx_databuf == NULL)
3288 			rc = -EIO;
3289 		else {
3290 			rc = IOCB_SUCCESS;
3291 			/* skip over elx loopback header */
3292 			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3293 			bsg_reply->reply_payload_rcv_len =
3294 				sg_copy_from_buffer(job->reply_payload.sg_list,
3295 						    job->reply_payload.sg_cnt,
3296 						    rx_databuf, size);
3297 			bsg_reply->reply_payload_rcv_len = size;
3298 		}
3299 	}
3300 
3301 err_loopback_test_exit:
3302 	lpfcdiag_loop_self_unreg(phba, rpi);
3303 
3304 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3305 	lpfc_bsg_event_unref(evt); /* release ref */
3306 	lpfc_bsg_event_unref(evt); /* delete */
3307 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3308 
3309 	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3310 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3311 
3312 	if (rspiocbq != NULL)
3313 		lpfc_sli_release_iocbq(phba, rspiocbq);
3314 
3315 	if (txbmp != NULL) {
3316 		if (txbpl != NULL) {
3317 			if (txbuffer != NULL)
3318 				diag_cmd_data_free(phba, txbuffer);
3319 			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3320 		}
3321 		kfree(txbmp);
3322 	}
3323 
3324 loopback_test_exit:
3325 	kfree(dataout);
3326 	/* make error code available to userspace */
3327 	bsg_reply->result = rc;
3328 	job->dd_data = NULL;
3329 	/* complete the job back to userspace if no error */
3330 	if (rc == IOCB_SUCCESS)
3331 		bsg_job_done(job, bsg_reply->result,
3332 			       bsg_reply->reply_payload_rcv_len);
3333 	return rc;
3334 }
3335 
3336 /**
3337  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3338  * @job: GET_DFC_REV fc_bsg_job
3339  **/
3340 static int
3341 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3342 {
3343 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3344 	struct fc_bsg_reply *bsg_reply = job->reply;
3345 	struct lpfc_hba *phba = vport->phba;
3346 	struct get_mgmt_rev_reply *event_reply;
3347 	int rc = 0;
3348 
3349 	if (job->request_len <
3350 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3351 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3352 				"2740 Received GET_DFC_REV request below "
3353 				"minimum size\n");
3354 		rc = -EINVAL;
3355 		goto job_error;
3356 	}
3357 
3358 	event_reply = (struct get_mgmt_rev_reply *)
3359 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
3360 
3361 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3362 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3363 				"2741 Received GET_DFC_REV reply below "
3364 				"minimum size\n");
3365 		rc = -EINVAL;
3366 		goto job_error;
3367 	}
3368 
3369 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3370 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3371 job_error:
3372 	bsg_reply->result = rc;
3373 	if (rc == 0)
3374 		bsg_job_done(job, bsg_reply->result,
3375 			       bsg_reply->reply_payload_rcv_len);
3376 	return rc;
3377 }
3378 
3379 /**
3380  * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3381  * @phba: Pointer to HBA context object.
3382  * @pmboxq: Pointer to mailbox command.
3383  *
3384  * This is the completion handler function for mailbox commands issued
3385  * from the lpfc_bsg_issue_mbox function. It is called by the mailbox
3386  * event handler function with no lock held. It copies the mailbox
3387  * response back to the waiting job and completes the job if it is
3388  * still active.
3389  **/
3390 static void
3391 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3392 {
3393 	struct bsg_job_data *dd_data;
3394 	struct fc_bsg_reply *bsg_reply;
3395 	struct bsg_job *job;
3396 	uint32_t size;
3397 	unsigned long flags;
3398 	uint8_t *pmb, *pmb_buf;
3399 
3400 	dd_data = pmboxq->ctx_ndlp;
3401 
3402 	/*
3403 	 * The outgoing buffer is readily referenced from the dma buffer;
3404 	 * we just need to get the header part from the mailboxq structure.
3405 	 */
3406 	pmb = (uint8_t *)&pmboxq->u.mb;
3407 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3408 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3409 
3410 	/* Determine if job has been aborted */
3411 
3412 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3413 	job = dd_data->set_job;
3414 	if (job) {
3415 		/* Prevent timeout handling from trying to abort job  */
3416 		job->dd_data = NULL;
3417 	}
3418 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3419 
3420 	/* Copy the mailbox data to the job if it is still active */
3421 
3422 	if (job) {
3423 		bsg_reply = job->reply;
3424 		size = job->reply_payload.payload_len;
3425 		bsg_reply->reply_payload_rcv_len =
3426 			sg_copy_from_buffer(job->reply_payload.sg_list,
3427 					    job->reply_payload.sg_cnt,
3428 					    pmb_buf, size);
3429 	}
3430 
3431 	dd_data->set_job = NULL;
3432 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3433 	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3434 	kfree(dd_data);
3435 
3436 	/* Complete the job if the job is still active */
3437 
3438 	if (job) {
3439 		bsg_reply->result = 0;
3440 		bsg_job_done(job, bsg_reply->result,
3441 			       bsg_reply->reply_payload_rcv_len);
3442 	}
3443 	return;
3444 }
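
/*
 * Note on the abort race handled above (a reading aid): the completion
 * fetches dd_data->set_job and clears job->dd_data under ct_ev_lock so
 * the bsg timeout handler cannot try to abort a job whose mailbox has
 * already completed:
 *
 *	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 *	job = dd_data->set_job;
 *	if (job)
 *		job->dd_data = NULL;
 *	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 */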
3445 
3446 /**
3447  * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3448  * @phba: Pointer to HBA context object.
3449  * @mb: Pointer to a mailbox object.
3450  * @vport: Pointer to a vport object.
3451  *
3452  * Some commands require the port to be offline; some may not be called from
3453  * the application at all.
3454  **/
3455 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3456 	MAILBOX_t *mb, struct lpfc_vport *vport)
3457 {
3458 	/* return negative error values for bsg job */
3459 	switch (mb->mbxCommand) {
3460 	/* Offline only */
3461 	case MBX_INIT_LINK:
3462 	case MBX_DOWN_LINK:
3463 	case MBX_CONFIG_LINK:
3464 	case MBX_CONFIG_RING:
3465 	case MBX_RESET_RING:
3466 	case MBX_UNREG_LOGIN:
3467 	case MBX_CLEAR_LA:
3468 	case MBX_DUMP_CONTEXT:
3469 	case MBX_RUN_DIAGS:
3470 	case MBX_RESTART:
3471 	case MBX_SET_MASK:
3472 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3473 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3474 				"2743 Command 0x%x is illegal in on-line "
3475 				"state\n",
3476 				mb->mbxCommand);
3477 			return -EPERM;
3478 		}
3479 		break;
3480 	case MBX_WRITE_NV:
3481 	case MBX_WRITE_VPARMS:
3482 	case MBX_LOAD_SM:
3483 	case MBX_READ_NV:
3484 	case MBX_READ_CONFIG:
3485 	case MBX_READ_RCONFIG:
3486 	case MBX_READ_STATUS:
3487 	case MBX_READ_XRI:
3488 	case MBX_READ_REV:
3489 	case MBX_READ_LNK_STAT:
3490 	case MBX_DUMP_MEMORY:
3491 	case MBX_DOWN_LOAD:
3492 	case MBX_UPDATE_CFG:
3493 	case MBX_KILL_BOARD:
3494 	case MBX_READ_TOPOLOGY:
3495 	case MBX_LOAD_AREA:
3496 	case MBX_LOAD_EXP_ROM:
3497 	case MBX_BEACON:
3498 	case MBX_DEL_LD_ENTRY:
3499 	case MBX_SET_DEBUG:
3500 	case MBX_WRITE_WWN:
3501 	case MBX_SLI4_CONFIG:
3502 	case MBX_READ_EVENT_LOG:
3503 	case MBX_READ_EVENT_LOG_STATUS:
3504 	case MBX_WRITE_EVENT_LOG:
3505 	case MBX_PORT_CAPABILITIES:
3506 	case MBX_PORT_IOV_CONTROL:
3507 	case MBX_RUN_BIU_DIAG64:
3508 		break;
3509 	case MBX_SET_VARIABLE:
3510 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3511 			"1226 mbox: set_variable 0x%x, 0x%x\n",
3512 			mb->un.varWords[0],
3513 			mb->un.varWords[1]);
3514 		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3515 			&& (mb->un.varWords[1] == 1)) {
3516 			phba->wait_4_mlo_maint_flg = 1;
3517 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3518 			spin_lock_irq(&phba->hbalock);
3519 			phba->link_flag &= ~LS_LOOPBACK_MODE;
3520 			spin_unlock_irq(&phba->hbalock);
3521 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3522 		}
3523 		break;
3524 	case MBX_READ_SPARM64:
3525 	case MBX_REG_LOGIN:
3526 	case MBX_REG_LOGIN64:
3527 	case MBX_CONFIG_PORT:
3528 	case MBX_RUN_BIU_DIAG:
3529 	default:
3530 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3531 			"2742 Unknown Command 0x%x\n",
3532 			mb->mbxCommand);
3533 		return -EPERM;
3534 	}
3535 
3536 	return 0; /* ok */
3537 }
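
/*
 * Illustrative usage, mirroring the call made in lpfc_bsg_issue_mbox()
 * below; rc is a negative errno such as -EPERM on rejection:
 *
 *	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
 *	if (rc != 0)
 *		goto job_done;
 */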
3538 
3539 /**
3540  * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3541  * @phba: Pointer to HBA context object.
3542  *
3543  * This routine cleans up and resets the BSG handling of a multi-buffer
3544  * mbox command session.
3545  **/
3546 static void
3547 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3548 {
3549 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3550 		return;
3551 
3552 	/* free all memory, including dma buffers */
3553 	lpfc_bsg_dma_page_list_free(phba,
3554 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3555 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3556 	/* multi-buffer write mailbox command pass-through complete */
3557 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3558 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3559 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3560 
3561 	return;
3562 }
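
/*
 * Rough summary of the multi-buffer mailbox session states as driven by
 * the routines in this file:
 *
 *	LPFC_BSG_MBOX_IDLE -> LPFC_BSG_MBOX_HOST  new SLI_CONFIG request seen
 *	LPFC_BSG_MBOX_HOST -> LPFC_BSG_MBOX_PORT  command issued to the port
 *	LPFC_BSG_MBOX_PORT -> LPFC_BSG_MBOX_DONE  completion handled
 *	LPFC_BSG_MBOX_PORT -> LPFC_BSG_MBOX_ABTS  abort requested at the port
 *	any state          -> LPFC_BSG_MBOX_IDLE  session reset (above)
 */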
3563 
3564 /**
3565  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3566  * @phba: Pointer to HBA context object.
3567  * @pmboxq: Pointer to mailbox command.
3568  *
3569  * This routine handles BSG job completion for mailbox commands with
3570  * multiple external buffers.
3571  **/
3572 static struct bsg_job *
3573 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3574 {
3575 	struct bsg_job_data *dd_data;
3576 	struct bsg_job *job;
3577 	struct fc_bsg_reply *bsg_reply;
3578 	uint8_t *pmb, *pmb_buf;
3579 	unsigned long flags;
3580 	uint32_t size;
3581 	int rc = 0;
3582 	struct lpfc_dmabuf *dmabuf;
3583 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3584 	uint8_t *pmbx;
3585 
3586 	dd_data = pmboxq->ctx_buf;
3587 
3588 	/* Determine if job has been aborted */
3589 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3590 	job = dd_data->set_job;
3591 	if (job) {
3592 		bsg_reply = job->reply;
3593 		/* Prevent timeout handling from trying to abort job  */
3594 		job->dd_data = NULL;
3595 	}
3596 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3597 
3598 	/*
3599 	 * The outgoing buffer is readily referenced from the DMA buffer;
3600 	 * we just need to get the header part from the mailboxq structure.
3601 	 */
3602 
3603 	pmb = (uint8_t *)&pmboxq->u.mb;
3604 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3605 	/* Copy the byte swapped response mailbox back to the user */
3606 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3607 	/* if there is any non-embedded extended data copy that too */
3608 	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3609 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3610 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3611 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3612 		pmbx = (uint8_t *)dmabuf->virt;
3613 		/* byte swap the extended data following the mailbox command */
3614 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3615 			&pmbx[sizeof(MAILBOX_t)],
3616 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3617 	}
3618 
3619 	/* Complete the job if the job is still active */
3620 
3621 	if (job) {
3622 		size = job->reply_payload.payload_len;
3623 		bsg_reply->reply_payload_rcv_len =
3624 			sg_copy_from_buffer(job->reply_payload.sg_list,
3625 					    job->reply_payload.sg_cnt,
3626 					    pmb_buf, size);
3627 
3628 		/* result for success */
3629 		bsg_reply->result = 0;
3630 
3631 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3632 				"2937 SLI_CONFIG ext-buffer mailbox command "
3633 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3634 				phba->mbox_ext_buf_ctx.nembType,
3635 				phba->mbox_ext_buf_ctx.mboxType, size);
3636 		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3637 					phba->mbox_ext_buf_ctx.nembType,
3638 					phba->mbox_ext_buf_ctx.mboxType,
3639 					dma_ebuf, sta_pos_addr,
3640 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3641 	} else {
3642 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3643 				"2938 SLI_CONFIG ext-buffer mailbox "
3644 				"command (x%x/x%x) failure, rc:x%x\n",
3645 				phba->mbox_ext_buf_ctx.nembType,
3646 				phba->mbox_ext_buf_ctx.mboxType, rc);
3647 	}
3648 
3649 
3650 	/* state change */
3651 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3652 	kfree(dd_data);
3653 	return job;
3654 }
3655 
3656 /**
3657  * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3658  * @phba: Pointer to HBA context object.
3659  * @pmboxq: Pointer to mailbox command.
3660  *
3661  * This is the completion handler function for mailbox read commands with
3662  * multiple external buffers.
3663  **/
3664 static void
3665 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3666 {
3667 	struct bsg_job *job;
3668 	struct fc_bsg_reply *bsg_reply;
3669 
3670 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3671 
3672 	/* handle the BSG job with mailbox command */
3673 	if (!job)
3674 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3675 
3676 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3677 			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3678 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3679 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3680 
3681 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3682 		lpfc_bsg_mbox_ext_session_reset(phba);
3683 
3684 	/* free base driver mailbox structure memory */
3685 	mempool_free(pmboxq, phba->mbox_mem_pool);
3686 
3687 	/* if the job is still active, call job done */
3688 	if (job) {
3689 		bsg_reply = job->reply;
3690 		bsg_job_done(job, bsg_reply->result,
3691 			       bsg_reply->reply_payload_rcv_len);
3692 	}
3693 	return;
3694 }
3695 
3696 /**
3697  * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3698  * @phba: Pointer to HBA context object.
3699  * @pmboxq: Pointer to mailbox command.
3700  *
3701  * This is the completion handler function for mailbox write commands
3702  * with multiple external buffers.
3703  **/
3704 static void
3705 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3706 {
3707 	struct bsg_job *job;
3708 	struct fc_bsg_reply *bsg_reply;
3709 
3710 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3711 
3712 	/* handle the BSG job with the mailbox command */
3713 	if (!job)
3714 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3715 
3716 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3717 			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3718 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3719 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3720 
3721 	/* free all memory, including dma buffers */
3722 	mempool_free(pmboxq, phba->mbox_mem_pool);
3723 	lpfc_bsg_mbox_ext_session_reset(phba);
3724 
3725 	/* if the job is still active, call job done */
3726 	if (job) {
3727 		bsg_reply = job->reply;
3728 		bsg_job_done(job, bsg_reply->result,
3729 			       bsg_reply->reply_payload_rcv_len);
3730 	}
3731 
3732 	return;
3733 }
3734 
3735 static void
3736 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3737 				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3738 				struct lpfc_dmabuf *ext_dmabuf)
3739 {
3740 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3741 
3742 	/* pointer to the start of mailbox command */
3743 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3744 
3745 	if (nemb_tp == nemb_mse) {
3746 		if (index == 0) {
3747 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3748 				mse[index].pa_hi =
3749 				putPaddrHigh(mbx_dmabuf->phys +
3750 					     sizeof(MAILBOX_t));
3751 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3752 				mse[index].pa_lo =
3753 				putPaddrLow(mbx_dmabuf->phys +
3754 					    sizeof(MAILBOX_t));
3755 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3756 					"2943 SLI_CONFIG(mse)[%d], "
3757 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3758 					index,
3759 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3760 					mse[index].buf_len,
3761 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3762 					mse[index].pa_hi,
3763 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3764 					mse[index].pa_lo);
3765 		} else {
3766 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3767 				mse[index].pa_hi =
3768 				putPaddrHigh(ext_dmabuf->phys);
3769 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3770 				mse[index].pa_lo =
3771 				putPaddrLow(ext_dmabuf->phys);
3772 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3773 					"2944 SLI_CONFIG(mse)[%d], "
3774 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3775 					index,
3776 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3777 					mse[index].buf_len,
3778 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3779 					mse[index].pa_hi,
3780 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3781 					mse[index].pa_lo);
3782 		}
3783 	} else {
3784 		if (index == 0) {
3785 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3786 				hbd[index].pa_hi =
3787 				putPaddrHigh(mbx_dmabuf->phys +
3788 					     sizeof(MAILBOX_t));
3789 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3790 				hbd[index].pa_lo =
3791 				putPaddrLow(mbx_dmabuf->phys +
3792 					    sizeof(MAILBOX_t));
3793 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3794 					"3007 SLI_CONFIG(hbd)[%d], "
3795 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3796 				index,
3797 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3798 				&sli_cfg_mbx->un.
3799 				sli_config_emb1_subsys.hbd[index]),
3800 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3801 				hbd[index].pa_hi,
3802 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3803 				hbd[index].pa_lo);
3804 
3805 		} else {
3806 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3807 				hbd[index].pa_hi =
3808 				putPaddrHigh(ext_dmabuf->phys);
3809 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3810 				hbd[index].pa_lo =
3811 				putPaddrLow(ext_dmabuf->phys);
3812 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3813 					"3008 SLI_CONFIG(hbd)[%d], "
3814 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3815 				index,
3816 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3817 				&sli_cfg_mbx->un.
3818 				sli_config_emb1_subsys.hbd[index]),
3819 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3820 				hbd[index].pa_hi,
3821 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3822 				hbd[index].pa_lo);
3823 		}
3824 	}
3825 	return;
3826 }
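
/*
 * Rough descriptor layout produced by lpfc_bsg_sli_cfg_dma_desc_setup()
 * above:
 *
 *	index 0:  the data area of the mailbox DMA page itself, i.e.
 *	          mbx_dmabuf->phys + sizeof(MAILBOX_t)
 *	index >0: a separately allocated external buffer, ext_dmabuf->phys
 *
 * For nemb_mse the address is written into the emb0 mse[] descriptor
 * array, for nemb_hbd into the emb1 hbd[] array.
 */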
3827 
3828 /**
3829  * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3830  * @phba: Pointer to HBA context object.
3831  * @job: Pointer to the job object.
3832  * @nemb_tp: Enumeration of the non-embedded mailbox command type.
3833  * @dmabuf: Pointer to a DMA buffer descriptor.
3834  *
3835  * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3836  * non-embedded external buffers.
3837  **/
3838 static int
3839 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3840 			      enum nemb_type nemb_tp,
3841 			      struct lpfc_dmabuf *dmabuf)
3842 {
3843 	struct fc_bsg_request *bsg_request = job->request;
3844 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3845 	struct dfc_mbox_req *mbox_req;
3846 	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3847 	uint32_t ext_buf_cnt, ext_buf_index;
3848 	struct lpfc_dmabuf *ext_dmabuf = NULL;
3849 	struct bsg_job_data *dd_data = NULL;
3850 	LPFC_MBOXQ_t *pmboxq = NULL;
3851 	MAILBOX_t *pmb;
3852 	uint8_t *pmbx;
3853 	int rc, i;
3854 
3855 	mbox_req =
3856 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3857 
3858 	/* pointer to the start of mailbox command */
3859 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3860 
3861 	if (nemb_tp == nemb_mse) {
3862 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3863 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3864 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3865 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3866 					"2945 Handled SLI_CONFIG(mse) rd, "
3867 					"ext_buf_cnt(%d) out of range(%d)\n",
3868 					ext_buf_cnt,
3869 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3870 			rc = -ERANGE;
3871 			goto job_error;
3872 		}
3873 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3874 				"2941 Handled SLI_CONFIG(mse) rd, "
3875 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3876 	} else {
3877 		/* sanity check on interface type for support */
3878 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3879 		    LPFC_SLI_INTF_IF_TYPE_2) {
3880 			rc = -ENODEV;
3881 			goto job_error;
3882 		}
3883 		/* nemb_tp == nemb_hbd */
3884 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3885 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3886 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3887 					"2946 Handled SLI_CONFIG(hbd) rd, "
3888 					"ext_buf_cnt(%d) out of range(%d)\n",
3889 					ext_buf_cnt,
3890 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3891 			rc = -ERANGE;
3892 			goto job_error;
3893 		}
3894 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3895 				"2942 Handled SLI_CONFIG(hbd) rd, "
3896 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3897 	}
3898 
3899 	/* before dma descriptor setup */
3900 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3901 					sta_pre_addr, dmabuf, ext_buf_cnt);
3902 
3903 	/* reject a non-embedded mailbox command with no external buffers */
3904 	if (ext_buf_cnt == 0) {
3905 		rc = -EPERM;
3906 		goto job_error;
3907 	} else if (ext_buf_cnt > 1) {
3908 		/* additional external read buffers */
3909 		for (i = 1; i < ext_buf_cnt; i++) {
3910 			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3911 			if (!ext_dmabuf) {
3912 				rc = -ENOMEM;
3913 				goto job_error;
3914 			}
3915 			list_add_tail(&ext_dmabuf->list,
3916 				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3917 		}
3918 	}
3919 
3920 	/* bsg tracking structure */
3921 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3922 	if (!dd_data) {
3923 		rc = -ENOMEM;
3924 		goto job_error;
3925 	}
3926 
3927 	/* mailbox command structure for base driver */
3928 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3929 	if (!pmboxq) {
3930 		rc = -ENOMEM;
3931 		goto job_error;
3932 	}
3933 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3934 
3935 	/* for the first external buffer */
3936 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3937 
3938 	/* for the rest of external buffer descriptors if any */
3939 	if (ext_buf_cnt > 1) {
3940 		ext_buf_index = 1;
3941 		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3942 				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3943 			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3944 						ext_buf_index, dmabuf,
3945 						curr_dmabuf);
3946 			ext_buf_index++;
3947 		}
3948 	}
3949 
3950 	/* after dma descriptor setup */
3951 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3952 					sta_pos_addr, dmabuf, ext_buf_cnt);
3953 
3954 	/* construct base driver mbox command */
3955 	pmb = &pmboxq->u.mb;
3956 	pmbx = (uint8_t *)dmabuf->virt;
3957 	memcpy(pmb, pmbx, sizeof(*pmb));
3958 	pmb->mbxOwner = OWN_HOST;
3959 	pmboxq->vport = phba->pport;
3960 
3961 	/* multi-buffer handling context */
3962 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3963 	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3964 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3965 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3966 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3967 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3968 
3969 	/* callback for multi-buffer read mailbox command */
3970 	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3971 
3972 	/* context fields to callback function */
3973 	pmboxq->ctx_buf = dd_data;
3974 	dd_data->type = TYPE_MBOX;
3975 	dd_data->set_job = job;
3976 	dd_data->context_un.mbox.pmboxq = pmboxq;
3977 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3978 	job->dd_data = dd_data;
3979 
3980 	/* state change */
3981 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3982 
3983 	/*
3984 	 * Non-embedded mailbox subcommand data gets byte swapped here because
3985 	 * the lower level driver code only does the first 64 mailbox words.
3986 	 */
3987 	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3988 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3989 		(nemb_tp == nemb_mse))
3990 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3991 			&pmbx[sizeof(MAILBOX_t)],
3992 				sli_cfg_mbx->un.sli_config_emb0_subsys.
3993 					mse[0].buf_len);
3994 
3995 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3996 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3997 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3998 				"2947 Issued SLI_CONFIG ext-buffer "
3999 				"mailbox command, rc:x%x\n", rc);
4000 		return SLI_CONFIG_HANDLED;
4001 	}
4002 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4003 			"2948 Failed to issue SLI_CONFIG ext-buffer "
4004 			"mailbox command, rc:x%x\n", rc);
4005 	rc = -EPIPE;
4006 
4007 job_error:
4008 	if (pmboxq)
4009 		mempool_free(pmboxq, phba->mbox_mem_pool);
4010 	lpfc_bsg_dma_page_list_free(phba,
4011 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4012 	kfree(dd_data);
4013 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4014 	return rc;
4015 }
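
/*
 * Note on the read path above: extra external read buffers are
 * pre-allocated and chained on mbox_ext_buf_ctx.ext_dmabuf_list before
 * the mailbox is issued; the application then retrieves them one per BSG
 * request through lpfc_bsg_read_ebuf_get() below.
 */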
4016 
4017 /**
4018  * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4019  * @phba: Pointer to HBA context object.
4020  * @job: Pointer to the job object.
4021  * @nemb_tp: Enumeration of the non-embedded mailbox command type.
4022  * @dmabuf: Pointer to a DMA buffer descriptor.
4023  *
4024  * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
4025  * non-embedded external buffers.
4026  **/
4027 static int
4028 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4029 			       enum nemb_type nemb_tp,
4030 			       struct lpfc_dmabuf *dmabuf)
4031 {
4032 	struct fc_bsg_request *bsg_request = job->request;
4033 	struct fc_bsg_reply *bsg_reply = job->reply;
4034 	struct dfc_mbox_req *mbox_req;
4035 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4036 	uint32_t ext_buf_cnt;
4037 	struct bsg_job_data *dd_data = NULL;
4038 	LPFC_MBOXQ_t *pmboxq = NULL;
4039 	MAILBOX_t *pmb;
4040 	uint8_t *mbx;
4041 	int rc = SLI_CONFIG_NOT_HANDLED, i;
4042 
4043 	mbox_req =
4044 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4045 
4046 	/* pointer to the start of mailbox command */
4047 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4048 
4049 	if (nemb_tp == nemb_mse) {
4050 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4051 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4052 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4053 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4054 					"2953 Failed SLI_CONFIG(mse) wr, "
4055 					"ext_buf_cnt(%d) out of range(%d)\n",
4056 					ext_buf_cnt,
4057 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
4058 			return -ERANGE;
4059 		}
4060 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4061 				"2949 Handled SLI_CONFIG(mse) wr, "
4062 				"ext_buf_cnt:%d\n", ext_buf_cnt);
4063 	} else {
4064 		/* sanity check on interface type for support */
4065 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4066 		    LPFC_SLI_INTF_IF_TYPE_2)
4067 			return -ENODEV;
4068 		/* nemb_tp == nemb_hbd */
4069 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4070 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4071 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4072 					"2954 Failed SLI_CONFIG(hbd) wr, "
4073 					"ext_buf_cnt(%d) out of range(%d)\n",
4074 					ext_buf_cnt,
4075 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
4076 			return -ERANGE;
4077 		}
4078 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4079 				"2950 Handled SLI_CONFIG(hbd) wr, "
4080 				"ext_buf_cnt:%d\n", ext_buf_cnt);
4081 	}
4082 
4083 	/* before dma buffer descriptor setup */
4084 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4085 					sta_pre_addr, dmabuf, ext_buf_cnt);
4086 
4087 	if (ext_buf_cnt == 0)
4088 		return -EPERM;
4089 
4090 	/* for the first external buffer */
4091 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4092 
4093 	/* after dma descriptor setup */
4094 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4095 					sta_pos_addr, dmabuf, ext_buf_cnt);
4096 
4097 	/* log the length of each additional external buffer for reference */
4098 	for (i = 1; i < ext_buf_cnt; i++) {
4099 		if (nemb_tp == nemb_mse)
4100 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4101 				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4102 				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4103 				mse[i].buf_len);
4104 		else
4105 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4106 				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4107 				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4108 				&sli_cfg_mbx->un.sli_config_emb1_subsys.
4109 				hbd[i]));
4110 	}
4111 
4112 	/* multi-buffer handling context */
4113 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4114 	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4115 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4116 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4117 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4118 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4119 
4120 	if (ext_buf_cnt == 1) {
4121 		/* bsg tracking structure */
4122 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4123 		if (!dd_data) {
4124 			rc = -ENOMEM;
4125 			goto job_error;
4126 		}
4127 
4128 		/* mailbox command structure for base driver */
4129 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4130 		if (!pmboxq) {
4131 			rc = -ENOMEM;
4132 			goto job_error;
4133 		}
4134 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4135 		pmb = &pmboxq->u.mb;
4136 		mbx = (uint8_t *)dmabuf->virt;
4137 		memcpy(pmb, mbx, sizeof(*pmb));
4138 		pmb->mbxOwner = OWN_HOST;
4139 		pmboxq->vport = phba->pport;
4140 
4141 		/* callback for multi-buffer write mailbox command */
4142 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4143 
4144 		/* context fields to callback function */
4145 		pmboxq->ctx_buf = dd_data;
4146 		dd_data->type = TYPE_MBOX;
4147 		dd_data->set_job = job;
4148 		dd_data->context_un.mbox.pmboxq = pmboxq;
4149 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4150 		job->dd_data = dd_data;
4151 
4152 		/* state change */
4153 
4154 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4155 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4156 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4157 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4158 					"2955 Issued SLI_CONFIG ext-buffer "
4159 					"mailbox command, rc:x%x\n", rc);
4160 			return SLI_CONFIG_HANDLED;
4161 		}
4162 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4163 				"2956 Failed to issue SLI_CONFIG ext-buffer "
4164 				"mailbox command, rc:x%x\n", rc);
4165 		rc = -EPIPE;
4166 		goto job_error;
4167 	}
4168 
4169 	/* wait for additional external buffers */
4170 
4171 	bsg_reply->result = 0;
4172 	bsg_job_done(job, bsg_reply->result,
4173 		       bsg_reply->reply_payload_rcv_len);
4174 	return SLI_CONFIG_HANDLED;
4175 
4176 job_error:
4177 	if (pmboxq)
4178 		mempool_free(pmboxq, phba->mbox_mem_pool);
4179 	kfree(dd_data);
4180 
4181 	return rc;
4182 }
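
/*
 * Note on the write path above: with a single external buffer the mailbox
 * command is issued immediately; with more than one, the job completes
 * right away and the remaining buffers arrive through subsequent BSG
 * requests handled by lpfc_bsg_write_ebuf_set() below, which issues the
 * mailbox once the last buffer has been received.
 */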
4183 
4184 /**
4185  * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4186  * @phba: Pointer to HBA context object.
4187  * @job: Pointer to the job object.
4188  * @dmabuf: Pointer to a DMA buffer descriptor.
4189  *
4190  * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4191  * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4192  * with embedded subsystem 0x1 and opcodes with external HBDs.
4193  **/
4194 static int
4195 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4196 			     struct lpfc_dmabuf *dmabuf)
4197 {
4198 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4199 	uint32_t subsys;
4200 	uint32_t opcode;
4201 	int rc = SLI_CONFIG_NOT_HANDLED;
4202 
4203 	/* state change on new multi-buffer pass-through mailbox command */
4204 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4205 
4206 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4207 
4208 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4209 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4210 		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4211 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4212 		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4213 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4214 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4215 			switch (opcode) {
4216 			case FCOE_OPCODE_READ_FCF:
4217 			case FCOE_OPCODE_GET_DPORT_RESULTS:
4218 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4219 						"2957 Handled SLI_CONFIG "
4220 						"subsys_fcoe, opcode:x%x\n",
4221 						opcode);
4222 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4223 							nemb_mse, dmabuf);
4224 				break;
4225 			case FCOE_OPCODE_ADD_FCF:
4226 			case FCOE_OPCODE_SET_DPORT_MODE:
4227 			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4228 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4229 						"2958 Handled SLI_CONFIG "
4230 						"subsys_fcoe, opcode:x%x\n",
4231 						opcode);
4232 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4233 							nemb_mse, dmabuf);
4234 				break;
4235 			default:
4236 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4237 						"2959 Reject SLI_CONFIG "
4238 						"subsys_fcoe, opcode:x%x\n",
4239 						opcode);
4240 				rc = -EPERM;
4241 				break;
4242 			}
4243 		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4244 			switch (opcode) {
4245 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4246 			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4247 			case COMN_OPCODE_GET_PROFILE_CONFIG:
4248 			case COMN_OPCODE_SET_FEATURES:
4249 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4250 						"3106 Handled SLI_CONFIG "
4251 						"subsys_comn, opcode:x%x\n",
4252 						opcode);
4253 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4254 							nemb_mse, dmabuf);
4255 				break;
4256 			default:
4257 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4258 						"3107 Reject SLI_CONFIG "
4259 						"subsys_comn, opcode:x%x\n",
4260 						opcode);
4261 				rc = -EPERM;
4262 				break;
4263 			}
4264 		} else {
4265 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4266 					"2977 Reject SLI_CONFIG "
4267 					"subsys:x%d, opcode:x%x\n",
4268 					subsys, opcode);
4269 			rc = -EPERM;
4270 		}
4271 	} else {
4272 		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4273 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4274 		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4275 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4276 		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4277 			switch (opcode) {
4278 			case COMN_OPCODE_READ_OBJECT:
4279 			case COMN_OPCODE_READ_OBJECT_LIST:
4280 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4281 						"2960 Handled SLI_CONFIG "
4282 						"subsys_comn, opcode:x%x\n",
4283 						opcode);
4284 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4285 							nemb_hbd, dmabuf);
4286 				break;
4287 			case COMN_OPCODE_WRITE_OBJECT:
4288 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4289 						"2961 Handled SLI_CONFIG "
4290 						"subsys_comn, opcode:x%x\n",
4291 						opcode);
4292 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4293 							nemb_hbd, dmabuf);
4294 				break;
4295 			default:
4296 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4297 						"2962 Not handled SLI_CONFIG "
4298 						"subsys_comn, opcode:x%x\n",
4299 						opcode);
4300 				rc = SLI_CONFIG_NOT_HANDLED;
4301 				break;
4302 			}
4303 		} else {
4304 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4305 					"2978 Not handled SLI_CONFIG "
4306 					"subsys:x%d, opcode:x%x\n",
4307 					subsys, opcode);
4308 			rc = SLI_CONFIG_NOT_HANDLED;
4309 		}
4310 	}
4311 
4312 	/* state reset on not handled new multi-buffer mailbox command */
4313 	if (rc != SLI_CONFIG_HANDLED)
4314 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4315 
4316 	return rc;
4317 }
4318 
4319 /**
4320  * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4321  * @phba: Pointer to HBA context object.
4322  *
4323  * This routine requests the abort of a pass-through mailbox command with
4324  * multiple external buffers due to an error condition.
4325  **/
4326 static void
4327 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4328 {
4329 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4330 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4331 	else
4332 		lpfc_bsg_mbox_ext_session_reset(phba);
4333 	return;
4334 }
4335 
4336 /**
4337  * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4338  * @phba: Pointer to HBA context object.
4339  * @job: Pointer to the job object.
4340  *
4341  * This routine copies the next mailbox read external buffer back to
4342  * user space through BSG.
4343  **/
4344 static int
4345 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4346 {
4347 	struct fc_bsg_reply *bsg_reply = job->reply;
4348 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4349 	struct lpfc_dmabuf *dmabuf;
4350 	uint8_t *pbuf;
4351 	uint32_t size;
4352 	uint32_t index;
4353 
4354 	index = phba->mbox_ext_buf_ctx.seqNum;
4355 	phba->mbox_ext_buf_ctx.seqNum++;
4356 
4357 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4358 			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4359 
4360 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4361 		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4362 			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4363 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4364 				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4365 				"buffer[%d], size:%d\n", index, size);
4366 	} else {
4367 		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4368 			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4369 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4370 				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4371 				"buffer[%d], size:%d\n", index, size);
4372 	}
4373 	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4374 		return -EPIPE;
4375 	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4376 				  struct lpfc_dmabuf, list);
4377 	list_del_init(&dmabuf->list);
4378 
4379 	/* after dma buffer descriptor setup */
4380 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4381 					mbox_rd, dma_ebuf, sta_pos_addr,
4382 					dmabuf, index);
4383 
4384 	pbuf = (uint8_t *)dmabuf->virt;
4385 	bsg_reply->reply_payload_rcv_len =
4386 		sg_copy_from_buffer(job->reply_payload.sg_list,
4387 				    job->reply_payload.sg_cnt,
4388 				    pbuf, size);
4389 
4390 	lpfc_bsg_dma_page_free(phba, dmabuf);
4391 
4392 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4393 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4394 				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4395 				"command session done\n");
4396 		lpfc_bsg_mbox_ext_session_reset(phba);
4397 	}
4398 
4399 	bsg_reply->result = 0;
4400 	bsg_job_done(job, bsg_reply->result,
4401 		       bsg_reply->reply_payload_rcv_len);
4402 
4403 	return SLI_CONFIG_HANDLED;
4404 }
4405 
4406 /**
4407  * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4408  * @phba: Pointer to HBA context object.
4409  * @job: Pointer to the job object.
4410  * @dmabuf: Pointer to a DMA buffer descriptor.
4411  *
4412  * This routine sets up the next mailbox write external buffer obtained
4413  * from user space through BSG.
4414  **/
4415 static int
4416 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4417 			struct lpfc_dmabuf *dmabuf)
4418 {
4419 	struct fc_bsg_reply *bsg_reply = job->reply;
4420 	struct bsg_job_data *dd_data = NULL;
4421 	LPFC_MBOXQ_t *pmboxq = NULL;
4422 	MAILBOX_t *pmb;
4423 	enum nemb_type nemb_tp;
4424 	uint8_t *pbuf;
4425 	uint32_t size;
4426 	uint32_t index;
4427 	int rc;
4428 
4429 	index = phba->mbox_ext_buf_ctx.seqNum;
4430 	phba->mbox_ext_buf_ctx.seqNum++;
4431 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4432 
4433 	pbuf = (uint8_t *)dmabuf->virt;
4434 	size = job->request_payload.payload_len;
4435 	sg_copy_to_buffer(job->request_payload.sg_list,
4436 			  job->request_payload.sg_cnt,
4437 			  pbuf, size);
4438 
4439 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4440 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4441 				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4442 				"buffer[%d], size:%d\n",
4443 				phba->mbox_ext_buf_ctx.seqNum, size);
4444 
4445 	} else {
4446 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4447 				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4448 				"buffer[%d], size:%d\n",
4449 				phba->mbox_ext_buf_ctx.seqNum, size);
4450 
4451 	}
4452 
4453 	/* set up external buffer descriptor and add to external buffer list */
4454 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4455 					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4456 					dmabuf);
4457 	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4458 
4459 	/* after write dma buffer */
4460 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4461 					mbox_wr, dma_ebuf, sta_pos_addr,
4462 					dmabuf, index);
4463 
4464 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4465 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4466 				"2968 SLI_CONFIG ext-buffer wr all %d "
4467 				"ebuffers received\n",
4468 				phba->mbox_ext_buf_ctx.numBuf);
4469 
4470 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4471 		if (!dd_data) {
4472 			rc = -ENOMEM;
4473 			goto job_error;
4474 		}
4475 
4476 		/* mailbox command structure for base driver */
4477 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4478 		if (!pmboxq) {
4479 			rc = -ENOMEM;
4480 			goto job_error;
4481 		}
4482 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4483 		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4484 		pmb = &pmboxq->u.mb;
4485 		memcpy(pmb, pbuf, sizeof(*pmb));
4486 		pmb->mbxOwner = OWN_HOST;
4487 		pmboxq->vport = phba->pport;
4488 
4489 		/* callback for multi-buffer write mailbox command */
4490 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4491 
4492 		/* context fields to callback function */
4493 		pmboxq->ctx_buf = dd_data;
4494 		dd_data->type = TYPE_MBOX;
4495 		dd_data->set_job = job;
4496 		dd_data->context_un.mbox.pmboxq = pmboxq;
4497 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4498 		job->dd_data = dd_data;
4499 
4500 		/* state change */
4501 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4502 
4503 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4504 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4505 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4506 					"2969 Issued SLI_CONFIG ext-buffer "
4507 					"mailbox command, rc:x%x\n", rc);
4508 			return SLI_CONFIG_HANDLED;
4509 		}
4510 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4511 				"2970 Failed to issue SLI_CONFIG ext-buffer "
4512 				"mailbox command, rc:x%x\n", rc);
4513 		rc = -EPIPE;
4514 		goto job_error;
4515 	}
4516 
4517 	/* wait for additional external buffers */
4518 	bsg_reply->result = 0;
4519 	bsg_job_done(job, bsg_reply->result,
4520 		       bsg_reply->reply_payload_rcv_len);
4521 	return SLI_CONFIG_HANDLED;
4522 
4523 job_error:
4524 	if (pmboxq)
4525 		mempool_free(pmboxq, phba->mbox_mem_pool);
4526 	lpfc_bsg_dma_page_free(phba, dmabuf);
4527 	kfree(dd_data);
4528 
4529 	return rc;
4530 }
4531 
4532 /**
4533  * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4534  * @phba: Pointer to HBA context object.
4535  * @job: Pointer to the job object.
4536  * @dmabuf: Pointer to a DMA buffer descriptor.
4537  *
4538  * This routine handles an external buffer for an SLI_CONFIG (0x9B) mailbox
4539  * command with multiple non-embedded external buffers.
4540  **/
4541 static int
4542 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4543 			     struct lpfc_dmabuf *dmabuf)
4544 {
4545 	int rc;
4546 
4547 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4548 			"2971 SLI_CONFIG buffer (type:x%x)\n",
4549 			phba->mbox_ext_buf_ctx.mboxType);
4550 
4551 	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4552 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4553 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4554 					"2972 SLI_CONFIG rd buffer state "
4555 					"mismatch:x%x\n",
4556 					phba->mbox_ext_buf_ctx.state);
4557 			lpfc_bsg_mbox_ext_abort(phba);
4558 			return -EPIPE;
4559 		}
4560 		rc = lpfc_bsg_read_ebuf_get(phba, job);
4561 		if (rc == SLI_CONFIG_HANDLED)
4562 			lpfc_bsg_dma_page_free(phba, dmabuf);
4563 	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4564 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4565 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4566 					"2973 SLI_CONFIG wr buffer state "
4567 					"mismatch:x%x\n",
4568 					phba->mbox_ext_buf_ctx.state);
4569 			lpfc_bsg_mbox_ext_abort(phba);
4570 			return -EPIPE;
4571 		}
4572 		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4573 	}
4574 	return rc;
4575 }
4576 
4577 /**
4578  * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4579  * @phba: Pointer to HBA context object.
4580  * @job: Pointer to the job object.
4581  * @dmabuf: Pointer to a DMA buffer descriptor.
4582  *
4583  * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4584  * (0x9B) mailbox commands and external buffers.
4585  **/
4586 static int
4587 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4588 			    struct lpfc_dmabuf *dmabuf)
4589 {
4590 	struct fc_bsg_request *bsg_request = job->request;
4591 	struct dfc_mbox_req *mbox_req;
4592 	int rc = SLI_CONFIG_NOT_HANDLED;
4593 
4594 	mbox_req =
4595 	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4596 
4597 	/* mbox command with/without single external buffer */
4598 	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4599 		return rc;
4600 
4601 	/* mbox command and first external buffer */
4602 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4603 		if (mbox_req->extSeqNum == 1) {
4604 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4605 					"2974 SLI_CONFIG mailbox: tag:%d, "
4606 					"seq:%d\n", mbox_req->extMboxTag,
4607 					mbox_req->extSeqNum);
4608 			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4609 			return rc;
4610 		}
4611 		goto sli_cfg_ext_error;
4612 	}
4613 
4614 	/*
4615 	 * handle additional external buffers
4616 	 */
4617 
4618 	/* check broken pipe conditions */
4619 	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4620 		goto sli_cfg_ext_error;
4621 	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4622 		goto sli_cfg_ext_error;
4623 	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4624 		goto sli_cfg_ext_error;
4625 
4626 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4627 			"2975 SLI_CONFIG mailbox external buffer: "
4628 			"extSta:x%x, tag:%d, seq:%d\n",
4629 			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4630 			mbox_req->extSeqNum);
4631 	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4632 	return rc;
4633 
4634 sli_cfg_ext_error:
4635 	/* all other cases, broken pipe */
4636 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4637 			"2976 SLI_CONFIG mailbox broken pipe: "
4638 			"ctxSta:x%x, ctxNumBuf:%d "
4639 			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4640 			phba->mbox_ext_buf_ctx.state,
4641 			phba->mbox_ext_buf_ctx.numBuf,
4642 			phba->mbox_ext_buf_ctx.mbxTag,
4643 			phba->mbox_ext_buf_ctx.seqNum,
4644 			mbox_req->extMboxTag, mbox_req->extSeqNum);
4645 
4646 	lpfc_bsg_mbox_ext_session_reset(phba);
4647 
4648 	return -EPIPE;
4649 }
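
/*
 * A note on the ext-buffer session protocol handled above: the first BSG
 * request of a session carries extSeqNum == 1 and starts the mailbox
 * command; every follow-up request must carry the same extMboxTag and an
 * extSeqNum exactly one greater than the last one seen, up to numBuf.
 * Anything else is treated as a broken pipe: the session is reset and
 * -EPIPE is returned to the application.
 */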
4650 
4651 /**
4652  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4653  * @phba: Pointer to HBA context object.
4654  * @job: Pointer to the job object.
4655  * @vport: Pointer to a vport object.
4656  *
4657  * Allocate a tracking object and mailbox command memory, get a mailbox
4658  * from the mailbox pool, and copy in the caller's mailbox command.
4659  *
4660  * If the port is offline or the SLI layer is not active, we must poll for
4661  * the command completion (the port may be being reset) and complete the job;
4662  * otherwise issue the mailbox command and let the completion handler finish it.
4663  **/
4664 static int
4665 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4666 	struct lpfc_vport *vport)
4667 {
4668 	struct fc_bsg_request *bsg_request = job->request;
4669 	struct fc_bsg_reply *bsg_reply = job->reply;
4670 	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4671 	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4672 	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4673 	uint8_t *pmbx = NULL;
4674 	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4675 	struct lpfc_dmabuf *dmabuf = NULL;
4676 	struct dfc_mbox_req *mbox_req;
4677 	struct READ_EVENT_LOG_VAR *rdEventLog;
4678 	uint32_t transmit_length, receive_length, mode;
4679 	struct lpfc_mbx_sli4_config *sli4_config;
4680 	struct lpfc_mbx_nembed_cmd *nembed_sge;
4681 	struct ulp_bde64 *bde;
4682 	uint8_t *ext = NULL;
4683 	int rc = 0;
4684 	uint8_t *from;
4685 	uint32_t size;
4686 
4687 	/* in case no data is transferred */
4688 	bsg_reply->reply_payload_rcv_len = 0;
4689 
4690 	/* sanity check to protect driver */
4691 	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4692 	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4693 		rc = -ERANGE;
4694 		goto job_done;
4695 	}
4696 
4697 	/*
4698 	 * Don't allow mailbox commands to be sent when blocked or when in
4699 	 * the middle of discovery
4700 	 */
4701 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4702 		rc = -EAGAIN;
4703 		goto job_done;
4704 	}
4705 
4706 	mbox_req =
4707 	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4708 
4709 	/* check if requested extended data lengths are valid */
4710 	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4711 	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4712 		rc = -ERANGE;
4713 		goto job_done;
4714 	}
4715 
4716 	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4717 	if (!dmabuf || !dmabuf->virt) {
4718 		rc = -ENOMEM;
4719 		goto job_done;
4720 	}
4721 
4722 	/* Get the mailbox command or external buffer from BSG */
4723 	pmbx = (uint8_t *)dmabuf->virt;
4724 	size = job->request_payload.payload_len;
4725 	sg_copy_to_buffer(job->request_payload.sg_list,
4726 			  job->request_payload.sg_cnt, pmbx, size);
4727 
4728 	/* Handle possible SLI_CONFIG with non-embedded payloads */
4729 	if (phba->sli_rev == LPFC_SLI_REV4) {
4730 		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4731 		if (rc == SLI_CONFIG_HANDLED)
4732 			goto job_cont;
4733 		if (rc)
4734 			goto job_done;
4735 		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4736 	}
4737 
4738 	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4739 	if (rc != 0)
4740 		goto job_done; /* must be negative */
4741 
4742 	/* allocate our bsg tracking structure */
4743 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4744 	if (!dd_data) {
4745 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4746 				"2727 Failed allocation of dd_data\n");
4747 		rc = -ENOMEM;
4748 		goto job_done;
4749 	}
4750 
4751 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4752 	if (!pmboxq) {
4753 		rc = -ENOMEM;
4754 		goto job_done;
4755 	}
4756 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4757 
4758 	pmb = &pmboxq->u.mb;
4759 	memcpy(pmb, pmbx, sizeof(*pmb));
4760 	pmb->mbxOwner = OWN_HOST;
4761 	pmboxq->vport = vport;
4762 
4763 	/* If HBA encountered an error attention, allow only DUMP
4764 	 * or RESTART mailbox commands until the HBA is restarted.
4765 	 */
4766 	if (phba->pport->stopped &&
4767 	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4768 	    pmb->mbxCommand != MBX_RESTART &&
4769 	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4770 	    pmb->mbxCommand != MBX_WRITE_WWN)
4771 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4772 				"2797 mbox: Issued mailbox cmd "
4773 				"0x%x while in stopped state.\n",
4774 				pmb->mbxCommand);
4775 
4776 	/* extended mailbox commands will need an extended buffer */
4777 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4778 		from = pmbx;
4779 		ext = from + sizeof(MAILBOX_t);
4780 		pmboxq->ctx_buf = ext;
4781 		pmboxq->in_ext_byte_len =
4782 			mbox_req->inExtWLen * sizeof(uint32_t);
4783 		pmboxq->out_ext_byte_len =
4784 			mbox_req->outExtWLen * sizeof(uint32_t);
4785 		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4786 	}
4787 
4788 	/* BIU diag will need a kernel buffer to transfer the data;
4789 	 * allocate our own buffer and set up the mailbox command to
4790 	 * use ours
4791 	 */
4792 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4793 		transmit_length = pmb->un.varWords[1];
4794 		receive_length = pmb->un.varWords[4];
4795 		/* transmit length cannot be greater than receive length or
4796 		 * mailbox extension size
4797 		 */
4798 		if ((transmit_length > receive_length) ||
4799 			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4800 			rc = -ERANGE;
4801 			goto job_done;
4802 		}
4803 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4804 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4805 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4806 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4807 
4808 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4809 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4810 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4811 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4812 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4813 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4814 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4815 		rdEventLog = &pmb->un.varRdEventLog;
4816 		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4817 		mode = bf_get(lpfc_event_log, rdEventLog);
4818 
4819 		/* receive length cannot be greater than mailbox
4820 		 * extension size
4821 		 */
4822 		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4823 			rc = -ERANGE;
4824 			goto job_done;
4825 		}
4826 
4827 		/* mode zero uses a bde like biu diags command */
4828 		if (mode == 0) {
4829 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4830 							+ sizeof(MAILBOX_t));
4831 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4832 							+ sizeof(MAILBOX_t));
4833 		}
4834 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4835 		/* Let type 4 (well known data) through because the data is
4836 		 * returned in varwords[4-8];
4837 		 * otherwise check the receive length and fetch the buffer addr
4838 		 */
4839 		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4840 			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4841 			/* rebuild the command for sli4 using our own buffers
4842 			 * like we do for biu diags
4843 			 */
4844 			receive_length = pmb->un.varWords[2];
4845 			/* a receive length of zero is invalid for the
4846 			 * rebuilt sli4 dump command
4847 			 */
4848 			if (receive_length == 0) {
4849 				rc = -ERANGE;
4850 				goto job_done;
4851 			}
4852 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4853 						+ sizeof(MAILBOX_t));
4854 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4855 						+ sizeof(MAILBOX_t));
4856 		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4857 			pmb->un.varUpdateCfg.co) {
4858 			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4859 
4860 			/* bde size cannot be greater than mailbox ext size */
4861 			if (bde->tus.f.bdeSize >
4862 			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4863 				rc = -ERANGE;
4864 				goto job_done;
4865 			}
4866 			bde->addrHigh = putPaddrHigh(dmabuf->phys
4867 						+ sizeof(MAILBOX_t));
4868 			bde->addrLow = putPaddrLow(dmabuf->phys
4869 						+ sizeof(MAILBOX_t));
4870 		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4871 			/* Handling non-embedded SLI_CONFIG mailbox command */
4872 			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4873 			if (!bf_get(lpfc_mbox_hdr_emb,
4874 			    &sli4_config->header.cfg_mhdr)) {
4875 				/* rebuild the command for sli4 using our
4876 				 * own buffers like we do for biu diags
4877 				 */
4878 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4879 						&pmb->un.varWords[0];
4880 				receive_length = nembed_sge->sge[0].length;
4881 
4882 				/* receive length cannot be greater than
4883 				 * mailbox extension size
4884 				 */
4885 				if ((receive_length == 0) ||
4886 				    (receive_length >
4887 				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4888 					rc = -ERANGE;
4889 					goto job_done;
4890 				}
4891 
4892 				nembed_sge->sge[0].pa_hi =
4893 						putPaddrHigh(dmabuf->phys
4894 						   + sizeof(MAILBOX_t));
4895 				nembed_sge->sge[0].pa_lo =
4896 						putPaddrLow(dmabuf->phys
4897 						   + sizeof(MAILBOX_t));
4898 			}
4899 		}
4900 	}
4901 
4902 	dd_data->context_un.mbox.dmabuffers = dmabuf;
4903 
4904 	/* setup wake call as IOCB callback */
4905 	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4906 
4907 	/* setup context field to pass wait_queue pointer to wake function */
4908 	pmboxq->ctx_ndlp = dd_data;
4909 	dd_data->type = TYPE_MBOX;
4910 	dd_data->set_job = job;
4911 	dd_data->context_un.mbox.pmboxq = pmboxq;
4912 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4913 	dd_data->context_un.mbox.ext = ext;
4914 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4915 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4916 	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4917 	job->dd_data = dd_data;
4918 
4919 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4920 	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4921 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4922 		if (rc != MBX_SUCCESS) {
4923 			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4924 			goto job_done;
4925 		}
4926 
4927 		/* job finished, copy the data */
4928 		memcpy(pmbx, pmb, sizeof(*pmb));
4929 		bsg_reply->reply_payload_rcv_len =
4930 			sg_copy_from_buffer(job->reply_payload.sg_list,
4931 					    job->reply_payload.sg_cnt,
4932 					    pmbx, size);
4933 		/* not waiting; mbox already completed */
4934 		rc = 0;
4935 		goto job_done;
4936 	}
4937 
4938 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4939 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4940 		return 1; /* job started */
4941 
4942 job_done:
4943 	/* common exit for error or job completed inline */
4944 	if (pmboxq)
4945 		mempool_free(pmboxq, phba->mbox_mem_pool);
4946 	lpfc_bsg_dma_page_free(phba, dmabuf);
4947 	kfree(dd_data);
4948 
4949 job_cont:
4950 	return rc;
4951 }
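
/*
 * Return convention for lpfc_bsg_issue_mbox() above, as consumed by
 * lpfc_bsg_mbox_cmd() below: 0 means the job completed inline (polled
 * mode), 1 (including the SLI_CONFIG_HANDLED paths) means the job has
 * been handed off and will be finished by a completion handler or has
 * already been completed by the ext-buffer session code, and a negative
 * errno reports a failure with resources already released.
 */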
4952 
4953 /**
4954  * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4955  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4956  **/
4957 static int
4958 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4959 {
4960 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4961 	struct fc_bsg_request *bsg_request = job->request;
4962 	struct fc_bsg_reply *bsg_reply = job->reply;
4963 	struct lpfc_hba *phba = vport->phba;
4964 	struct dfc_mbox_req *mbox_req;
4965 	int rc = 0;
4966 
4967 	/* mix-and-match backward compatibility */
4968 	bsg_reply->reply_payload_rcv_len = 0;
4969 	if (job->request_len <
4970 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4971 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4972 				"2737 Mix-and-match backward compatibility "
4973 				"between MBOX_REQ old size:%d and "
4974 				"new request size:%d\n",
4975 				(int)(job->request_len -
4976 				      sizeof(struct fc_bsg_request)),
4977 				(int)sizeof(struct dfc_mbox_req));
4978 		mbox_req = (struct dfc_mbox_req *)
4979 				bsg_request->rqst_data.h_vendor.vendor_cmd;
4980 		mbox_req->extMboxTag = 0;
4981 		mbox_req->extSeqNum = 0;
4982 	}
4983 
4984 	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4985 
4986 	if (rc == 0) {
4987 		/* job done */
4988 		bsg_reply->result = 0;
4989 		job->dd_data = NULL;
4990 		bsg_job_done(job, bsg_reply->result,
4991 			       bsg_reply->reply_payload_rcv_len);
4992 	} else if (rc == 1) {
4993 		/* job submitted, will complete later */
4994 		rc = 0; /* return zero, no error */
4995 	} else {
4996 		/* some error occurred */
4997 		bsg_reply->result = rc;
4998 		job->dd_data = NULL;
4999 	}
5000 
5001 	return rc;
5002 }
5003 
5004 /**
5005  * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
5006  * @phba: Pointer to HBA context object.
5007  * @cmdiocbq: Pointer to command iocb.
5008  * @rspiocbq: Pointer to response iocb.
5009  *
5010  * This function is the completion handler for iocbs issued using
5011  * lpfc_menlo_cmd. It is called by the ring event handler
5012  * function without any lock held and can run in both worker
5013  * thread context and interrupt context; it can also be called
5014  * from another thread that cleans up the SLI layer objects.
5015  * Rather than waking a sleeping waiter, this handler copies the
5016  * response data into the BSG job's reply payload, maps any
5017  * ulpStatus error to a negative errno, releases the command
5018  * resources, and then completes the job if it is still
5019  * active.
5020  **/
5021 static void
5022 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5023 			struct lpfc_iocbq *cmdiocbq,
5024 			struct lpfc_iocbq *rspiocbq)
5025 {
5026 	struct bsg_job_data *dd_data;
5027 	struct bsg_job *job;
5028 	struct fc_bsg_reply *bsg_reply;
5029 	IOCB_t *rsp;
5030 	struct lpfc_dmabuf *bmp, *cmp, *rmp;
5031 	struct lpfc_bsg_menlo *menlo;
5032 	unsigned long flags;
5033 	struct menlo_response *menlo_resp;
5034 	unsigned int rsp_size;
5035 	int rc = 0;
5036 
5037 	dd_data = cmdiocbq->context1;
5038 	cmp = cmdiocbq->context2;
5039 	bmp = cmdiocbq->context3;
5040 	menlo = &dd_data->context_un.menlo;
5041 	rmp = menlo->rmp;
5042 	rsp = &rspiocbq->iocb;
5043 
5044 	/* Determine if job has been aborted */
5045 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5046 	job = dd_data->set_job;
5047 	if (job) {
5048 		bsg_reply = job->reply;
5049 		/* Prevent timeout handling from trying to abort job  */
5050 		job->dd_data = NULL;
5051 	}
5052 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5053 
5054 	/* Copy the job data or set the failing status for the job */
5055 
5056 	if (job) {
5057 		/* always return the xri; it is used in the case
5058 		 * of a menlo download to allow the data to be sent as a
5059 		 * continuation of the exchange.
5060 		 */
5061 
5062 		menlo_resp = (struct menlo_response *)
5063 			bsg_reply->reply_data.vendor_reply.vendor_rsp;
5064 		menlo_resp->xri = rsp->ulpContext;
5065 		if (rsp->ulpStatus) {
5066 			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5067 				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5068 				case IOERR_SEQUENCE_TIMEOUT:
5069 					rc = -ETIMEDOUT;
5070 					break;
5071 				case IOERR_INVALID_RPI:
5072 					rc = -EFAULT;
5073 					break;
5074 				default:
5075 					rc = -EACCES;
5076 					break;
5077 				}
5078 			} else {
5079 				rc = -EACCES;
5080 			}
5081 		} else {
5082 			rsp_size = rsp->un.genreq64.bdl.bdeSize;
5083 			bsg_reply->reply_payload_rcv_len =
5084 				lpfc_bsg_copy_data(rmp, &job->reply_payload,
5085 						   rsp_size, 0);
5086 		}
5087 
5088 	}
5089 
5090 	lpfc_sli_release_iocbq(phba, cmdiocbq);
5091 	lpfc_free_bsg_buffers(phba, cmp);
5092 	lpfc_free_bsg_buffers(phba, rmp);
5093 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5094 	kfree(bmp);
5095 	kfree(dd_data);
5096 
5097 	/* Complete the job if active */
5098 
5099 	if (job) {
5100 		bsg_reply->result = rc;
5101 		bsg_job_done(job, bsg_reply->result,
5102 			       bsg_reply->reply_payload_rcv_len);
5103 	}
5104 
5105 	return;
5106 }
5107 
5108 /**
5109  * lpfc_menlo_cmd - send an ioctl for menlo hardware
5110  * @job: fc_bsg_job to handle
5111  *
5112  * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
5113  * all the command completions will return the xri for the command.
5114  * For menlo data requests a gen request 64 CX is used to continue the exchange
5115  * supplied in the menlo request header xri field.
5116  **/
5117 static int
5118 lpfc_menlo_cmd(struct bsg_job *job)
5119 {
5120 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5121 	struct fc_bsg_request *bsg_request = job->request;
5122 	struct fc_bsg_reply *bsg_reply = job->reply;
5123 	struct lpfc_hba *phba = vport->phba;
5124 	struct lpfc_iocbq *cmdiocbq;
5125 	IOCB_t *cmd;
5126 	int rc = 0;
5127 	struct menlo_command *menlo_cmd;
5128 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5129 	int request_nseg;
5130 	int reply_nseg;
5131 	struct bsg_job_data *dd_data;
5132 	struct ulp_bde64 *bpl = NULL;
5133 
5134 	/* in case no data is returned, return just the return code */
5135 	bsg_reply->reply_payload_rcv_len = 0;
5136 
5137 	if (job->request_len <
5138 	    sizeof(struct fc_bsg_request) +
5139 		sizeof(struct menlo_command)) {
5140 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5141 				"2784 Received MENLO_CMD request below "
5142 				"minimum size\n");
5143 		rc = -ERANGE;
5144 		goto no_dd_data;
5145 	}
5146 
5147 	if (job->reply_len < sizeof(*bsg_reply) +
5148 				sizeof(struct menlo_response)) {
5149 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5150 				"2785 Received MENLO_CMD reply below "
5151 				"minimum size\n");
5152 		rc = -ERANGE;
5153 		goto no_dd_data;
5154 	}
5155 
5156 	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5157 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5158 				"2786 Adapter does not support menlo "
5159 				"commands\n");
5160 		rc = -EPERM;
5161 		goto no_dd_data;
5162 	}
5163 
5164 	menlo_cmd = (struct menlo_command *)
5165 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5166 
5167 	/* allocate our bsg tracking structure */
5168 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5169 	if (!dd_data) {
5170 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5171 				"2787 Failed allocation of dd_data\n");
5172 		rc = -ENOMEM;
5173 		goto no_dd_data;
5174 	}
5175 
5176 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5177 	if (!bmp) {
5178 		rc = -ENOMEM;
5179 		goto free_dd;
5180 	}
5181 
5182 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5183 	if (!bmp->virt) {
5184 		rc = -ENOMEM;
5185 		goto free_bmp;
5186 	}
5187 
5188 	INIT_LIST_HEAD(&bmp->list);
5189 
5190 	bpl = (struct ulp_bde64 *)bmp->virt;
5191 	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5192 	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5193 				     1, bpl, &request_nseg);
5194 	if (!cmp) {
5195 		rc = -ENOMEM;
5196 		goto free_bmp;
5197 	}
5198 	lpfc_bsg_copy_data(cmp, &job->request_payload,
5199 			   job->request_payload.payload_len, 1);
5200 
5201 	bpl += request_nseg;
5202 	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5203 	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5204 				     bpl, &reply_nseg);
5205 	if (!rmp) {
5206 		rc = -ENOMEM;
5207 		goto free_cmp;
5208 	}
5209 
5210 	cmdiocbq = lpfc_sli_get_iocbq(phba);
5211 	if (!cmdiocbq) {
5212 		rc = -ENOMEM;
5213 		goto free_rmp;
5214 	}
5215 
5216 	cmd = &cmdiocbq->iocb;
5217 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5218 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5219 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5220 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5221 	cmd->un.genreq64.bdl.bdeSize =
5222 	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5223 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5224 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5225 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5226 	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5227 	cmd->ulpBdeCount = 1;
5228 	cmd->ulpClass = CLASS3;
5229 	cmd->ulpOwner = OWN_CHIP;
5230 	cmd->ulpLe = 1; /* LE: last entry in the BDE list */
5231 	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
5232 	cmdiocbq->vport = phba->pport;
5233 	/* We want the firmware to time out before we do */
5234 	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5235 	cmdiocbq->cmd_cmpl = lpfc_bsg_menlo_cmd_cmp;
5236 	cmdiocbq->context1 = dd_data;
5237 	cmdiocbq->context2 = cmp;
5238 	cmdiocbq->context3 = bmp;
5239 	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5240 		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5241 		cmd->ulpPU = MENLO_PU; /* 3 */
5242 		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5243 		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5244 	} else {
5245 		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5246 		cmd->ulpPU = 1;
5247 		cmd->un.ulpWord[4] = 0;
5248 		cmd->ulpContext = menlo_cmd->xri;
5249 	}
5250 
5251 	dd_data->type = TYPE_MENLO;
5252 	dd_data->set_job = job;
5253 	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5254 	dd_data->context_un.menlo.rmp = rmp;
5255 	job->dd_data = dd_data;
5256 
5257 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5258 		MENLO_TIMEOUT - 5);
5259 	if (rc == IOCB_SUCCESS)
5260 		return 0; /* done for now */
5261 
5262 	lpfc_sli_release_iocbq(phba, cmdiocbq);
5263 
5264 free_rmp:
5265 	lpfc_free_bsg_buffers(phba, rmp);
5266 free_cmp:
5267 	lpfc_free_bsg_buffers(phba, cmp);
5268 free_bmp:
5269 	if (bmp->virt)
5270 		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5271 	kfree(bmp);
5272 free_dd:
5273 	kfree(dd_data);
5274 no_dd_data:
5275 	/* make error code available to userspace */
5276 	bsg_reply->result = rc;
5277 	job->dd_data = NULL;
5278 	return rc;
5279 }
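
/*
 * Illustrative userspace sketch (not part of the driver): how a vendor
 * request such as the menlo command above reaches lpfc through the FC
 * transport bsg node.  The device path, buffer sizes and payload are
 * assumptions; LPFC_BSG_VENDOR_MENLO_CMD is defined in the driver's
 * lpfc_bsg.h and is not part of the kernel uapi.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/bsg.h>
 *	#include <scsi/sg.h>
 *	#include <scsi/scsi_bsg_fc.h>
 *
 *	static int send_lpfc_vendor_cmd(const char *bsg_path, uint32_t opcode)
 *	{
 *		unsigned char req_buf[sizeof(struct fc_bsg_request) + 32];
 *		unsigned char rsp_buf[sizeof(struct fc_bsg_reply) + 32];
 *		struct fc_bsg_request *req = (struct fc_bsg_request *)req_buf;
 *		struct sg_io_v4 io;
 *		int fd, rc;
 *
 *		fd = open(bsg_path, O_RDWR);	// e.g. "/dev/bsg/fc_host0"
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(req_buf, 0, sizeof(req_buf));
 *		req->msgcode = FC_BSG_HST_VENDOR;
 *		// vendor_cmd[0] selects the case in lpfc_bsg_hst_vendor()
 *		req->rqst_data.h_vendor.vendor_cmd[0] = opcode;
 *
 *		memset(&io, 0, sizeof(io));
 *		io.guard = 'Q';
 *		io.protocol = BSG_PROTOCOL_SCSI;
 *		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *		io.request = (uint64_t)(uintptr_t)req_buf;
 *		io.request_len = sizeof(req_buf);
 *		io.response = (uint64_t)(uintptr_t)rsp_buf;
 *		io.max_response_len = sizeof(rsp_buf);
 *		io.timeout = 30000;	// milliseconds
 *
 *		rc = ioctl(fd, SG_IO, &io);
 *		close(fd);
 *		return rc;
 *	}
 */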
5280 
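/**
 * lpfc_forced_link_speed - report forced link speed support
 * @job: fc_bsg_job to handle
 *
 * Report back to the requester whether the adapter supports forcing
 * the link speed (HBA_FORCED_LINK_SPEED).
 **/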
5281 static int
5282 lpfc_forced_link_speed(struct bsg_job *job)
5283 {
5284 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5285 	struct lpfc_vport *vport = shost_priv(shost);
5286 	struct lpfc_hba *phba = vport->phba;
5287 	struct fc_bsg_reply *bsg_reply = job->reply;
5288 	struct forced_link_speed_support_reply *forced_reply;
5289 	int rc = 0;
5290 
5291 	if (job->request_len <
5292 	    sizeof(struct fc_bsg_request) +
5293 	    sizeof(struct get_forced_link_speed_support)) {
5294 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5295 				"0048 Received FORCED_LINK_SPEED request "
5296 				"below minimum size\n");
5297 		rc = -EINVAL;
5298 		goto job_error;
5299 	}
5300 
5301 	forced_reply = (struct forced_link_speed_support_reply *)
5302 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5303 
5304 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5305 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5306 				"0049 Received FORCED_LINK_SPEED reply below "
5307 				"minimum size\n");
5308 		rc = -EINVAL;
5309 		goto job_error;
5310 	}
5311 
5312 	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5313 				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5314 				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5315 job_error:
5316 	bsg_reply->result = rc;
5317 	if (rc == 0)
5318 		bsg_job_done(job, bsg_reply->result,
5319 			       bsg_reply->reply_payload_rcv_len);
5320 	return rc;
5321 }
5322 
5323 /**
5324  * lpfc_check_fwlog_support - check FW log support on the adapter
5325  * @phba: Pointer to HBA context object.
5326  *
5327  * Check whether FW logging is supported by the adapter and enabled.
5328  **/
5329 int
5330 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5331 {
5332 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5333 
5334 	ras_fwlog = &phba->ras_fwlog;
5335 
5336 	if (!ras_fwlog->ras_hwsupport)
5337 		return -EACCES;
5338 	else if (!ras_fwlog->ras_enabled)
5339 		return -EPERM;
5340 	else
5341 		return 0;
5342 }
5343 
5344 /**
5345  * lpfc_bsg_get_ras_config - get RAS configuration settings
5346  * @job: fc_bsg_job to handle
5347  *
5348  * Return the RAS configuration values that are currently set.
5349  **/
5350 static int
5351 lpfc_bsg_get_ras_config(struct bsg_job *job)
5352 {
5353 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5354 	struct lpfc_vport *vport = shost_priv(shost);
5355 	struct fc_bsg_reply *bsg_reply = job->reply;
5356 	struct lpfc_hba *phba = vport->phba;
5357 	struct lpfc_bsg_get_ras_config_reply *ras_reply;
5358 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5359 	int rc = 0;
5360 
5361 	if (job->request_len <
5362 	    sizeof(struct fc_bsg_request) +
5363 	    sizeof(struct lpfc_bsg_ras_req)) {
5364 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5365 				"6192 FW_LOG request received "
5366 				"below minimum size\n");
5367 		rc = -EINVAL;
5368 		goto ras_job_error;
5369 	}
5370 
5371 	/* Check FW log status */
5372 	rc = lpfc_check_fwlog_support(phba);
5373 	if (rc)
5374 		goto ras_job_error;
5375 
5376 	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5377 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5378 
5379 	/* Current logging state */
5380 	spin_lock_irq(&phba->hbalock);
5381 	if (ras_fwlog->state == ACTIVE)
5382 		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5383 	else
5384 		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5385 	spin_unlock_irq(&phba->hbalock);
5386 
5387 	ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5388 	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5389 
5390 ras_job_error:
5391 	/* make error code available to userspace */
5392 	bsg_reply->result = rc;
5393 
5394 	/* complete the job back to userspace */
5395 	if (!rc)
5396 		bsg_job_done(job, bsg_reply->result,
5397 			     bsg_reply->reply_payload_rcv_len);
5398 	return rc;
5399 }
5400 
5401 /**
5402  * lpfc_bsg_set_ras_config - set FW logging parameters
5403  * @job: fc_bsg_job to handle
5404  *
5405  * Set log-level parameters for FW logging in host memory.
5406  **/
5407 static int
5408 lpfc_bsg_set_ras_config(struct bsg_job *job)
5409 {
5410 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5411 	struct lpfc_vport *vport = shost_priv(shost);
5412 	struct lpfc_hba *phba = vport->phba;
5413 	struct lpfc_bsg_set_ras_config_req *ras_req;
5414 	struct fc_bsg_request *bsg_request = job->request;
5415 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5416 	struct fc_bsg_reply *bsg_reply = job->reply;
5417 	uint8_t action = 0, log_level = 0;
5418 	int rc = 0, action_status = 0;
5419 
5420 	if (job->request_len <
5421 	    sizeof(struct fc_bsg_request) +
5422 	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
5423 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5424 				"6182 Received RAS_LOG request "
5425 				"below minimum size\n");
5426 		rc = -EINVAL;
5427 		goto ras_job_error;
5428 	}
5429 
5430 	/* Check FW log status */
5431 	rc = lpfc_check_fwlog_support(phba);
5432 	if (rc)
5433 		goto ras_job_error;
5434 
5435 	ras_req = (struct lpfc_bsg_set_ras_config_req *)
5436 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5437 	action = ras_req->action;
5438 	log_level = ras_req->log_level;
5439 
5440 	if (action == LPFC_RASACTION_STOP_LOGGING) {
5441 		/* Check if already disabled */
5442 		spin_lock_irq(&phba->hbalock);
5443 		if (ras_fwlog->state != ACTIVE) {
5444 			spin_unlock_irq(&phba->hbalock);
5445 			rc = -ESRCH;
5446 			goto ras_job_error;
5447 		}
5448 		spin_unlock_irq(&phba->hbalock);
5449 
5450 		/* Disable logging */
5451 		lpfc_ras_stop_fwlog(phba);
5452 	} else {
5453 		/* action == LPFC_RASACTION_START_LOGGING */
5454 
5455 		/* Even if FW-logging is already active, re-initialize it
5456 		 * with the new log-level. Return -EINPROGRESS ("logging
5457 		 * already running") to the caller.
5458 		 */
5459 		spin_lock_irq(&phba->hbalock);
5460 		if (ras_fwlog->state != INACTIVE)
5461 			action_status = -EINPROGRESS;
5462 		spin_unlock_irq(&phba->hbalock);
5463 
5464 		/* Enable logging */
5465 		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5466 					      LPFC_RAS_ENABLE_LOGGING);
5467 		if (rc) {
5468 			rc = -EINVAL;
5469 			goto ras_job_error;
5470 		}
5471 
5472 		/* Check if FW-logging is re-initialized */
5473 		if (action_status == -EINPROGRESS)
5474 			rc = action_status;
5475 	}
5476 ras_job_error:
5477 	/* make error code available to userspace */
5478 	bsg_reply->result = rc;
5479 
5480 	/* complete the job back to userspace */
5481 	if (!rc)
5482 		bsg_job_done(job, bsg_reply->result,
5483 			     bsg_reply->reply_payload_rcv_len);
5484 
5485 	return rc;
5486 }
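
/*
 * Illustrative sketch (not part of the driver): the two request fields
 * a caller fills for this handler, as read above from struct
 * lpfc_bsg_set_ras_config_req.  The log-level value is an arbitrary
 * example; its meaning is adapter-defined.
 *
 *	struct lpfc_bsg_set_ras_config_req ras_req = {0};
 *
 *	ras_req.action = LPFC_RASACTION_START_LOGGING;
 *	ras_req.log_level = 4;	// example verbosity level
 */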
5487 
5488 /**
5489  * lpfc_bsg_get_ras_lwpd - get log write position data
5490  * @job: fc_bsg_job to handle
5491  *
5492  * Get the offset and wrap count of the firmware log messages
5493  * written to host memory.
5494  **/
5495 static int
5496 lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5497 {
5498 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5499 	struct lpfc_vport *vport = shost_priv(shost);
5500 	struct lpfc_bsg_get_ras_lwpd *ras_reply;
5501 	struct lpfc_hba *phba = vport->phba;
5502 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5503 	struct fc_bsg_reply *bsg_reply = job->reply;
5504 	u32 *lwpd_ptr = NULL;
5505 	int rc = 0;
5506 
5507 	rc = lpfc_check_fwlog_support(phba);
5508 	if (rc)
5509 		goto ras_job_error;
5510 
5511 	if (job->request_len <
5512 	    sizeof(struct fc_bsg_request) +
5513 	    sizeof(struct lpfc_bsg_ras_req)) {
5514 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5515 				"6183 Received RAS_LOG request "
5516 				"below minimum size\n");
5517 		rc = -EINVAL;
5518 		goto ras_job_error;
5519 	}
5520 
5521 	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5522 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5523 
5524 	if (!ras_fwlog->lwpd.virt) {
5525 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5526 				"6193 Restart FW Logging\n");
5527 		rc = -EINVAL;
5528 		goto ras_job_error;
5529 	}
5530 
5531 	/* Get lwpd offset */
5532 	lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5533 	ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5534 
5535 	/* Get wrap count */
5536 	ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5537 
5538 ras_job_error:
5539 	/* make error code available to userspace */
5540 	bsg_reply->result = rc;
5541 
5542 	/* complete the job back to userspace */
5543 	if (!rc)
5544 		bsg_job_done(job, bsg_reply->result,
5545 			     bsg_reply->reply_payload_rcv_len);
5546 
5547 	return rc;
5548 }
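
/*
 * Illustrative sketch (not part of the driver): how a consumer of this
 * reply might derive a read position in the circular firmware log.
 * The variable names and wrap handling are hypothetical; only the
 * offset/wrap_count fields come from struct lpfc_bsg_get_ras_lwpd.
 *
 *	uint32_t next_write = ras_reply->offset;     // next write offset
 *	uint32_t wraps = ras_reply->wrap_count;      // completed wraps
 *
 *	// If the log has wrapped, the oldest data starts just past the
 *	// write offset; otherwise it starts at the beginning.
 *	uint32_t oldest = wraps ? next_write : 0;
 */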
5549 
5550 /**
5551  * lpfc_bsg_get_ras_fwlog - read FW log
5552  * @job: fc_bsg_job to handle
5553  *
5554  * Copy the FW log into the passed buffer.
5555  **/
5556 static int
5557 lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5558 {
5559 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
5560 	struct lpfc_vport *vport = shost_priv(shost);
5561 	struct lpfc_hba *phba = vport->phba;
5562 	struct fc_bsg_request *bsg_request = job->request;
5563 	struct fc_bsg_reply *bsg_reply = job->reply;
5564 	struct lpfc_bsg_get_fwlog_req *ras_req;
5565 	u32 rd_offset, rd_index, offset;
5566 	void *src, *fwlog_buff;
5567 	struct lpfc_ras_fwlog *ras_fwlog = NULL;
5568 	struct lpfc_dmabuf *dmabuf, *next;
5569 	int rc = 0;
5570 
5571 	ras_fwlog = &phba->ras_fwlog;
5572 
5573 	rc = lpfc_check_fwlog_support(phba);
5574 	if (rc)
5575 		goto ras_job_error;
5576 
5577 	/* Logging must be stopped before reading */
5578 	spin_lock_irq(&phba->hbalock);
5579 	if (ras_fwlog->state == ACTIVE) {
5580 		spin_unlock_irq(&phba->hbalock);
5581 		rc = -EINPROGRESS;
5582 		goto ras_job_error;
5583 	}
5584 	spin_unlock_irq(&phba->hbalock);
5585 
5586 	if (job->request_len <
5587 	    sizeof(struct fc_bsg_request) +
5588 	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
5589 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5590 				"6184 Received RAS_LOG request "
5591 				"below minimum size\n");
5592 		rc = -EINVAL;
5593 		goto ras_job_error;
5594 	}
5595 
5596 	ras_req = (struct lpfc_bsg_get_fwlog_req *)
5597 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5598 	rd_offset = ras_req->read_offset;
5599 
5600 	/* Allocate memory to read fw log */
5601 	fwlog_buff = vmalloc(ras_req->read_size);
5602 	if (!fwlog_buff) {
5603 		rc = -ENOMEM;
5604 		goto ras_job_error;
5605 	}
5606 
5607 	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5608 	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5609 
5610 	list_for_each_entry_safe(dmabuf, next,
5611 			      &ras_fwlog->fwlog_buff_list, list) {
5612 
5613 		if (dmabuf->buffer_tag < rd_index)
5614 			continue;
5615 
5616 		src = dmabuf->virt + offset;
5617 		memcpy(fwlog_buff, src, ras_req->read_size);
5618 		break;
5619 	}
5620 
5621 	bsg_reply->reply_payload_rcv_len =
5622 		sg_copy_from_buffer(job->reply_payload.sg_list,
5623 				    job->reply_payload.sg_cnt,
5624 				    fwlog_buff, ras_req->read_size);
5625 
5626 	vfree(fwlog_buff);
5627 
5628 ras_job_error:
5629 	bsg_reply->result = rc;
5630 	if (!rc)
5631 		bsg_job_done(job, bsg_reply->result,
5632 			     bsg_reply->reply_payload_rcv_len);
5633 
5634 	return rc;
5635 }
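
/*
 * Illustrative sketch (not part of the driver): the order in which a
 * userspace tool might drive the RAS vendor commands to read the
 * firmware log.  The helper names are hypothetical wrappers around the
 * bsg ioctl; only the command ordering follows from the handlers above.
 *
 *	// 1. LPFC_BSG_VENDOR_RAS_GET_CONFIG: current state and log level
 *	get_ras_config(fd, &cfg);
 *	if (cfg.state == LPFC_RASLOG_STATE_RUNNING)
 *		stop_fw_logging(fd);	// reads require logging stopped
 *
 *	// 2. LPFC_BSG_VENDOR_RAS_GET_LWPD: write offset and wrap count
 *	get_ras_lwpd(fd, &lwpd);
 *
 *	// 3. LPFC_BSG_VENDOR_RAS_GET_FWLOG: read a chunk; read_offset
 *	//    selects the dmabuf entry, read_size the amount copied
 *	get_ras_fwlog(fd, buf, read_size, read_offset);
 */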
5636 
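/**
 * lpfc_get_trunk_info - report trunking configuration and state
 * @job: fc_bsg_job to handle
 *
 * Report which of the four possible trunk links are configured and
 * active, along with the physical and logical link speeds.
 **/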
5637 static int
5638 lpfc_get_trunk_info(struct bsg_job *job)
5639 {
5640 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5641 	struct lpfc_hba *phba = vport->phba;
5642 	struct fc_bsg_reply *bsg_reply = job->reply;
5643 	struct lpfc_trunk_info *event_reply;
5644 	int rc = 0;
5645 
5646 	if (job->request_len <
5647 	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5648 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5649 				"2744 Received GET TRUNK_INFO request below "
5650 				"minimum size\n");
5651 		rc = -EINVAL;
5652 		goto job_error;
5653 	}
5654 
5655 	event_reply = (struct lpfc_trunk_info *)
5656 		bsg_reply->reply_data.vendor_reply.vendor_rsp;
5657 
5658 	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5659 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5660 				"2728 Received GET TRUNK_INFO reply below "
5661 				"minimum size\n");
5662 		rc = -EINVAL;
5663 		goto job_error;
5664 	}
5665 	if (event_reply == NULL) {
5666 		rc = -EINVAL;
5667 		goto job_error;
5668 	}
5669 
5670 	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5671 		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5672 
5673 	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5674 		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5675 
5676 	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5677 		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5678 
5679 	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5680 		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5681 
5682 	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5683 		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5684 
5685 	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5686 		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5687 
5688 	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5689 		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5690 
5691 	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5692 		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5693 
5694 	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5695 		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5696 
5697 	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5698 	event_reply->logical_speed =
5699 				phba->sli4_hba.link_state.logical_speed / 1000;
5700 job_error:
5701 	bsg_reply->result = rc;
5702 	if (!rc)
5703 		bsg_job_done(job, bsg_reply->result,
5704 			     bsg_reply->reply_payload_rcv_len);
5705 	return rc;
5706 
5707 }
5708 
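/**
 * lpfc_get_cgnbuf_info - read or reset the congestion info buffer
 * @job: fc_bsg_job to handle
 *
 * Copy the congestion management information buffer, minus its
 * trailing CRC word, back to the requester, or reset the congestion
 * statistics when LPFC_BSG_CGN_RESET_STAT is requested.
 **/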
5709 static int
5710 lpfc_get_cgnbuf_info(struct bsg_job *job)
5711 {
5712 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5713 	struct lpfc_hba *phba = vport->phba;
5714 	struct fc_bsg_request *bsg_request = job->request;
5715 	struct fc_bsg_reply *bsg_reply = job->reply;
5716 	struct get_cgnbuf_info_req *cgnbuf_req;
5717 	struct lpfc_cgn_info *cp;
5718 	uint8_t *cgn_buff;
5719 	int size, cinfosz;
5720 	int  rc = 0;
5721 
5722 	if (job->request_len < sizeof(struct fc_bsg_request) +
5723 	    sizeof(struct get_cgnbuf_info_req)) {
5724 		rc = -ENOMEM;
5725 		goto job_exit;
5726 	}
5727 
5728 	if (!phba->sli4_hba.pc_sli4_params.cmf) {
5729 		rc = -ENOENT;
5730 		goto job_exit;
5731 	}
5732 
5733 	if (!phba->cgn_i || !phba->cgn_i->virt) {
5734 		rc = -ENOENT;
5735 		goto job_exit;
5736 	}
5737 
5738 	cp = phba->cgn_i->virt;
5739 	if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
5740 		rc = -EPERM;
5741 		goto job_exit;
5742 	}
5743 
5744 	cgnbuf_req = (struct get_cgnbuf_info_req *)
5745 		bsg_request->rqst_data.h_vendor.vendor_cmd;
5746 
5747 	/* For reset or size == 0 */
5748 	bsg_reply->reply_payload_rcv_len = 0;
5749 
5750 	if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
5751 		lpfc_init_congestion_stat(phba);
5752 		goto job_exit;
5753 	}
5754 
5755 	/* We don't want to include the CRC at the end */
5756 	cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
5757 
5758 	size = cgnbuf_req->read_size;
5759 	if (!size)
5760 		goto job_exit;
5761 
5762 	if (size < cinfosz) {
5763 		/* Just copy back what we can */
5764 		cinfosz = size;
5765 		rc = -E2BIG;
5766 	}
5767 
5768 	/* Allocate memory to read congestion info */
5769 	cgn_buff = vmalloc(cinfosz);
5770 	if (!cgn_buff) {
5771 		rc = -ENOMEM;
5772 		goto job_exit;
5773 	}
5774 
5775 	memcpy(cgn_buff, cp, cinfosz);
5776 
5777 	bsg_reply->reply_payload_rcv_len =
5778 		sg_copy_from_buffer(job->reply_payload.sg_list,
5779 				    job->reply_payload.sg_cnt,
5780 				    cgn_buff, cinfosz);
5781 
5782 	vfree(cgn_buff);
5783 
5784 job_exit:
5785 	bsg_reply->result = rc;
5786 	if (!rc)
5787 		bsg_job_done(job, bsg_reply->result,
5788 			     bsg_reply->reply_payload_rcv_len);
5789 	else
5790 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5791 				"2724 GET CGNBUF error: %d\n", rc);
5792 	return rc;
5793 }
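
/*
 * Illustrative sketch (not part of the driver): sizing the congestion
 * buffer read.  The handler above never copies the trailing CRC word,
 * so a caller wanting the whole structure can size its request
 * accordingly; the struct name comes from the driver headers.
 *
 *	uint32_t read_size = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
 *
 *	// A smaller read_size yields a truncated copy plus -E2BIG;
 *	// read_size == 0, or reset == LPFC_BSG_CGN_RESET_STAT, returns
 *	// no payload at all.
 */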
5794 
5795 /**
5796  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5797  * @job: fc_bsg_job to handle
5798  **/
5799 static int
5800 lpfc_bsg_hst_vendor(struct bsg_job *job)
5801 {
5802 	struct fc_bsg_request *bsg_request = job->request;
5803 	struct fc_bsg_reply *bsg_reply = job->reply;
5804 	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5805 	int rc;
5806 
5807 	switch (command) {
5808 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5809 		rc = lpfc_bsg_hba_set_event(job);
5810 		break;
5811 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5812 		rc = lpfc_bsg_hba_get_event(job);
5813 		break;
5814 	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5815 		rc = lpfc_bsg_send_mgmt_rsp(job);
5816 		break;
5817 	case LPFC_BSG_VENDOR_DIAG_MODE:
5818 		rc = lpfc_bsg_diag_loopback_mode(job);
5819 		break;
5820 	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5821 		rc = lpfc_sli4_bsg_diag_mode_end(job);
5822 		break;
5823 	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5824 		rc = lpfc_bsg_diag_loopback_run(job);
5825 		break;
5826 	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5827 		rc = lpfc_sli4_bsg_link_diag_test(job);
5828 		break;
5829 	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5830 		rc = lpfc_bsg_get_dfc_rev(job);
5831 		break;
5832 	case LPFC_BSG_VENDOR_MBOX:
5833 		rc = lpfc_bsg_mbox_cmd(job);
5834 		break;
5835 	case LPFC_BSG_VENDOR_MENLO_CMD:
5836 	case LPFC_BSG_VENDOR_MENLO_DATA:
5837 		rc = lpfc_menlo_cmd(job);
5838 		break;
5839 	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5840 		rc = lpfc_forced_link_speed(job);
5841 		break;
5842 	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5843 		rc = lpfc_bsg_get_ras_lwpd(job);
5844 		break;
5845 	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5846 		rc = lpfc_bsg_get_ras_fwlog(job);
5847 		break;
5848 	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5849 		rc = lpfc_bsg_get_ras_config(job);
5850 		break;
5851 	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5852 		rc = lpfc_bsg_set_ras_config(job);
5853 		break;
5854 	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5855 		rc = lpfc_get_trunk_info(job);
5856 		break;
5857 	case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
5858 		rc = lpfc_get_cgnbuf_info(job);
5859 		break;
5860 	default:
5861 		rc = -EINVAL;
5862 		bsg_reply->reply_payload_rcv_len = 0;
5863 		/* make error code available to userspace */
5864 		bsg_reply->result = rc;
5865 		break;
5866 	}
5867 
5868 	return rc;
5869 }
5870 
5871 /**
5872  * lpfc_bsg_request - handle a bsg request from the FC transport
5873  * @job: bsg_job to handle
5874  **/
5875 int
5876 lpfc_bsg_request(struct bsg_job *job)
5877 {
5878 	struct fc_bsg_request *bsg_request = job->request;
5879 	struct fc_bsg_reply *bsg_reply = job->reply;
5880 	uint32_t msgcode;
5881 	int rc;
5882 
5883 	msgcode = bsg_request->msgcode;
5884 	switch (msgcode) {
5885 	case FC_BSG_HST_VENDOR:
5886 		rc = lpfc_bsg_hst_vendor(job);
5887 		break;
5888 	case FC_BSG_RPT_ELS:
5889 		rc = lpfc_bsg_rport_els(job);
5890 		break;
5891 	case FC_BSG_RPT_CT:
5892 		rc = lpfc_bsg_send_mgmt_cmd(job);
5893 		break;
5894 	default:
5895 		rc = -EINVAL;
5896 		bsg_reply->reply_payload_rcv_len = 0;
5897 		/* make error code available to userspace */
5898 		bsg_reply->result = rc;
5899 		break;
5900 	}
5901 
5902 	return rc;
5903 }
5904 
5905 /**
5906  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5907  * @job: bsg_job that has timed out
5908  *
5909  * This function just aborts the job's IOCB.  The aborted IOCB completes
5910  * through its completion handler, which passes the error back to userspace.
5911  **/
5912 int
5913 lpfc_bsg_timeout(struct bsg_job *job)
5914 {
5915 	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5916 	struct lpfc_hba *phba = vport->phba;
5917 	struct lpfc_iocbq *cmdiocb;
5918 	struct lpfc_sli_ring *pring;
5919 	struct bsg_job_data *dd_data;
5920 	unsigned long flags;
5921 	int rc = 0;
5922 	LIST_HEAD(completions);
5923 	struct lpfc_iocbq *check_iocb, *next_iocb;
5924 
5925 	pring = lpfc_phba_elsring(phba);
5926 	if (unlikely(!pring))
5927 		return -EIO;
5928 
5929 	/* If the job's driver data is NULL, the command has completed or
5930 	 * is in the process of completing.  In this case return -EAGAIN so
5931 	 * the timeout is retried; this avoids double-completion issues, and
5932 	 * the request will be pulled off the timer queue when the command's
5933 	 * completion handler executes.  Otherwise, prevent the command's
5934 	 * completion handler from executing the job-done callback and
5935 	 * continue processing to abort the outstanding command.
5936 	 */
5937 
5938 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5939 	dd_data = (struct bsg_job_data *)job->dd_data;
5940 	if (dd_data) {
5941 		dd_data->set_job = NULL;
5942 		job->dd_data = NULL;
5943 	} else {
5944 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5945 		return -EAGAIN;
5946 	}
5947 
5948 	switch (dd_data->type) {
5949 	case TYPE_IOCB:
5950 		/* Check to see if the IOCB was issued to the port or not. If
5951 		 * not, remove it from the txq and cancel the iocbs.
5952 		 * Otherwise, issue an abort iotag.
5953 		 */
5954 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5955 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5956 
5957 		spin_lock_irqsave(&phba->hbalock, flags);
5958 		/* make sure the I/O abort window is still open */
5959 		if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
5960 			spin_unlock_irqrestore(&phba->hbalock, flags);
5961 			return -EAGAIN;
5962 		}
5963 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5964 					 list) {
5965 			if (check_iocb == cmdiocb) {
5966 				list_move_tail(&check_iocb->list, &completions);
5967 				break;
5968 			}
5969 		}
5970 		if (list_empty(&completions))
5971 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
5972 		spin_unlock_irqrestore(&phba->hbalock, flags);
5973 		if (!list_empty(&completions)) {
5974 			lpfc_sli_cancel_iocbs(phba, &completions,
5975 					      IOSTAT_LOCAL_REJECT,
5976 					      IOERR_SLI_ABORTED);
5977 		}
5978 		break;
5979 
5980 	case TYPE_EVT:
5981 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5982 		break;
5983 
5984 	case TYPE_MBOX:
5985 		/* Update the ext buf ctx state if needed */
5986 
5987 		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5988 			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5989 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5990 		break;
5991 	case TYPE_MENLO:
5992 		/* Check to see if the IOCB was issued to the port or not. If
5993 		 * not, remove it from the txq and cancel the iocbs.
5994 		 * Otherwise, issue an abort iotag.
5995 		 */
5996 		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5997 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5998 
5999 		spin_lock_irqsave(&phba->hbalock, flags);
6000 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
6001 					 list) {
6002 			if (check_iocb == cmdiocb) {
6003 				list_move_tail(&check_iocb->list, &completions);
6004 				break;
6005 			}
6006 		}
6007 		if (list_empty(&completions))
6008 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
6009 		spin_unlock_irqrestore(&phba->hbalock, flags);
6010 		if (!list_empty(&completions)) {
6011 			lpfc_sli_cancel_iocbs(phba, &completions,
6012 					      IOSTAT_LOCAL_REJECT,
6013 					      IOERR_SLI_ABORTED);
6014 		}
6015 		break;
6016 	default:
6017 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6018 		break;
6019 	}
6020 
6021 	/* The scsi_transport_fc fc_bsg_job_timeout handler expects a zero
6022 	 * return code; otherwise an error message is printed on the console,
6023 	 * so always return success (zero).
6024 	 */
6025 	return rc;
6026 }
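
/*
 * Illustrative note (not part of the driver): the timeout that lands a
 * job in this handler originates in userspace, e.g. the sg_io_v4
 * descriptor used with the bsg SG_IO ioctl carries it in milliseconds:
 *
 *	io.timeout = 30000;	// 30s before lpfc_bsg_timeout() fires
 */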
6027