xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_bsg.c (revision fd589a8f)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2009 Emulex.  All rights reserved.                *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *******************************************************************/
20 
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_host.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include <scsi/scsi_bsg_fc.h>
29 
30 #include "lpfc_hw4.h"
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_sli4.h"
34 #include "lpfc_nl.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_logmsg.h"
39 #include "lpfc_crtn.h"
40 #include "lpfc_vport.h"
41 #include "lpfc_version.h"
42 
/**
 * lpfc_bsg_rport_ct - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Translates the bsg pass-through request into a GEN_REQUEST64_CR IOCB:
 * the job's request and reply scatterlists are DMA-mapped and described
 * by a buffer pointer list (BPL) held in @bmp, then the IOCB is issued
 * synchronously on the ELS ring via lpfc_sli_issue_iocb_wait().
 *
 * The job is always completed here through job->job_done(); the outcome
 * is reported to userspace in job->reply->result.  Always returns 0.
 */
static int
lpfc_bsg_rport_ct(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* Hold a node reference for the lifetime of the command */
	if (!lpfc_nlp_get(ndlp)) {
		job->reply->result = -ENODEV;
		return 0;
	}

	/* Refuse the pass-through while an ELS command is outstanding
	 * on this node. */
	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp_exit;
	}

	/* iocbq allocation and release are done under the host lock */
	spin_lock_irq(shost->host_lock);
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		spin_unlock_irq(shost->host_lock);
		goto free_ndlp_exit;
	}
	cmd = &cmdiocbq->iocb;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	spin_unlock_irq(shost->host_lock);

	rsp = &rspiocbq->iocb;

	/* Descriptor for the BPL; kmalloc'ed outside the lock since it
	 * may sleep (GFP_KERNEL). */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		/* the free_rspiocbq path expects the lock to be held */
		spin_lock_irq(shost->host_lock);
		goto free_rspiocbq;
	}

	spin_lock_irq(shost->host_lock);
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	spin_unlock_irq(shost->host_lock);

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;

	/* Build one 64-bit BDE per mapped request segment, little-endian
	 * as required by the adapter. */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* Reply segments follow directly in the same BPL, flagged as
	 * input (BUFF_TYPE_BDE_64I). */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* Fill in the GEN_REQUEST64 IOCB: point it at the BPL and set the
	 * CT (common transport) framing fields. */
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;

	timeout = phba->fc_ratov * 2;
	/* expose the iocb to lpfc_bsg_timeout() for abort handling */
	job->dd_data = cmdiocbq;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
					timeout + LPFC_DRVR_TIMEOUT);

	/* Only unmap the payloads when the command is no longer in
	 * flight; on a timeout the adapter may still DMA into them. */
	if (rc != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (rc == IOCB_TIMEDOUT) {
		/* NOTE(review): only rspiocbq is released here; cmdiocbq,
		 * bmp and its mbuf are left for the delayed completion
		 * path -- TODO confirm that path actually reclaims them. */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		rc = -EACCES;
		goto free_ndlp_exit;
	}

	if (rc != IOCB_SUCCESS) {
		rc = -EACCES;
		goto free_outdmp;
	}

	/* Map adapter status words to errno values for userspace */
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
			goto free_outdmp;
		}
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	/* Fall-through cleanup: each label below releases one more
	 * resource; the host lock is held from free_outdmp until after
	 * free_cmdiocbq. */
free_outdmp:
	spin_lock_irq(shost->host_lock);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
free_bmp:
	kfree(bmp);
free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	spin_unlock_irq(shost->host_lock);
free_ndlp_exit:
	lpfc_nlp_put(ndlp);

	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
226 
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Uses lpfc_prep_els_iocb() to build an ELS IOCB, then replaces the
 * driver-allocated command/response buffers with BDEs describing the
 * job's own DMA-mapped request and reply scatterlists, and issues the
 * IOCB synchronously on the ELS ring.  An LS_RJT response is reported
 * to userspace through the ctels_reply rejection fields rather than as
 * an errno.
 *
 * On the normal paths the job is completed here via job->job_done()
 * and 0 is returned.
 */
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int iocb_status;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* Hold a node reference until the iocb completes */
	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto out;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto out;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);

	if (!cmdiocbq) {
		/* NOTE(review): this path returns without lpfc_nlp_put()
		 * and without completing the job via job_done() -- looks
		 * like a node reference leak and a stalled bsg job; TODO
		 * confirm whether lpfc_prep_els_iocb consumes the ref on
		 * failure. */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		return -EIO;
	}

	/* expose the iocb to lpfc_bsg_timeout() for abort handling */
	job->dd_data = cmdiocbq;
	/* Discard the command/response buffers lpfc_prep_els_iocb
	 * allocated; the payload comes from the job's scatterlists
	 * instead. */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;

	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	/* Reuse the BPL that lpfc_prep_els_iocb hung off context3 */
	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	/* One little-endian 64-bit BDE per mapped request segment */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);

	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* Reply segments follow in the same BPL, flagged as input */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					rspiocbq, (phba->fc_ratov * 2)
					       + LPFC_DRVR_TIMEOUT);

	/* release the new ndlp once the iocb completes */
	lpfc_nlp_put(ndlp);
	/* Only unmap when the command is no longer in flight; on a
	 * timeout the adapter may still DMA into the buffers. */
	if (iocb_status != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (iocb_status == IOCB_SUCCESS) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			job->reply->reply_payload_rcv_len =
				rsp->un.elsreq64.bdl.bdeSize;
			rc = 0;
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			struct fc_bsg_ctels_reply *els_reply;
			/* LS_RJT data returned in word 4 */
			uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];

			/* Report the rejection through the ctels_reply
			 * fields; rc stays 0 so the overall result is
			 * "success" and the caller inspects rjt_data.
			 * (result is set again from rc at out:) */
			els_reply = &job->reply->reply_data.ctels_reply;
			job->reply->result = 0;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[0];
			els_reply->rjt_data.reason_code = rjt_data[1];
			els_reply->rjt_data.reason_explanation = rjt_data[2];
			els_reply->rjt_data.vendor_unique = rjt_data[3];
		} else
			rc = -EIO;
	} else
		rc = -EIO;

	/* On timeout the delayed completion owns cmdiocbq, so only free
	 * it here when the iocb actually finished. */
	if (iocb_status != IOCB_TIMEDOUT)
		lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_sli_release_iocbq(phba, rspiocbq);

out:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
380 
/*
 * One registered waiter for unsolicited CT events.  Instances are kept
 * on phba->ct_ev_waiters and manipulated under phba->ct_event_mutex.
 */
struct lpfc_ct_event {
	struct list_head node;		/* link on phba->ct_ev_waiters */
	int ref;			/* refcount; the struct is freed when
					 * lpfc_ct_event_unref drops it below
					 * zero */
	wait_queue_head_t wq;		/* woken when a new event is queued */

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;		/* CT FsType this waiter matches */
	uint32_t reg_id;		/* registration id chosen by caller */

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;	/* jiffies of last activity */
	int waiting;			/* nonzero while a SET_EVENT sleeps */

	/* seen and not seen events */
	struct list_head events_to_get;	/* woken-for events, awaiting GET_EVENT */
	struct list_head events_to_see;	/* received events, not yet claimed */
};
399 
/* A single received CT event, queued on an lpfc_ct_event list */
struct event_data {
	struct list_head node;	/* link on events_to_see / events_to_get */
	uint32_t type;		/* set to FC_REG_CT_EVENT on receipt */
	uint32_t immed_dat;	/* ulpContext (SLI-4: index into ct_ctx) */
	void *data;		/* kzalloc'ed copy of the CT payload */
	uint32_t len;		/* length of @data in bytes */
};
407 
408 static struct lpfc_ct_event *
409 lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
410 {
411 	struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
412 	if (!evt)
413 		return NULL;
414 
415 	INIT_LIST_HEAD(&evt->events_to_get);
416 	INIT_LIST_HEAD(&evt->events_to_see);
417 	evt->req_id = ev_req_id;
418 	evt->reg_id = ev_reg_id;
419 	evt->wait_time_stamp = jiffies;
420 	init_waitqueue_head(&evt->wq);
421 
422 	return evt;
423 }
424 
425 static void
426 lpfc_ct_event_free(struct lpfc_ct_event *evt)
427 {
428 	struct event_data *ed;
429 
430 	list_del(&evt->node);
431 
432 	while (!list_empty(&evt->events_to_get)) {
433 		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
434 		list_del(&ed->node);
435 		kfree(ed->data);
436 		kfree(ed);
437 	}
438 
439 	while (!list_empty(&evt->events_to_see)) {
440 		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
441 		list_del(&ed->node);
442 		kfree(ed->data);
443 		kfree(ed);
444 	}
445 
446 	kfree(evt);
447 }
448 
449 static inline void
450 lpfc_ct_event_ref(struct lpfc_ct_event *evt)
451 {
452 	evt->ref++;
453 }
454 
455 static inline void
456 lpfc_ct_event_unref(struct lpfc_ct_event *evt)
457 {
458 	if (--evt->ref < 0)
459 		lpfc_ct_event_free(evt);
460 }
461 
/* CT FsType used by the Emulex loopback diagnostic frames (compared
 * against ct_req->FsType in lpfc_bsg_ct_unsol_event below). */
#define SLI_CT_ELX_LOOPBACK 0x10

/* Command codes carried in loopback CT frames (CmdRsp field) */
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
468 
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: HBA that received the command
 * @pring: SLI ring the command arrived on
 * @piocbq: unsolicited iocb, possibly the head of a chained list
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events:
 * for each waiter whose req_id matches the frame's FsType, the complete
 * payload (possibly spread over several iocbs/buffers) is copied into a
 * freshly allocated event_data, queued on the waiter's events_to_see
 * list and the waiter is woken.
 */
void
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;

	/* Temporarily splice ourselves into piocbq's chain so the whole
	 * sequence can be walked with list_for_each_entry(&head). */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	/* Locate the first receive buffer: in HBQ mode it rides along in
	 * context2, otherwise it must be looked up by DMA address. */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}

	/* NOTE(review): dmabuf is dereferenced without a NULL check; if
	 * lpfc_sli_ringpostbuf_get() fails this oopses -- TODO confirm
	 * the lookup cannot fail here. */
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->req_id != evt_req_id)
			continue;

		/* Pin the waiter while we work on it unlocked below */
		lpfc_ct_event_ref(evt);

		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (!evt_dat) {
			lpfc_ct_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		mutex_unlock(&phba->ct_event_mutex);

		/* Total payload length: HBQ mode accumulates it in the
		 * last iocb, otherwise sum the individual BDE sizes. */
		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (!evt_dat->data) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			mutex_lock(&phba->ct_event_mutex);
			lpfc_ct_event_unref(evt);
			mutex_unlock(&phba->ct_event_mutex);
			goto error_ct_unsol_exit;
		}

		/* Walk every iocb in the sequence and copy each receive
		 * buffer into evt_dat->data at the running offset. */
		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				int size = 0;
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					/* HBQ mode: at most two buffers per
					 * iocb, sizes come from embedded HBQ
					 * entries; clamp to the total. */
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					/* Non-HBQ: look each buffer up by
					 * its DMA address. */
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					mutex_lock(&phba->ct_event_mutex);
					lpfc_ct_event_unref(evt);
					mutex_unlock(&phba->ct_event_mutex);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				/* Recycle the receive buffer: repost it to
				 * the ring, except loopback frames in HBQ
				 * mode which are freed outright. */
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_XRI_SETUP:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						else
							lpfc_in_buf_free(phba,
									dmabuf);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		mutex_lock(&phba->ct_event_mutex);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI-4: remember oxid/SID in the next ct_ctx slot
			 * and hand the slot index to userspace instead of
			 * the raw context.
			 * NOTE(review): the modulo assumes ct_ctx has 64
			 * entries -- TODO confirm against lpfc.h. */
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		wake_up_interruptible(&evt->wq);
		lpfc_ct_event_unref(evt);
		/* loopback frames are delivered to a single waiter only */
		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
			break;
	}
	mutex_unlock(&phba->ct_event_mutex);

error_ct_unsol_exit:
	/* unsplice ourselves from piocbq's chain */
	if (!list_empty(&head))
		list_del(&head);

	return;
}
666 
/**
 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 *
 * Registers (or re-uses) a CT event waiter for the caller's reg_id and
 * sleeps until lpfc_bsg_ct_unsol_event() queues an event on its
 * events_to_see list, then moves the oldest such event to events_to_get
 * for a subsequent GET_EVENT to collect.
 *
 * The job is completed here via job->job_done() (the command carries no
 * reply payload) and 0 is returned, with the outcome in
 * job->reply->result.
 */
static int
lpfc_bsg_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_ct_event *evt;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		return -EINVAL;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	/* Look for an existing waiter with this registration id and take
	 * a reference on it. */
	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	mutex_unlock(&phba->ct_event_mutex);

	/* If the loop ran to completion, evt points at the list head
	 * sentinel: no waiter exists yet, create one. */
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_ct_event_new(event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			return -ENOMEM;
		}

		mutex_lock(&phba->ct_event_mutex);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_ct_event_ref(evt);
		mutex_unlock(&phba->ct_event_mutex);
	}

	evt->waiting = 1;
	if (wait_event_interruptible(evt->wq,
				     !list_empty(&evt->events_to_see))) {
		/* Interrupted by a signal: drop both the reference taken
		 * above and the registration reference, which tears the
		 * waiter down. */
		mutex_lock(&phba->ct_event_mutex);
		lpfc_ct_event_unref(evt); /* release ref */
		lpfc_ct_event_unref(evt); /* delete */
		mutex_unlock(&phba->ct_event_mutex);
		rc = -EINTR;
		goto set_event_out;
	}

	evt->wait_time_stamp = jiffies;
	evt->waiting = 0;

	/* Events are list_add'ed at the head, so .prev is the oldest;
	 * promote it to the events_to_get list for GET_EVENT. */
	mutex_lock(&phba->ct_event_mutex);
	list_move(evt->events_to_see.prev, &evt->events_to_get);
	lpfc_ct_event_unref(evt); /* release ref */
	mutex_unlock(&phba->ct_event_mutex);

set_event_out:
	/* set_event carries no reply payload */
	job->reply->reply_payload_rcv_len = 0;
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}
747 
748 /**
749  * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
750  * @job: GET_EVENT fc_bsg_job
751  */
752 static int
753 lpfc_bsg_get_event(struct fc_bsg_job *job)
754 {
755 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
756 	struct lpfc_hba *phba = vport->phba;
757 	struct get_ct_event *event_req;
758 	struct get_ct_event_reply *event_reply;
759 	struct lpfc_ct_event *evt;
760 	struct event_data *evt_dat = NULL;
761 	int rc = 0;
762 
763 	if (job->request_len <
764 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
765 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
766 				"2613 Received GET_CT_EVENT request below "
767 				"minimum size\n");
768 		return -EINVAL;
769 	}
770 
771 	event_req = (struct get_ct_event *)
772 		job->request->rqst_data.h_vendor.vendor_cmd;
773 
774 	event_reply = (struct get_ct_event_reply *)
775 		job->reply->reply_data.vendor_reply.vendor_rsp;
776 
777 	mutex_lock(&phba->ct_event_mutex);
778 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
779 		if (evt->reg_id == event_req->ev_reg_id) {
780 			if (list_empty(&evt->events_to_get))
781 				break;
782 			lpfc_ct_event_ref(evt);
783 			evt->wait_time_stamp = jiffies;
784 			evt_dat = list_entry(evt->events_to_get.prev,
785 					     struct event_data, node);
786 			list_del(&evt_dat->node);
787 			break;
788 		}
789 	}
790 	mutex_unlock(&phba->ct_event_mutex);
791 
792 	if (!evt_dat) {
793 		job->reply->reply_payload_rcv_len = 0;
794 		rc = -ENOENT;
795 		goto error_get_event_exit;
796 	}
797 
798 	if (evt_dat->len > job->reply_payload.payload_len) {
799 		evt_dat->len = job->reply_payload.payload_len;
800 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
801 					"2618 Truncated event data at %d "
802 					"bytes\n",
803 					job->reply_payload.payload_len);
804 	}
805 
806 	event_reply->immed_data = evt_dat->immed_dat;
807 
808 	if (evt_dat->len > 0)
809 		job->reply->reply_payload_rcv_len =
810 			sg_copy_from_buffer(job->reply_payload.sg_list,
811 					    job->reply_payload.sg_cnt,
812 					    evt_dat->data, evt_dat->len);
813 	else
814 		job->reply->reply_payload_rcv_len = 0;
815 	rc = 0;
816 
817 	if (evt_dat)
818 		kfree(evt_dat->data);
819 	kfree(evt_dat);
820 	mutex_lock(&phba->ct_event_mutex);
821 	lpfc_ct_event_unref(evt);
822 	mutex_unlock(&phba->ct_event_mutex);
823 
824 error_get_event_exit:
825 	/* make error code available to userspace */
826 	job->reply->result = rc;
827 	/* complete the job back to userspace */
828 	job->job_done(job);
829 
830 	return rc;
831 }
832 
833 /**
834  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
835  * @job: fc_bsg_job to handle
836  */
837 static int
838 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
839 {
840 	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
841 
842 	switch (command) {
843 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
844 		return lpfc_bsg_set_event(job);
845 		break;
846 
847 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
848 		return lpfc_bsg_get_event(job);
849 		break;
850 
851 	default:
852 		return -EINVAL;
853 	}
854 }
855 
856 /**
857  * lpfc_bsg_request - handle a bsg request from the FC transport
858  * @job: fc_bsg_job to handle
859  */
860 int
861 lpfc_bsg_request(struct fc_bsg_job *job)
862 {
863 	uint32_t msgcode;
864 	int rc = -EINVAL;
865 
866 	msgcode = job->request->msgcode;
867 
868 	switch (msgcode) {
869 	case FC_BSG_HST_VENDOR:
870 		rc = lpfc_bsg_hst_vendor(job);
871 		break;
872 	case FC_BSG_RPT_ELS:
873 		rc = lpfc_bsg_rport_els(job);
874 		break;
875 	case FC_BSG_RPT_CT:
876 		rc = lpfc_bsg_rport_ct(job);
877 		break;
878 	default:
879 		break;
880 	}
881 
882 	return rc;
883 }
884 
885 /**
886  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
887  * @job: fc_bsg_job that has timed out
888  *
889  * This function just aborts the job's IOCB.  The aborted IOCB will return to
890  * the waiting function which will handle passing the error back to userspace
891  */
892 int
893 lpfc_bsg_timeout(struct fc_bsg_job *job)
894 {
895 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
896 	struct lpfc_hba *phba = vport->phba;
897 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
898 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
899 
900 	if (cmdiocb)
901 		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
902 
903 	return 0;
904 }
905