1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2009-2010 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * * 8 * This program is free software; you can redistribute it and/or * 9 * modify it under the terms of version 2 of the GNU General * 10 * Public License as published by the Free Software Foundation. * 11 * This program is distributed in the hope that it will be useful. * 12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 * more details, a copy of which can be found in the file COPYING * 18 * included with this package. * 19 *******************************************************************/ 20 21 #include <linux/interrupt.h> 22 #include <linux/mempool.h> 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/delay.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_host.h> 29 #include <scsi/scsi_transport_fc.h> 30 #include <scsi/scsi_bsg_fc.h> 31 #include <scsi/fc/fc_fs.h> 32 33 #include "lpfc_hw4.h" 34 #include "lpfc_hw.h" 35 #include "lpfc_sli.h" 36 #include "lpfc_sli4.h" 37 #include "lpfc_nl.h" 38 #include "lpfc_bsg.h" 39 #include "lpfc_disc.h" 40 #include "lpfc_scsi.h" 41 #include "lpfc.h" 42 #include "lpfc_logmsg.h" 43 #include "lpfc_crtn.h" 44 #include "lpfc_vport.h" 45 #include "lpfc_version.h" 46 47 struct lpfc_bsg_event { 48 struct list_head node; 49 struct kref kref; 50 wait_queue_head_t wq; 51 52 /* Event type and waiter identifiers */ 53 uint32_t type_mask; 54 uint32_t req_id; 55 uint32_t reg_id; 56 57 /* next two flags are here for the auto-delete logic */ 58 unsigned long wait_time_stamp; 59 int waiting; 60 61 /* seen and not seen events */ 62 struct list_head events_to_get; 63 struct list_head events_to_see; 64 65 /* job waiting for this event to finish */ 66 struct fc_bsg_job *set_job; 67 }; 68 69 struct lpfc_bsg_iocb { 70 struct lpfc_iocbq *cmdiocbq; 71 struct lpfc_iocbq *rspiocbq; 72 struct lpfc_dmabuf *bmp; 73 struct lpfc_nodelist *ndlp; 74 75 /* job waiting for this iocb to finish */ 76 struct fc_bsg_job *set_job; 77 }; 78 79 struct lpfc_bsg_mbox { 80 LPFC_MBOXQ_t *pmboxq; 81 MAILBOX_t *mb; 82 struct lpfc_dmabuf *rxbmp; /* for BIU diags */ 83 struct lpfc_dmabufext *dmp; /* for BIU diags */ 84 uint8_t *ext; /* extended mailbox data */ 85 uint32_t mbOffset; /* from app */ 86 uint32_t inExtWLen; /* from app */ 87 uint32_t outExtWLen; /* from app */ 88 89 /* job waiting for this mbox command to finish */ 90 struct fc_bsg_job *set_job; 91 }; 92 93 #define MENLO_DID 0x0000FC0E 94 95 struct lpfc_bsg_menlo { 96 struct lpfc_iocbq *cmdiocbq; 97 struct lpfc_iocbq *rspiocbq; 98 struct lpfc_dmabuf *bmp; 99 100 /* job waiting for this iocb to finish */ 101 struct fc_bsg_job *set_job; 102 }; 103 104 #define TYPE_EVT 1 105 #define TYPE_IOCB 2 106 #define TYPE_MBOX 3 107 #define TYPE_MENLO 4 108 struct bsg_job_data { 109 uint32_t type; 110 union { 111 struct lpfc_bsg_event *evt; 112 struct lpfc_bsg_iocb iocb; 113 struct lpfc_bsg_mbox mbox; 114 struct lpfc_bsg_menlo menlo; 115 } context_un; 116 }; 117 118 struct event_data { 119 struct list_head node; 120 uint32_t type; 121 uint32_t 
immed_dat; 122 void *data; 123 uint32_t len; 124 }; 125 126 #define BUF_SZ_4K 4096 127 #define SLI_CT_ELX_LOOPBACK 0x10 128 129 enum ELX_LOOPBACK_CMD { 130 ELX_LOOPBACK_XRI_SETUP, 131 ELX_LOOPBACK_DATA, 132 }; 133 134 #define ELX_LOOPBACK_HEADER_SZ \ 135 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un) 136 137 struct lpfc_dmabufext { 138 struct lpfc_dmabuf dma; 139 uint32_t size; 140 uint32_t flag; 141 }; 142 143 /** 144 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler 145 * @phba: Pointer to HBA context object. 146 * @cmdiocbq: Pointer to command iocb. 147 * @rspiocbq: Pointer to response iocb. 148 * 149 * This function is the completion handler for iocbs issued using 150 * lpfc_bsg_send_mgmt_cmd function. This function is called by the 151 * ring event handler function without any lock held. This function 152 * can be called from both worker thread context and interrupt 153 * context. This function also can be called from another thread which 154 * cleans up the SLI layer objects. 155 * This function copies the contents of the response iocb to the 156 * response iocb memory object provided by the caller of 157 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 158 * sleeps for the iocb completion. 159 **/ 160 static void 161 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, 162 struct lpfc_iocbq *cmdiocbq, 163 struct lpfc_iocbq *rspiocbq) 164 { 165 struct bsg_job_data *dd_data; 166 struct fc_bsg_job *job; 167 IOCB_t *rsp; 168 struct lpfc_dmabuf *bmp; 169 struct lpfc_nodelist *ndlp; 170 struct lpfc_bsg_iocb *iocb; 171 unsigned long flags; 172 int rc = 0; 173 174 spin_lock_irqsave(&phba->ct_ev_lock, flags); 175 dd_data = cmdiocbq->context2; 176 if (!dd_data) { 177 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 178 lpfc_sli_release_iocbq(phba, cmdiocbq); 179 return; 180 } 181 182 iocb = &dd_data->context_un.iocb; 183 job = iocb->set_job; 184 job->dd_data = NULL; /* so timeout handler does not reply */ 185 186 bmp = iocb->bmp; 187 rsp = &rspiocbq->iocb; 188 ndlp = cmdiocbq->context1; 189 190 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 191 job->request_payload.sg_cnt, DMA_TO_DEVICE); 192 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 193 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 194 195 if (rsp->ulpStatus) { 196 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 197 switch (rsp->un.ulpWord[4] & 0xff) { 198 case IOERR_SEQUENCE_TIMEOUT: 199 rc = -ETIMEDOUT; 200 break; 201 case IOERR_INVALID_RPI: 202 rc = -EFAULT; 203 break; 204 default: 205 rc = -EACCES; 206 break; 207 } 208 } else 209 rc = -EACCES; 210 } else 211 job->reply->reply_payload_rcv_len = 212 rsp->un.genreq64.bdl.bdeSize; 213 214 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 215 lpfc_sli_release_iocbq(phba, cmdiocbq); 216 lpfc_nlp_put(ndlp); 217 kfree(bmp); 218 kfree(dd_data); 219 /* make error code available to userspace */ 220 job->reply->result = rc; 221 /* complete the job back to userspace */ 222 job->job_done(job); 223 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 224 return; 225 } 226 227 /** 228 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request 229 * @job: fc_bsg_job to handle 230 **/ 231 static int 232 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) 233 { 234 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 235 struct lpfc_hba *phba = vport->phba; 236 struct lpfc_rport_data *rdata = job->rport->dd_data; 237 struct lpfc_nodelist *ndlp = rdata->pnode; 238 struct ulp_bde64 *bpl = NULL; 239 uint32_t timeout; 240 struct lpfc_iocbq 
*cmdiocbq = NULL; 241 IOCB_t *cmd; 242 struct lpfc_dmabuf *bmp = NULL; 243 int request_nseg; 244 int reply_nseg; 245 struct scatterlist *sgel = NULL; 246 int numbde; 247 dma_addr_t busaddr; 248 struct bsg_job_data *dd_data; 249 uint32_t creg_val; 250 int rc = 0; 251 int iocb_stat; 252 253 /* in case no data is transferred */ 254 job->reply->reply_payload_rcv_len = 0; 255 256 /* allocate our bsg tracking structure */ 257 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 258 if (!dd_data) { 259 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 260 "2733 Failed allocation of dd_data\n"); 261 rc = -ENOMEM; 262 goto no_dd_data; 263 } 264 265 if (!lpfc_nlp_get(ndlp)) { 266 rc = -ENODEV; 267 goto no_ndlp; 268 } 269 270 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 271 if (!bmp) { 272 rc = -ENOMEM; 273 goto free_ndlp; 274 } 275 276 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { 277 rc = -ENODEV; 278 goto free_bmp; 279 } 280 281 cmdiocbq = lpfc_sli_get_iocbq(phba); 282 if (!cmdiocbq) { 283 rc = -ENOMEM; 284 goto free_bmp; 285 } 286 287 cmd = &cmdiocbq->iocb; 288 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 289 if (!bmp->virt) { 290 rc = -ENOMEM; 291 goto free_cmdiocbq; 292 } 293 294 INIT_LIST_HEAD(&bmp->list); 295 bpl = (struct ulp_bde64 *) bmp->virt; 296 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 297 job->request_payload.sg_cnt, DMA_TO_DEVICE); 298 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 299 busaddr = sg_dma_address(sgel); 300 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 301 bpl->tus.f.bdeSize = sg_dma_len(sgel); 302 bpl->tus.w = cpu_to_le32(bpl->tus.w); 303 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 304 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 305 bpl++; 306 } 307 308 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 309 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 310 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 311 busaddr = sg_dma_address(sgel); 312 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 313 bpl->tus.f.bdeSize = sg_dma_len(sgel); 314 bpl->tus.w = cpu_to_le32(bpl->tus.w); 315 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 316 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 317 bpl++; 318 } 319 320 cmd->un.genreq64.bdl.ulpIoTag32 = 0; 321 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 322 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 323 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 324 cmd->un.genreq64.bdl.bdeSize = 325 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 326 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 327 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 328 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 329 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 330 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT; 331 cmd->ulpBdeCount = 1; 332 cmd->ulpLe = 1; 333 cmd->ulpClass = CLASS3; 334 cmd->ulpContext = ndlp->nlp_rpi; 335 cmd->ulpOwner = OWN_CHIP; 336 cmdiocbq->vport = phba->pport; 337 cmdiocbq->context3 = bmp; 338 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 339 timeout = phba->fc_ratov * 2; 340 cmd->ulpTimeout = timeout; 341 342 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; 343 cmdiocbq->context1 = ndlp; 344 cmdiocbq->context2 = dd_data; 345 dd_data->type = TYPE_IOCB; 346 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 347 dd_data->context_un.iocb.set_job = job; 348 dd_data->context_un.iocb.bmp = bmp; 349 350 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 351 creg_val = readl(phba->HCregaddr); 352 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 
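/* The FCP ring interrupt is masked while the driver polls that ring; re-enable it in the HC register before issuing this iocb. */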
353 writel(creg_val, phba->HCregaddr); 354 readl(phba->HCregaddr); /* flush */ 355 } 356 357 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 358 if (iocb_stat == IOCB_SUCCESS) 359 return 0; /* done for now */ 360 else if (iocb_stat == IOCB_BUSY) 361 rc = -EAGAIN; 362 else 363 rc = -EIO; 364 365 366 /* iocb failed so cleanup */ 367 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 368 job->request_payload.sg_cnt, DMA_TO_DEVICE); 369 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 370 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 371 372 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 373 374 free_cmdiocbq: 375 lpfc_sli_release_iocbq(phba, cmdiocbq); 376 free_bmp: 377 kfree(bmp); 378 free_ndlp: 379 lpfc_nlp_put(ndlp); 380 no_ndlp: 381 kfree(dd_data); 382 no_dd_data: 383 /* make error code available to userspace */ 384 job->reply->result = rc; 385 job->dd_data = NULL; 386 return rc; 387 } 388 389 /** 390 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler 391 * @phba: Pointer to HBA context object. 392 * @cmdiocbq: Pointer to command iocb. 393 * @rspiocbq: Pointer to response iocb. 394 * 395 * This function is the completion handler for iocbs issued using 396 * lpfc_bsg_rport_els_cmp function. This function is called by the 397 * ring event handler function without any lock held. This function 398 * can be called from both worker thread context and interrupt 399 * context. This function also can be called from other thread which 400 * cleans up the SLI layer objects. 401 * This function copies the contents of the response iocb to the 402 * response iocb memory object provided by the caller of 403 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 404 * sleeps for the iocb completion. 405 **/ 406 static void 407 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, 408 struct lpfc_iocbq *cmdiocbq, 409 struct lpfc_iocbq *rspiocbq) 410 { 411 struct bsg_job_data *dd_data; 412 struct fc_bsg_job *job; 413 IOCB_t *rsp; 414 struct lpfc_nodelist *ndlp; 415 struct lpfc_dmabuf *pbuflist = NULL; 416 struct fc_bsg_ctels_reply *els_reply; 417 uint8_t *rjt_data; 418 unsigned long flags; 419 int rc = 0; 420 421 spin_lock_irqsave(&phba->ct_ev_lock, flags); 422 dd_data = cmdiocbq->context1; 423 /* normal completion and timeout crossed paths, already done */ 424 if (!dd_data) { 425 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 426 return; 427 } 428 429 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 430 if (cmdiocbq->context2 && rspiocbq) 431 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 432 &rspiocbq->iocb, sizeof(IOCB_t)); 433 434 job = dd_data->context_un.iocb.set_job; 435 cmdiocbq = dd_data->context_un.iocb.cmdiocbq; 436 rspiocbq = dd_data->context_un.iocb.rspiocbq; 437 rsp = &rspiocbq->iocb; 438 ndlp = dd_data->context_un.iocb.ndlp; 439 440 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 441 job->request_payload.sg_cnt, DMA_TO_DEVICE); 442 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 443 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 444 445 if (job->reply->result == -EAGAIN) 446 rc = -EAGAIN; 447 else if (rsp->ulpStatus == IOSTAT_SUCCESS) 448 job->reply->reply_payload_rcv_len = 449 rsp->un.elsreq64.bdl.bdeSize; 450 else if (rsp->ulpStatus == IOSTAT_LS_RJT) { 451 job->reply->reply_payload_rcv_len = 452 sizeof(struct fc_bsg_ctels_reply); 453 /* LS_RJT data returned in word 4 */ 454 rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; 455 els_reply = &job->reply->reply_data.ctels_reply; 456 els_reply->status = FC_CTELS_STATUS_REJECT; 457 
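/* ulpWord[4] of the LS_RJT response carries the reject parameter; unpack its bytes into the ctels reply for the application. */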
els_reply->rjt_data.action = rjt_data[3]; 458 els_reply->rjt_data.reason_code = rjt_data[2]; 459 els_reply->rjt_data.reason_explanation = rjt_data[1]; 460 els_reply->rjt_data.vendor_unique = rjt_data[0]; 461 } else 462 rc = -EIO; 463 464 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; 465 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); 466 lpfc_sli_release_iocbq(phba, rspiocbq); 467 lpfc_sli_release_iocbq(phba, cmdiocbq); 468 lpfc_nlp_put(ndlp); 469 kfree(dd_data); 470 /* make error code available to userspace */ 471 job->reply->result = rc; 472 job->dd_data = NULL; 473 /* complete the job back to userspace */ 474 job->job_done(job); 475 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 476 return; 477 } 478 479 /** 480 * lpfc_bsg_rport_els - send an ELS command from a bsg request 481 * @job: fc_bsg_job to handle 482 **/ 483 static int 484 lpfc_bsg_rport_els(struct fc_bsg_job *job) 485 { 486 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 487 struct lpfc_hba *phba = vport->phba; 488 struct lpfc_rport_data *rdata = job->rport->dd_data; 489 struct lpfc_nodelist *ndlp = rdata->pnode; 490 uint32_t elscmd; 491 uint32_t cmdsize; 492 uint32_t rspsize; 493 struct lpfc_iocbq *rspiocbq; 494 struct lpfc_iocbq *cmdiocbq; 495 IOCB_t *rsp; 496 uint16_t rpi = 0; 497 struct lpfc_dmabuf *pcmd; 498 struct lpfc_dmabuf *prsp; 499 struct lpfc_dmabuf *pbuflist = NULL; 500 struct ulp_bde64 *bpl; 501 int request_nseg; 502 int reply_nseg; 503 struct scatterlist *sgel = NULL; 504 int numbde; 505 dma_addr_t busaddr; 506 struct bsg_job_data *dd_data; 507 uint32_t creg_val; 508 int rc = 0; 509 510 /* in case no data is transferred */ 511 job->reply->reply_payload_rcv_len = 0; 512 513 /* allocate our bsg tracking structure */ 514 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 515 if (!dd_data) { 516 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 517 "2735 Failed allocation of dd_data\n"); 518 rc = -ENOMEM; 519 goto no_dd_data; 520 } 521 522 if (!lpfc_nlp_get(ndlp)) { 523 rc = -ENODEV; 524 goto free_dd_data; 525 } 526 527 elscmd = job->request->rqst_data.r_els.els_code; 528 cmdsize = job->request_payload.payload_len; 529 rspsize = job->reply_payload.payload_len; 530 rspiocbq = lpfc_sli_get_iocbq(phba); 531 if (!rspiocbq) { 532 lpfc_nlp_put(ndlp); 533 rc = -ENOMEM; 534 goto free_dd_data; 535 } 536 537 rsp = &rspiocbq->iocb; 538 rpi = ndlp->nlp_rpi; 539 540 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, 541 ndlp->nlp_DID, elscmd); 542 if (!cmdiocbq) { 543 rc = -EIO; 544 goto free_rspiocbq; 545 } 546 547 /* prep els iocb set context1 to the ndlp, context2 to the command 548 * dmabuf, context3 holds the data dmabuf 549 */ 550 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; 551 prsp = (struct lpfc_dmabuf *) pcmd->list.next; 552 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 553 kfree(pcmd); 554 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 555 kfree(prsp); 556 cmdiocbq->context2 = NULL; 557 558 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; 559 bpl = (struct ulp_bde64 *) pbuflist->virt; 560 561 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 562 job->request_payload.sg_cnt, DMA_TO_DEVICE); 563 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 564 busaddr = sg_dma_address(sgel); 565 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 566 bpl->tus.f.bdeSize = sg_dma_len(sgel); 567 bpl->tus.w = cpu_to_le32(bpl->tus.w); 568 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 569 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 570 
bpl++; 571 } 572 573 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 574 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 575 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 576 busaddr = sg_dma_address(sgel); 577 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 578 bpl->tus.f.bdeSize = sg_dma_len(sgel); 579 bpl->tus.w = cpu_to_le32(bpl->tus.w); 580 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 581 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 582 bpl++; 583 } 584 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = 585 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 586 cmdiocbq->iocb.ulpContext = rpi; 587 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 588 cmdiocbq->context1 = NULL; 589 cmdiocbq->context2 = NULL; 590 591 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; 592 cmdiocbq->context1 = dd_data; 593 cmdiocbq->context2 = rspiocbq; 594 dd_data->type = TYPE_IOCB; 595 dd_data->context_un.iocb.cmdiocbq = cmdiocbq; 596 dd_data->context_un.iocb.rspiocbq = rspiocbq; 597 dd_data->context_un.iocb.set_job = job; 598 dd_data->context_un.iocb.bmp = NULL;; 599 dd_data->context_un.iocb.ndlp = ndlp; 600 601 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 602 creg_val = readl(phba->HCregaddr); 603 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 604 writel(creg_val, phba->HCregaddr); 605 readl(phba->HCregaddr); /* flush */ 606 } 607 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); 608 lpfc_nlp_put(ndlp); 609 if (rc == IOCB_SUCCESS) 610 return 0; /* done for now */ 611 else if (rc == IOCB_BUSY) 612 rc = -EAGAIN; 613 else 614 rc = -EIO; 615 616 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 617 job->request_payload.sg_cnt, DMA_TO_DEVICE); 618 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 619 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 620 621 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); 622 623 lpfc_sli_release_iocbq(phba, cmdiocbq); 624 625 free_rspiocbq: 626 lpfc_sli_release_iocbq(phba, rspiocbq); 627 628 free_dd_data: 629 kfree(dd_data); 630 631 no_dd_data: 632 /* make error code available to userspace */ 633 job->reply->result = rc; 634 job->dd_data = NULL; 635 return rc; 636 } 637 638 /** 639 * lpfc_bsg_event_free - frees an allocated event structure 640 * @kref: Pointer to a kref. 641 * 642 * Called from kref_put. Back cast the kref into an event structure address. 643 * Free any events to get, delete associated nodes, free any events to see, 644 * free any data then free the event itself. 645 **/ 646 static void 647 lpfc_bsg_event_free(struct kref *kref) 648 { 649 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event, 650 kref); 651 struct event_data *ed; 652 653 list_del(&evt->node); 654 655 while (!list_empty(&evt->events_to_get)) { 656 ed = list_entry(evt->events_to_get.next, typeof(*ed), node); 657 list_del(&ed->node); 658 kfree(ed->data); 659 kfree(ed); 660 } 661 662 while (!list_empty(&evt->events_to_see)) { 663 ed = list_entry(evt->events_to_see.next, typeof(*ed), node); 664 list_del(&ed->node); 665 kfree(ed->data); 666 kfree(ed); 667 } 668 669 kfree(evt); 670 } 671 672 /** 673 * lpfc_bsg_event_ref - increments the kref for an event 674 * @evt: Pointer to an event structure. 675 **/ 676 static inline void 677 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt) 678 { 679 kref_get(&evt->kref); 680 } 681 682 /** 683 * lpfc_bsg_event_unref - Uses kref_put to free an event structure 684 * @evt: Pointer to an event structure. 
685 **/ 686 static inline void 687 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt) 688 { 689 kref_put(&evt->kref, lpfc_bsg_event_free); 690 } 691 692 /** 693 * lpfc_bsg_event_new - allocate and initialize an event structure 694 * @ev_mask: Mask of events. 695 * @ev_reg_id: Event reg id. 696 * @ev_req_id: Event request id. 697 **/ 698 static struct lpfc_bsg_event * 699 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) 700 { 701 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL); 702 703 if (!evt) 704 return NULL; 705 706 INIT_LIST_HEAD(&evt->events_to_get); 707 INIT_LIST_HEAD(&evt->events_to_see); 708 evt->type_mask = ev_mask; 709 evt->req_id = ev_req_id; 710 evt->reg_id = ev_reg_id; 711 evt->wait_time_stamp = jiffies; 712 init_waitqueue_head(&evt->wq); 713 kref_init(&evt->kref); 714 return evt; 715 } 716 717 /** 718 * diag_cmd_data_free - Frees an lpfc dma buffer extension 719 * @phba: Pointer to HBA context object. 720 * @mlist: Pointer to an lpfc dma buffer extension. 721 **/ 722 static int 723 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist) 724 { 725 struct lpfc_dmabufext *mlast; 726 struct pci_dev *pcidev; 727 struct list_head head, *curr, *next; 728 729 if ((!mlist) || (!lpfc_is_link_up(phba) && 730 (phba->link_flag & LS_LOOPBACK_MODE))) { 731 return 0; 732 } 733 734 pcidev = phba->pcidev; 735 list_add_tail(&head, &mlist->dma.list); 736 737 list_for_each_safe(curr, next, &head) { 738 mlast = list_entry(curr, struct lpfc_dmabufext, dma.list); 739 if (mlast->dma.virt) 740 dma_free_coherent(&pcidev->dev, 741 mlast->size, 742 mlast->dma.virt, 743 mlast->dma.phys); 744 kfree(mlast); 745 } 746 return 0; 747 } 748 749 /** 750 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command 751 * @phba: Pointer to HBA context object. 752 * @pring: Pointer to the driver SLI ring object. 753 * @piocbq: Pointer to the unsolicited receive iocb. 754 * 755 * This function is called when an unsolicited CT command is received. It 756 * forwards the event to any processes registered to receive CT events.
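* The function returns 0 when the received command is an ELX loopback request and 1 otherwise.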
757 **/ 758 int 759 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 760 struct lpfc_iocbq *piocbq) 761 { 762 uint32_t evt_req_id = 0; 763 uint32_t cmd; 764 uint32_t len; 765 struct lpfc_dmabuf *dmabuf = NULL; 766 struct lpfc_bsg_event *evt; 767 struct event_data *evt_dat = NULL; 768 struct lpfc_iocbq *iocbq; 769 size_t offset = 0; 770 struct list_head head; 771 struct ulp_bde64 *bde; 772 dma_addr_t dma_addr; 773 int i; 774 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2; 775 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3; 776 struct lpfc_hbq_entry *hbqe; 777 struct lpfc_sli_ct_request *ct_req; 778 struct fc_bsg_job *job = NULL; 779 unsigned long flags; 780 int size = 0; 781 782 INIT_LIST_HEAD(&head); 783 list_add_tail(&head, &piocbq->list); 784 785 if (piocbq->iocb.ulpBdeCount == 0 || 786 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0) 787 goto error_ct_unsol_exit; 788 789 if (phba->link_state == LPFC_HBA_ERROR || 790 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) 791 goto error_ct_unsol_exit; 792 793 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 794 dmabuf = bdeBuf1; 795 else { 796 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh, 797 piocbq->iocb.un.cont64[0].addrLow); 798 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr); 799 } 800 if (dmabuf == NULL) 801 goto error_ct_unsol_exit; 802 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt; 803 evt_req_id = ct_req->FsType; 804 cmd = ct_req->CommandResponse.bits.CmdRsp; 805 len = ct_req->CommandResponse.bits.Size; 806 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 807 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf); 808 809 spin_lock_irqsave(&phba->ct_ev_lock, flags); 810 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 811 if (!(evt->type_mask & FC_REG_CT_EVENT) || 812 evt->req_id != evt_req_id) 813 continue; 814 815 lpfc_bsg_event_ref(evt); 816 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 817 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL); 818 if (evt_dat == NULL) { 819 spin_lock_irqsave(&phba->ct_ev_lock, flags); 820 lpfc_bsg_event_unref(evt); 821 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 822 "2614 Memory allocation failed for " 823 "CT event\n"); 824 break; 825 } 826 827 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 828 /* take accumulated byte count from the last iocbq */ 829 iocbq = list_entry(head.prev, typeof(*iocbq), list); 830 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len; 831 } else { 832 list_for_each_entry(iocbq, &head, list) { 833 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) 834 evt_dat->len += 835 iocbq->iocb.un.cont64[i].tus.f.bdeSize; 836 } 837 } 838 839 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL); 840 if (evt_dat->data == NULL) { 841 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 842 "2615 Memory allocation failed for " 843 "CT event data, size %d\n", 844 evt_dat->len); 845 kfree(evt_dat); 846 spin_lock_irqsave(&phba->ct_ev_lock, flags); 847 lpfc_bsg_event_unref(evt); 848 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 849 goto error_ct_unsol_exit; 850 } 851 852 list_for_each_entry(iocbq, &head, list) { 853 size = 0; 854 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 855 bdeBuf1 = iocbq->context2; 856 bdeBuf2 = iocbq->context3; 857 } 858 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) { 859 if (phba->sli3_options & 860 LPFC_SLI3_HBQ_ENABLED) { 861 if (i == 0) { 862 hbqe = (struct lpfc_hbq_entry *) 863 &iocbq->iocb.un.ulpWord[0]; 864 size = hbqe->bde.tus.f.bdeSize; 865 dmabuf = bdeBuf1; 866 } else if (i == 1) { 867 hbqe = (struct lpfc_hbq_entry *) 868 
&iocbq->iocb.unsli3. 869 sli3Words[4]; 870 size = hbqe->bde.tus.f.bdeSize; 871 dmabuf = bdeBuf2; 872 } 873 if ((offset + size) > evt_dat->len) 874 size = evt_dat->len - offset; 875 } else { 876 size = iocbq->iocb.un.cont64[i]. 877 tus.f.bdeSize; 878 bde = &iocbq->iocb.un.cont64[i]; 879 dma_addr = getPaddr(bde->addrHigh, 880 bde->addrLow); 881 dmabuf = lpfc_sli_ringpostbuf_get(phba, 882 pring, dma_addr); 883 } 884 if (!dmabuf) { 885 lpfc_printf_log(phba, KERN_ERR, 886 LOG_LIBDFC, "2616 No dmabuf " 887 "found for iocbq 0x%p\n", 888 iocbq); 889 kfree(evt_dat->data); 890 kfree(evt_dat); 891 spin_lock_irqsave(&phba->ct_ev_lock, 892 flags); 893 lpfc_bsg_event_unref(evt); 894 spin_unlock_irqrestore( 895 &phba->ct_ev_lock, flags); 896 goto error_ct_unsol_exit; 897 } 898 memcpy((char *)(evt_dat->data) + offset, 899 dmabuf->virt, size); 900 offset += size; 901 if (evt_req_id != SLI_CT_ELX_LOOPBACK && 902 !(phba->sli3_options & 903 LPFC_SLI3_HBQ_ENABLED)) { 904 lpfc_sli_ringpostbuf_put(phba, pring, 905 dmabuf); 906 } else { 907 switch (cmd) { 908 case ELX_LOOPBACK_DATA: 909 diag_cmd_data_free(phba, 910 (struct lpfc_dmabufext *) 911 dmabuf); 912 break; 913 case ELX_LOOPBACK_XRI_SETUP: 914 if ((phba->sli_rev == 915 LPFC_SLI_REV2) || 916 (phba->sli3_options & 917 LPFC_SLI3_HBQ_ENABLED 918 )) { 919 lpfc_in_buf_free(phba, 920 dmabuf); 921 } else { 922 lpfc_post_buffer(phba, 923 pring, 924 1); 925 } 926 break; 927 default: 928 if (!(phba->sli3_options & 929 LPFC_SLI3_HBQ_ENABLED)) 930 lpfc_post_buffer(phba, 931 pring, 932 1); 933 break; 934 } 935 } 936 } 937 } 938 939 spin_lock_irqsave(&phba->ct_ev_lock, flags); 940 if (phba->sli_rev == LPFC_SLI_REV4) { 941 evt_dat->immed_dat = phba->ctx_idx; 942 phba->ctx_idx = (phba->ctx_idx + 1) % 64; 943 /* Provide warning for over-run of the ct_ctx array */ 944 if (phba->ct_ctx[evt_dat->immed_dat].flags & 945 UNSOL_VALID) 946 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 947 "2717 CT context array entry " 948 "[%d] over-run: oxid:x%x, " 949 "sid:x%x\n", phba->ctx_idx, 950 phba->ct_ctx[ 951 evt_dat->immed_dat].oxid, 952 phba->ct_ctx[ 953 evt_dat->immed_dat].SID); 954 phba->ct_ctx[evt_dat->immed_dat].oxid = 955 piocbq->iocb.ulpContext; 956 phba->ct_ctx[evt_dat->immed_dat].SID = 957 piocbq->iocb.un.rcvels.remoteID; 958 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID; 959 } else 960 evt_dat->immed_dat = piocbq->iocb.ulpContext; 961 962 evt_dat->type = FC_REG_CT_EVENT; 963 list_add(&evt_dat->node, &evt->events_to_see); 964 if (evt_req_id == SLI_CT_ELX_LOOPBACK) { 965 wake_up_interruptible(&evt->wq); 966 lpfc_bsg_event_unref(evt); 967 break; 968 } 969 970 list_move(evt->events_to_see.prev, &evt->events_to_get); 971 lpfc_bsg_event_unref(evt); 972 973 job = evt->set_job; 974 evt->set_job = NULL; 975 if (job) { 976 job->reply->reply_payload_rcv_len = size; 977 /* make error code available to userspace */ 978 job->reply->result = 0; 979 job->dd_data = NULL; 980 /* complete the job back to userspace */ 981 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 982 job->job_done(job); 983 spin_lock_irqsave(&phba->ct_ev_lock, flags); 984 } 985 } 986 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 987 988 error_ct_unsol_exit: 989 if (!list_empty(&head)) 990 list_del(&head); 991 if (evt_req_id == SLI_CT_ELX_LOOPBACK) 992 return 0; 993 return 1; 994 } 995 996 /** 997 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command 998 * @job: SET_EVENT fc_bsg_job 999 **/ 1000 static int 1001 lpfc_bsg_hba_set_event(struct fc_bsg_job *job) 1002 { 1003 struct lpfc_vport *vport = 
(struct lpfc_vport *)job->shost->hostdata; 1004 struct lpfc_hba *phba = vport->phba; 1005 struct set_ct_event *event_req; 1006 struct lpfc_bsg_event *evt; 1007 int rc = 0; 1008 struct bsg_job_data *dd_data = NULL; 1009 uint32_t ev_mask; 1010 unsigned long flags; 1011 1012 if (job->request_len < 1013 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) { 1014 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1015 "2612 Received SET_CT_EVENT below minimum " 1016 "size\n"); 1017 rc = -EINVAL; 1018 goto job_error; 1019 } 1020 1021 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 1022 if (dd_data == NULL) { 1023 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1024 "2734 Failed allocation of dd_data\n"); 1025 rc = -ENOMEM; 1026 goto job_error; 1027 } 1028 1029 event_req = (struct set_ct_event *) 1030 job->request->rqst_data.h_vendor.vendor_cmd; 1031 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & 1032 FC_REG_EVENT_MASK); 1033 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1034 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1035 if (evt->reg_id == event_req->ev_reg_id) { 1036 lpfc_bsg_event_ref(evt); 1037 evt->wait_time_stamp = jiffies; 1038 break; 1039 } 1040 } 1041 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1042 1043 if (&evt->node == &phba->ct_ev_waiters) { 1044 /* no event waiting struct yet - first call */ 1045 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, 1046 event_req->ev_req_id); 1047 if (!evt) { 1048 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1049 "2617 Failed allocation of event " 1050 "waiter\n"); 1051 rc = -ENOMEM; 1052 goto job_error; 1053 } 1054 1055 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1056 list_add(&evt->node, &phba->ct_ev_waiters); 1057 lpfc_bsg_event_ref(evt); 1058 evt->wait_time_stamp = jiffies; 1059 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1060 } 1061 1062 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1063 evt->waiting = 1; 1064 dd_data->type = TYPE_EVT; 1065 dd_data->context_un.evt = evt; 1066 evt->set_job = job; /* for unsolicited command */ 1067 job->dd_data = dd_data; /* for fc transport timeout callback*/ 1068 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1069 return 0; /* call job done later */ 1070 1071 job_error: 1072 if (dd_data != NULL) 1073 kfree(dd_data); 1074 1075 job->dd_data = NULL; 1076 return rc; 1077 } 1078 1079 /** 1080 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command 1081 * @job: GET_EVENT fc_bsg_job 1082 **/ 1083 static int 1084 lpfc_bsg_hba_get_event(struct fc_bsg_job *job) 1085 { 1086 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 1087 struct lpfc_hba *phba = vport->phba; 1088 struct get_ct_event *event_req; 1089 struct get_ct_event_reply *event_reply; 1090 struct lpfc_bsg_event *evt; 1091 struct event_data *evt_dat = NULL; 1092 unsigned long flags; 1093 uint32_t rc = 0; 1094 1095 if (job->request_len < 1096 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) { 1097 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1098 "2613 Received GET_CT_EVENT request below " 1099 "minimum size\n"); 1100 rc = -EINVAL; 1101 goto job_error; 1102 } 1103 1104 event_req = (struct get_ct_event *) 1105 job->request->rqst_data.h_vendor.vendor_cmd; 1106 1107 event_reply = (struct get_ct_event_reply *) 1108 job->reply->reply_data.vendor_reply.vendor_rsp; 1109 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1110 list_for_each_entry(evt, &phba->ct_ev_waiters, node) { 1111 if (evt->reg_id == event_req->ev_reg_id) { 1112 if (list_empty(&evt->events_to_get)) 1113 
break; 1114 lpfc_bsg_event_ref(evt); 1115 evt->wait_time_stamp = jiffies; 1116 evt_dat = list_entry(evt->events_to_get.prev, 1117 struct event_data, node); 1118 list_del(&evt_dat->node); 1119 break; 1120 } 1121 } 1122 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1123 1124 /* The app may continue to ask for event data until it gets 1125 * an error indicating that there are no more events 1126 */ 1127 if (evt_dat == NULL) { 1128 job->reply->reply_payload_rcv_len = 0; 1129 rc = -ENOENT; 1130 goto job_error; 1131 } 1132 1133 if (evt_dat->len > job->request_payload.payload_len) { 1134 evt_dat->len = job->request_payload.payload_len; 1135 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1136 "2618 Truncated event data at %d " 1137 "bytes\n", 1138 job->request_payload.payload_len); 1139 } 1140 1141 event_reply->type = evt_dat->type; 1142 event_reply->immed_data = evt_dat->immed_dat; 1143 if (evt_dat->len > 0) 1144 job->reply->reply_payload_rcv_len = 1145 sg_copy_from_buffer(job->request_payload.sg_list, 1146 job->request_payload.sg_cnt, 1147 evt_dat->data, evt_dat->len); 1148 else 1149 job->reply->reply_payload_rcv_len = 0; 1150 1151 if (evt_dat) { 1152 kfree(evt_dat->data); 1153 kfree(evt_dat); 1154 } 1155 1156 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1157 lpfc_bsg_event_unref(evt); 1158 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1159 job->dd_data = NULL; 1160 job->reply->result = 0; 1161 job->job_done(job); 1162 return 0; 1163 1164 job_error: 1165 job->dd_data = NULL; 1166 job->reply->result = rc; 1167 return rc; 1168 } 1169 1170 /** 1171 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler 1172 * @phba: Pointer to HBA context object. 1173 * @cmdiocbq: Pointer to command iocb. 1174 * @rspiocbq: Pointer to response iocb. 1175 * 1176 * This function is the completion handler for iocbs issued using 1177 * lpfc_issue_ct_rsp function. This function is called by the 1178 * ring event handler function without any lock held. This function 1179 * can be called from both worker thread context and interrupt 1180 * context. This function also can be called from another thread which 1181 * cleans up the SLI layer objects. 1182 * This function copies the contents of the response iocb to the 1183 * response iocb memory object provided by the caller of 1184 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 1185 * sleeps for the iocb completion.
1186 **/ 1187 static void 1188 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, 1189 struct lpfc_iocbq *cmdiocbq, 1190 struct lpfc_iocbq *rspiocbq) 1191 { 1192 struct bsg_job_data *dd_data; 1193 struct fc_bsg_job *job; 1194 IOCB_t *rsp; 1195 struct lpfc_dmabuf *bmp; 1196 struct lpfc_nodelist *ndlp; 1197 unsigned long flags; 1198 int rc = 0; 1199 1200 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1201 dd_data = cmdiocbq->context2; 1202 /* normal completion and timeout crossed paths, already done */ 1203 if (!dd_data) { 1204 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1205 return; 1206 } 1207 1208 job = dd_data->context_un.iocb.set_job; 1209 bmp = dd_data->context_un.iocb.bmp; 1210 rsp = &rspiocbq->iocb; 1211 ndlp = dd_data->context_un.iocb.ndlp; 1212 1213 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 1214 job->request_payload.sg_cnt, DMA_TO_DEVICE); 1215 1216 if (rsp->ulpStatus) { 1217 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 1218 switch (rsp->un.ulpWord[4] & 0xff) { 1219 case IOERR_SEQUENCE_TIMEOUT: 1220 rc = -ETIMEDOUT; 1221 break; 1222 case IOERR_INVALID_RPI: 1223 rc = -EFAULT; 1224 break; 1225 default: 1226 rc = -EACCES; 1227 break; 1228 } 1229 } else 1230 rc = -EACCES; 1231 } else 1232 job->reply->reply_payload_rcv_len = 1233 rsp->un.genreq64.bdl.bdeSize; 1234 1235 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1236 lpfc_sli_release_iocbq(phba, cmdiocbq); 1237 lpfc_nlp_put(ndlp); 1238 kfree(bmp); 1239 kfree(dd_data); 1240 /* make error code available to userspace */ 1241 job->reply->result = rc; 1242 job->dd_data = NULL; 1243 /* complete the job back to userspace */ 1244 job->job_done(job); 1245 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1246 return; 1247 } 1248 1249 /** 1250 * lpfc_issue_ct_rsp - issue a ct response 1251 * @phba: Pointer to HBA context object. 1252 * @job: Pointer to the job object. 1253 * @tag: tag index value into the port's context exchange array. 1254 * @bmp: Pointer to a dma buffer descriptor. 1255 * @num_entry: Number of entries in the bde.
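* This function builds an XMIT_SEQUENCE64 iocb that carries the buffers described by @bmp and issues it on the ELS ring to return the CT response on the exchange selected by @tag.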
1256 **/ 1257 static int 1258 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, 1259 struct lpfc_dmabuf *bmp, int num_entry) 1260 { 1261 IOCB_t *icmd; 1262 struct lpfc_iocbq *ctiocb = NULL; 1263 int rc = 0; 1264 struct lpfc_nodelist *ndlp = NULL; 1265 struct bsg_job_data *dd_data; 1266 uint32_t creg_val; 1267 1268 /* allocate our bsg tracking structure */ 1269 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 1270 if (!dd_data) { 1271 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1272 "2736 Failed allocation of dd_data\n"); 1273 rc = -ENOMEM; 1274 goto no_dd_data; 1275 } 1276 1277 /* Allocate buffer for command iocb */ 1278 ctiocb = lpfc_sli_get_iocbq(phba); 1279 if (!ctiocb) { 1280 rc = -ENOMEM; 1281 goto no_ctiocb; 1282 } 1283 1284 icmd = &ctiocb->iocb; 1285 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 1286 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 1287 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys); 1288 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 1289 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); 1290 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 1291 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 1292 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL; 1293 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 1294 1295 /* Fill in rest of iocb */ 1296 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 1297 icmd->ulpBdeCount = 1; 1298 icmd->ulpLe = 1; 1299 icmd->ulpClass = CLASS3; 1300 if (phba->sli_rev == LPFC_SLI_REV4) { 1301 /* Do not issue unsol response if oxid not marked as valid */ 1302 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) { 1303 rc = IOCB_ERROR; 1304 goto issue_ct_rsp_exit; 1305 } 1306 icmd->ulpContext = phba->ct_ctx[tag].oxid; 1307 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID); 1308 if (!ndlp) { 1309 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 1310 "2721 ndlp null for oxid %x SID %x\n", 1311 icmd->ulpContext, 1312 phba->ct_ctx[tag].SID); 1313 rc = IOCB_ERROR; 1314 goto issue_ct_rsp_exit; 1315 } 1316 1317 /* Check if the ndlp is active */ 1318 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1319 rc = -IOCB_ERROR; 1320 goto issue_ct_rsp_exit; 1321 } 1322 1323 /* get a refernece count so the ndlp doesn't go away while 1324 * we respond 1325 */ 1326 if (!lpfc_nlp_get(ndlp)) { 1327 rc = -IOCB_ERROR; 1328 goto issue_ct_rsp_exit; 1329 } 1330 1331 icmd->un.ulpWord[3] = ndlp->nlp_rpi; 1332 /* The exchange is done, mark the entry as invalid */ 1333 phba->ct_ctx[tag].flags &= ~UNSOL_VALID; 1334 } else 1335 icmd->ulpContext = (ushort) tag; 1336 1337 icmd->ulpTimeout = phba->fc_ratov * 2; 1338 1339 /* Xmit CT response on exchange <xid> */ 1340 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1341 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n", 1342 icmd->ulpContext, icmd->ulpIoTag, phba->link_state); 1343 1344 ctiocb->iocb_cmpl = NULL; 1345 ctiocb->iocb_flag |= LPFC_IO_LIBDFC; 1346 ctiocb->vport = phba->pport; 1347 ctiocb->context3 = bmp; 1348 1349 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; 1350 ctiocb->context2 = dd_data; 1351 ctiocb->context1 = ndlp; 1352 dd_data->type = TYPE_IOCB; 1353 dd_data->context_un.iocb.cmdiocbq = ctiocb; 1354 dd_data->context_un.iocb.rspiocbq = NULL; 1355 dd_data->context_un.iocb.set_job = job; 1356 dd_data->context_un.iocb.bmp = bmp; 1357 dd_data->context_un.iocb.ndlp = ndlp; 1358 1359 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1360 creg_val = readl(phba->HCregaddr); 1361 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 1362 writel(creg_val, phba->HCregaddr); 1363 readl(phba->HCregaddr); /* flush */ 
1364 } 1365 1366 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 1367 1368 if (rc == IOCB_SUCCESS) 1369 return 0; /* done for now */ 1370 1371 issue_ct_rsp_exit: 1372 lpfc_sli_release_iocbq(phba, ctiocb); 1373 no_ctiocb: 1374 kfree(dd_data); 1375 no_dd_data: 1376 return rc; 1377 } 1378 1379 /** 1380 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command 1381 * @job: SEND_MGMT_RESP fc_bsg_job 1382 **/ 1383 static int 1384 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) 1385 { 1386 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 1387 struct lpfc_hba *phba = vport->phba; 1388 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) 1389 job->request->rqst_data.h_vendor.vendor_cmd; 1390 struct ulp_bde64 *bpl; 1391 struct lpfc_dmabuf *bmp = NULL; 1392 struct scatterlist *sgel = NULL; 1393 int request_nseg; 1394 int numbde; 1395 dma_addr_t busaddr; 1396 uint32_t tag = mgmt_resp->tag; 1397 unsigned long reqbfrcnt = 1398 (unsigned long)job->request_payload.payload_len; 1399 int rc = 0; 1400 1401 /* in case no data is transferred */ 1402 job->reply->reply_payload_rcv_len = 0; 1403 1404 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) { 1405 rc = -ERANGE; 1406 goto send_mgmt_rsp_exit; 1407 } 1408 1409 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1410 if (!bmp) { 1411 rc = -ENOMEM; 1412 goto send_mgmt_rsp_exit; 1413 } 1414 1415 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 1416 if (!bmp->virt) { 1417 rc = -ENOMEM; 1418 goto send_mgmt_rsp_free_bmp; 1419 } 1420 1421 INIT_LIST_HEAD(&bmp->list); 1422 bpl = (struct ulp_bde64 *) bmp->virt; 1423 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 1424 job->request_payload.sg_cnt, DMA_TO_DEVICE); 1425 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 1426 busaddr = sg_dma_address(sgel); 1427 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1428 bpl->tus.f.bdeSize = sg_dma_len(sgel); 1429 bpl->tus.w = cpu_to_le32(bpl->tus.w); 1430 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 1431 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 1432 bpl++; 1433 } 1434 1435 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg); 1436 1437 if (rc == IOCB_SUCCESS) 1438 return 0; /* done for now */ 1439 1440 /* TBD need to handle a timeout */ 1441 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 1442 job->request_payload.sg_cnt, DMA_TO_DEVICE); 1443 rc = -EACCES; 1444 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1445 1446 send_mgmt_rsp_free_bmp: 1447 kfree(bmp); 1448 send_mgmt_rsp_exit: 1449 /* make error code available to userspace */ 1450 job->reply->result = rc; 1451 job->dd_data = NULL; 1452 return rc; 1453 } 1454 1455 /** 1456 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command 1457 * @job: LPFC_BSG_VENDOR_DIAG_MODE 1458 * 1459 * This function is responsible for placing a port into diagnostic loopback 1460 * mode in order to perform a diagnostic loopback test. 1461 * All new scsi requests are blocked, a small delay is used to allow the 1462 * scsi requests to complete then the link is brought down. If the link is 1463 * is placed in loopback mode then scsi requests are again allowed 1464 * so the scsi mid-layer doesn't give up on the port. 1465 * All of this is done in-line. 
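* The function returns zero when loopback mode is entered successfully and a negative error code otherwise.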
1466 */ 1467 static int 1468 lpfc_bsg_diag_mode(struct fc_bsg_job *job) 1469 { 1470 struct Scsi_Host *shost = job->shost; 1471 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 1472 struct lpfc_hba *phba = vport->phba; 1473 struct diag_mode_set *loopback_mode; 1474 struct lpfc_sli *psli = &phba->sli; 1475 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; 1476 uint32_t link_flags; 1477 uint32_t timeout; 1478 struct lpfc_vport **vports; 1479 LPFC_MBOXQ_t *pmboxq; 1480 int mbxstatus; 1481 int i = 0; 1482 int rc = 0; 1483 1484 /* no data to return just the return code */ 1485 job->reply->reply_payload_rcv_len = 0; 1486 1487 if (job->request_len < 1488 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) { 1489 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1490 "2738 Received DIAG MODE request below minimum " 1491 "size\n"); 1492 rc = -EINVAL; 1493 goto job_error; 1494 } 1495 1496 loopback_mode = (struct diag_mode_set *) 1497 job->request->rqst_data.h_vendor.vendor_cmd; 1498 link_flags = loopback_mode->type; 1499 timeout = loopback_mode->timeout * 100; 1500 1501 if ((phba->link_state == LPFC_HBA_ERROR) || 1502 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 1503 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 1504 rc = -EACCES; 1505 goto job_error; 1506 } 1507 1508 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1509 if (!pmboxq) { 1510 rc = -ENOMEM; 1511 goto job_error; 1512 } 1513 1514 vports = lpfc_create_vport_work_array(phba); 1515 if (vports) { 1516 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1517 shost = lpfc_shost_from_vport(vports[i]); 1518 scsi_block_requests(shost); 1519 } 1520 1521 lpfc_destroy_vport_work_array(phba, vports); 1522 } else { 1523 shost = lpfc_shost_from_vport(phba->pport); 1524 scsi_block_requests(shost); 1525 } 1526 1527 while (pring->txcmplq_cnt) { 1528 if (i++ > 500) /* wait up to 5 seconds */ 1529 break; 1530 1531 msleep(10); 1532 } 1533 1534 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1535 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; 1536 pmboxq->u.mb.mbxOwner = OWN_HOST; 1537 1538 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1539 1540 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) { 1541 /* wait for link down before proceeding */ 1542 i = 0; 1543 while (phba->link_state != LPFC_LINK_DOWN) { 1544 if (i++ > timeout) { 1545 rc = -ETIMEDOUT; 1546 goto loopback_mode_exit; 1547 } 1548 1549 msleep(10); 1550 } 1551 1552 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 1553 if (link_flags == INTERNAL_LOOP_BACK) 1554 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB; 1555 else 1556 pmboxq->u.mb.un.varInitLnk.link_flags = 1557 FLAGS_TOPOLOGY_MODE_LOOP; 1558 1559 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK; 1560 pmboxq->u.mb.mbxOwner = OWN_HOST; 1561 1562 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, 1563 LPFC_MBOX_TMO); 1564 1565 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) 1566 rc = -ENODEV; 1567 else { 1568 phba->link_flag |= LS_LOOPBACK_MODE; 1569 /* wait for the link attention interrupt */ 1570 msleep(100); 1571 1572 i = 0; 1573 while (phba->link_state != LPFC_HBA_READY) { 1574 if (i++ > timeout) { 1575 rc = -ETIMEDOUT; 1576 break; 1577 } 1578 1579 msleep(10); 1580 } 1581 } 1582 1583 } else 1584 rc = -ENODEV; 1585 1586 loopback_mode_exit: 1587 vports = lpfc_create_vport_work_array(phba); 1588 if (vports) { 1589 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1590 shost = lpfc_shost_from_vport(vports[i]); 1591 scsi_unblock_requests(shost); 1592 } 
1593 lpfc_destroy_vport_work_array(phba, vports); 1594 } else { 1595 shost = lpfc_shost_from_vport(phba->pport); 1596 scsi_unblock_requests(shost); 1597 } 1598 1599 /* 1600 * Let SLI layer release mboxq if mbox command completed after timeout. 1601 */ 1602 if (mbxstatus != MBX_TIMEOUT) 1603 mempool_free(pmboxq, phba->mbox_mem_pool); 1604 1605 job_error: 1606 /* make error code available to userspace */ 1607 job->reply->result = rc; 1608 /* complete the job back to userspace if no error */ 1609 if (rc == 0) 1610 job->job_done(job); 1611 return rc; 1612 } 1613 1614 /** 1615 * lpfcdiag_loop_self_reg - obtains a remote port login id 1616 * @phba: Pointer to HBA context object 1617 * @rpi: Pointer to a remote port login id 1618 * 1619 * This function obtains a remote port login id so the diag loopback test 1620 * can send and receive its own unsolicited CT command. 1621 **/ 1622 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) 1623 { 1624 LPFC_MBOXQ_t *mbox; 1625 struct lpfc_dmabuf *dmabuff; 1626 int status; 1627 1628 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1629 if (!mbox) 1630 return -ENOMEM; 1631 1632 if (phba->sli_rev == LPFC_SLI_REV4) 1633 *rpi = lpfc_sli4_alloc_rpi(phba); 1634 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 1635 (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi); 1636 if (status) { 1637 mempool_free(mbox, phba->mbox_mem_pool); 1638 if (phba->sli_rev == LPFC_SLI_REV4) 1639 lpfc_sli4_free_rpi(phba, *rpi); 1640 return -ENOMEM; 1641 } 1642 1643 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 1644 mbox->context1 = NULL; 1645 mbox->context2 = NULL; 1646 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 1647 1648 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 1649 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 1650 kfree(dmabuff); 1651 if (status != MBX_TIMEOUT) 1652 mempool_free(mbox, phba->mbox_mem_pool); 1653 if (phba->sli_rev == LPFC_SLI_REV4) 1654 lpfc_sli4_free_rpi(phba, *rpi); 1655 return -ENODEV; 1656 } 1657 1658 *rpi = mbox->u.mb.un.varWords[0]; 1659 1660 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 1661 kfree(dmabuff); 1662 mempool_free(mbox, phba->mbox_mem_pool); 1663 return 0; 1664 } 1665 1666 /** 1667 * lpfcdiag_loop_self_unreg - unregs from the rpi 1668 * @phba: Pointer to HBA context object 1669 * @rpi: Remote port login id 1670 * 1671 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg 1672 **/ 1673 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) 1674 { 1675 LPFC_MBOXQ_t *mbox; 1676 int status; 1677 1678 /* Allocate mboxq structure */ 1679 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1680 if (mbox == NULL) 1681 return -ENOMEM; 1682 1683 lpfc_unreg_login(phba, 0, rpi, mbox); 1684 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 1685 1686 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 1687 if (status != MBX_TIMEOUT) 1688 mempool_free(mbox, phba->mbox_mem_pool); 1689 return -EIO; 1690 } 1691 mempool_free(mbox, phba->mbox_mem_pool); 1692 if (phba->sli_rev == LPFC_SLI_REV4) 1693 lpfc_sli4_free_rpi(phba, rpi); 1694 return 0; 1695 } 1696 1697 /** 1698 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids 1699 * @phba: Pointer to HBA context object 1700 * @rpi: Remote port login id 1701 * @txxri: Pointer to transmit exchange id 1702 * @rxxri: Pointer to response exchabge id 1703 * 1704 * This function obtains the transmit and receive ids required to send 1705 * an unsolicited ct command with a payload. 
A special lpfc FsType and CmdRsp 1706 * flags are used to the unsolicted response handler is able to process 1707 * the ct command sent on the same port. 1708 **/ 1709 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, 1710 uint16_t *txxri, uint16_t * rxxri) 1711 { 1712 struct lpfc_bsg_event *evt; 1713 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 1714 IOCB_t *cmd, *rsp; 1715 struct lpfc_dmabuf *dmabuf; 1716 struct ulp_bde64 *bpl = NULL; 1717 struct lpfc_sli_ct_request *ctreq = NULL; 1718 int ret_val = 0; 1719 int time_left; 1720 int iocb_stat = 0; 1721 unsigned long flags; 1722 1723 *txxri = 0; 1724 *rxxri = 0; 1725 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 1726 SLI_CT_ELX_LOOPBACK); 1727 if (!evt) 1728 return -ENOMEM; 1729 1730 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1731 list_add(&evt->node, &phba->ct_ev_waiters); 1732 lpfc_bsg_event_ref(evt); 1733 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1734 1735 cmdiocbq = lpfc_sli_get_iocbq(phba); 1736 rspiocbq = lpfc_sli_get_iocbq(phba); 1737 1738 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1739 if (dmabuf) { 1740 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); 1741 if (dmabuf->virt) { 1742 INIT_LIST_HEAD(&dmabuf->list); 1743 bpl = (struct ulp_bde64 *) dmabuf->virt; 1744 memset(bpl, 0, sizeof(*bpl)); 1745 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); 1746 bpl->addrHigh = 1747 le32_to_cpu(putPaddrHigh(dmabuf->phys + 1748 sizeof(*bpl))); 1749 bpl->addrLow = 1750 le32_to_cpu(putPaddrLow(dmabuf->phys + 1751 sizeof(*bpl))); 1752 bpl->tus.f.bdeFlags = 0; 1753 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; 1754 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1755 } 1756 } 1757 1758 if (cmdiocbq == NULL || rspiocbq == NULL || 1759 dmabuf == NULL || bpl == NULL || ctreq == NULL || 1760 dmabuf->virt == NULL) { 1761 ret_val = -ENOMEM; 1762 goto err_get_xri_exit; 1763 } 1764 1765 cmd = &cmdiocbq->iocb; 1766 rsp = &rspiocbq->iocb; 1767 1768 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 1769 1770 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 1771 ctreq->RevisionId.bits.InId = 0; 1772 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 1773 ctreq->FsSubType = 0; 1774 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP; 1775 ctreq->CommandResponse.bits.Size = 0; 1776 1777 1778 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys); 1779 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys); 1780 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 1781 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl); 1782 1783 cmd->un.xseq64.w5.hcsw.Fctl = LA; 1784 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 1785 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 1786 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 1787 1788 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR; 1789 cmd->ulpBdeCount = 1; 1790 cmd->ulpLe = 1; 1791 cmd->ulpClass = CLASS3; 1792 cmd->ulpContext = rpi; 1793 1794 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 1795 cmdiocbq->vport = phba->pport; 1796 1797 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 1798 rspiocbq, 1799 (phba->fc_ratov * 2) 1800 + LPFC_DRVR_TIMEOUT); 1801 if (iocb_stat) { 1802 ret_val = -EIO; 1803 goto err_get_xri_exit; 1804 } 1805 *txxri = rsp->ulpContext; 1806 1807 evt->waiting = 1; 1808 evt->wait_time_stamp = jiffies; 1809 time_left = wait_event_interruptible_timeout( 1810 evt->wq, !list_empty(&evt->events_to_see), 1811 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 1812 if (list_empty(&evt->events_to_see)) 1813 ret_val = (time_left) ? 
-EINTR : -ETIMEDOUT; 1814 else { 1815 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1816 list_move(evt->events_to_see.prev, &evt->events_to_get); 1817 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1818 *rxxri = (list_entry(evt->events_to_get.prev, 1819 typeof(struct event_data), 1820 node))->immed_dat; 1821 } 1822 evt->waiting = 0; 1823 1824 err_get_xri_exit: 1825 spin_lock_irqsave(&phba->ct_ev_lock, flags); 1826 lpfc_bsg_event_unref(evt); /* release ref */ 1827 lpfc_bsg_event_unref(evt); /* delete */ 1828 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 1829 1830 if (dmabuf) { 1831 if (dmabuf->virt) 1832 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 1833 kfree(dmabuf); 1834 } 1835 1836 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) 1837 lpfc_sli_release_iocbq(phba, cmdiocbq); 1838 if (rspiocbq) 1839 lpfc_sli_release_iocbq(phba, rspiocbq); 1840 return ret_val; 1841 } 1842 1843 /** 1844 * diag_cmd_data_alloc - fills in a bde struct with dma buffers 1845 * @phba: Pointer to HBA context object 1846 * @bpl: Pointer to 64 bit bde structure 1847 * @size: Number of bytes to process 1848 * @nocopydata: Flag to copy user data into the allocated buffer 1849 * 1850 * This function allocates page size buffers and populates an lpfc_dmabufext. 1851 * If allowed the user data pointed to with indataptr is copied into the kernel 1852 * memory. The chained list of page size buffers is returned. 1853 **/ 1854 static struct lpfc_dmabufext * 1855 diag_cmd_data_alloc(struct lpfc_hba *phba, 1856 struct ulp_bde64 *bpl, uint32_t size, 1857 int nocopydata) 1858 { 1859 struct lpfc_dmabufext *mlist = NULL; 1860 struct lpfc_dmabufext *dmp; 1861 int cnt, offset = 0, i = 0; 1862 struct pci_dev *pcidev; 1863 1864 pcidev = phba->pcidev; 1865 1866 while (size) { 1867 /* We get chunks of 4K */ 1868 if (size > BUF_SZ_4K) 1869 cnt = BUF_SZ_4K; 1870 else 1871 cnt = size; 1872 1873 /* allocate struct lpfc_dmabufext buffer header */ 1874 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL); 1875 if (!dmp) 1876 goto out; 1877 1878 INIT_LIST_HEAD(&dmp->dma.list); 1879 1880 /* Queue it to a linked list */ 1881 if (mlist) 1882 list_add_tail(&dmp->dma.list, &mlist->dma.list); 1883 else 1884 mlist = dmp; 1885 1886 /* allocate buffer */ 1887 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev, 1888 cnt, 1889 &(dmp->dma.phys), 1890 GFP_KERNEL); 1891 1892 if (!dmp->dma.virt) 1893 goto out; 1894 1895 dmp->size = cnt; 1896 1897 if (nocopydata) { 1898 bpl->tus.f.bdeFlags = 0; 1899 pci_dma_sync_single_for_device(phba->pcidev, 1900 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE); 1901 1902 } else { 1903 memset((uint8_t *)dmp->dma.virt, 0, cnt); 1904 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1905 } 1906 1907 /* build buffer ptr list for IOCB */ 1908 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys)); 1909 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys)); 1910 bpl->tus.f.bdeSize = (ushort) cnt; 1911 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1912 bpl++; 1913 1914 i++; 1915 offset += cnt; 1916 size -= cnt; 1917 } 1918 1919 mlist->flag = i; 1920 return mlist; 1921 out: 1922 diag_cmd_data_free(phba, mlist); 1923 return NULL; 1924 } 1925 1926 /** 1927 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd 1928 * @phba: Pointer to HBA context object 1929 * @rxxri: Receive exchange id 1930 * @len: Number of data bytes 1931 * 1932 * This function allocates and posts a data buffer of sufficient size to recieve 1933 * an unsolicted CT command. 
1934 **/ 1935 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 1936 size_t len) 1937 { 1938 struct lpfc_sli *psli = &phba->sli; 1939 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 1940 struct lpfc_iocbq *cmdiocbq; 1941 IOCB_t *cmd = NULL; 1942 struct list_head head, *curr, *next; 1943 struct lpfc_dmabuf *rxbmp; 1944 struct lpfc_dmabuf *dmp; 1945 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 1946 struct ulp_bde64 *rxbpl = NULL; 1947 uint32_t num_bde; 1948 struct lpfc_dmabufext *rxbuffer = NULL; 1949 int ret_val = 0; 1950 int iocb_stat; 1951 int i = 0; 1952 1953 cmdiocbq = lpfc_sli_get_iocbq(phba); 1954 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1955 if (rxbmp != NULL) { 1956 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 1957 if (rxbmp->virt) { 1958 INIT_LIST_HEAD(&rxbmp->list); 1959 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 1960 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 1961 } 1962 } 1963 1964 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 1965 ret_val = -ENOMEM; 1966 goto err_post_rxbufs_exit; 1967 } 1968 1969 /* Queue buffers for the receive exchange */ 1970 num_bde = (uint32_t)rxbuffer->flag; 1971 dmp = &rxbuffer->dma; 1972 1973 cmd = &cmdiocbq->iocb; 1974 i = 0; 1975 1976 INIT_LIST_HEAD(&head); 1977 list_add_tail(&head, &dmp->list); 1978 list_for_each_safe(curr, next, &head) { 1979 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 1980 list_del(curr); 1981 1982 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 1983 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 1984 cmd->un.quexri64cx.buff.bde.addrHigh = 1985 putPaddrHigh(mp[i]->phys); 1986 cmd->un.quexri64cx.buff.bde.addrLow = 1987 putPaddrLow(mp[i]->phys); 1988 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 1989 ((struct lpfc_dmabufext *)mp[i])->size; 1990 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 1991 cmd->ulpCommand = CMD_QUE_XRI64_CX; 1992 cmd->ulpPU = 0; 1993 cmd->ulpLe = 1; 1994 cmd->ulpBdeCount = 1; 1995 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 1996 1997 } else { 1998 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 1999 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2000 cmd->un.cont64[i].tus.f.bdeSize = 2001 ((struct lpfc_dmabufext *)mp[i])->size; 2002 cmd->ulpBdeCount = ++i; 2003 2004 if ((--num_bde > 0) && (i < 2)) 2005 continue; 2006 2007 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2008 cmd->ulpLe = 1; 2009 } 2010 2011 cmd->ulpClass = CLASS3; 2012 cmd->ulpContext = rxxri; 2013 2014 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2015 0); 2016 if (iocb_stat == IOCB_ERROR) { 2017 diag_cmd_data_free(phba, 2018 (struct lpfc_dmabufext *)mp[0]); 2019 if (mp[1]) 2020 diag_cmd_data_free(phba, 2021 (struct lpfc_dmabufext *)mp[1]); 2022 dmp = list_entry(next, struct lpfc_dmabuf, list); 2023 ret_val = -EIO; 2024 goto err_post_rxbufs_exit; 2025 } 2026 2027 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2028 if (mp[1]) { 2029 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2030 mp[1] = NULL; 2031 } 2032 2033 /* The iocb was freed by lpfc_sli_issue_iocb */ 2034 cmdiocbq = lpfc_sli_get_iocbq(phba); 2035 if (!cmdiocbq) { 2036 dmp = list_entry(next, struct lpfc_dmabuf, list); 2037 ret_val = -EIO; 2038 goto err_post_rxbufs_exit; 2039 } 2040 2041 cmd = &cmdiocbq->iocb; 2042 i = 0; 2043 } 2044 list_del(&head); 2045 2046 err_post_rxbufs_exit: 2047 2048 if (rxbmp) { 2049 if (rxbmp->virt) 2050 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 2051 kfree(rxbmp); 2052 } 2053 2054 if (cmdiocbq) 2055 lpfc_sli_release_iocbq(phba, 
cmdiocbq); 2056 return ret_val; 2057 } 2058 2059 /** 2060 * lpfc_bsg_diag_test - with a port in loopback, issue a CT cmd to itself 2061 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job 2062 * 2063 * This function receives a user data buffer to be transmitted and received on 2064 * the same port; the link must be up and in loopback mode prior 2065 * to this function being called. 2066 * 1. A kernel buffer is allocated to copy the user data into. 2067 * 2. The port registers with "itself". 2068 * 3. The transmit and receive exchange ids are obtained. 2069 * 4. The receive exchange id is posted. 2070 * 5. A new els loopback event is created. 2071 * 6. The command and response iocbs are allocated. 2072 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback. 2073 * 2074 * This function is meant to be called n times while the port is in loopback 2075 * so it is the app's responsibility to issue a reset to take the port out 2076 * of loopback mode. 2077 **/ 2078 static int 2079 lpfc_bsg_diag_test(struct fc_bsg_job *job) 2080 { 2081 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2082 struct lpfc_hba *phba = vport->phba; 2083 struct diag_mode_test *diag_mode; 2084 struct lpfc_bsg_event *evt; 2085 struct event_data *evdat; 2086 struct lpfc_sli *psli = &phba->sli; 2087 uint32_t size; 2088 uint32_t full_size; 2089 size_t segment_len = 0, segment_offset = 0, current_offset = 0; 2090 uint16_t rpi = 0; 2091 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 2092 IOCB_t *cmd, *rsp; 2093 struct lpfc_sli_ct_request *ctreq; 2094 struct lpfc_dmabuf *txbmp; 2095 struct ulp_bde64 *txbpl = NULL; 2096 struct lpfc_dmabufext *txbuffer = NULL; 2097 struct list_head head; 2098 struct lpfc_dmabuf *curr; 2099 uint16_t txxri, rxxri; 2100 uint32_t num_bde; 2101 uint8_t *ptr = NULL, *rx_databuf = NULL; 2102 int rc = 0; 2103 int time_left; 2104 int iocb_stat; 2105 unsigned long flags; 2106 void *dataout = NULL; 2107 uint32_t total_mem; 2108 2109 /* in case no data is returned return just the return code */ 2110 job->reply->reply_payload_rcv_len = 0; 2111 2112 if (job->request_len < 2113 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) { 2114 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2115 "2739 Received DIAG TEST request below minimum " 2116 "size\n"); 2117 rc = -EINVAL; 2118 goto loopback_test_exit; 2119 } 2120 2121 if (job->request_payload.payload_len != 2122 job->reply_payload.payload_len) { 2123 rc = -EINVAL; 2124 goto loopback_test_exit; 2125 } 2126 2127 diag_mode = (struct diag_mode_test *) 2128 job->request->rqst_data.h_vendor.vendor_cmd; 2129 2130 if ((phba->link_state == LPFC_HBA_ERROR) || 2131 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || 2132 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) { 2133 rc = -EACCES; 2134 goto loopback_test_exit; 2135 } 2136 2137 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) { 2138 rc = -EACCES; 2139 goto loopback_test_exit; 2140 } 2141 2142 size = job->request_payload.payload_len; 2143 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */ 2144 2145 if ((size == 0) || (size > 80 * BUF_SZ_4K)) { 2146 rc = -ERANGE; 2147 goto loopback_test_exit; 2148 } 2149 2150 if (full_size >= BUF_SZ_4K) { 2151 /* 2152 * Allocate memory for ioctl data. If buffer is bigger than 64k, 2153 * then we allocate 64k and re-use that buffer over and over to 2154 * xfer the whole block. This is because the Linux kernel has a 2155 * problem allocating more than 120k of kernel space memory. Saw 2156 * problem with GET_FCPTARGETMAPPING...
2157 */ 2158 if (size <= (64 * 1024)) 2159 total_mem = full_size; 2160 else 2161 total_mem = 64 * 1024; 2162 } else 2163 /* Allocate memory for ioctl data */ 2164 total_mem = BUF_SZ_4K; 2165 2166 dataout = kmalloc(total_mem, GFP_KERNEL); 2167 if (dataout == NULL) { 2168 rc = -ENOMEM; 2169 goto loopback_test_exit; 2170 } 2171 2172 ptr = dataout; 2173 ptr += ELX_LOOPBACK_HEADER_SZ; 2174 sg_copy_to_buffer(job->request_payload.sg_list, 2175 job->request_payload.sg_cnt, 2176 ptr, size); 2177 rc = lpfcdiag_loop_self_reg(phba, &rpi); 2178 if (rc) 2179 goto loopback_test_exit; 2180 2181 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 2182 if (rc) { 2183 lpfcdiag_loop_self_unreg(phba, rpi); 2184 goto loopback_test_exit; 2185 } 2186 2187 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 2188 if (rc) { 2189 lpfcdiag_loop_self_unreg(phba, rpi); 2190 goto loopback_test_exit; 2191 } 2192 2193 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 2194 SLI_CT_ELX_LOOPBACK); 2195 if (!evt) { 2196 lpfcdiag_loop_self_unreg(phba, rpi); 2197 rc = -ENOMEM; 2198 goto loopback_test_exit; 2199 } 2200 2201 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2202 list_add(&evt->node, &phba->ct_ev_waiters); 2203 lpfc_bsg_event_ref(evt); 2204 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2205 2206 cmdiocbq = lpfc_sli_get_iocbq(phba); 2207 rspiocbq = lpfc_sli_get_iocbq(phba); 2208 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2209 2210 if (txbmp) { 2211 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 2212 if (txbmp->virt) { 2213 INIT_LIST_HEAD(&txbmp->list); 2214 txbpl = (struct ulp_bde64 *) txbmp->virt; 2215 txbuffer = diag_cmd_data_alloc(phba, 2216 txbpl, full_size, 0); 2217 } 2218 } 2219 2220 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer || 2221 !txbmp->virt) { 2222 rc = -ENOMEM; 2223 goto err_loopback_test_exit; 2224 } 2225 2226 cmd = &cmdiocbq->iocb; 2227 rsp = &rspiocbq->iocb; 2228 2229 INIT_LIST_HEAD(&head); 2230 list_add_tail(&head, &txbuffer->dma.list); 2231 list_for_each_entry(curr, &head, list) { 2232 segment_len = ((struct lpfc_dmabufext *)curr)->size; 2233 if (current_offset == 0) { 2234 ctreq = curr->virt; 2235 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 2236 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 2237 ctreq->RevisionId.bits.InId = 0; 2238 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 2239 ctreq->FsSubType = 0; 2240 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; 2241 ctreq->CommandResponse.bits.Size = size; 2242 segment_offset = ELX_LOOPBACK_HEADER_SZ; 2243 } else 2244 segment_offset = 0; 2245 2246 BUG_ON(segment_offset >= segment_len); 2247 memcpy(curr->virt + segment_offset, 2248 ptr + current_offset, 2249 segment_len - segment_offset); 2250 2251 current_offset += segment_len - segment_offset; 2252 BUG_ON(current_offset > size); 2253 } 2254 list_del(&head); 2255 2256 /* Build the XMIT_SEQUENCE iocb */ 2257 2258 num_bde = (uint32_t)txbuffer->flag; 2259 2260 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); 2261 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); 2262 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 2263 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64)); 2264 2265 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 2266 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 2267 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 2268 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 2269 2270 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 2271 cmd->ulpBdeCount = 1; 2272 cmd->ulpLe = 1; 2273 cmd->ulpClass = CLASS3; 2274 cmd->ulpContext = txxri; 2275 2276 
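/* Issue the sequence synchronously on the ELS ring as a libdfc request; the looped-back payload arrives later as an unsolicited CT event on evt->wq. */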
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2277 cmdiocbq->vport = phba->pport; 2278 2279 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 2280 rspiocbq, (phba->fc_ratov * 2) + 2281 LPFC_DRVR_TIMEOUT); 2282 2283 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { 2284 rc = -EIO; 2285 goto err_loopback_test_exit; 2286 } 2287 2288 evt->waiting = 1; 2289 time_left = wait_event_interruptible_timeout( 2290 evt->wq, !list_empty(&evt->events_to_see), 2291 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 2292 evt->waiting = 0; 2293 if (list_empty(&evt->events_to_see)) 2294 rc = (time_left) ? -EINTR : -ETIMEDOUT; 2295 else { 2296 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2297 list_move(evt->events_to_see.prev, &evt->events_to_get); 2298 evdat = list_entry(evt->events_to_get.prev, 2299 typeof(*evdat), node); 2300 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2301 rx_databuf = evdat->data; 2302 if (evdat->len != full_size) { 2303 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2304 "1603 Loopback test did not receive expected " 2305 "data length. actual length 0x%x expected " 2306 "length 0x%x\n", 2307 evdat->len, full_size); 2308 rc = -EIO; 2309 } else if (rx_databuf == NULL) 2310 rc = -EIO; 2311 else { 2312 rc = IOCB_SUCCESS; 2313 /* skip over elx loopback header */ 2314 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 2315 job->reply->reply_payload_rcv_len = 2316 sg_copy_from_buffer(job->reply_payload.sg_list, 2317 job->reply_payload.sg_cnt, 2318 rx_databuf, size); 2319 job->reply->reply_payload_rcv_len = size; 2320 } 2321 } 2322 2323 err_loopback_test_exit: 2324 lpfcdiag_loop_self_unreg(phba, rpi); 2325 2326 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2327 lpfc_bsg_event_unref(evt); /* release ref */ 2328 lpfc_bsg_event_unref(evt); /* delete */ 2329 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2330 2331 if (cmdiocbq != NULL) 2332 lpfc_sli_release_iocbq(phba, cmdiocbq); 2333 2334 if (rspiocbq != NULL) 2335 lpfc_sli_release_iocbq(phba, rspiocbq); 2336 2337 if (txbmp != NULL) { 2338 if (txbpl != NULL) { 2339 if (txbuffer != NULL) 2340 diag_cmd_data_free(phba, txbuffer); 2341 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 2342 } 2343 kfree(txbmp); 2344 } 2345 2346 loopback_test_exit: 2347 kfree(dataout); 2348 /* make error code available to userspace */ 2349 job->reply->result = rc; 2350 job->dd_data = NULL; 2351 /* complete the job back to userspace if no error */ 2352 if (rc == 0) 2353 job->job_done(job); 2354 return rc; 2355 } 2356 2357 /** 2358 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 2359 * @job: GET_DFC_REV fc_bsg_job 2360 **/ 2361 static int 2362 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) 2363 { 2364 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2365 struct lpfc_hba *phba = vport->phba; 2366 struct get_mgmt_rev *event_req; 2367 struct get_mgmt_rev_reply *event_reply; 2368 int rc = 0; 2369 2370 if (job->request_len < 2371 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 2372 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2373 "2740 Received GET_DFC_REV request below " 2374 "minimum size\n"); 2375 rc = -EINVAL; 2376 goto job_error; 2377 } 2378 2379 event_req = (struct get_mgmt_rev *) 2380 job->request->rqst_data.h_vendor.vendor_cmd; 2381 2382 event_reply = (struct get_mgmt_rev_reply *) 2383 job->reply->reply_data.vendor_reply.vendor_rsp; 2384 2385 if (job->reply_len < 2386 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { 2387 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2388 
"2741 Received GET_DFC_REV reply below " 2389 "minimum size\n"); 2390 rc = -EINVAL; 2391 goto job_error; 2392 } 2393 2394 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 2395 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 2396 job_error: 2397 job->reply->result = rc; 2398 if (rc == 0) 2399 job->job_done(job); 2400 return rc; 2401 } 2402 2403 /** 2404 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler 2405 * @phba: Pointer to HBA context object. 2406 * @pmboxq: Pointer to mailbox command. 2407 * 2408 * This is completion handler function for mailbox commands issued from 2409 * lpfc_bsg_issue_mbox function. This function is called by the 2410 * mailbox event handler function with no lock held. This function 2411 * will wake up thread waiting on the wait queue pointed by context1 2412 * of the mailbox. 2413 **/ 2414 void 2415 lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2416 { 2417 struct bsg_job_data *dd_data; 2418 struct fc_bsg_job *job; 2419 uint32_t size; 2420 unsigned long flags; 2421 uint8_t *to; 2422 uint8_t *from; 2423 2424 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2425 dd_data = pmboxq->context1; 2426 /* job already timed out? */ 2427 if (!dd_data) { 2428 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2429 return; 2430 } 2431 2432 /* build the outgoing buffer to do an sg copy 2433 * the format is the response mailbox followed by any extended 2434 * mailbox data 2435 */ 2436 from = (uint8_t *)&pmboxq->u.mb; 2437 to = (uint8_t *)dd_data->context_un.mbox.mb; 2438 memcpy(to, from, sizeof(MAILBOX_t)); 2439 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) { 2440 /* copy the extended data if any, count is in words */ 2441 if (dd_data->context_un.mbox.outExtWLen) { 2442 from = (uint8_t *)dd_data->context_un.mbox.ext; 2443 to += sizeof(MAILBOX_t); 2444 size = dd_data->context_un.mbox.outExtWLen * 2445 sizeof(uint32_t); 2446 memcpy(to, from, size); 2447 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) { 2448 from = (uint8_t *)dd_data->context_un.mbox. 2449 dmp->dma.virt; 2450 to += sizeof(MAILBOX_t); 2451 size = dd_data->context_un.mbox.dmp->size; 2452 memcpy(to, from, size); 2453 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 2454 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) { 2455 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. 2456 virt; 2457 to += sizeof(MAILBOX_t); 2458 size = pmboxq->u.mb.un.varWords[5]; 2459 memcpy(to, from, size); 2460 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 2461 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) { 2462 struct lpfc_mbx_nembed_cmd *nembed_sge = 2463 (struct lpfc_mbx_nembed_cmd *) 2464 &pmboxq->u.mb.un.varWords[0]; 2465 2466 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. 2467 virt; 2468 to += sizeof(MAILBOX_t); 2469 size = nembed_sge->sge[0].length; 2470 memcpy(to, from, size); 2471 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) { 2472 from = (uint8_t *)dd_data->context_un. 
2473 mbox.dmp->dma.virt; 2474 to += sizeof(MAILBOX_t); 2475 size = dd_data->context_un.mbox.dmp->size; 2476 memcpy(to, from, size); 2477 } 2478 } 2479 2480 from = (uint8_t *)dd_data->context_un.mbox.mb; 2481 job = dd_data->context_un.mbox.set_job; 2482 size = job->reply_payload.payload_len; 2483 job->reply->reply_payload_rcv_len = 2484 sg_copy_from_buffer(job->reply_payload.sg_list, 2485 job->reply_payload.sg_cnt, 2486 from, size); 2487 job->reply->result = 0; 2488 2489 dd_data->context_un.mbox.set_job = NULL; 2490 job->dd_data = NULL; 2491 job->job_done(job); 2492 /* need to hold the lock until we call job done to hold off 2493 * the timeout handler returning to the midlayer while 2494 * we are still processing the job 2495 */ 2496 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2497 2498 kfree(dd_data->context_un.mbox.mb); 2499 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 2500 kfree(dd_data->context_un.mbox.ext); 2501 if (dd_data->context_un.mbox.dmp) { 2502 dma_free_coherent(&phba->pcidev->dev, 2503 dd_data->context_un.mbox.dmp->size, 2504 dd_data->context_un.mbox.dmp->dma.virt, 2505 dd_data->context_un.mbox.dmp->dma.phys); 2506 kfree(dd_data->context_un.mbox.dmp); 2507 } 2508 if (dd_data->context_un.mbox.rxbmp) { 2509 lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt, 2510 dd_data->context_un.mbox.rxbmp->phys); 2511 kfree(dd_data->context_un.mbox.rxbmp); 2512 } 2513 kfree(dd_data); 2514 return; 2515 } 2516 2517 /** 2518 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 2519 * @phba: Pointer to HBA context object. 2520 * @mb: Pointer to a mailbox object. 2521 * @vport: Pointer to a vport object. 2522 * 2523 * Some commands require the port to be offline, some may not be called from 2524 * the application.
2525 **/ 2526 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, 2527 MAILBOX_t *mb, struct lpfc_vport *vport) 2528 { 2529 /* return negative error values for bsg job */ 2530 switch (mb->mbxCommand) { 2531 /* Offline only */ 2532 case MBX_INIT_LINK: 2533 case MBX_DOWN_LINK: 2534 case MBX_CONFIG_LINK: 2535 case MBX_CONFIG_RING: 2536 case MBX_RESET_RING: 2537 case MBX_UNREG_LOGIN: 2538 case MBX_CLEAR_LA: 2539 case MBX_DUMP_CONTEXT: 2540 case MBX_RUN_DIAGS: 2541 case MBX_RESTART: 2542 case MBX_SET_MASK: 2543 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 2544 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2545 "2743 Command 0x%x is illegal in on-line " 2546 "state\n", 2547 mb->mbxCommand); 2548 return -EPERM; 2549 } 2550 case MBX_WRITE_NV: 2551 case MBX_WRITE_VPARMS: 2552 case MBX_LOAD_SM: 2553 case MBX_READ_NV: 2554 case MBX_READ_CONFIG: 2555 case MBX_READ_RCONFIG: 2556 case MBX_READ_STATUS: 2557 case MBX_READ_XRI: 2558 case MBX_READ_REV: 2559 case MBX_READ_LNK_STAT: 2560 case MBX_DUMP_MEMORY: 2561 case MBX_DOWN_LOAD: 2562 case MBX_UPDATE_CFG: 2563 case MBX_KILL_BOARD: 2564 case MBX_LOAD_AREA: 2565 case MBX_LOAD_EXP_ROM: 2566 case MBX_BEACON: 2567 case MBX_DEL_LD_ENTRY: 2568 case MBX_SET_DEBUG: 2569 case MBX_WRITE_WWN: 2570 case MBX_SLI4_CONFIG: 2571 case MBX_READ_EVENT_LOG: 2572 case MBX_READ_EVENT_LOG_STATUS: 2573 case MBX_WRITE_EVENT_LOG: 2574 case MBX_PORT_CAPABILITIES: 2575 case MBX_PORT_IOV_CONTROL: 2576 case MBX_RUN_BIU_DIAG64: 2577 break; 2578 case MBX_SET_VARIABLE: 2579 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2580 "1226 mbox: set_variable 0x%x, 0x%x\n", 2581 mb->un.varWords[0], 2582 mb->un.varWords[1]); 2583 if ((mb->un.varWords[0] == SETVAR_MLOMNT) 2584 && (mb->un.varWords[1] == 1)) { 2585 phba->wait_4_mlo_maint_flg = 1; 2586 } else if (mb->un.varWords[0] == SETVAR_MLORST) { 2587 phba->link_flag &= ~LS_LOOPBACK_MODE; 2588 phba->fc_topology = LPFC_TOPOLOGY_PT_PT; 2589 } 2590 break; 2591 case MBX_READ_SPARM64: 2592 case MBX_READ_TOPOLOGY: 2593 case MBX_REG_LOGIN: 2594 case MBX_REG_LOGIN64: 2595 case MBX_CONFIG_PORT: 2596 case MBX_RUN_BIU_DIAG: 2597 default: 2598 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2599 "2742 Unknown Command 0x%x\n", 2600 mb->mbxCommand); 2601 return -EPERM; 2602 } 2603 2604 return 0; /* ok */ 2605 } 2606 2607 /** 2608 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app 2609 * @phba: Pointer to HBA context object. 2610 * @mb: Pointer to a mailbox object. 2611 * @vport: Pointer to a vport object. 2612 * 2613 * Allocate a tracking object, mailbox command memory, get a mailbox 2614 * from the mailbox pool, copy the caller mailbox command. 2615 * 2616 * If offline and the sli is active we need to poll for the command (port is 2617 * being reset) and com-plete the job, otherwise issue the mailbox command and 2618 * let our completion handler finish the command. 
2619 **/ 2620 static uint32_t 2621 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 2622 struct lpfc_vport *vport) 2623 { 2624 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 2625 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 2626 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 2627 MAILBOX_t *mb = NULL; 2628 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 2629 uint32_t size; 2630 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */ 2631 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */ 2632 struct ulp_bde64 *rxbpl = NULL; 2633 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *) 2634 job->request->rqst_data.h_vendor.vendor_cmd; 2635 uint8_t *ext = NULL; 2636 int rc = 0; 2637 uint8_t *from; 2638 2639 /* in case no data is transferred */ 2640 job->reply->reply_payload_rcv_len = 0; 2641 2642 /* check if requested extended data lengths are valid */ 2643 if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) || 2644 (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) { 2645 rc = -ERANGE; 2646 goto job_done; 2647 } 2648 2649 /* allocate our bsg tracking structure */ 2650 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 2651 if (!dd_data) { 2652 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2653 "2727 Failed allocation of dd_data\n"); 2654 rc = -ENOMEM; 2655 goto job_done; 2656 } 2657 2658 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL); 2659 if (!mb) { 2660 rc = -ENOMEM; 2661 goto job_done; 2662 } 2663 2664 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2665 if (!pmboxq) { 2666 rc = -ENOMEM; 2667 goto job_done; 2668 } 2669 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 2670 2671 size = job->request_payload.payload_len; 2672 sg_copy_to_buffer(job->request_payload.sg_list, 2673 job->request_payload.sg_cnt, 2674 mb, size); 2675 2676 rc = lpfc_bsg_check_cmd_access(phba, mb, vport); 2677 if (rc != 0) 2678 goto job_done; /* must be negative */ 2679 2680 pmb = &pmboxq->u.mb; 2681 memcpy(pmb, mb, sizeof(*pmb)); 2682 pmb->mbxOwner = OWN_HOST; 2683 pmboxq->vport = vport; 2684 2685 /* If HBA encountered an error attention, allow only DUMP 2686 * or RESTART mailbox commands until the HBA is restarted. 2687 */ 2688 if (phba->pport->stopped && 2689 pmb->mbxCommand != MBX_DUMP_MEMORY && 2690 pmb->mbxCommand != MBX_RESTART && 2691 pmb->mbxCommand != MBX_WRITE_VPARMS && 2692 pmb->mbxCommand != MBX_WRITE_WWN) 2693 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 2694 "2797 mbox: Issued mailbox cmd " 2695 "0x%x while in stopped state.\n", 2696 pmb->mbxCommand); 2697 2698 /* Don't allow mailbox commands to be sent when blocked 2699 * or when in the middle of discovery 2700 */ 2701 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 2702 rc = -EAGAIN; 2703 goto job_done; 2704 } 2705 2706 /* extended mailbox commands will need an extended buffer */ 2707 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 2708 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL); 2709 if (!ext) { 2710 rc = -ENOMEM; 2711 goto job_done; 2712 } 2713 2714 /* any data for the device? 
*/ 2715 if (mbox_req->inExtWLen) { 2716 from = (uint8_t *)mb; 2717 from += sizeof(MAILBOX_t); 2718 memcpy((uint8_t *)ext, from, 2719 mbox_req->inExtWLen * sizeof(uint32_t)); 2720 } 2721 2722 pmboxq->context2 = ext; 2723 pmboxq->in_ext_byte_len = 2724 mbox_req->inExtWLen * sizeof(uint32_t); 2725 pmboxq->out_ext_byte_len = 2726 mbox_req->outExtWLen * sizeof(uint32_t); 2727 pmboxq->mbox_offset_word = mbox_req->mbOffset; 2728 } 2729 2730 /* biu diag will need a kernel buffer to transfer the data 2731 * allocate our own buffer and setup the mailbox command to 2732 * use ours 2733 */ 2734 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { 2735 uint32_t transmit_length = pmb->un.varWords[1]; 2736 uint32_t receive_length = pmb->un.varWords[4]; 2737 /* transmit length cannot be greater than receive length or 2738 * mailbox extension size 2739 */ 2740 if ((transmit_length > receive_length) || 2741 (transmit_length > MAILBOX_EXT_SIZE)) { 2742 rc = -ERANGE; 2743 goto job_done; 2744 } 2745 2746 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2747 if (!rxbmp) { 2748 rc = -ENOMEM; 2749 goto job_done; 2750 } 2751 2752 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2753 if (!rxbmp->virt) { 2754 rc = -ENOMEM; 2755 goto job_done; 2756 } 2757 2758 INIT_LIST_HEAD(&rxbmp->list); 2759 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2760 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0); 2761 if (!dmp) { 2762 rc = -ENOMEM; 2763 goto job_done; 2764 } 2765 2766 INIT_LIST_HEAD(&dmp->dma.list); 2767 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = 2768 putPaddrHigh(dmp->dma.phys); 2769 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = 2770 putPaddrLow(dmp->dma.phys); 2771 2772 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = 2773 putPaddrHigh(dmp->dma.phys + 2774 pmb->un.varBIUdiag.un.s2. 2775 xmit_bde64.tus.f.bdeSize); 2776 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = 2777 putPaddrLow(dmp->dma.phys + 2778 pmb->un.varBIUdiag.un.s2. 
2779 xmit_bde64.tus.f.bdeSize); 2780 2781 /* copy the transmit data found in the mailbox extension area */ 2782 from = (uint8_t *)mb; 2783 from += sizeof(MAILBOX_t); 2784 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length); 2785 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 2786 struct READ_EVENT_LOG_VAR *rdEventLog = 2787 &pmb->un.varRdEventLog ; 2788 uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 2789 uint32_t mode = bf_get(lpfc_event_log, rdEventLog); 2790 2791 /* receive length cannot be greater than mailbox 2792 * extension size 2793 */ 2794 if (receive_length > MAILBOX_EXT_SIZE) { 2795 rc = -ERANGE; 2796 goto job_done; 2797 } 2798 2799 /* mode zero uses a bde like biu diags command */ 2800 if (mode == 0) { 2801 2802 /* rebuild the command for sli4 using our own buffers 2803 * like we do for biu diags 2804 */ 2805 2806 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2807 if (!rxbmp) { 2808 rc = -ENOMEM; 2809 goto job_done; 2810 } 2811 2812 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2813 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2814 if (rxbpl) { 2815 INIT_LIST_HEAD(&rxbmp->list); 2816 dmp = diag_cmd_data_alloc(phba, rxbpl, 2817 receive_length, 0); 2818 } 2819 2820 if (!dmp) { 2821 rc = -ENOMEM; 2822 goto job_done; 2823 } 2824 2825 INIT_LIST_HEAD(&dmp->dma.list); 2826 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys); 2827 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); 2828 } 2829 } else if (phba->sli_rev == LPFC_SLI_REV4) { 2830 if (pmb->mbxCommand == MBX_DUMP_MEMORY) { 2831 /* rebuild the command for sli4 using our own buffers 2832 * like we do for biu diags 2833 */ 2834 uint32_t receive_length = pmb->un.varWords[2]; 2835 /* receive length cannot be greater than mailbox 2836 * extension size 2837 */ 2838 if ((receive_length == 0) || 2839 (receive_length > MAILBOX_EXT_SIZE)) { 2840 rc = -ERANGE; 2841 goto job_done; 2842 } 2843 2844 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2845 if (!rxbmp) { 2846 rc = -ENOMEM; 2847 goto job_done; 2848 } 2849 2850 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2851 if (!rxbmp->virt) { 2852 rc = -ENOMEM; 2853 goto job_done; 2854 } 2855 2856 INIT_LIST_HEAD(&rxbmp->list); 2857 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2858 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 2859 0); 2860 if (!dmp) { 2861 rc = -ENOMEM; 2862 goto job_done; 2863 } 2864 2865 INIT_LIST_HEAD(&dmp->dma.list); 2866 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys); 2867 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); 2868 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 2869 pmb->un.varUpdateCfg.co) { 2870 struct ulp_bde64 *bde = 2871 (struct ulp_bde64 *)&pmb->un.varWords[4]; 2872 2873 /* bde size cannot be greater than mailbox ext size */ 2874 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { 2875 rc = -ERANGE; 2876 goto job_done; 2877 } 2878 2879 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2880 if (!rxbmp) { 2881 rc = -ENOMEM; 2882 goto job_done; 2883 } 2884 2885 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2886 if (!rxbmp->virt) { 2887 rc = -ENOMEM; 2888 goto job_done; 2889 } 2890 2891 INIT_LIST_HEAD(&rxbmp->list); 2892 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2893 dmp = diag_cmd_data_alloc(phba, rxbpl, 2894 bde->tus.f.bdeSize, 0); 2895 if (!dmp) { 2896 rc = -ENOMEM; 2897 goto job_done; 2898 } 2899 2900 INIT_LIST_HEAD(&dmp->dma.list); 2901 bde->addrHigh = putPaddrHigh(dmp->dma.phys); 2902 bde->addrLow = putPaddrLow(dmp->dma.phys); 2903 2904 /* copy the transmit data found in the 
mailbox 2905 * extension area 2906 */ 2907 from = (uint8_t *)mb; 2908 from += sizeof(MAILBOX_t); 2909 memcpy((uint8_t *)dmp->dma.virt, from, 2910 bde->tus.f.bdeSize); 2911 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 2912 struct lpfc_mbx_nembed_cmd *nembed_sge; 2913 struct mbox_header *header; 2914 uint32_t receive_length; 2915 2916 /* rebuild the command for sli4 using our own buffers 2917 * like we do for biu diags 2918 */ 2919 header = (struct mbox_header *)&pmb->un.varWords[0]; 2920 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 2921 &pmb->un.varWords[0]; 2922 receive_length = nembed_sge->sge[0].length; 2923 2924 /* receive length cannot be greater than mailbox 2925 * extension size 2926 */ 2927 if ((receive_length == 0) || 2928 (receive_length > MAILBOX_EXT_SIZE)) { 2929 rc = -ERANGE; 2930 goto job_done; 2931 } 2932 2933 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2934 if (!rxbmp) { 2935 rc = -ENOMEM; 2936 goto job_done; 2937 } 2938 2939 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2940 if (!rxbmp->virt) { 2941 rc = -ENOMEM; 2942 goto job_done; 2943 } 2944 2945 INIT_LIST_HEAD(&rxbmp->list); 2946 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2947 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 2948 0); 2949 if (!dmp) { 2950 rc = -ENOMEM; 2951 goto job_done; 2952 } 2953 2954 INIT_LIST_HEAD(&dmp->dma.list); 2955 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys); 2956 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys); 2957 /* copy the transmit data found in the mailbox 2958 * extension area 2959 */ 2960 from = (uint8_t *)mb; 2961 from += sizeof(MAILBOX_t); 2962 memcpy((uint8_t *)dmp->dma.virt, from, 2963 header->cfg_mhdr.payload_length); 2964 } 2965 } 2966 2967 dd_data->context_un.mbox.rxbmp = rxbmp; 2968 dd_data->context_un.mbox.dmp = dmp; 2969 2970 /* setup wake call as IOCB callback */ 2971 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; 2972 2973 /* setup context field to pass wait_queue pointer to wake function */ 2974 pmboxq->context1 = dd_data; 2975 dd_data->type = TYPE_MBOX; 2976 dd_data->context_un.mbox.pmboxq = pmboxq; 2977 dd_data->context_un.mbox.mb = mb; 2978 dd_data->context_un.mbox.set_job = job; 2979 dd_data->context_un.mbox.ext = ext; 2980 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 2981 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 2982 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; 2983 job->dd_data = dd_data; 2984 2985 if ((vport->fc_flag & FC_OFFLINE_MODE) || 2986 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 2987 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2988 if (rc != MBX_SUCCESS) { 2989 rc = (rc == MBX_TIMEOUT) ? 
-ETIME : -ENODEV; 2990 goto job_done; 2991 } 2992 2993 /* job finished, copy the data */ 2994 memcpy(mb, pmb, sizeof(*pmb)); 2995 job->reply->reply_payload_rcv_len = 2996 sg_copy_from_buffer(job->reply_payload.sg_list, 2997 job->reply_payload.sg_cnt, 2998 mb, size); 2999 /* not waiting mbox already done */ 3000 rc = 0; 3001 goto job_done; 3002 } 3003 3004 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3005 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) 3006 return 1; /* job started */ 3007 3008 job_done: 3009 /* common exit for error or job completed inline */ 3010 kfree(mb); 3011 if (pmboxq) 3012 mempool_free(pmboxq, phba->mbox_mem_pool); 3013 kfree(ext); 3014 if (dmp) { 3015 dma_free_coherent(&phba->pcidev->dev, 3016 dmp->size, dmp->dma.virt, 3017 dmp->dma.phys); 3018 kfree(dmp); 3019 } 3020 if (rxbmp) { 3021 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 3022 kfree(rxbmp); 3023 } 3024 kfree(dd_data); 3025 3026 return rc; 3027 } 3028 3029 /** 3030 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command 3031 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 3032 **/ 3033 static int 3034 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) 3035 { 3036 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3037 struct lpfc_hba *phba = vport->phba; 3038 int rc = 0; 3039 3040 /* in case no data is transferred */ 3041 job->reply->reply_payload_rcv_len = 0; 3042 if (job->request_len < 3043 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 3044 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3045 "2737 Received MBOX_REQ request below " 3046 "minimum size\n"); 3047 rc = -EINVAL; 3048 goto job_error; 3049 } 3050 3051 if (job->request_payload.payload_len != BSG_MBOX_SIZE) { 3052 rc = -EINVAL; 3053 goto job_error; 3054 } 3055 3056 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) { 3057 rc = -EINVAL; 3058 goto job_error; 3059 } 3060 3061 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 3062 rc = -EAGAIN; 3063 goto job_error; 3064 } 3065 3066 rc = lpfc_bsg_issue_mbox(phba, job, vport); 3067 3068 job_error: 3069 if (rc == 0) { 3070 /* job done */ 3071 job->reply->result = 0; 3072 job->dd_data = NULL; 3073 job->job_done(job); 3074 } else if (rc == 1) 3075 /* job submitted, will complete later*/ 3076 rc = 0; /* return zero, no error */ 3077 else { 3078 /* some error occurred */ 3079 job->reply->result = rc; 3080 job->dd_data = NULL; 3081 } 3082 3083 return rc; 3084 } 3085 3086 /** 3087 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler 3088 * @phba: Pointer to HBA context object. 3089 * @cmdiocbq: Pointer to command iocb. 3090 * @rspiocbq: Pointer to response iocb. 3091 * 3092 * This function is the completion handler for iocbs issued using 3093 * lpfc_menlo_cmd function. This function is called by the 3094 * ring event handler function without any lock held. This function 3095 * can be called from both worker thread context and interrupt 3096 * context. This function also can be called from another thread which 3097 * cleans up the SLI layer objects. 3098 * This function copies the contents of the response iocb to the 3099 * response iocb memory object provided by the caller of 3100 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 3101 * sleeps for the iocb completion. 
3102 **/ 3103 static void 3104 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, 3105 struct lpfc_iocbq *cmdiocbq, 3106 struct lpfc_iocbq *rspiocbq) 3107 { 3108 struct bsg_job_data *dd_data; 3109 struct fc_bsg_job *job; 3110 IOCB_t *rsp; 3111 struct lpfc_dmabuf *bmp; 3112 struct lpfc_bsg_menlo *menlo; 3113 unsigned long flags; 3114 struct menlo_response *menlo_resp; 3115 int rc = 0; 3116 3117 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3118 dd_data = cmdiocbq->context1; 3119 if (!dd_data) { 3120 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3121 return; 3122 } 3123 3124 menlo = &dd_data->context_un.menlo; 3125 job = menlo->set_job; 3126 job->dd_data = NULL; /* so timeout handler does not reply */ 3127 3128 spin_lock(&phba->hbalock); 3129 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3130 if (cmdiocbq->context2 && rspiocbq) 3131 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3132 &rspiocbq->iocb, sizeof(IOCB_t)); 3133 spin_unlock(&phba->hbalock); 3134 3135 bmp = menlo->bmp; 3136 rspiocbq = menlo->rspiocbq; 3137 rsp = &rspiocbq->iocb; 3138 3139 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 3140 job->request_payload.sg_cnt, DMA_TO_DEVICE); 3141 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 3142 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 3143 3144 /* always return the xri, this would be used in the case 3145 * of a menlo download to allow the data to be sent as a continuation 3146 * of the exchange. 3147 */ 3148 menlo_resp = (struct menlo_response *) 3149 job->reply->reply_data.vendor_reply.vendor_rsp; 3150 menlo_resp->xri = rsp->ulpContext; 3151 if (rsp->ulpStatus) { 3152 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 3153 switch (rsp->un.ulpWord[4] & 0xff) { 3154 case IOERR_SEQUENCE_TIMEOUT: 3155 rc = -ETIMEDOUT; 3156 break; 3157 case IOERR_INVALID_RPI: 3158 rc = -EFAULT; 3159 break; 3160 default: 3161 rc = -EACCES; 3162 break; 3163 } 3164 } else 3165 rc = -EACCES; 3166 } else 3167 job->reply->reply_payload_rcv_len = 3168 rsp->un.genreq64.bdl.bdeSize; 3169 3170 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 3171 lpfc_sli_release_iocbq(phba, rspiocbq); 3172 lpfc_sli_release_iocbq(phba, cmdiocbq); 3173 kfree(bmp); 3174 kfree(dd_data); 3175 /* make error code available to userspace */ 3176 job->reply->result = rc; 3177 /* complete the job back to userspace */ 3178 job->job_done(job); 3179 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3180 return; 3181 } 3182 3183 /** 3184 * lpfc_menlo_cmd - send an ioctl for menlo hardware 3185 * @job: fc_bsg_job to handle 3186 * 3187 * This function issues a gen request 64 CR ioctl for all menlo cmd requests, 3188 * all the command completions will return the xri for the command. 3189 * For menlo data requests a gen request 64 CX is used to continue the exchange 3190 * supplied in the menlo request header xri field. 
3191 **/ 3192 static int 3193 lpfc_menlo_cmd(struct fc_bsg_job *job) 3194 { 3195 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3196 struct lpfc_hba *phba = vport->phba; 3197 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 3198 IOCB_t *cmd, *rsp; 3199 int rc = 0; 3200 struct menlo_command *menlo_cmd; 3201 struct menlo_response *menlo_resp; 3202 struct lpfc_dmabuf *bmp = NULL; 3203 int request_nseg; 3204 int reply_nseg; 3205 struct scatterlist *sgel = NULL; 3206 int numbde; 3207 dma_addr_t busaddr; 3208 struct bsg_job_data *dd_data; 3209 struct ulp_bde64 *bpl = NULL; 3210 3211 /* in case no data is returned return just the return code */ 3212 job->reply->reply_payload_rcv_len = 0; 3213 3214 if (job->request_len < 3215 sizeof(struct fc_bsg_request) + 3216 sizeof(struct menlo_command)) { 3217 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3218 "2784 Received MENLO_CMD request below " 3219 "minimum size\n"); 3220 rc = -ERANGE; 3221 goto no_dd_data; 3222 } 3223 3224 if (job->reply_len < 3225 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { 3226 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3227 "2785 Received MENLO_CMD reply below " 3228 "minimum size\n"); 3229 rc = -ERANGE; 3230 goto no_dd_data; 3231 } 3232 3233 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { 3234 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3235 "2786 Adapter does not support menlo " 3236 "commands\n"); 3237 rc = -EPERM; 3238 goto no_dd_data; 3239 } 3240 3241 menlo_cmd = (struct menlo_command *) 3242 job->request->rqst_data.h_vendor.vendor_cmd; 3243 3244 menlo_resp = (struct menlo_response *) 3245 job->reply->reply_data.vendor_reply.vendor_rsp; 3246 3247 /* allocate our bsg tracking structure */ 3248 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 3249 if (!dd_data) { 3250 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3251 "2787 Failed allocation of dd_data\n"); 3252 rc = -ENOMEM; 3253 goto no_dd_data; 3254 } 3255 3256 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3257 if (!bmp) { 3258 rc = -ENOMEM; 3259 goto free_dd; 3260 } 3261 3262 cmdiocbq = lpfc_sli_get_iocbq(phba); 3263 if (!cmdiocbq) { 3264 rc = -ENOMEM; 3265 goto free_bmp; 3266 } 3267 3268 rspiocbq = lpfc_sli_get_iocbq(phba); 3269 if (!rspiocbq) { 3270 rc = -ENOMEM; 3271 goto free_cmdiocbq; 3272 } 3273 3274 rsp = &rspiocbq->iocb; 3275 3276 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 3277 if (!bmp->virt) { 3278 rc = -ENOMEM; 3279 goto free_rspiocbq; 3280 } 3281 3282 INIT_LIST_HEAD(&bmp->list); 3283 bpl = (struct ulp_bde64 *) bmp->virt; 3284 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 3285 job->request_payload.sg_cnt, DMA_TO_DEVICE); 3286 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 3287 busaddr = sg_dma_address(sgel); 3288 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3289 bpl->tus.f.bdeSize = sg_dma_len(sgel); 3290 bpl->tus.w = cpu_to_le32(bpl->tus.w); 3291 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 3292 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 3293 bpl++; 3294 } 3295 3296 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 3297 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 3298 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 3299 busaddr = sg_dma_address(sgel); 3300 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 3301 bpl->tus.f.bdeSize = sg_dma_len(sgel); 3302 bpl->tus.w = cpu_to_le32(bpl->tus.w); 3303 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 3304 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 3305 
bpl++; 3306 } 3307 3308 cmd = &cmdiocbq->iocb; 3309 cmd->un.genreq64.bdl.ulpIoTag32 = 0; 3310 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 3311 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 3312 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 3313 cmd->un.genreq64.bdl.bdeSize = 3314 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 3315 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 3316 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 3317 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; 3318 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ 3319 cmd->ulpBdeCount = 1; 3320 cmd->ulpClass = CLASS3; 3321 cmd->ulpOwner = OWN_CHIP; 3322 cmd->ulpLe = 1; /* Limited Edition */ 3323 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3324 cmdiocbq->vport = phba->pport; 3325 /* We want the firmware to timeout before we do */ 3326 cmd->ulpTimeout = MENLO_TIMEOUT - 5; 3327 cmdiocbq->context3 = bmp; 3328 cmdiocbq->context2 = rspiocbq; 3329 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 3330 cmdiocbq->context1 = dd_data; 3331 cmdiocbq->context2 = rspiocbq; 3332 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 3333 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 3334 cmd->ulpPU = MENLO_PU; /* 3 */ 3335 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ 3336 cmd->ulpContext = MENLO_CONTEXT; /* 0 */ 3337 } else { 3338 cmd->ulpCommand = CMD_GEN_REQUEST64_CX; 3339 cmd->ulpPU = 1; 3340 cmd->un.ulpWord[4] = 0; 3341 cmd->ulpContext = menlo_cmd->xri; 3342 } 3343 3344 dd_data->type = TYPE_MENLO; 3345 dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 3346 dd_data->context_un.menlo.rspiocbq = rspiocbq; 3347 dd_data->context_un.menlo.set_job = job; 3348 dd_data->context_un.menlo.bmp = bmp; 3349 3350 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 3351 MENLO_TIMEOUT - 5); 3352 if (rc == IOCB_SUCCESS) 3353 return 0; /* done for now */ 3354 3355 /* iocb failed so cleanup */ 3356 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 3357 job->request_payload.sg_cnt, DMA_TO_DEVICE); 3358 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 3359 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 3360 3361 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 3362 3363 free_rspiocbq: 3364 lpfc_sli_release_iocbq(phba, rspiocbq); 3365 free_cmdiocbq: 3366 lpfc_sli_release_iocbq(phba, cmdiocbq); 3367 free_bmp: 3368 kfree(bmp); 3369 free_dd: 3370 kfree(dd_data); 3371 no_dd_data: 3372 /* make error code available to userspace */ 3373 job->reply->result = rc; 3374 job->dd_data = NULL; 3375 return rc; 3376 } 3377 /** 3378 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 3379 * @job: fc_bsg_job to handle 3380 **/ 3381 static int 3382 lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 3383 { 3384 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 3385 int rc; 3386 3387 switch (command) { 3388 case LPFC_BSG_VENDOR_SET_CT_EVENT: 3389 rc = lpfc_bsg_hba_set_event(job); 3390 break; 3391 case LPFC_BSG_VENDOR_GET_CT_EVENT: 3392 rc = lpfc_bsg_hba_get_event(job); 3393 break; 3394 case LPFC_BSG_VENDOR_SEND_MGMT_RESP: 3395 rc = lpfc_bsg_send_mgmt_rsp(job); 3396 break; 3397 case LPFC_BSG_VENDOR_DIAG_MODE: 3398 rc = lpfc_bsg_diag_mode(job); 3399 break; 3400 case LPFC_BSG_VENDOR_DIAG_TEST: 3401 rc = lpfc_bsg_diag_test(job); 3402 break; 3403 case LPFC_BSG_VENDOR_GET_MGMT_REV: 3404 rc = lpfc_bsg_get_dfc_rev(job); 3405 break; 3406 case LPFC_BSG_VENDOR_MBOX: 3407 rc = lpfc_bsg_mbox_cmd(job); 3408 break; 3409 case LPFC_BSG_VENDOR_MENLO_CMD: 3410 case LPFC_BSG_VENDOR_MENLO_DATA: 3411 rc = lpfc_menlo_cmd(job); 3412 break; 3413 
default: 3414 rc = -EINVAL; 3415 job->reply->reply_payload_rcv_len = 0; 3416 /* make error code available to userspace */ 3417 job->reply->result = rc; 3418 break; 3419 } 3420 3421 return rc; 3422 } 3423 3424 /** 3425 * lpfc_bsg_request - handle a bsg request from the FC transport 3426 * @job: fc_bsg_job to handle 3427 **/ 3428 int 3429 lpfc_bsg_request(struct fc_bsg_job *job) 3430 { 3431 uint32_t msgcode; 3432 int rc; 3433 3434 msgcode = job->request->msgcode; 3435 switch (msgcode) { 3436 case FC_BSG_HST_VENDOR: 3437 rc = lpfc_bsg_hst_vendor(job); 3438 break; 3439 case FC_BSG_RPT_ELS: 3440 rc = lpfc_bsg_rport_els(job); 3441 break; 3442 case FC_BSG_RPT_CT: 3443 rc = lpfc_bsg_send_mgmt_cmd(job); 3444 break; 3445 default: 3446 rc = -EINVAL; 3447 job->reply->reply_payload_rcv_len = 0; 3448 /* make error code available to userspace */ 3449 job->reply->result = rc; 3450 break; 3451 } 3452 3453 return rc; 3454 } 3455 3456 /** 3457 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport 3458 * @job: fc_bsg_job that has timed out 3459 * 3460 * This function just aborts the job's IOCB. The aborted IOCB will return to 3461 * the waiting function which will handle passing the error back to userspace 3462 **/ 3463 int 3464 lpfc_bsg_timeout(struct fc_bsg_job *job) 3465 { 3466 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3467 struct lpfc_hba *phba = vport->phba; 3468 struct lpfc_iocbq *cmdiocb; 3469 struct lpfc_bsg_event *evt; 3470 struct lpfc_bsg_iocb *iocb; 3471 struct lpfc_bsg_mbox *mbox; 3472 struct lpfc_bsg_menlo *menlo; 3473 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3474 struct bsg_job_data *dd_data; 3475 unsigned long flags; 3476 3477 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3478 dd_data = (struct bsg_job_data *)job->dd_data; 3479 /* timeout and completion crossed paths if no dd_data */ 3480 if (!dd_data) { 3481 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3482 return 0; 3483 } 3484 3485 switch (dd_data->type) { 3486 case TYPE_IOCB: 3487 iocb = &dd_data->context_un.iocb; 3488 cmdiocb = iocb->cmdiocbq; 3489 /* hint to completion handler that the job timed out */ 3490 job->reply->result = -EAGAIN; 3491 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3492 /* this will call our completion handler */ 3493 spin_lock_irq(&phba->hbalock); 3494 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 3495 spin_unlock_irq(&phba->hbalock); 3496 break; 3497 case TYPE_EVT: 3498 evt = dd_data->context_un.evt; 3499 /* this event has no job anymore */ 3500 evt->set_job = NULL; 3501 job->dd_data = NULL; 3502 job->reply->reply_payload_rcv_len = 0; 3503 /* Return -EAGAIN which is our way of signallying the 3504 * app to retry. 
3505 */ 3506 job->reply->result = -EAGAIN; 3507 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3508 job->job_done(job); 3509 break; 3510 case TYPE_MBOX: 3511 mbox = &dd_data->context_un.mbox; 3512 /* this mbox has no job anymore */ 3513 mbox->set_job = NULL; 3514 job->dd_data = NULL; 3515 job->reply->reply_payload_rcv_len = 0; 3516 job->reply->result = -EAGAIN; 3517 /* the mbox completion handler can now be run */ 3518 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3519 job->job_done(job); 3520 break; 3521 case TYPE_MENLO: 3522 menlo = &dd_data->context_un.menlo; 3523 cmdiocb = menlo->cmdiocbq; 3524 /* hint to completion handler that the job timed out */ 3525 job->reply->result = -EAGAIN; 3526 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3527 /* this will call our completion handler */ 3528 spin_lock_irq(&phba->hbalock); 3529 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 3530 spin_unlock_irq(&phba->hbalock); 3531 break; 3532 default: 3533 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3534 break; 3535 } 3536 3537 /* scsi transport fc fc_bsg_job_timeout expects a zero return code, 3538 * otherwise an error message will be displayed on the console 3539 * so always return success (zero) 3540 */ 3541 return 0; 3542 } 3543
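For illustration only (not part of the driver source): a minimal, self-contained sketch of the loopback buffer-sizing rules that lpfc_bsg_diag_test and diag_cmd_data_alloc apply above, namely adding the ELX CT header to the user payload, capping the kernel copy buffer at 64k, and carving the DMA area into 4k BDEs. ELX_LB_HDR_SZ below is a stand-in for ELX_LOOPBACK_HEADER_SZ, whose real value depends on struct lpfc_sli_ct_request.

#include <stdio.h>
#include <stdint.h>

#define BUF_SZ_4K       4096
#define ELX_LB_HDR_SZ   16              /* stand-in for ELX_LOOPBACK_HEADER_SZ */

/* Mirror of the size checks and 4k chunking performed by the driver above. */
static int loopback_sizing(uint32_t size)
{
        uint32_t full_size, total_mem, num_bde;

        /* lpfc_bsg_diag_test rejects empty or oversized payloads */
        if (size == 0 || size > 80 * BUF_SZ_4K)
                return -1;                      /* -ERANGE in the driver */

        full_size = size + ELX_LB_HDR_SZ;       /* payload plus ELX CT header */

        /* kernel copy buffer: at least 4k, capped at 64k and reused */
        if (full_size >= BUF_SZ_4K)
                total_mem = (size <= 64 * 1024) ? full_size : 64 * 1024;
        else
                total_mem = BUF_SZ_4K;

        /* diag_cmd_data_alloc builds one BDE per 4k chunk of full_size */
        num_bde = (full_size + BUF_SZ_4K - 1) / BUF_SZ_4K;

        printf("size=%u full=%u copybuf=%u bdes=%u\n",
               (unsigned)size, (unsigned)full_size,
               (unsigned)total_mem, (unsigned)num_bde);
        return 0;
}

int main(void)
{
        loopback_sizing(512);           /* small payload: 4k copy buffer, 1 BDE */
        loopback_sizing(100 * 1024);    /* large payload: 64k copy buffer, 26 BDEs */
        return 0;
}

Running the sketch for a 100k payload, for example, shows the 64k copy buffer being reused while 26 BDEs are built to describe the transmit data.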