/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *rxbmp; /* for BIU diags */
	struct lpfc_dmabufext *dmp; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */

	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	bmp = iocb->bmp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (iocb_stat == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;


	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}

	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	lpfc_nlp_put(ndlp);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (rc == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
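 *
 * Walks the chained list of lpfc_dmabufext entries, frees each coherent DMA
 * buffer and its descriptor, and always returns 0.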
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the SLI ring the event arrived on.
 * @piocbq: Pointer to the iocb carrying the unsolicited CT command.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						diag_cmd_data_free(phba,
						(struct lpfc_dmabufext *)
							dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].flags &
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);

		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
		return 0;
	return 1;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
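 * It also releases the buffer pointer list and the command iocb, drops the
 * ndlp reference, frees the job tracking structure and completes the bsg
 * job back to userspace.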
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	job = dd_data->context_un.iocb.set_job;
	bmp = dd_data->context_un.iocb.bmp;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
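 *
 * Returns 0 when the CT response iocb is issued successfully, otherwise a
 * non-zero error code is returned.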
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *bmp, int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				 "2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] = ndlp->nlp_rpi;
		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context3 = bmp;

	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = NULL;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.rspiocbq = NULL;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* TBD need to handle a timeout */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link is
 * placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
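 * Returns 0 and completes the job when loopback mode is entered, otherwise
 * a negative error code is returned to the caller.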
 */
static int
lpfc_bsg_diag_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_set *loopback_mode;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	uint32_t link_flags;
	uint32_t timeout;
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto job_error;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}

		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (pring->txcmplq_cnt) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;

		msleep(10);
	}

	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}

			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			phba->link_flag |= LS_LOOPBACK_MODE;
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
			      (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
	mbox->context1 = NULL;
	mbox->context2 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -ENODEV;
	}

	*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	lpfc_unreg_login(phba, 0, rpi, mbox);
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited CT command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so that the unsolicited response handler is able to process
 * the CT command sent on the same port.
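 *
 * Returns 0 on success; -ENOMEM, -EIO, -EINTR or -ETIMEDOUT is returned if
 * the exchange ids could not be obtained.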
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
					sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
					sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
		dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;


	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				rspiocbq,
				(phba->fc_ratov * 2)
				+ LPFC_DRVR_TIMEOUT);
	if (iocb_stat) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}

/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to copy user data into the allocated buffer
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed the user data pointed to with indataptr is copied into the kernel
 * memory. The chained list of page size buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		   struct ulp_bde64 *bpl, uint32_t size,
		   int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);

		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	mlist->flag = i;
	return mlist;
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}

/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
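 *
 * Returns 0 when all receive buffers have been posted, otherwise -ENOMEM
 * or -EIO.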
1949 **/ 1950 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 1951 size_t len) 1952 { 1953 struct lpfc_sli *psli = &phba->sli; 1954 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 1955 struct lpfc_iocbq *cmdiocbq; 1956 IOCB_t *cmd = NULL; 1957 struct list_head head, *curr, *next; 1958 struct lpfc_dmabuf *rxbmp; 1959 struct lpfc_dmabuf *dmp; 1960 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 1961 struct ulp_bde64 *rxbpl = NULL; 1962 uint32_t num_bde; 1963 struct lpfc_dmabufext *rxbuffer = NULL; 1964 int ret_val = 0; 1965 int iocb_stat; 1966 int i = 0; 1967 1968 cmdiocbq = lpfc_sli_get_iocbq(phba); 1969 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1970 if (rxbmp != NULL) { 1971 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 1972 if (rxbmp->virt) { 1973 INIT_LIST_HEAD(&rxbmp->list); 1974 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 1975 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 1976 } 1977 } 1978 1979 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 1980 ret_val = -ENOMEM; 1981 goto err_post_rxbufs_exit; 1982 } 1983 1984 /* Queue buffers for the receive exchange */ 1985 num_bde = (uint32_t)rxbuffer->flag; 1986 dmp = &rxbuffer->dma; 1987 1988 cmd = &cmdiocbq->iocb; 1989 i = 0; 1990 1991 INIT_LIST_HEAD(&head); 1992 list_add_tail(&head, &dmp->list); 1993 list_for_each_safe(curr, next, &head) { 1994 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 1995 list_del(curr); 1996 1997 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 1998 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 1999 cmd->un.quexri64cx.buff.bde.addrHigh = 2000 putPaddrHigh(mp[i]->phys); 2001 cmd->un.quexri64cx.buff.bde.addrLow = 2002 putPaddrLow(mp[i]->phys); 2003 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 2004 ((struct lpfc_dmabufext *)mp[i])->size; 2005 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 2006 cmd->ulpCommand = CMD_QUE_XRI64_CX; 2007 cmd->ulpPU = 0; 2008 cmd->ulpLe = 1; 2009 cmd->ulpBdeCount = 1; 2010 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 2011 2012 } else { 2013 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 2014 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2015 cmd->un.cont64[i].tus.f.bdeSize = 2016 ((struct lpfc_dmabufext *)mp[i])->size; 2017 cmd->ulpBdeCount = ++i; 2018 2019 if ((--num_bde > 0) && (i < 2)) 2020 continue; 2021 2022 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2023 cmd->ulpLe = 1; 2024 } 2025 2026 cmd->ulpClass = CLASS3; 2027 cmd->ulpContext = rxxri; 2028 2029 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2030 0); 2031 if (iocb_stat == IOCB_ERROR) { 2032 diag_cmd_data_free(phba, 2033 (struct lpfc_dmabufext *)mp[0]); 2034 if (mp[1]) 2035 diag_cmd_data_free(phba, 2036 (struct lpfc_dmabufext *)mp[1]); 2037 dmp = list_entry(next, struct lpfc_dmabuf, list); 2038 ret_val = -EIO; 2039 goto err_post_rxbufs_exit; 2040 } 2041 2042 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2043 if (mp[1]) { 2044 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2045 mp[1] = NULL; 2046 } 2047 2048 /* The iocb was freed by lpfc_sli_issue_iocb */ 2049 cmdiocbq = lpfc_sli_get_iocbq(phba); 2050 if (!cmdiocbq) { 2051 dmp = list_entry(next, struct lpfc_dmabuf, list); 2052 ret_val = -EIO; 2053 goto err_post_rxbufs_exit; 2054 } 2055 2056 cmd = &cmdiocbq->iocb; 2057 i = 0; 2058 } 2059 list_del(&head); 2060 2061 err_post_rxbufs_exit: 2062 2063 if (rxbmp) { 2064 if (rxbmp->virt) 2065 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 2066 kfree(rxbmp); 2067 } 2068 2069 if (cmdiocbq) 2070 lpfc_sli_release_iocbq(phba, 
cmdiocbq);
2071 	return ret_val;
2072 }
2073 
2074 /**
2075  * lpfc_bsg_diag_test - with a port in loopback issues a CT command to itself
2076  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2077  *
2078  * This function receives a user data buffer to be transmitted and received on
2079  * the same port. The link must be up and in loopback mode before this
2080  * function is called.
2081  * 1. A kernel buffer is allocated to copy the user data into.
2082  * 2. The port registers with "itself".
2083  * 3. The transmit and receive exchange ids are obtained.
2084  * 4. The receive exchange id is posted.
2085  * 5. A new ELX loopback event is created.
2086  * 6. The command and response iocbs are allocated.
2087  * 7. The cmd iocb FsType is set to ELX loopback and CmdRsp to loopback data.
2088  *
2089  * This function may be called any number of times while the port is in
2090  * loopback, so it is the application's responsibility to issue a reset to
2091  * take the port out of loopback mode.
2092  **/
2093 static int
2094 lpfc_bsg_diag_test(struct fc_bsg_job *job)
2095 {
2096 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2097 	struct lpfc_hba *phba = vport->phba;
2098 	struct diag_mode_test *diag_mode;
2099 	struct lpfc_bsg_event *evt;
2100 	struct event_data *evdat;
2101 	struct lpfc_sli *psli = &phba->sli;
2102 	uint32_t size;
2103 	uint32_t full_size;
2104 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2105 	uint16_t rpi;
2106 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2107 	IOCB_t *cmd, *rsp;
2108 	struct lpfc_sli_ct_request *ctreq;
2109 	struct lpfc_dmabuf *txbmp;
2110 	struct ulp_bde64 *txbpl = NULL;
2111 	struct lpfc_dmabufext *txbuffer = NULL;
2112 	struct list_head head;
2113 	struct lpfc_dmabuf *curr;
2114 	uint16_t txxri, rxxri;
2115 	uint32_t num_bde;
2116 	uint8_t *ptr = NULL, *rx_databuf = NULL;
2117 	int rc = 0;
2118 	int time_left;
2119 	int iocb_stat;
2120 	unsigned long flags;
2121 	void *dataout = NULL;
2122 	uint32_t total_mem;
2123 
2124 	/* in case no data is returned return just the return code */
2125 	job->reply->reply_payload_rcv_len = 0;
2126 
2127 	if (job->request_len <
2128 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2129 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2130 				"2739 Received DIAG TEST request below minimum "
2131 				"size\n");
2132 		rc = -EINVAL;
2133 		goto loopback_test_exit;
2134 	}
2135 
2136 	if (job->request_payload.payload_len !=
2137 	    job->reply_payload.payload_len) {
2138 		rc = -EINVAL;
2139 		goto loopback_test_exit;
2140 	}
2141 
2142 	diag_mode = (struct diag_mode_test *)
2143 		job->request->rqst_data.h_vendor.vendor_cmd;
2144 
2145 	if ((phba->link_state == LPFC_HBA_ERROR) ||
2146 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2147 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2148 		rc = -EACCES;
2149 		goto loopback_test_exit;
2150 	}
2151 
2152 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2153 		rc = -EACCES;
2154 		goto loopback_test_exit;
2155 	}
2156 
2157 	size = job->request_payload.payload_len;
2158 	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2159 
2160 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2161 		rc = -ERANGE;
2162 		goto loopback_test_exit;
2163 	}
2164 
2165 	if (size >= BUF_SZ_4K) {
2166 		/*
2167 		 * Allocate memory for ioctl data. If the buffer is bigger than 64k,
2168 		 * then we allocate 64k and re-use that buffer over and over to
2169 		 * xfer the whole block. This is because the Linux kernel has a
2170 		 * problem allocating more than 120k of kernel space memory. Saw
2171 		 * problem with GET_FCPTARGETMAPPING...
2172 */ 2173 if (size <= (64 * 1024)) 2174 total_mem = size; 2175 else 2176 total_mem = 64 * 1024; 2177 } else 2178 /* Allocate memory for ioctl data */ 2179 total_mem = BUF_SZ_4K; 2180 2181 dataout = kmalloc(total_mem, GFP_KERNEL); 2182 if (dataout == NULL) { 2183 rc = -ENOMEM; 2184 goto loopback_test_exit; 2185 } 2186 2187 ptr = dataout; 2188 ptr += ELX_LOOPBACK_HEADER_SZ; 2189 sg_copy_to_buffer(job->request_payload.sg_list, 2190 job->request_payload.sg_cnt, 2191 ptr, size); 2192 2193 rc = lpfcdiag_loop_self_reg(phba, &rpi); 2194 if (rc) 2195 goto loopback_test_exit; 2196 2197 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 2198 if (rc) { 2199 lpfcdiag_loop_self_unreg(phba, rpi); 2200 goto loopback_test_exit; 2201 } 2202 2203 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 2204 if (rc) { 2205 lpfcdiag_loop_self_unreg(phba, rpi); 2206 goto loopback_test_exit; 2207 } 2208 2209 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 2210 SLI_CT_ELX_LOOPBACK); 2211 if (!evt) { 2212 lpfcdiag_loop_self_unreg(phba, rpi); 2213 rc = -ENOMEM; 2214 goto loopback_test_exit; 2215 } 2216 2217 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2218 list_add(&evt->node, &phba->ct_ev_waiters); 2219 lpfc_bsg_event_ref(evt); 2220 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2221 2222 cmdiocbq = lpfc_sli_get_iocbq(phba); 2223 rspiocbq = lpfc_sli_get_iocbq(phba); 2224 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2225 2226 if (txbmp) { 2227 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 2228 if (txbmp->virt) { 2229 INIT_LIST_HEAD(&txbmp->list); 2230 txbpl = (struct ulp_bde64 *) txbmp->virt; 2231 txbuffer = diag_cmd_data_alloc(phba, 2232 txbpl, full_size, 0); 2233 } 2234 } 2235 2236 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer || 2237 !txbmp->virt) { 2238 rc = -ENOMEM; 2239 goto err_loopback_test_exit; 2240 } 2241 2242 cmd = &cmdiocbq->iocb; 2243 rsp = &rspiocbq->iocb; 2244 2245 INIT_LIST_HEAD(&head); 2246 list_add_tail(&head, &txbuffer->dma.list); 2247 list_for_each_entry(curr, &head, list) { 2248 segment_len = ((struct lpfc_dmabufext *)curr)->size; 2249 if (current_offset == 0) { 2250 ctreq = curr->virt; 2251 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 2252 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 2253 ctreq->RevisionId.bits.InId = 0; 2254 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 2255 ctreq->FsSubType = 0; 2256 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; 2257 ctreq->CommandResponse.bits.Size = size; 2258 segment_offset = ELX_LOOPBACK_HEADER_SZ; 2259 } else 2260 segment_offset = 0; 2261 2262 BUG_ON(segment_offset >= segment_len); 2263 memcpy(curr->virt + segment_offset, 2264 ptr + current_offset, 2265 segment_len - segment_offset); 2266 2267 current_offset += segment_len - segment_offset; 2268 BUG_ON(current_offset > size); 2269 } 2270 list_del(&head); 2271 2272 /* Build the XMIT_SEQUENCE iocb */ 2273 2274 num_bde = (uint32_t)txbuffer->flag; 2275 2276 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); 2277 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); 2278 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 2279 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64)); 2280 2281 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 2282 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 2283 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 2284 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 2285 2286 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 2287 cmd->ulpBdeCount = 1; 2288 cmd->ulpLe = 1; 2289 cmd->ulpClass = CLASS3; 2290 cmd->ulpContext = txxri; 2291 2292 
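	/* The sequence built above is transmitted on the ELS ring using the
	 * transmit exchange obtained earlier; with the link in loopback it is
	 * received on the posted receive exchange and surfaced through the
	 * SLI_CT_ELX_LOOPBACK event allocated above.
	 */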
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 2293 cmdiocbq->vport = phba->pport; 2294 2295 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 2296 rspiocbq, (phba->fc_ratov * 2) + 2297 LPFC_DRVR_TIMEOUT); 2298 2299 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { 2300 rc = -EIO; 2301 goto err_loopback_test_exit; 2302 } 2303 2304 evt->waiting = 1; 2305 time_left = wait_event_interruptible_timeout( 2306 evt->wq, !list_empty(&evt->events_to_see), 2307 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 2308 evt->waiting = 0; 2309 if (list_empty(&evt->events_to_see)) 2310 rc = (time_left) ? -EINTR : -ETIMEDOUT; 2311 else { 2312 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2313 list_move(evt->events_to_see.prev, &evt->events_to_get); 2314 evdat = list_entry(evt->events_to_get.prev, 2315 typeof(*evdat), node); 2316 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2317 rx_databuf = evdat->data; 2318 if (evdat->len != full_size) { 2319 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2320 "1603 Loopback test did not receive expected " 2321 "data length. actual length 0x%x expected " 2322 "length 0x%x\n", 2323 evdat->len, full_size); 2324 rc = -EIO; 2325 } else if (rx_databuf == NULL) 2326 rc = -EIO; 2327 else { 2328 rc = IOCB_SUCCESS; 2329 /* skip over elx loopback header */ 2330 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 2331 job->reply->reply_payload_rcv_len = 2332 sg_copy_from_buffer(job->reply_payload.sg_list, 2333 job->reply_payload.sg_cnt, 2334 rx_databuf, size); 2335 job->reply->reply_payload_rcv_len = size; 2336 } 2337 } 2338 2339 err_loopback_test_exit: 2340 lpfcdiag_loop_self_unreg(phba, rpi); 2341 2342 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2343 lpfc_bsg_event_unref(evt); /* release ref */ 2344 lpfc_bsg_event_unref(evt); /* delete */ 2345 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2346 2347 if (cmdiocbq != NULL) 2348 lpfc_sli_release_iocbq(phba, cmdiocbq); 2349 2350 if (rspiocbq != NULL) 2351 lpfc_sli_release_iocbq(phba, rspiocbq); 2352 2353 if (txbmp != NULL) { 2354 if (txbpl != NULL) { 2355 if (txbuffer != NULL) 2356 diag_cmd_data_free(phba, txbuffer); 2357 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 2358 } 2359 kfree(txbmp); 2360 } 2361 2362 loopback_test_exit: 2363 kfree(dataout); 2364 /* make error code available to userspace */ 2365 job->reply->result = rc; 2366 job->dd_data = NULL; 2367 /* complete the job back to userspace if no error */ 2368 if (rc == 0) 2369 job->job_done(job); 2370 return rc; 2371 } 2372 2373 /** 2374 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 2375 * @job: GET_DFC_REV fc_bsg_job 2376 **/ 2377 static int 2378 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) 2379 { 2380 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 2381 struct lpfc_hba *phba = vport->phba; 2382 struct get_mgmt_rev *event_req; 2383 struct get_mgmt_rev_reply *event_reply; 2384 int rc = 0; 2385 2386 if (job->request_len < 2387 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 2388 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2389 "2740 Received GET_DFC_REV request below " 2390 "minimum size\n"); 2391 rc = -EINVAL; 2392 goto job_error; 2393 } 2394 2395 event_req = (struct get_mgmt_rev *) 2396 job->request->rqst_data.h_vendor.vendor_cmd; 2397 2398 event_reply = (struct get_mgmt_rev_reply *) 2399 job->reply->reply_data.vendor_reply.vendor_rsp; 2400 2401 if (job->reply_len < 2402 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { 2403 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2404 
"2741 Received GET_DFC_REV reply below " 2405 "minimum size\n"); 2406 rc = -EINVAL; 2407 goto job_error; 2408 } 2409 2410 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 2411 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 2412 job_error: 2413 job->reply->result = rc; 2414 if (rc == 0) 2415 job->job_done(job); 2416 return rc; 2417 } 2418 2419 /** 2420 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler 2421 * @phba: Pointer to HBA context object. 2422 * @pmboxq: Pointer to mailbox command. 2423 * 2424 * This is completion handler function for mailbox commands issued from 2425 * lpfc_bsg_issue_mbox function. This function is called by the 2426 * mailbox event handler function with no lock held. This function 2427 * will wake up thread waiting on the wait queue pointed by context1 2428 * of the mailbox. 2429 **/ 2430 void 2431 lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2432 { 2433 struct bsg_job_data *dd_data; 2434 struct fc_bsg_job *job; 2435 uint32_t size; 2436 unsigned long flags; 2437 uint8_t *to; 2438 uint8_t *from; 2439 2440 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2441 dd_data = pmboxq->context1; 2442 /* job already timed out? */ 2443 if (!dd_data) { 2444 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2445 return; 2446 } 2447 2448 /* build the outgoing buffer to do an sg copy 2449 * the format is the response mailbox followed by any extended 2450 * mailbox data 2451 */ 2452 from = (uint8_t *)&pmboxq->u.mb; 2453 to = (uint8_t *)dd_data->context_un.mbox.mb; 2454 memcpy(to, from, sizeof(MAILBOX_t)); 2455 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) { 2456 /* copy the extended data if any, count is in words */ 2457 if (dd_data->context_un.mbox.outExtWLen) { 2458 from = (uint8_t *)dd_data->context_un.mbox.ext; 2459 to += sizeof(MAILBOX_t); 2460 size = dd_data->context_un.mbox.outExtWLen * 2461 sizeof(uint32_t); 2462 memcpy(to, from, size); 2463 } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) { 2464 from = (uint8_t *)dd_data->context_un.mbox. 2465 dmp->dma.virt; 2466 to += sizeof(MAILBOX_t); 2467 size = dd_data->context_un.mbox.dmp->size; 2468 memcpy(to, from, size); 2469 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 2470 (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) { 2471 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. 2472 virt; 2473 to += sizeof(MAILBOX_t); 2474 size = pmboxq->u.mb.un.varWords[5]; 2475 memcpy(to, from, size); 2476 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 2477 (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) { 2478 struct lpfc_mbx_nembed_cmd *nembed_sge = 2479 (struct lpfc_mbx_nembed_cmd *) 2480 &pmboxq->u.mb.un.varWords[0]; 2481 2482 from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. 2483 virt; 2484 to += sizeof(MAILBOX_t); 2485 size = nembed_sge->sge[0].length; 2486 memcpy(to, from, size); 2487 } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) { 2488 from = (uint8_t *)dd_data->context_un. 
2489 				mbox.dmp->dma.virt;
2490 			to += sizeof(MAILBOX_t);
2491 			size = dd_data->context_un.mbox.dmp->size;
2492 			memcpy(to, from, size);
2493 		}
2494 	}
2495 
2496 	from = (uint8_t *)dd_data->context_un.mbox.mb;
2497 	job = dd_data->context_un.mbox.set_job;
2498 	size = job->reply_payload.payload_len;
2499 	job->reply->reply_payload_rcv_len =
2500 		sg_copy_from_buffer(job->reply_payload.sg_list,
2501 				    job->reply_payload.sg_cnt,
2502 				    from, size);
2503 	job->reply->result = 0;
2504 
2505 	dd_data->context_un.mbox.set_job = NULL;
2506 	job->dd_data = NULL;
2507 	job->job_done(job);
2508 	/* need to hold the lock until we call job done to hold off
2509 	 * the timeout handler returning to the midlayer while
2510 	 * we are still processing the job
2511 	 */
2512 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2513 
2514 	kfree(dd_data->context_un.mbox.mb);
2515 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
2516 	kfree(dd_data->context_un.mbox.ext);
2517 	if (dd_data->context_un.mbox.dmp) {
2518 		dma_free_coherent(&phba->pcidev->dev,
2519 				  dd_data->context_un.mbox.dmp->size,
2520 				  dd_data->context_un.mbox.dmp->dma.virt,
2521 				  dd_data->context_un.mbox.dmp->dma.phys);
2522 		kfree(dd_data->context_un.mbox.dmp);
2523 	}
2524 	if (dd_data->context_un.mbox.rxbmp) {
2525 		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
2526 			       dd_data->context_un.mbox.rxbmp->phys);
2527 		kfree(dd_data->context_un.mbox.rxbmp);
2528 	}
2529 	kfree(dd_data);
2530 	return;
2531 }
2532 
2533 /**
2534  * lpfc_bsg_check_cmd_access - test for a supported mailbox command
2535  * @phba: Pointer to HBA context object.
2536  * @mb: Pointer to a mailbox object.
2537  * @vport: Pointer to a vport object.
2538  *
2539  * Some commands require the port to be offline; some may not be called from
2540  * the application.
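 *
 * Returns 0 when the command may be issued on behalf of the application,
 * or a negative error value that is reported back through the bsg job.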
2541  **/
2542 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
2543 	MAILBOX_t *mb, struct lpfc_vport *vport)
2544 {
2545 	/* return negative error values for bsg job */
2546 	switch (mb->mbxCommand) {
2547 	/* Offline only */
2548 	case MBX_INIT_LINK:
2549 	case MBX_DOWN_LINK:
2550 	case MBX_CONFIG_LINK:
2551 	case MBX_CONFIG_RING:
2552 	case MBX_RESET_RING:
2553 	case MBX_UNREG_LOGIN:
2554 	case MBX_CLEAR_LA:
2555 	case MBX_DUMP_CONTEXT:
2556 	case MBX_RUN_DIAGS:
2557 	case MBX_RESTART:
2558 	case MBX_SET_MASK:
2559 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
2560 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2561 				"2743 Command 0x%x is illegal in on-line "
2562 				"state\n",
2563 				mb->mbxCommand);
2564 			return -EPERM;
2565 		}
2566 	case MBX_WRITE_NV:
2567 	case MBX_WRITE_VPARMS:
2568 	case MBX_LOAD_SM:
2569 	case MBX_READ_NV:
2570 	case MBX_READ_CONFIG:
2571 	case MBX_READ_RCONFIG:
2572 	case MBX_READ_STATUS:
2573 	case MBX_READ_XRI:
2574 	case MBX_READ_REV:
2575 	case MBX_READ_LNK_STAT:
2576 	case MBX_DUMP_MEMORY:
2577 	case MBX_DOWN_LOAD:
2578 	case MBX_UPDATE_CFG:
2579 	case MBX_KILL_BOARD:
2580 	case MBX_LOAD_AREA:
2581 	case MBX_LOAD_EXP_ROM:
2582 	case MBX_BEACON:
2583 	case MBX_DEL_LD_ENTRY:
2584 	case MBX_SET_DEBUG:
2585 	case MBX_WRITE_WWN:
2586 	case MBX_SLI4_CONFIG:
2587 	case MBX_READ_EVENT_LOG:
2588 	case MBX_READ_EVENT_LOG_STATUS:
2589 	case MBX_WRITE_EVENT_LOG:
2590 	case MBX_PORT_CAPABILITIES:
2591 	case MBX_PORT_IOV_CONTROL:
2592 	case MBX_RUN_BIU_DIAG64:
2593 		break;
2594 	case MBX_SET_VARIABLE:
2595 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2596 			"1226 mbox: set_variable 0x%x, 0x%x\n",
2597 			mb->un.varWords[0],
2598 			mb->un.varWords[1]);
2599 		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
2600 			&& (mb->un.varWords[1] == 1)) {
2601 			phba->wait_4_mlo_maint_flg = 1;
2602 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
2603 			phba->link_flag &= ~LS_LOOPBACK_MODE;
2604 			phba->fc_topology = TOPOLOGY_PT_PT;
2605 		}
2606 		break;
2607 	case MBX_READ_SPARM64:
2608 	case MBX_READ_LA:
2609 	case MBX_READ_LA64:
2610 	case MBX_REG_LOGIN:
2611 	case MBX_REG_LOGIN64:
2612 	case MBX_CONFIG_PORT:
2613 	case MBX_RUN_BIU_DIAG:
2614 	default:
2615 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2616 			"2742 Unknown Command 0x%x\n",
2617 			mb->mbxCommand);
2618 		return -EPERM;
2619 	}
2620 
2621 	return 0; /* ok */
2622 }
2623 
2624 /**
2625  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
2626  * @phba: Pointer to HBA context object.
2627  * @job: Pointer to the fc_bsg_job carrying the mailbox request.
2628  * @vport: Pointer to a vport object.
2629  *
2630  * Allocate a tracking object, mailbox command memory, get a mailbox
2631  * from the mailbox pool, copy the caller mailbox command.
2632  *
2633  * If the port is offline or the SLI layer is not active, poll for the
2634  * command (the port may be being reset) and complete the job here, otherwise
2635  * issue the mailbox command and let our completion handler finish the job.
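 *
 * The bsg request payload is a BSG_MBOX_SIZE buffer laid out as a MAILBOX_t
 * image followed by inExtWLen words of extension data; the reply payload is
 * returned in the same layout. Returns 1 if the command was issued and will
 * be completed by lpfc_bsg_wake_mbox_wait, 0 if it completed inline (polled),
 * or a negative error value on failure.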
2636 **/ 2637 static uint32_t 2638 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 2639 struct lpfc_vport *vport) 2640 { 2641 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 2642 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 2643 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 2644 MAILBOX_t *mb = NULL; 2645 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 2646 uint32_t size; 2647 struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */ 2648 struct lpfc_dmabufext *dmp = NULL; /* for biu diag */ 2649 struct ulp_bde64 *rxbpl = NULL; 2650 struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *) 2651 job->request->rqst_data.h_vendor.vendor_cmd; 2652 uint8_t *ext = NULL; 2653 int rc = 0; 2654 uint8_t *from; 2655 2656 /* in case no data is transferred */ 2657 job->reply->reply_payload_rcv_len = 0; 2658 2659 /* check if requested extended data lengths are valid */ 2660 if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) || 2661 (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) { 2662 rc = -ERANGE; 2663 goto job_done; 2664 } 2665 2666 /* allocate our bsg tracking structure */ 2667 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 2668 if (!dd_data) { 2669 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2670 "2727 Failed allocation of dd_data\n"); 2671 rc = -ENOMEM; 2672 goto job_done; 2673 } 2674 2675 mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL); 2676 if (!mb) { 2677 rc = -ENOMEM; 2678 goto job_done; 2679 } 2680 2681 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2682 if (!pmboxq) { 2683 rc = -ENOMEM; 2684 goto job_done; 2685 } 2686 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 2687 2688 size = job->request_payload.payload_len; 2689 sg_copy_to_buffer(job->request_payload.sg_list, 2690 job->request_payload.sg_cnt, 2691 mb, size); 2692 2693 rc = lpfc_bsg_check_cmd_access(phba, mb, vport); 2694 if (rc != 0) 2695 goto job_done; /* must be negative */ 2696 2697 pmb = &pmboxq->u.mb; 2698 memcpy(pmb, mb, sizeof(*pmb)); 2699 pmb->mbxOwner = OWN_HOST; 2700 pmboxq->vport = vport; 2701 2702 /* If HBA encountered an error attention, allow only DUMP 2703 * or RESTART mailbox commands until the HBA is restarted. 2704 */ 2705 if (phba->pport->stopped && 2706 pmb->mbxCommand != MBX_DUMP_MEMORY && 2707 pmb->mbxCommand != MBX_RESTART && 2708 pmb->mbxCommand != MBX_WRITE_VPARMS && 2709 pmb->mbxCommand != MBX_WRITE_WWN) 2710 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 2711 "2797 mbox: Issued mailbox cmd " 2712 "0x%x while in stopped state.\n", 2713 pmb->mbxCommand); 2714 2715 /* Don't allow mailbox commands to be sent when blocked 2716 * or when in the middle of discovery 2717 */ 2718 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 2719 rc = -EAGAIN; 2720 goto job_done; 2721 } 2722 2723 /* extended mailbox commands will need an extended buffer */ 2724 if (mbox_req->inExtWLen || mbox_req->outExtWLen) { 2725 ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL); 2726 if (!ext) { 2727 rc = -ENOMEM; 2728 goto job_done; 2729 } 2730 2731 /* any data for the device? 
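	 * If so it immediately follows the MAILBOX_t image in the buffer
	 * copied from the application and is staged into the extension
	 * buffer here.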
*/ 2732 if (mbox_req->inExtWLen) { 2733 from = (uint8_t *)mb; 2734 from += sizeof(MAILBOX_t); 2735 memcpy((uint8_t *)ext, from, 2736 mbox_req->inExtWLen * sizeof(uint32_t)); 2737 } 2738 2739 pmboxq->context2 = ext; 2740 pmboxq->in_ext_byte_len = 2741 mbox_req->inExtWLen * sizeof(uint32_t); 2742 pmboxq->out_ext_byte_len = 2743 mbox_req->outExtWLen * sizeof(uint32_t); 2744 pmboxq->mbox_offset_word = mbox_req->mbOffset; 2745 } 2746 2747 /* biu diag will need a kernel buffer to transfer the data 2748 * allocate our own buffer and setup the mailbox command to 2749 * use ours 2750 */ 2751 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { 2752 uint32_t transmit_length = pmb->un.varWords[1]; 2753 uint32_t receive_length = pmb->un.varWords[4]; 2754 /* transmit length cannot be greater than receive length or 2755 * mailbox extension size 2756 */ 2757 if ((transmit_length > receive_length) || 2758 (transmit_length > MAILBOX_EXT_SIZE)) { 2759 rc = -ERANGE; 2760 goto job_done; 2761 } 2762 2763 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2764 if (!rxbmp) { 2765 rc = -ENOMEM; 2766 goto job_done; 2767 } 2768 2769 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2770 if (!rxbmp->virt) { 2771 rc = -ENOMEM; 2772 goto job_done; 2773 } 2774 2775 INIT_LIST_HEAD(&rxbmp->list); 2776 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2777 dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0); 2778 if (!dmp) { 2779 rc = -ENOMEM; 2780 goto job_done; 2781 } 2782 2783 INIT_LIST_HEAD(&dmp->dma.list); 2784 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = 2785 putPaddrHigh(dmp->dma.phys); 2786 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = 2787 putPaddrLow(dmp->dma.phys); 2788 2789 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = 2790 putPaddrHigh(dmp->dma.phys + 2791 pmb->un.varBIUdiag.un.s2. 2792 xmit_bde64.tus.f.bdeSize); 2793 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = 2794 putPaddrLow(dmp->dma.phys + 2795 pmb->un.varBIUdiag.un.s2. 
2796 xmit_bde64.tus.f.bdeSize); 2797 2798 /* copy the transmit data found in the mailbox extension area */ 2799 from = (uint8_t *)mb; 2800 from += sizeof(MAILBOX_t); 2801 memcpy((uint8_t *)dmp->dma.virt, from, transmit_length); 2802 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { 2803 struct READ_EVENT_LOG_VAR *rdEventLog = 2804 &pmb->un.varRdEventLog ; 2805 uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; 2806 uint32_t mode = bf_get(lpfc_event_log, rdEventLog); 2807 2808 /* receive length cannot be greater than mailbox 2809 * extension size 2810 */ 2811 if (receive_length > MAILBOX_EXT_SIZE) { 2812 rc = -ERANGE; 2813 goto job_done; 2814 } 2815 2816 /* mode zero uses a bde like biu diags command */ 2817 if (mode == 0) { 2818 2819 /* rebuild the command for sli4 using our own buffers 2820 * like we do for biu diags 2821 */ 2822 2823 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2824 if (!rxbmp) { 2825 rc = -ENOMEM; 2826 goto job_done; 2827 } 2828 2829 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2830 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2831 if (rxbpl) { 2832 INIT_LIST_HEAD(&rxbmp->list); 2833 dmp = diag_cmd_data_alloc(phba, rxbpl, 2834 receive_length, 0); 2835 } 2836 2837 if (!dmp) { 2838 rc = -ENOMEM; 2839 goto job_done; 2840 } 2841 2842 INIT_LIST_HEAD(&dmp->dma.list); 2843 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys); 2844 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); 2845 } 2846 } else if (phba->sli_rev == LPFC_SLI_REV4) { 2847 if (pmb->mbxCommand == MBX_DUMP_MEMORY) { 2848 /* rebuild the command for sli4 using our own buffers 2849 * like we do for biu diags 2850 */ 2851 uint32_t receive_length = pmb->un.varWords[2]; 2852 /* receive length cannot be greater than mailbox 2853 * extension size 2854 */ 2855 if ((receive_length == 0) || 2856 (receive_length > MAILBOX_EXT_SIZE)) { 2857 rc = -ERANGE; 2858 goto job_done; 2859 } 2860 2861 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2862 if (!rxbmp) { 2863 rc = -ENOMEM; 2864 goto job_done; 2865 } 2866 2867 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2868 if (!rxbmp->virt) { 2869 rc = -ENOMEM; 2870 goto job_done; 2871 } 2872 2873 INIT_LIST_HEAD(&rxbmp->list); 2874 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2875 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 2876 0); 2877 if (!dmp) { 2878 rc = -ENOMEM; 2879 goto job_done; 2880 } 2881 2882 INIT_LIST_HEAD(&dmp->dma.list); 2883 pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys); 2884 pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); 2885 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && 2886 pmb->un.varUpdateCfg.co) { 2887 struct ulp_bde64 *bde = 2888 (struct ulp_bde64 *)&pmb->un.varWords[4]; 2889 2890 /* bde size cannot be greater than mailbox ext size */ 2891 if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { 2892 rc = -ERANGE; 2893 goto job_done; 2894 } 2895 2896 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2897 if (!rxbmp) { 2898 rc = -ENOMEM; 2899 goto job_done; 2900 } 2901 2902 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2903 if (!rxbmp->virt) { 2904 rc = -ENOMEM; 2905 goto job_done; 2906 } 2907 2908 INIT_LIST_HEAD(&rxbmp->list); 2909 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2910 dmp = diag_cmd_data_alloc(phba, rxbpl, 2911 bde->tus.f.bdeSize, 0); 2912 if (!dmp) { 2913 rc = -ENOMEM; 2914 goto job_done; 2915 } 2916 2917 INIT_LIST_HEAD(&dmp->dma.list); 2918 bde->addrHigh = putPaddrHigh(dmp->dma.phys); 2919 bde->addrLow = putPaddrLow(dmp->dma.phys); 2920 2921 /* copy the transmit data found in the 
mailbox 2922 * extension area 2923 */ 2924 from = (uint8_t *)mb; 2925 from += sizeof(MAILBOX_t); 2926 memcpy((uint8_t *)dmp->dma.virt, from, 2927 bde->tus.f.bdeSize); 2928 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 2929 struct lpfc_mbx_nembed_cmd *nembed_sge; 2930 struct mbox_header *header; 2931 uint32_t receive_length; 2932 2933 /* rebuild the command for sli4 using our own buffers 2934 * like we do for biu diags 2935 */ 2936 header = (struct mbox_header *)&pmb->un.varWords[0]; 2937 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 2938 &pmb->un.varWords[0]; 2939 receive_length = nembed_sge->sge[0].length; 2940 2941 /* receive length cannot be greater than mailbox 2942 * extension size 2943 */ 2944 if ((receive_length == 0) || 2945 (receive_length > MAILBOX_EXT_SIZE)) { 2946 rc = -ERANGE; 2947 goto job_done; 2948 } 2949 2950 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2951 if (!rxbmp) { 2952 rc = -ENOMEM; 2953 goto job_done; 2954 } 2955 2956 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2957 if (!rxbmp->virt) { 2958 rc = -ENOMEM; 2959 goto job_done; 2960 } 2961 2962 INIT_LIST_HEAD(&rxbmp->list); 2963 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2964 dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, 2965 0); 2966 if (!dmp) { 2967 rc = -ENOMEM; 2968 goto job_done; 2969 } 2970 2971 INIT_LIST_HEAD(&dmp->dma.list); 2972 nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys); 2973 nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys); 2974 /* copy the transmit data found in the mailbox 2975 * extension area 2976 */ 2977 from = (uint8_t *)mb; 2978 from += sizeof(MAILBOX_t); 2979 memcpy((uint8_t *)dmp->dma.virt, from, 2980 header->cfg_mhdr.payload_length); 2981 } 2982 } 2983 2984 dd_data->context_un.mbox.rxbmp = rxbmp; 2985 dd_data->context_un.mbox.dmp = dmp; 2986 2987 /* setup wake call as IOCB callback */ 2988 pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; 2989 2990 /* setup context field to pass wait_queue pointer to wake function */ 2991 pmboxq->context1 = dd_data; 2992 dd_data->type = TYPE_MBOX; 2993 dd_data->context_un.mbox.pmboxq = pmboxq; 2994 dd_data->context_un.mbox.mb = mb; 2995 dd_data->context_un.mbox.set_job = job; 2996 dd_data->context_un.mbox.ext = ext; 2997 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 2998 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 2999 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; 3000 job->dd_data = dd_data; 3001 3002 if ((vport->fc_flag & FC_OFFLINE_MODE) || 3003 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 3004 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 3005 if (rc != MBX_SUCCESS) { 3006 rc = (rc == MBX_TIMEOUT) ? 
-ETIME : -ENODEV; 3007 goto job_done; 3008 } 3009 3010 /* job finished, copy the data */ 3011 memcpy(mb, pmb, sizeof(*pmb)); 3012 job->reply->reply_payload_rcv_len = 3013 sg_copy_from_buffer(job->reply_payload.sg_list, 3014 job->reply_payload.sg_cnt, 3015 mb, size); 3016 /* not waiting mbox already done */ 3017 rc = 0; 3018 goto job_done; 3019 } 3020 3021 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3022 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) 3023 return 1; /* job started */ 3024 3025 job_done: 3026 /* common exit for error or job completed inline */ 3027 kfree(mb); 3028 if (pmboxq) 3029 mempool_free(pmboxq, phba->mbox_mem_pool); 3030 kfree(ext); 3031 if (dmp) { 3032 dma_free_coherent(&phba->pcidev->dev, 3033 dmp->size, dmp->dma.virt, 3034 dmp->dma.phys); 3035 kfree(dmp); 3036 } 3037 if (rxbmp) { 3038 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 3039 kfree(rxbmp); 3040 } 3041 kfree(dd_data); 3042 3043 return rc; 3044 } 3045 3046 /** 3047 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command 3048 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 3049 **/ 3050 static int 3051 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) 3052 { 3053 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3054 struct lpfc_hba *phba = vport->phba; 3055 int rc = 0; 3056 3057 /* in case no data is transferred */ 3058 job->reply->reply_payload_rcv_len = 0; 3059 if (job->request_len < 3060 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { 3061 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3062 "2737 Received MBOX_REQ request below " 3063 "minimum size\n"); 3064 rc = -EINVAL; 3065 goto job_error; 3066 } 3067 3068 if (job->request_payload.payload_len != BSG_MBOX_SIZE) { 3069 rc = -EINVAL; 3070 goto job_error; 3071 } 3072 3073 if (job->reply_payload.payload_len != BSG_MBOX_SIZE) { 3074 rc = -EINVAL; 3075 goto job_error; 3076 } 3077 3078 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 3079 rc = -EAGAIN; 3080 goto job_error; 3081 } 3082 3083 rc = lpfc_bsg_issue_mbox(phba, job, vport); 3084 3085 job_error: 3086 if (rc == 0) { 3087 /* job done */ 3088 job->reply->result = 0; 3089 job->dd_data = NULL; 3090 job->job_done(job); 3091 } else if (rc == 1) 3092 /* job submitted, will complete later*/ 3093 rc = 0; /* return zero, no error */ 3094 else { 3095 /* some error occurred */ 3096 job->reply->result = rc; 3097 job->dd_data = NULL; 3098 } 3099 3100 return rc; 3101 } 3102 3103 /** 3104 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler 3105 * @phba: Pointer to HBA context object. 3106 * @cmdiocbq: Pointer to command iocb. 3107 * @rspiocbq: Pointer to response iocb. 3108 * 3109 * This function is the completion handler for iocbs issued using 3110 * lpfc_menlo_cmd function. This function is called by the 3111 * ring event handler function without any lock held. This function 3112 * can be called from both worker thread context and interrupt 3113 * context. This function also can be called from another thread which 3114 * cleans up the SLI layer objects. 3115 * This function copies the contents of the response iocb to the 3116 * response iocb memory object provided by the caller of 3117 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 3118 * sleeps for the iocb completion. 
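 *
 * The exchange id from the response is always returned in the vendor reply
 * so that a follow-on LPFC_BSG_VENDOR_MENLO_DATA request can continue the
 * same exchange.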
3119 **/ 3120 static void 3121 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, 3122 struct lpfc_iocbq *cmdiocbq, 3123 struct lpfc_iocbq *rspiocbq) 3124 { 3125 struct bsg_job_data *dd_data; 3126 struct fc_bsg_job *job; 3127 IOCB_t *rsp; 3128 struct lpfc_dmabuf *bmp; 3129 struct lpfc_bsg_menlo *menlo; 3130 unsigned long flags; 3131 struct menlo_response *menlo_resp; 3132 int rc = 0; 3133 3134 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3135 dd_data = cmdiocbq->context1; 3136 if (!dd_data) { 3137 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3138 return; 3139 } 3140 3141 menlo = &dd_data->context_un.menlo; 3142 job = menlo->set_job; 3143 job->dd_data = NULL; /* so timeout handler does not reply */ 3144 3145 spin_lock(&phba->hbalock); 3146 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3147 if (cmdiocbq->context2 && rspiocbq) 3148 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3149 &rspiocbq->iocb, sizeof(IOCB_t)); 3150 spin_unlock(&phba->hbalock); 3151 3152 bmp = menlo->bmp; 3153 rspiocbq = menlo->rspiocbq; 3154 rsp = &rspiocbq->iocb; 3155 3156 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 3157 job->request_payload.sg_cnt, DMA_TO_DEVICE); 3158 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 3159 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 3160 3161 /* always return the xri, this would be used in the case 3162 * of a menlo download to allow the data to be sent as a continuation 3163 * of the exchange. 3164 */ 3165 menlo_resp = (struct menlo_response *) 3166 job->reply->reply_data.vendor_reply.vendor_rsp; 3167 menlo_resp->xri = rsp->ulpContext; 3168 if (rsp->ulpStatus) { 3169 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 3170 switch (rsp->un.ulpWord[4] & 0xff) { 3171 case IOERR_SEQUENCE_TIMEOUT: 3172 rc = -ETIMEDOUT; 3173 break; 3174 case IOERR_INVALID_RPI: 3175 rc = -EFAULT; 3176 break; 3177 default: 3178 rc = -EACCES; 3179 break; 3180 } 3181 } else 3182 rc = -EACCES; 3183 } else 3184 job->reply->reply_payload_rcv_len = 3185 rsp->un.genreq64.bdl.bdeSize; 3186 3187 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 3188 lpfc_sli_release_iocbq(phba, rspiocbq); 3189 lpfc_sli_release_iocbq(phba, cmdiocbq); 3190 kfree(bmp); 3191 kfree(dd_data); 3192 /* make error code available to userspace */ 3193 job->reply->result = rc; 3194 /* complete the job back to userspace */ 3195 job->job_done(job); 3196 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3197 return; 3198 } 3199 3200 /** 3201 * lpfc_menlo_cmd - send an ioctl for menlo hardware 3202 * @job: fc_bsg_job to handle 3203 * 3204 * This function issues a gen request 64 CR ioctl for all menlo cmd requests, 3205 * all the command completions will return the xri for the command. 3206 * For menlo data requests a gen request 64 CX is used to continue the exchange 3207 * supplied in the menlo request header xri field. 
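 *
 * The request and reply scatterlists are mapped into a single buffer pointer
 * list so the command and its response share one BDE list, and the firmware
 * timeout is set slightly shorter than the driver timeout so the firmware
 * fails the exchange first.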
3208 **/ 3209 static int 3210 lpfc_menlo_cmd(struct fc_bsg_job *job) 3211 { 3212 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3213 struct lpfc_hba *phba = vport->phba; 3214 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 3215 IOCB_t *cmd, *rsp; 3216 int rc = 0; 3217 struct menlo_command *menlo_cmd; 3218 struct menlo_response *menlo_resp; 3219 struct lpfc_dmabuf *bmp = NULL; 3220 int request_nseg; 3221 int reply_nseg; 3222 struct scatterlist *sgel = NULL; 3223 int numbde; 3224 dma_addr_t busaddr; 3225 struct bsg_job_data *dd_data; 3226 struct ulp_bde64 *bpl = NULL; 3227 3228 /* in case no data is returned return just the return code */ 3229 job->reply->reply_payload_rcv_len = 0; 3230 3231 if (job->request_len < 3232 sizeof(struct fc_bsg_request) + 3233 sizeof(struct menlo_command)) { 3234 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3235 "2784 Received MENLO_CMD request below " 3236 "minimum size\n"); 3237 rc = -ERANGE; 3238 goto no_dd_data; 3239 } 3240 3241 if (job->reply_len < 3242 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { 3243 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3244 "2785 Received MENLO_CMD reply below " 3245 "minimum size\n"); 3246 rc = -ERANGE; 3247 goto no_dd_data; 3248 } 3249 3250 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { 3251 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3252 "2786 Adapter does not support menlo " 3253 "commands\n"); 3254 rc = -EPERM; 3255 goto no_dd_data; 3256 } 3257 3258 menlo_cmd = (struct menlo_command *) 3259 job->request->rqst_data.h_vendor.vendor_cmd; 3260 3261 menlo_resp = (struct menlo_response *) 3262 job->reply->reply_data.vendor_reply.vendor_rsp; 3263 3264 /* allocate our bsg tracking structure */ 3265 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 3266 if (!dd_data) { 3267 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3268 "2787 Failed allocation of dd_data\n"); 3269 rc = -ENOMEM; 3270 goto no_dd_data; 3271 } 3272 3273 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3274 if (!bmp) { 3275 rc = -ENOMEM; 3276 goto free_dd; 3277 } 3278 3279 cmdiocbq = lpfc_sli_get_iocbq(phba); 3280 if (!cmdiocbq) { 3281 rc = -ENOMEM; 3282 goto free_bmp; 3283 } 3284 3285 rspiocbq = lpfc_sli_get_iocbq(phba); 3286 if (!rspiocbq) { 3287 rc = -ENOMEM; 3288 goto free_cmdiocbq; 3289 } 3290 3291 rsp = &rspiocbq->iocb; 3292 3293 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 3294 if (!bmp->virt) { 3295 rc = -ENOMEM; 3296 goto free_rspiocbq; 3297 } 3298 3299 INIT_LIST_HEAD(&bmp->list); 3300 bpl = (struct ulp_bde64 *) bmp->virt; 3301 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 3302 job->request_payload.sg_cnt, DMA_TO_DEVICE); 3303 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 3304 busaddr = sg_dma_address(sgel); 3305 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3306 bpl->tus.f.bdeSize = sg_dma_len(sgel); 3307 bpl->tus.w = cpu_to_le32(bpl->tus.w); 3308 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 3309 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 3310 bpl++; 3311 } 3312 3313 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 3314 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 3315 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 3316 busaddr = sg_dma_address(sgel); 3317 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 3318 bpl->tus.f.bdeSize = sg_dma_len(sgel); 3319 bpl->tus.w = cpu_to_le32(bpl->tus.w); 3320 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 3321 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 3322 
bpl++; 3323 } 3324 3325 cmd = &cmdiocbq->iocb; 3326 cmd->un.genreq64.bdl.ulpIoTag32 = 0; 3327 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 3328 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 3329 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 3330 cmd->un.genreq64.bdl.bdeSize = 3331 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 3332 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 3333 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 3334 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; 3335 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ 3336 cmd->ulpBdeCount = 1; 3337 cmd->ulpClass = CLASS3; 3338 cmd->ulpOwner = OWN_CHIP; 3339 cmd->ulpLe = 1; /* Limited Edition */ 3340 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3341 cmdiocbq->vport = phba->pport; 3342 /* We want the firmware to timeout before we do */ 3343 cmd->ulpTimeout = MENLO_TIMEOUT - 5; 3344 cmdiocbq->context3 = bmp; 3345 cmdiocbq->context2 = rspiocbq; 3346 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 3347 cmdiocbq->context1 = dd_data; 3348 cmdiocbq->context2 = rspiocbq; 3349 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 3350 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 3351 cmd->ulpPU = MENLO_PU; /* 3 */ 3352 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ 3353 cmd->ulpContext = MENLO_CONTEXT; /* 0 */ 3354 } else { 3355 cmd->ulpCommand = CMD_GEN_REQUEST64_CX; 3356 cmd->ulpPU = 1; 3357 cmd->un.ulpWord[4] = 0; 3358 cmd->ulpContext = menlo_cmd->xri; 3359 } 3360 3361 dd_data->type = TYPE_MENLO; 3362 dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 3363 dd_data->context_un.menlo.rspiocbq = rspiocbq; 3364 dd_data->context_un.menlo.set_job = job; 3365 dd_data->context_un.menlo.bmp = bmp; 3366 3367 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 3368 MENLO_TIMEOUT - 5); 3369 if (rc == IOCB_SUCCESS) 3370 return 0; /* done for now */ 3371 3372 /* iocb failed so cleanup */ 3373 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 3374 job->request_payload.sg_cnt, DMA_TO_DEVICE); 3375 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 3376 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 3377 3378 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 3379 3380 free_rspiocbq: 3381 lpfc_sli_release_iocbq(phba, rspiocbq); 3382 free_cmdiocbq: 3383 lpfc_sli_release_iocbq(phba, cmdiocbq); 3384 free_bmp: 3385 kfree(bmp); 3386 free_dd: 3387 kfree(dd_data); 3388 no_dd_data: 3389 /* make error code available to userspace */ 3390 job->reply->result = rc; 3391 job->dd_data = NULL; 3392 return rc; 3393 } 3394 /** 3395 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 3396 * @job: fc_bsg_job to handle 3397 **/ 3398 static int 3399 lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 3400 { 3401 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 3402 int rc; 3403 3404 switch (command) { 3405 case LPFC_BSG_VENDOR_SET_CT_EVENT: 3406 rc = lpfc_bsg_hba_set_event(job); 3407 break; 3408 case LPFC_BSG_VENDOR_GET_CT_EVENT: 3409 rc = lpfc_bsg_hba_get_event(job); 3410 break; 3411 case LPFC_BSG_VENDOR_SEND_MGMT_RESP: 3412 rc = lpfc_bsg_send_mgmt_rsp(job); 3413 break; 3414 case LPFC_BSG_VENDOR_DIAG_MODE: 3415 rc = lpfc_bsg_diag_mode(job); 3416 break; 3417 case LPFC_BSG_VENDOR_DIAG_TEST: 3418 rc = lpfc_bsg_diag_test(job); 3419 break; 3420 case LPFC_BSG_VENDOR_GET_MGMT_REV: 3421 rc = lpfc_bsg_get_dfc_rev(job); 3422 break; 3423 case LPFC_BSG_VENDOR_MBOX: 3424 rc = lpfc_bsg_mbox_cmd(job); 3425 break; 3426 case LPFC_BSG_VENDOR_MENLO_CMD: 3427 case LPFC_BSG_VENDOR_MENLO_DATA: 3428 rc = lpfc_menlo_cmd(job); 3429 break; 3430 
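	/* LPFC_BSG_VENDOR_MENLO_CMD and LPFC_BSG_VENDOR_MENLO_DATA share
	 * lpfc_menlo_cmd(); any other vendor opcode is rejected below.
	 */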
default: 3431 rc = -EINVAL; 3432 job->reply->reply_payload_rcv_len = 0; 3433 /* make error code available to userspace */ 3434 job->reply->result = rc; 3435 break; 3436 } 3437 3438 return rc; 3439 } 3440 3441 /** 3442 * lpfc_bsg_request - handle a bsg request from the FC transport 3443 * @job: fc_bsg_job to handle 3444 **/ 3445 int 3446 lpfc_bsg_request(struct fc_bsg_job *job) 3447 { 3448 uint32_t msgcode; 3449 int rc; 3450 3451 msgcode = job->request->msgcode; 3452 switch (msgcode) { 3453 case FC_BSG_HST_VENDOR: 3454 rc = lpfc_bsg_hst_vendor(job); 3455 break; 3456 case FC_BSG_RPT_ELS: 3457 rc = lpfc_bsg_rport_els(job); 3458 break; 3459 case FC_BSG_RPT_CT: 3460 rc = lpfc_bsg_send_mgmt_cmd(job); 3461 break; 3462 default: 3463 rc = -EINVAL; 3464 job->reply->reply_payload_rcv_len = 0; 3465 /* make error code available to userspace */ 3466 job->reply->result = rc; 3467 break; 3468 } 3469 3470 return rc; 3471 } 3472 3473 /** 3474 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport 3475 * @job: fc_bsg_job that has timed out 3476 * 3477 * This function just aborts the job's IOCB. The aborted IOCB will return to 3478 * the waiting function which will handle passing the error back to userspace 3479 **/ 3480 int 3481 lpfc_bsg_timeout(struct fc_bsg_job *job) 3482 { 3483 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3484 struct lpfc_hba *phba = vport->phba; 3485 struct lpfc_iocbq *cmdiocb; 3486 struct lpfc_bsg_event *evt; 3487 struct lpfc_bsg_iocb *iocb; 3488 struct lpfc_bsg_mbox *mbox; 3489 struct lpfc_bsg_menlo *menlo; 3490 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3491 struct bsg_job_data *dd_data; 3492 unsigned long flags; 3493 3494 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3495 dd_data = (struct bsg_job_data *)job->dd_data; 3496 /* timeout and completion crossed paths if no dd_data */ 3497 if (!dd_data) { 3498 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3499 return 0; 3500 } 3501 3502 switch (dd_data->type) { 3503 case TYPE_IOCB: 3504 iocb = &dd_data->context_un.iocb; 3505 cmdiocb = iocb->cmdiocbq; 3506 /* hint to completion handler that the job timed out */ 3507 job->reply->result = -EAGAIN; 3508 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3509 /* this will call our completion handler */ 3510 spin_lock_irq(&phba->hbalock); 3511 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 3512 spin_unlock_irq(&phba->hbalock); 3513 break; 3514 case TYPE_EVT: 3515 evt = dd_data->context_un.evt; 3516 /* this event has no job anymore */ 3517 evt->set_job = NULL; 3518 job->dd_data = NULL; 3519 job->reply->reply_payload_rcv_len = 0; 3520 /* Return -EAGAIN which is our way of signallying the 3521 * app to retry. 
3522 */ 3523 job->reply->result = -EAGAIN; 3524 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3525 job->job_done(job); 3526 break; 3527 case TYPE_MBOX: 3528 mbox = &dd_data->context_un.mbox; 3529 /* this mbox has no job anymore */ 3530 mbox->set_job = NULL; 3531 job->dd_data = NULL; 3532 job->reply->reply_payload_rcv_len = 0; 3533 job->reply->result = -EAGAIN; 3534 /* the mbox completion handler can now be run */ 3535 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3536 job->job_done(job); 3537 break; 3538 case TYPE_MENLO: 3539 menlo = &dd_data->context_un.menlo; 3540 cmdiocb = menlo->cmdiocbq; 3541 /* hint to completion handler that the job timed out */ 3542 job->reply->result = -EAGAIN; 3543 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3544 /* this will call our completion handler */ 3545 spin_lock_irq(&phba->hbalock); 3546 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 3547 spin_unlock_irq(&phba->hbalock); 3548 break; 3549 default: 3550 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3551 break; 3552 } 3553 3554 /* scsi transport fc fc_bsg_job_timeout expects a zero return code, 3555 * otherwise an error message will be displayed on the console 3556 * so always return success (zero) 3557 */ 3558 return 0; 3559 } 3560