/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */

	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
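	/* Immediate data delivered with the event: on SLI4 this is the
	 * ct_ctx array index for the exchange, on SLI3 it is the iocb's
	 * ulpContext (exchange id); see lpfc_bsg_ct_unsol_event() below.
	 */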
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context2;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	bmp = iocb->bmp;
	rsp = &rspiocbq->iocb;
	ndlp = cmdiocbq->context1;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
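	/* Build the buffer pointer list (BPL): each DMA-mapped scatterlist
	 * entry becomes one 64-bit BDE, request entries first, then reply
	 * entries.  The BPL itself lives in the mbuf just allocated and is
	 * referenced from the iocb's un.genreq64.bdl below.
	 */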
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = ndlp;
	cmdiocbq->context2 = dd_data;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;

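	/* If the FCP ring is in polled mode, its interrupt is masked in the
	 * Host Control register; set the ring-0 interrupt enable bit again
	 * before issuing so this iocb's completion can be delivered.
	 */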
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_cmdiocbq;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (iocb_stat == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}

	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
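	/* The command/response buffers that lpfc_prep_els_iocb allocated are
	 * not needed here: the payload comes straight from the bsg job's
	 * scatterlists, so release the driver-allocated buffers and rebuild
	 * the buffer list from the job below.
	 */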
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	lpfc_nlp_put(ndlp);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */
	else if (rc == IOCB_BUSY)
		rc = -EAGAIN;
	else
		rc = -EIO;

linkdown_err:
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;

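	/* Splice a local list head into the iocb chain so the walk below
	 * visits piocbq and every continuation iocb linked behind it.
	 */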
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
						    (phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

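		/* On SLI4, remember this exchange in the 64-entry circular
		 * ct_ctx array (oxid/rxid/SID) so a later SEND_MGMT_RESP can
		 * answer on the same exchange; the event carries the array
		 * index as its immediate data.  Loopback events wake the
		 * waiter directly, everything else is queued on
		 * events_to_get for a later GET_EVENT request.
		 */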
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].flags &
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);

		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

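	/* The job is left pending here: it completes from
	 * lpfc_bsg_ct_unsol_event() when a matching event arrives, or from
	 * the bsg timeout handler via the dd_data saved on the job.
	 */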
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

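	/* Note that the event data is copied out through the *request*
	 * payload buffer supplied by the application, with the byte count
	 * reported in reply_payload_rcv_len.
	 */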
	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context2;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	job = dd_data->context_un.iocb.set_job;
	bmp = dd_data->context_un.iocb.bmp;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *bmp, int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
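	/* On SLI4 the response must reuse the exchange saved by
	 * lpfc_bsg_ct_unsol_event(): look up the rxid/oxid and source DID
	 * in the ct_ctx slot selected by @tag and address the sequence to
	 * that node.  On SLI3 the tag is itself the exchange context.
	 */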
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context3 = bmp;

	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
	ctiocb->context2 = dd_data;
	ctiocb->context1 = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.rspiocbq = NULL;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* TBD need to handle a timeout */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing driver for diag loopback
 * on device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	pring = &psli->ring[LPFC_FCP_RING];
	if (!pring)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	while (pring->txcmplq_cnt) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;
		msleep(10);
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for driver exit processing of setting up
 * diag loopback mode on device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag for set link to diag or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
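	/* Build an embedded SLI4 config mailbox for the request; if the
	 * returned length does not match what was asked for, the payload
	 * could not be embedded and the command cannot be issued.
	 */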
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}

/**
 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up internal loopback diagnostic.
 */
static int
lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function sets up SLI4 FC port registrations for diagnostic run, which
 * includes all the rpis, vfi, and also vpi.
 */
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
	int rc;

	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3136 Port still had vfi registered: "
				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
				phba->pport->fc_myDID, phba->fcf.fcfi,
				phba->sli4_hba.vfi_ids[phba->pport->vfi],
				phba->vpi_ids[phba->pport->vpi]);
		return -EINVAL;
	}
	rc = lpfc_issue_reg_vfi(phba->pport);
	return rc;
}

/**
 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli4 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 */
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout;
	int i, rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");
	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

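	/* Internal loopback is configured through the dedicated SLI4
	 * mailbox command; external loopback re-initializes the link in
	 * point-to-point topology, presumably with a loopback plug
	 * attached, so frames sent are received back on the same port.
	 */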
1821 */ 1822 static int 1823 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job) 1824 { 1825 struct diag_mode_set *loopback_mode; 1826 uint32_t link_flags, timeout; 1827 int i, rc = 0; 1828 1829 /* no data to return just the return code */ 1830 job->reply->reply_payload_rcv_len = 0; 1831 1832 if (job->request_len < sizeof(struct fc_bsg_request) + 1833 sizeof(struct diag_mode_set)) { 1834 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1835 "3011 Received DIAG MODE request size:%d " 1836 "below the minimum size:%d\n", 1837 job->request_len, 1838 (int)(sizeof(struct fc_bsg_request) + 1839 sizeof(struct diag_mode_set))); 1840 rc = -EINVAL; 1841 goto job_error; 1842 } 1843 1844 rc = lpfc_bsg_diag_mode_enter(phba); 1845 if (rc) 1846 goto job_error; 1847 1848 /* indicate we are in loobpack diagnostic mode */ 1849 spin_lock_irq(&phba->hbalock); 1850 phba->link_flag |= LS_LOOPBACK_MODE; 1851 spin_unlock_irq(&phba->hbalock); 1852 1853 /* reset port to start frome scratch */ 1854 rc = lpfc_selective_reset(phba); 1855 if (rc) 1856 goto job_error; 1857 1858 /* bring the link to diagnostic mode */ 1859 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1860 "3129 Bring link to diagnostic state.\n"); 1861 loopback_mode = (struct diag_mode_set *) 1862 job->request->rqst_data.h_vendor.vendor_cmd; 1863 link_flags = loopback_mode->type; 1864 timeout = loopback_mode->timeout * 100; 1865 1866 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 1867 if (rc) { 1868 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 1869 "3130 Failed to bring link to diagnostic " 1870 "state, rc:x%x\n", rc); 1871 goto loopback_mode_exit; 1872 } 1873 1874 /* wait for link down before proceeding */ 1875 i = 0; 1876 while (phba->link_state != LPFC_LINK_DOWN) { 1877 if (i++ > timeout) { 1878 rc = -ETIMEDOUT; 1879 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1880 "3131 Timeout waiting for link to " 1881 "diagnostic mode, timeout:%d ms\n", 1882 timeout * 10); 1883 goto loopback_mode_exit; 1884 } 1885 msleep(10); 1886 } 1887 1888 /* set up loopback mode */ 1889 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1890 "3132 Set up loopback mode:x%x\n", link_flags); 1891 1892 if (link_flags == INTERNAL_LOOP_BACK) 1893 rc = lpfc_sli4_bsg_set_internal_loopback(phba); 1894 else if (link_flags == EXTERNAL_LOOP_BACK) 1895 rc = lpfc_hba_init_link_fc_topology(phba, 1896 FLAGS_TOPOLOGY_MODE_PT_PT, 1897 MBX_NOWAIT); 1898 else { 1899 rc = -EINVAL; 1900 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 1901 "3141 Loopback mode:x%x not supported\n", 1902 link_flags); 1903 goto loopback_mode_exit; 1904 } 1905 1906 if (!rc) { 1907 /* wait for the link attention interrupt */ 1908 msleep(100); 1909 i = 0; 1910 while (phba->link_state < LPFC_LINK_UP) { 1911 if (i++ > timeout) { 1912 rc = -ETIMEDOUT; 1913 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1914 "3137 Timeout waiting for link up " 1915 "in loopback mode, timeout:%d ms\n", 1916 timeout * 10); 1917 break; 1918 } 1919 msleep(10); 1920 } 1921 } 1922 1923 /* port resource registration setup for loopback diagnostic */ 1924 if (!rc) { 1925 /* set up a none zero myDID for loopback test */ 1926 phba->pport->fc_myDID = 1; 1927 rc = lpfc_sli4_diag_fcport_reg_setup(phba); 1928 } else 1929 goto loopback_mode_exit; 1930 1931 if (!rc) { 1932 /* wait for the port ready */ 1933 msleep(100); 1934 i = 0; 1935 while (phba->link_state != LPFC_HBA_READY) { 1936 if (i++ > timeout) { 1937 rc = -ETIMEDOUT; 1938 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 1939 "3133 Timeout waiting for port " 1940 "loopback mode 
1941 					timeout * 10);
1942 				break;
1943 			}
1944 			msleep(10);
1945 		}
1946 	}
1947 
1948 loopback_mode_exit:
1949 	/* clear loopback diagnostic mode */
1950 	if (rc) {
1951 		spin_lock_irq(&phba->hbalock);
1952 		phba->link_flag &= ~LS_LOOPBACK_MODE;
1953 		spin_unlock_irq(&phba->hbalock);
1954 	}
1955 	lpfc_bsg_diag_mode_exit(phba);
1956 
1957 job_error:
1958 	/* make error code available to userspace */
1959 	job->reply->result = rc;
1960 	/* complete the job back to userspace if no error */
1961 	if (rc == 0)
1962 		job->job_done(job);
1963 	return rc;
1964 }
1965 
1966 /**
1967  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1968  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1969  *
1970  * This function checks and dispatches the bsg diag command from the user
1971  * to the proper driver action routine.
1972  */
1973 static int
1974 lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1975 {
1976 	struct Scsi_Host *shost;
1977 	struct lpfc_vport *vport;
1978 	struct lpfc_hba *phba;
1979 	int rc;
1980 
1981 	shost = job->shost;
1982 	if (!shost)
1983 		return -ENODEV;
1984 	vport = (struct lpfc_vport *)job->shost->hostdata;
1985 	if (!vport)
1986 		return -ENODEV;
1987 	phba = vport->phba;
1988 	if (!phba)
1989 		return -ENODEV;
1990 
1991 	if (phba->sli_rev < LPFC_SLI_REV4)
1992 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1993 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1994 		 LPFC_SLI_INTF_IF_TYPE_2)
1995 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1996 	else
1997 		rc = -ENODEV;
1998 
1999 	return rc;
2000 }
2001 
2002 /**
2003  * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2004  * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2005  *
2006  * This function checks and dispatches the bsg diag-mode-end command from
2007  * the user to the proper driver action routine.
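 *
 * Like the set-up path above, this routine polls for a link state change.
 * The polling pattern shared by these handlers reduces to the following
 * sketch (hypothetical helper, not part of the driver):
 *
 *	static int lpfc_bsg_poll_link_state(struct lpfc_hba *phba,
 *					    uint32_t state, uint32_t ticks)
 *	{
 *		int i = 0;
 *
 *		while (phba->link_state != state) {
 *			if (i++ > ticks)
 *				return -ETIMEDOUT;
 *			msleep(10);
 *		}
 *		return 0;
 *	}
 *
 * where ticks is the user timeout in seconds times 100 (10 ms per tick).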
2008  */
2009 static int
2010 lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2011 {
2012 	struct Scsi_Host *shost;
2013 	struct lpfc_vport *vport;
2014 	struct lpfc_hba *phba;
2015 	struct diag_mode_set *loopback_mode_end_cmd;
2016 	uint32_t timeout;
2017 	int rc, i;
2018 
2019 	shost = job->shost;
2020 	if (!shost)
2021 		return -ENODEV;
2022 	vport = (struct lpfc_vport *)job->shost->hostdata;
2023 	if (!vport)
2024 		return -ENODEV;
2025 	phba = vport->phba;
2026 	if (!phba)
2027 		return -ENODEV;
2028 
2029 	if (phba->sli_rev < LPFC_SLI_REV4)
2030 		return -ENODEV;
2031 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2032 	    LPFC_SLI_INTF_IF_TYPE_2)
2033 		return -ENODEV;
2034 
2035 	/* clear loopback diagnostic mode */
2036 	spin_lock_irq(&phba->hbalock);
2037 	phba->link_flag &= ~LS_LOOPBACK_MODE;
2038 	spin_unlock_irq(&phba->hbalock);
2039 	loopback_mode_end_cmd = (struct diag_mode_set *)
2040 		job->request->rqst_data.h_vendor.vendor_cmd;
2041 	timeout = loopback_mode_end_cmd->timeout * 100;
2042 
2043 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2044 	if (rc) {
2045 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2046 				"3139 Failed to bring link to diagnostic "
2047 				"state, rc:x%x\n", rc);
2048 		goto loopback_mode_end_exit;
2049 	}
2050 
2051 	/* wait for link down before proceeding */
2052 	i = 0;
2053 	while (phba->link_state != LPFC_LINK_DOWN) {
2054 		if (i++ > timeout) {
2055 			rc = -ETIMEDOUT;
2056 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2057 					"3140 Timeout waiting for link to "
2058 					"diagnostic mode_end, timeout:%d ms\n",
2059 					timeout * 10);
2060 			/* there is nothing much we can do here */
2061 			break;
2062 		}
2063 		msleep(10);
2064 	}
2065 
2066 	/* reset port resource registrations */
2067 	rc = lpfc_selective_reset(phba);
2068 	phba->pport->fc_myDID = 0;
2069 
2070 loopback_mode_end_exit:
2071 	/* make return code available to userspace */
2072 	job->reply->result = rc;
2073 	/* complete the job back to userspace if no error */
2074 	if (rc == 0)
2075 		job->job_done(job);
2076 	return rc;
2077 }
2078 
2079 /**
2080  * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2081  * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2082  *
2083  * This function performs an SLI4 diag link test as requested by the user
2084  * application.
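 *
 * The vendor request carries a struct sli4_link_diag; an illustrative
 * fill-in (field layout assumed from lpfc_bsg.h):
 *
 *	struct sli4_link_diag ldiag;
 *
 *	ldiag.command      = LPFC_BSG_VENDOR_DIAG_LINK_TEST;
 *	ldiag.timeout      = 60;	(seconds)
 *	ldiag.test_id      = ...;	(which diagnostic to run)
 *	ldiag.loops        = ...;	(iteration count)
 *	ldiag.test_version = ...;
 *	ldiag.error_action = ...;
 *
 * These fields are copied verbatim into the RUN_LINK_DIAG_TEST mailbox
 * command built below.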
2085 */ 2086 static int 2087 lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job) 2088 { 2089 struct Scsi_Host *shost; 2090 struct lpfc_vport *vport; 2091 struct lpfc_hba *phba; 2092 LPFC_MBOXQ_t *pmboxq; 2093 struct sli4_link_diag *link_diag_test_cmd; 2094 uint32_t req_len, alloc_len; 2095 uint32_t timeout; 2096 struct lpfc_mbx_run_link_diag_test *run_link_diag_test; 2097 union lpfc_sli4_cfg_shdr *shdr; 2098 uint32_t shdr_status, shdr_add_status; 2099 struct diag_status *diag_status_reply; 2100 int mbxstatus, rc = 0; 2101 2102 shost = job->shost; 2103 if (!shost) { 2104 rc = -ENODEV; 2105 goto job_error; 2106 } 2107 vport = (struct lpfc_vport *)job->shost->hostdata; 2108 if (!vport) { 2109 rc = -ENODEV; 2110 goto job_error; 2111 } 2112 phba = vport->phba; 2113 if (!phba) { 2114 rc = -ENODEV; 2115 goto job_error; 2116 } 2117 2118 if (phba->sli_rev < LPFC_SLI_REV4) { 2119 rc = -ENODEV; 2120 goto job_error; 2121 } 2122 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2123 LPFC_SLI_INTF_IF_TYPE_2) { 2124 rc = -ENODEV; 2125 goto job_error; 2126 } 2127 2128 if (job->request_len < sizeof(struct fc_bsg_request) + 2129 sizeof(struct sli4_link_diag)) { 2130 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2131 "3013 Received LINK DIAG TEST request " 2132 " size:%d below the minimum size:%d\n", 2133 job->request_len, 2134 (int)(sizeof(struct fc_bsg_request) + 2135 sizeof(struct sli4_link_diag))); 2136 rc = -EINVAL; 2137 goto job_error; 2138 } 2139 2140 rc = lpfc_bsg_diag_mode_enter(phba); 2141 if (rc) 2142 goto job_error; 2143 2144 link_diag_test_cmd = (struct sli4_link_diag *) 2145 job->request->rqst_data.h_vendor.vendor_cmd; 2146 timeout = link_diag_test_cmd->timeout * 100; 2147 2148 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2149 2150 if (rc) 2151 goto job_error; 2152 2153 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2154 if (!pmboxq) { 2155 rc = -ENOMEM; 2156 goto link_diag_test_exit; 2157 } 2158 2159 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) - 2160 sizeof(struct lpfc_sli4_cfg_mhdr)); 2161 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 2162 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE, 2163 req_len, LPFC_SLI4_MBX_EMBED); 2164 if (alloc_len != req_len) { 2165 rc = -ENOMEM; 2166 goto link_diag_test_exit; 2167 } 2168 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test; 2169 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req, 2170 phba->sli4_hba.lnk_info.lnk_no); 2171 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req, 2172 phba->sli4_hba.lnk_info.lnk_tp); 2173 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req, 2174 link_diag_test_cmd->test_id); 2175 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req, 2176 link_diag_test_cmd->loops); 2177 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req, 2178 link_diag_test_cmd->test_version); 2179 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req, 2180 link_diag_test_cmd->error_action); 2181 2182 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2183 2184 shdr = (union lpfc_sli4_cfg_shdr *) 2185 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr; 2186 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2187 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2188 if (shdr_status || shdr_add_status || mbxstatus) { 2189 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 2190 "3010 Run link diag test mailbox failed with " 2191 "mbx_status x%x status x%x, add_status x%x\n", 2192 mbxstatus, 
shdr_status, shdr_add_status); 2193 } 2194 2195 diag_status_reply = (struct diag_status *) 2196 job->reply->reply_data.vendor_reply.vendor_rsp; 2197 2198 if (job->reply_len < 2199 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { 2200 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 2201 "3012 Received Run link diag test reply " 2202 "below minimum size (%d): reply_len:%d\n", 2203 (int)(sizeof(struct fc_bsg_request) + 2204 sizeof(struct diag_status)), 2205 job->reply_len); 2206 rc = -EINVAL; 2207 goto job_error; 2208 } 2209 2210 diag_status_reply->mbox_status = mbxstatus; 2211 diag_status_reply->shdr_status = shdr_status; 2212 diag_status_reply->shdr_add_status = shdr_add_status; 2213 2214 link_diag_test_exit: 2215 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0); 2216 2217 if (pmboxq) 2218 mempool_free(pmboxq, phba->mbox_mem_pool); 2219 2220 lpfc_bsg_diag_mode_exit(phba); 2221 2222 job_error: 2223 /* make error code available to userspace */ 2224 job->reply->result = rc; 2225 /* complete the job back to userspace if no error */ 2226 if (rc == 0) 2227 job->job_done(job); 2228 return rc; 2229 } 2230 2231 /** 2232 * lpfcdiag_loop_self_reg - obtains a remote port login id 2233 * @phba: Pointer to HBA context object 2234 * @rpi: Pointer to a remote port login id 2235 * 2236 * This function obtains a remote port login id so the diag loopback test 2237 * can send and receive its own unsolicited CT command. 2238 **/ 2239 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) 2240 { 2241 LPFC_MBOXQ_t *mbox; 2242 struct lpfc_dmabuf *dmabuff; 2243 int status; 2244 2245 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2246 if (!mbox) 2247 return -ENOMEM; 2248 2249 if (phba->sli_rev < LPFC_SLI_REV4) 2250 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, 2251 (uint8_t *)&phba->pport->fc_sparam, 2252 mbox, *rpi); 2253 else { 2254 *rpi = lpfc_sli4_alloc_rpi(phba); 2255 status = lpfc_reg_rpi(phba, phba->pport->vpi, 2256 phba->pport->fc_myDID, 2257 (uint8_t *)&phba->pport->fc_sparam, 2258 mbox, *rpi); 2259 } 2260 2261 if (status) { 2262 mempool_free(mbox, phba->mbox_mem_pool); 2263 if (phba->sli_rev == LPFC_SLI_REV4) 2264 lpfc_sli4_free_rpi(phba, *rpi); 2265 return -ENOMEM; 2266 } 2267 2268 dmabuff = (struct lpfc_dmabuf *) mbox->context1; 2269 mbox->context1 = NULL; 2270 mbox->context2 = NULL; 2271 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2272 2273 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { 2274 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2275 kfree(dmabuff); 2276 if (status != MBX_TIMEOUT) 2277 mempool_free(mbox, phba->mbox_mem_pool); 2278 if (phba->sli_rev == LPFC_SLI_REV4) 2279 lpfc_sli4_free_rpi(phba, *rpi); 2280 return -ENODEV; 2281 } 2282 2283 if (phba->sli_rev < LPFC_SLI_REV4) 2284 *rpi = mbox->u.mb.un.varWords[0]; 2285 2286 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys); 2287 kfree(dmabuff); 2288 mempool_free(mbox, phba->mbox_mem_pool); 2289 return 0; 2290 } 2291 2292 /** 2293 * lpfcdiag_loop_self_unreg - unregs from the rpi 2294 * @phba: Pointer to HBA context object 2295 * @rpi: Remote port login id 2296 * 2297 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg 2298 **/ 2299 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) 2300 { 2301 LPFC_MBOXQ_t *mbox; 2302 int status; 2303 2304 /* Allocate mboxq structure */ 2305 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2306 if (mbox == NULL) 2307 return -ENOMEM; 2308 2309 if (phba->sli_rev < LPFC_SLI_REV4) 2310 
lpfc_unreg_login(phba, 0, rpi, mbox);
2311 	else
2312 		lpfc_unreg_login(phba, phba->pport->vpi,
2313 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2314 
2315 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2316 
2317 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2318 		if (status != MBX_TIMEOUT)
2319 			mempool_free(mbox, phba->mbox_mem_pool);
2320 		return -EIO;
2321 	}
2322 	mempool_free(mbox, phba->mbox_mem_pool);
2323 	if (phba->sli_rev == LPFC_SLI_REV4)
2324 		lpfc_sli4_free_rpi(phba, rpi);
2325 	return 0;
2326 }
2327 
2328 /**
2329  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2330  * @phba: Pointer to HBA context object
2331  * @rpi: Remote port login id
2332  * @txxri: Pointer to transmit exchange id
2333  * @rxxri: Pointer to receive exchange id
2334  *
2335  * This function obtains the transmit and receive ids required to send
2336  * an unsolicited CT command with a payload. Special lpfc FsType and CmdRsp
2337  * values are used so that the unsolicited response handler is able to
2338  * process the CT command sent on the same port.
2339  **/
2340 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2341 				 uint16_t *txxri, uint16_t *rxxri)
2342 {
2343 	struct lpfc_bsg_event *evt;
2344 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2345 	IOCB_t *cmd, *rsp;
2346 	struct lpfc_dmabuf *dmabuf;
2347 	struct ulp_bde64 *bpl = NULL;
2348 	struct lpfc_sli_ct_request *ctreq = NULL;
2349 	int ret_val = 0;
2350 	int time_left;
2351 	int iocb_stat = 0;
2352 	unsigned long flags;
2353 
2354 	*txxri = 0;
2355 	*rxxri = 0;
2356 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2357 				 SLI_CT_ELX_LOOPBACK);
2358 	if (!evt)
2359 		return -ENOMEM;
2360 
2361 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2362 	list_add(&evt->node, &phba->ct_ev_waiters);
2363 	lpfc_bsg_event_ref(evt);
2364 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2365 
2366 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2367 	rspiocbq = lpfc_sli_get_iocbq(phba);
2368 
2369 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2370 	if (dmabuf) {
2371 		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2372 		if (dmabuf->virt) {
2373 			INIT_LIST_HEAD(&dmabuf->list);
2374 			bpl = (struct ulp_bde64 *) dmabuf->virt;
2375 			memset(bpl, 0, sizeof(*bpl));
2376 			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2377 			bpl->addrHigh =
2378 				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2379 					sizeof(*bpl)));
2380 			bpl->addrLow =
2381 				le32_to_cpu(putPaddrLow(dmabuf->phys +
2382 					sizeof(*bpl)));
2383 			bpl->tus.f.bdeFlags = 0;
2384 			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2385 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2386 		}
2387 	}
2388 
2389 	if (cmdiocbq == NULL || rspiocbq == NULL ||
2390 	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2391 	    dmabuf->virt == NULL) {
2392 		ret_val = -ENOMEM;
2393 		goto err_get_xri_exit;
2394 	}
2395 
2396 	cmd = &cmdiocbq->iocb;
2397 	rsp = &rspiocbq->iocb;
2398 
2399 	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2400 
2401 	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2402 	ctreq->RevisionId.bits.InId = 0;
2403 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2404 	ctreq->FsSubType = 0;
2405 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2406 	ctreq->CommandResponse.bits.Size = 0;
2407 
2408 
2409 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2410 	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2411 	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2412 	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2413 
2414 	cmd->un.xseq64.w5.hcsw.Fctl = LA;
2415 	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2416 	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2417 	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2418 
2419 	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2420 	cmd->ulpBdeCount = 1;
2421 	cmd->ulpLe = 1;
2422 	cmd->ulpClass = CLASS3;
2423 	cmd->ulpContext = rpi;
2424 
2425 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2426 	cmdiocbq->vport = phba->pport;
2427 
2428 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2429 					     rspiocbq,
2430 					     (phba->fc_ratov * 2)
2431 					     + LPFC_DRVR_TIMEOUT);
2432 	if (iocb_stat) {
2433 		ret_val = -EIO;
2434 		goto err_get_xri_exit;
2435 	}
2436 	*txxri = rsp->ulpContext;
2437 
2438 	evt->waiting = 1;
2439 	evt->wait_time_stamp = jiffies;
2440 	time_left = wait_event_interruptible_timeout(
2441 		evt->wq, !list_empty(&evt->events_to_see),
2442 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2443 	if (list_empty(&evt->events_to_see))
2444 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2445 	else {
2446 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2447 		list_move(evt->events_to_see.prev, &evt->events_to_get);
2448 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2449 		*rxxri = (list_entry(evt->events_to_get.prev,
2450 				     typeof(struct event_data),
2451 				     node))->immed_dat;
2452 	}
2453 	evt->waiting = 0;
2454 
2455 err_get_xri_exit:
2456 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2457 	lpfc_bsg_event_unref(evt); /* release ref */
2458 	lpfc_bsg_event_unref(evt); /* delete */
2459 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2460 
2461 	if (dmabuf) {
2462 		if (dmabuf->virt)
2463 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2464 		kfree(dmabuf);
2465 	}
2466 
2467 	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2468 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2469 	if (rspiocbq)
2470 		lpfc_sli_release_iocbq(phba, rspiocbq);
2471 	return ret_val;
2472 }
2473 
2474 /**
2475  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2476  * @phba: Pointer to HBA context object
2477  *
2478  * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
2479  * returns a pointer to it.
2480  **/
2481 static struct lpfc_dmabuf *
2482 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2483 {
2484 	struct lpfc_dmabuf *dmabuf;
2485 	struct pci_dev *pcidev = phba->pcidev;
2486 
2487 	/* allocate dma buffer struct */
2488 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2489 	if (!dmabuf)
2490 		return NULL;
2491 
2492 	INIT_LIST_HEAD(&dmabuf->list);
2493 
2494 	/* now, allocate dma buffer */
2495 	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2496 					  &(dmabuf->phys), GFP_KERNEL);
2497 
2498 	if (!dmabuf->virt) {
2499 		kfree(dmabuf);
2500 		return NULL;
2501 	}
2502 	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2503 
2504 	return dmabuf;
2505 }
2506 
2507 /**
2508  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2509  * @phba: Pointer to HBA context object.
2510  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2511  *
2512  * This routine frees a dma buffer and its associated buffer
2513  * descriptor referred by @dmabuf.
2514  **/
2515 static void
2516 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2517 {
2518 	struct pci_dev *pcidev = phba->pcidev;
2519 
2520 	if (!dmabuf)
2521 		return;
2522 
2523 	if (dmabuf->virt)
2524 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2525 				  dmabuf->virt, dmabuf->phys);
2526 	kfree(dmabuf);
2527 	return;
2528 }
2529 
2530 /**
2531  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2532  * @phba: Pointer to HBA context object.
2533  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2534  *
2535  * This routine frees all dma buffers and their associated buffer
2536  * descriptors referred by @dmabuf_list.
2537  **/
2538 static void
2539 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2540 			    struct list_head *dmabuf_list)
2541 {
2542 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2543 
2544 	if (list_empty(dmabuf_list))
2545 		return;
2546 
2547 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2548 		list_del_init(&dmabuf->list);
2549 		lpfc_bsg_dma_page_free(phba, dmabuf);
2550 	}
2551 	return;
2552 }
2553 
2554 /**
2555  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2556  * @phba: Pointer to HBA context object
2557  * @bpl: Pointer to 64 bit bde structure
2558  * @size: Number of bytes to process
2559  * @nocopydata: Flag to skip zero-initializing the allocated buffers
2560  *
2561  * This function allocates page size buffers and populates an lpfc_dmabufext.
2562  * Unless @nocopydata is set, each buffer is zeroed so the caller can copy
2563  * user data into it. The chained list of page size buffers is returned.
2564  **/
2565 static struct lpfc_dmabufext *
2566 diag_cmd_data_alloc(struct lpfc_hba *phba,
2567 		    struct ulp_bde64 *bpl, uint32_t size,
2568 		    int nocopydata)
2569 {
2570 	struct lpfc_dmabufext *mlist = NULL;
2571 	struct lpfc_dmabufext *dmp;
2572 	int cnt, offset = 0, i = 0;
2573 	struct pci_dev *pcidev;
2574 
2575 	pcidev = phba->pcidev;
2576 
2577 	while (size) {
2578 		/* We get chunks of 4K */
2579 		if (size > BUF_SZ_4K)
2580 			cnt = BUF_SZ_4K;
2581 		else
2582 			cnt = size;
2583 
2584 		/* allocate struct lpfc_dmabufext buffer header */
2585 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2586 		if (!dmp)
2587 			goto out;
2588 
2589 		INIT_LIST_HEAD(&dmp->dma.list);
2590 
2591 		/* Queue it to a linked list */
2592 		if (mlist)
2593 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2594 		else
2595 			mlist = dmp;
2596 
2597 		/* allocate buffer */
2598 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2599 						   cnt,
2600 						   &(dmp->dma.phys),
2601 						   GFP_KERNEL);
2602 
2603 		if (!dmp->dma.virt)
2604 			goto out;
2605 
2606 		dmp->size = cnt;
2607 
2608 		if (nocopydata) {
2609 			bpl->tus.f.bdeFlags = 0;
2610 			pci_dma_sync_single_for_device(phba->pcidev,
2611 				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2612 
2613 		} else {
2614 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2615 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2616 		}
2617 
2618 		/* build buffer ptr list for IOCB */
2619 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2620 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2621 		bpl->tus.f.bdeSize = (ushort) cnt;
2622 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2623 		bpl++;
2624 
2625 		i++;
2626 		offset += cnt;
2627 		size -= cnt;
2628 	}
2629 
2630 	mlist->flag = i;
2631 	return mlist;
2632 out:
2633 	diag_cmd_data_free(phba, mlist);
2634 	return NULL;
2635 }
2636 
2637 /**
2638  * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2639  * @phba: Pointer to HBA context object
2640  * @rxxri: Receive exchange id
2641  * @len: Number of data bytes
2642  *
2643  * This function allocates and posts a data buffer of sufficient size to
2644  * receive an unsolicited CT command.
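 *
 * For example (illustrative numbers): a 10 KB posting is chunked by
 * diag_cmd_data_alloc() into 4 KB + 4 KB + 2 KB dma buffers; a non-HBQ
 * port then queues them two BDEs at a time with CMD_QUE_XRI_BUF64_CX,
 * while an HBQ-enabled port queues one tagged buffer per
 * CMD_QUE_XRI64_CX iocb.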
2645 **/ 2646 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, 2647 size_t len) 2648 { 2649 struct lpfc_sli *psli = &phba->sli; 2650 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 2651 struct lpfc_iocbq *cmdiocbq; 2652 IOCB_t *cmd = NULL; 2653 struct list_head head, *curr, *next; 2654 struct lpfc_dmabuf *rxbmp; 2655 struct lpfc_dmabuf *dmp; 2656 struct lpfc_dmabuf *mp[2] = {NULL, NULL}; 2657 struct ulp_bde64 *rxbpl = NULL; 2658 uint32_t num_bde; 2659 struct lpfc_dmabufext *rxbuffer = NULL; 2660 int ret_val = 0; 2661 int iocb_stat; 2662 int i = 0; 2663 2664 cmdiocbq = lpfc_sli_get_iocbq(phba); 2665 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2666 if (rxbmp != NULL) { 2667 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); 2668 if (rxbmp->virt) { 2669 INIT_LIST_HEAD(&rxbmp->list); 2670 rxbpl = (struct ulp_bde64 *) rxbmp->virt; 2671 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); 2672 } 2673 } 2674 2675 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { 2676 ret_val = -ENOMEM; 2677 goto err_post_rxbufs_exit; 2678 } 2679 2680 /* Queue buffers for the receive exchange */ 2681 num_bde = (uint32_t)rxbuffer->flag; 2682 dmp = &rxbuffer->dma; 2683 2684 cmd = &cmdiocbq->iocb; 2685 i = 0; 2686 2687 INIT_LIST_HEAD(&head); 2688 list_add_tail(&head, &dmp->list); 2689 list_for_each_safe(curr, next, &head) { 2690 mp[i] = list_entry(curr, struct lpfc_dmabuf, list); 2691 list_del(curr); 2692 2693 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2694 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba); 2695 cmd->un.quexri64cx.buff.bde.addrHigh = 2696 putPaddrHigh(mp[i]->phys); 2697 cmd->un.quexri64cx.buff.bde.addrLow = 2698 putPaddrLow(mp[i]->phys); 2699 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize = 2700 ((struct lpfc_dmabufext *)mp[i])->size; 2701 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag; 2702 cmd->ulpCommand = CMD_QUE_XRI64_CX; 2703 cmd->ulpPU = 0; 2704 cmd->ulpLe = 1; 2705 cmd->ulpBdeCount = 1; 2706 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0; 2707 2708 } else { 2709 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys); 2710 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); 2711 cmd->un.cont64[i].tus.f.bdeSize = 2712 ((struct lpfc_dmabufext *)mp[i])->size; 2713 cmd->ulpBdeCount = ++i; 2714 2715 if ((--num_bde > 0) && (i < 2)) 2716 continue; 2717 2718 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX; 2719 cmd->ulpLe = 1; 2720 } 2721 2722 cmd->ulpClass = CLASS3; 2723 cmd->ulpContext = rxxri; 2724 2725 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 2726 0); 2727 if (iocb_stat == IOCB_ERROR) { 2728 diag_cmd_data_free(phba, 2729 (struct lpfc_dmabufext *)mp[0]); 2730 if (mp[1]) 2731 diag_cmd_data_free(phba, 2732 (struct lpfc_dmabufext *)mp[1]); 2733 dmp = list_entry(next, struct lpfc_dmabuf, list); 2734 ret_val = -EIO; 2735 goto err_post_rxbufs_exit; 2736 } 2737 2738 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]); 2739 if (mp[1]) { 2740 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]); 2741 mp[1] = NULL; 2742 } 2743 2744 /* The iocb was freed by lpfc_sli_issue_iocb */ 2745 cmdiocbq = lpfc_sli_get_iocbq(phba); 2746 if (!cmdiocbq) { 2747 dmp = list_entry(next, struct lpfc_dmabuf, list); 2748 ret_val = -EIO; 2749 goto err_post_rxbufs_exit; 2750 } 2751 2752 cmd = &cmdiocbq->iocb; 2753 i = 0; 2754 } 2755 list_del(&head); 2756 2757 err_post_rxbufs_exit: 2758 2759 if (rxbmp) { 2760 if (rxbmp->virt) 2761 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); 2762 kfree(rxbmp); 2763 } 2764 2765 if (cmdiocbq) 2766 lpfc_sli_release_iocbq(phba, 
cmdiocbq);
2767 	return ret_val;
2768 }
2769 
2770 /**
2771  * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
2772  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2773  *
2774  * This function receives a user data buffer to be transmitted and received on
2775  * the same port; the link must be up and in loopback mode prior to this
2776  * call.
2777  * 1. A kernel buffer is allocated to copy the user data into.
2778  * 2. The port registers with "itself".
2779  * 3. The transmit and receive exchange ids are obtained.
2780  * 4. The receive exchange id is posted.
2781  * 5. A new els loopback event is created.
2782  * 6. The command and response iocbs are allocated.
2783  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2784  *
2785  * This function is meant to be called n times while the port is in loopback
2786  * so it is the app's responsibility to issue a reset to take the port out
2787  * of loopback mode.
2788  **/
2789 static int
2790 lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2791 {
2792 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2793 	struct lpfc_hba *phba = vport->phba;
2794 	struct diag_mode_test *diag_mode;
2795 	struct lpfc_bsg_event *evt;
2796 	struct event_data *evdat;
2797 	struct lpfc_sli *psli = &phba->sli;
2798 	uint32_t size;
2799 	uint32_t full_size;
2800 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2801 	uint16_t rpi = 0;
2802 	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
2803 	IOCB_t *cmd, *rsp = NULL;
2804 	struct lpfc_sli_ct_request *ctreq;
2805 	struct lpfc_dmabuf *txbmp;
2806 	struct ulp_bde64 *txbpl = NULL;
2807 	struct lpfc_dmabufext *txbuffer = NULL;
2808 	struct list_head head;
2809 	struct lpfc_dmabuf *curr;
2810 	uint16_t txxri = 0, rxxri;
2811 	uint32_t num_bde;
2812 	uint8_t *ptr = NULL, *rx_databuf = NULL;
2813 	int rc = 0;
2814 	int time_left;
2815 	int iocb_stat;
2816 	unsigned long flags;
2817 	void *dataout = NULL;
2818 	uint32_t total_mem;
2819 
2820 	/* in case no data is returned return just the return code */
2821 	job->reply->reply_payload_rcv_len = 0;
2822 
2823 	if (job->request_len <
2824 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2825 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2826 				"2739 Received DIAG TEST request below minimum "
2827 				"size\n");
2828 		rc = -EINVAL;
2829 		goto loopback_test_exit;
2830 	}
2831 
2832 	if (job->request_payload.payload_len !=
2833 	    job->reply_payload.payload_len) {
2834 		rc = -EINVAL;
2835 		goto loopback_test_exit;
2836 	}
2837 	diag_mode = (struct diag_mode_test *)
2838 		job->request->rqst_data.h_vendor.vendor_cmd;
2839 
2840 	if ((phba->link_state == LPFC_HBA_ERROR) ||
2841 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2842 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2843 		rc = -EACCES;
2844 		goto loopback_test_exit;
2845 	}
2846 
2847 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2848 		rc = -EACCES;
2849 		goto loopback_test_exit;
2850 	}
2851 
2852 	size = job->request_payload.payload_len;
2853 	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2854 
2855 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2856 		rc = -ERANGE;
2857 		goto loopback_test_exit;
2858 	}
2859 
2860 	if (full_size >= BUF_SZ_4K) {
2861 		/*
2862 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2863 		 * then we allocate 64k and re-use that buffer over and over to
2864 		 * xfer the whole block. This is because Linux kernel has a
2865 		 * problem allocating more than 120k of kernel space memory. Saw
2866 		 * problem with GET_FCPTARGETMAPPING...
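		 *
		 * Concretely: a 5 KB payload yields total_mem = full_size
		 * (payload plus ELX_LOOPBACK_HEADER_SZ), while a 300 KB
		 * payload is capped at a single 64 * 1024 byte allocation,
		 * per the re-use note above.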
2867 */ 2868 if (size <= (64 * 1024)) 2869 total_mem = full_size; 2870 else 2871 total_mem = 64 * 1024; 2872 } else 2873 /* Allocate memory for ioctl data */ 2874 total_mem = BUF_SZ_4K; 2875 2876 dataout = kmalloc(total_mem, GFP_KERNEL); 2877 if (dataout == NULL) { 2878 rc = -ENOMEM; 2879 goto loopback_test_exit; 2880 } 2881 2882 ptr = dataout; 2883 ptr += ELX_LOOPBACK_HEADER_SZ; 2884 sg_copy_to_buffer(job->request_payload.sg_list, 2885 job->request_payload.sg_cnt, 2886 ptr, size); 2887 rc = lpfcdiag_loop_self_reg(phba, &rpi); 2888 if (rc) 2889 goto loopback_test_exit; 2890 2891 if (phba->sli_rev < LPFC_SLI_REV4) { 2892 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); 2893 if (rc) { 2894 lpfcdiag_loop_self_unreg(phba, rpi); 2895 goto loopback_test_exit; 2896 } 2897 2898 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); 2899 if (rc) { 2900 lpfcdiag_loop_self_unreg(phba, rpi); 2901 goto loopback_test_exit; 2902 } 2903 } 2904 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, 2905 SLI_CT_ELX_LOOPBACK); 2906 if (!evt) { 2907 lpfcdiag_loop_self_unreg(phba, rpi); 2908 rc = -ENOMEM; 2909 goto loopback_test_exit; 2910 } 2911 2912 spin_lock_irqsave(&phba->ct_ev_lock, flags); 2913 list_add(&evt->node, &phba->ct_ev_waiters); 2914 lpfc_bsg_event_ref(evt); 2915 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 2916 2917 cmdiocbq = lpfc_sli_get_iocbq(phba); 2918 if (phba->sli_rev < LPFC_SLI_REV4) 2919 rspiocbq = lpfc_sli_get_iocbq(phba); 2920 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 2921 2922 if (txbmp) { 2923 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); 2924 if (txbmp->virt) { 2925 INIT_LIST_HEAD(&txbmp->list); 2926 txbpl = (struct ulp_bde64 *) txbmp->virt; 2927 txbuffer = diag_cmd_data_alloc(phba, 2928 txbpl, full_size, 0); 2929 } 2930 } 2931 2932 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) { 2933 rc = -ENOMEM; 2934 goto err_loopback_test_exit; 2935 } 2936 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) { 2937 rc = -ENOMEM; 2938 goto err_loopback_test_exit; 2939 } 2940 2941 cmd = &cmdiocbq->iocb; 2942 if (phba->sli_rev < LPFC_SLI_REV4) 2943 rsp = &rspiocbq->iocb; 2944 2945 INIT_LIST_HEAD(&head); 2946 list_add_tail(&head, &txbuffer->dma.list); 2947 list_for_each_entry(curr, &head, list) { 2948 segment_len = ((struct lpfc_dmabufext *)curr)->size; 2949 if (current_offset == 0) { 2950 ctreq = curr->virt; 2951 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ); 2952 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; 2953 ctreq->RevisionId.bits.InId = 0; 2954 ctreq->FsType = SLI_CT_ELX_LOOPBACK; 2955 ctreq->FsSubType = 0; 2956 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA; 2957 ctreq->CommandResponse.bits.Size = size; 2958 segment_offset = ELX_LOOPBACK_HEADER_SZ; 2959 } else 2960 segment_offset = 0; 2961 2962 BUG_ON(segment_offset >= segment_len); 2963 memcpy(curr->virt + segment_offset, 2964 ptr + current_offset, 2965 segment_len - segment_offset); 2966 2967 current_offset += segment_len - segment_offset; 2968 BUG_ON(current_offset > size); 2969 } 2970 list_del(&head); 2971 2972 /* Build the XMIT_SEQUENCE iocb */ 2973 num_bde = (uint32_t)txbuffer->flag; 2974 2975 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys); 2976 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys); 2977 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 2978 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64)); 2979 2980 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA); 2981 cmd->un.xseq64.w5.hcsw.Dfctl = 0; 2982 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL; 
2983 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT; 2984 2985 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX; 2986 cmd->ulpBdeCount = 1; 2987 cmd->ulpLe = 1; 2988 cmd->ulpClass = CLASS3; 2989 2990 if (phba->sli_rev < LPFC_SLI_REV4) { 2991 cmd->ulpContext = txxri; 2992 } else { 2993 cmd->un.xseq64.bdl.ulpIoTag32 = 0; 2994 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi]; 2995 cmdiocbq->context3 = txbmp; 2996 cmdiocbq->sli4_xritag = NO_XRI; 2997 cmd->unsli3.rcvsli3.ox_id = 0xffff; 2998 } 2999 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 3000 cmdiocbq->vport = phba->pport; 3001 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, 3002 rspiocbq, (phba->fc_ratov * 2) + 3003 LPFC_DRVR_TIMEOUT); 3004 3005 if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && 3006 (rsp->ulpStatus != IOCB_SUCCESS))) { 3007 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3008 "3126 Failed loopback test issue iocb: " 3009 "iocb_stat:x%x\n", iocb_stat); 3010 rc = -EIO; 3011 goto err_loopback_test_exit; 3012 } 3013 3014 evt->waiting = 1; 3015 time_left = wait_event_interruptible_timeout( 3016 evt->wq, !list_empty(&evt->events_to_see), 3017 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); 3018 evt->waiting = 0; 3019 if (list_empty(&evt->events_to_see)) { 3020 rc = (time_left) ? -EINTR : -ETIMEDOUT; 3021 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3022 "3125 Not receiving unsolicited event, " 3023 "rc:x%x\n", rc); 3024 } else { 3025 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3026 list_move(evt->events_to_see.prev, &evt->events_to_get); 3027 evdat = list_entry(evt->events_to_get.prev, 3028 typeof(*evdat), node); 3029 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3030 rx_databuf = evdat->data; 3031 if (evdat->len != full_size) { 3032 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3033 "1603 Loopback test did not receive expected " 3034 "data length. 
actual length 0x%x expected " 3035 "length 0x%x\n", 3036 evdat->len, full_size); 3037 rc = -EIO; 3038 } else if (rx_databuf == NULL) 3039 rc = -EIO; 3040 else { 3041 rc = IOCB_SUCCESS; 3042 /* skip over elx loopback header */ 3043 rx_databuf += ELX_LOOPBACK_HEADER_SZ; 3044 job->reply->reply_payload_rcv_len = 3045 sg_copy_from_buffer(job->reply_payload.sg_list, 3046 job->reply_payload.sg_cnt, 3047 rx_databuf, size); 3048 job->reply->reply_payload_rcv_len = size; 3049 } 3050 } 3051 3052 err_loopback_test_exit: 3053 lpfcdiag_loop_self_unreg(phba, rpi); 3054 3055 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3056 lpfc_bsg_event_unref(evt); /* release ref */ 3057 lpfc_bsg_event_unref(evt); /* delete */ 3058 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3059 3060 if (cmdiocbq != NULL) 3061 lpfc_sli_release_iocbq(phba, cmdiocbq); 3062 3063 if (rspiocbq != NULL) 3064 lpfc_sli_release_iocbq(phba, rspiocbq); 3065 3066 if (txbmp != NULL) { 3067 if (txbpl != NULL) { 3068 if (txbuffer != NULL) 3069 diag_cmd_data_free(phba, txbuffer); 3070 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys); 3071 } 3072 kfree(txbmp); 3073 } 3074 3075 loopback_test_exit: 3076 kfree(dataout); 3077 /* make error code available to userspace */ 3078 job->reply->result = rc; 3079 job->dd_data = NULL; 3080 /* complete the job back to userspace if no error */ 3081 if (rc == IOCB_SUCCESS) 3082 job->job_done(job); 3083 return rc; 3084 } 3085 3086 /** 3087 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command 3088 * @job: GET_DFC_REV fc_bsg_job 3089 **/ 3090 static int 3091 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job) 3092 { 3093 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 3094 struct lpfc_hba *phba = vport->phba; 3095 struct get_mgmt_rev *event_req; 3096 struct get_mgmt_rev_reply *event_reply; 3097 int rc = 0; 3098 3099 if (job->request_len < 3100 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) { 3101 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3102 "2740 Received GET_DFC_REV request below " 3103 "minimum size\n"); 3104 rc = -EINVAL; 3105 goto job_error; 3106 } 3107 3108 event_req = (struct get_mgmt_rev *) 3109 job->request->rqst_data.h_vendor.vendor_cmd; 3110 3111 event_reply = (struct get_mgmt_rev_reply *) 3112 job->reply->reply_data.vendor_reply.vendor_rsp; 3113 3114 if (job->reply_len < 3115 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { 3116 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 3117 "2741 Received GET_DFC_REV reply below " 3118 "minimum size\n"); 3119 rc = -EINVAL; 3120 goto job_error; 3121 } 3122 3123 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV; 3124 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV; 3125 job_error: 3126 job->reply->result = rc; 3127 if (rc == 0) 3128 job->job_done(job); 3129 return rc; 3130 } 3131 3132 /** 3133 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler 3134 * @phba: Pointer to HBA context object. 3135 * @pmboxq: Pointer to mailbox command. 3136 * 3137 * This is completion handler function for mailbox commands issued from 3138 * lpfc_bsg_issue_mbox function. This function is called by the 3139 * mailbox event handler function with no lock held. This function 3140 * will wake up thread waiting on the wait queue pointed by context1 3141 * of the mailbox. 
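 *
 * Completion contract, summarized from the code below: the issuing path
 * points pmboxq->context1 and job->dd_data at the same bsg_job_data;
 * this handler copies the returned MAILBOX_t back to the user buffer,
 * detaches both references under phba->ct_ev_lock, frees the tracking
 * state, and only then completes the job.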
3142 **/ 3143 void 3144 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3145 { 3146 struct bsg_job_data *dd_data; 3147 struct fc_bsg_job *job; 3148 uint32_t size; 3149 unsigned long flags; 3150 uint8_t *pmb, *pmb_buf; 3151 3152 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3153 dd_data = pmboxq->context1; 3154 /* job already timed out? */ 3155 if (!dd_data) { 3156 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3157 return; 3158 } 3159 3160 /* 3161 * The outgoing buffer is readily referred from the dma buffer, 3162 * just need to get header part from mailboxq structure. 3163 */ 3164 pmb = (uint8_t *)&pmboxq->u.mb; 3165 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3166 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3167 3168 job = dd_data->context_un.mbox.set_job; 3169 if (job) { 3170 size = job->reply_payload.payload_len; 3171 job->reply->reply_payload_rcv_len = 3172 sg_copy_from_buffer(job->reply_payload.sg_list, 3173 job->reply_payload.sg_cnt, 3174 pmb_buf, size); 3175 /* need to hold the lock until we set job->dd_data to NULL 3176 * to hold off the timeout handler returning to the mid-layer 3177 * while we are still processing the job. 3178 */ 3179 job->dd_data = NULL; 3180 dd_data->context_un.mbox.set_job = NULL; 3181 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3182 } else { 3183 dd_data->context_un.mbox.set_job = NULL; 3184 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3185 } 3186 3187 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); 3188 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); 3189 kfree(dd_data); 3190 3191 if (job) { 3192 job->reply->result = 0; 3193 job->job_done(job); 3194 } 3195 return; 3196 } 3197 3198 /** 3199 * lpfc_bsg_check_cmd_access - test for a supported mailbox command 3200 * @phba: Pointer to HBA context object. 3201 * @mb: Pointer to a mailbox object. 3202 * @vport: Pointer to a vport object. 3203 * 3204 * Some commands require the port to be offline, some may not be called from 3205 * the application. 
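 *
 * For example, with the port on-line an application request for
 * MBX_INIT_LINK is rejected here with -EPERM (logged as message 2743);
 * the same request is allowed once FC_OFFLINE_MODE is set on the vport.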
3206  **/
3207 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3208 				     MAILBOX_t *mb, struct lpfc_vport *vport)
3209 {
3210 	/* return negative error values for bsg job */
3211 	switch (mb->mbxCommand) {
3212 	/* Offline only */
3213 	case MBX_INIT_LINK:
3214 	case MBX_DOWN_LINK:
3215 	case MBX_CONFIG_LINK:
3216 	case MBX_CONFIG_RING:
3217 	case MBX_RESET_RING:
3218 	case MBX_UNREG_LOGIN:
3219 	case MBX_CLEAR_LA:
3220 	case MBX_DUMP_CONTEXT:
3221 	case MBX_RUN_DIAGS:
3222 	case MBX_RESTART:
3223 	case MBX_SET_MASK:
3224 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3225 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3226 				"2743 Command 0x%x is illegal in on-line "
3227 				"state\n",
3228 				mb->mbxCommand);
3229 			return -EPERM;
3230 		}
		/* fall through - permitted now that the port is off-line */
3231 	case MBX_WRITE_NV:
3232 	case MBX_WRITE_VPARMS:
3233 	case MBX_LOAD_SM:
3234 	case MBX_READ_NV:
3235 	case MBX_READ_CONFIG:
3236 	case MBX_READ_RCONFIG:
3237 	case MBX_READ_STATUS:
3238 	case MBX_READ_XRI:
3239 	case MBX_READ_REV:
3240 	case MBX_READ_LNK_STAT:
3241 	case MBX_DUMP_MEMORY:
3242 	case MBX_DOWN_LOAD:
3243 	case MBX_UPDATE_CFG:
3244 	case MBX_KILL_BOARD:
3245 	case MBX_LOAD_AREA:
3246 	case MBX_LOAD_EXP_ROM:
3247 	case MBX_BEACON:
3248 	case MBX_DEL_LD_ENTRY:
3249 	case MBX_SET_DEBUG:
3250 	case MBX_WRITE_WWN:
3251 	case MBX_SLI4_CONFIG:
3252 	case MBX_READ_EVENT_LOG:
3253 	case MBX_READ_EVENT_LOG_STATUS:
3254 	case MBX_WRITE_EVENT_LOG:
3255 	case MBX_PORT_CAPABILITIES:
3256 	case MBX_PORT_IOV_CONTROL:
3257 	case MBX_RUN_BIU_DIAG64:
3258 		break;
3259 	case MBX_SET_VARIABLE:
3260 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3261 			"1226 mbox: set_variable 0x%x, 0x%x\n",
3262 			mb->un.varWords[0],
3263 			mb->un.varWords[1]);
3264 		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3265 			&& (mb->un.varWords[1] == 1)) {
3266 			phba->wait_4_mlo_maint_flg = 1;
3267 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3268 			spin_lock_irq(&phba->hbalock);
3269 			phba->link_flag &= ~LS_LOOPBACK_MODE;
3270 			spin_unlock_irq(&phba->hbalock);
3271 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3272 		}
3273 		break;
3274 	case MBX_READ_SPARM64:
3275 	case MBX_READ_TOPOLOGY:
3276 	case MBX_REG_LOGIN:
3277 	case MBX_REG_LOGIN64:
3278 	case MBX_CONFIG_PORT:
3279 	case MBX_RUN_BIU_DIAG:
3280 	default:
3281 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3282 			"2742 Unknown Command 0x%x\n",
3283 			mb->mbxCommand);
3284 		return -EPERM;
3285 	}
3286 
3287 	return 0; /* ok */
3288 }
3289 
3290 /**
3291  * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3292  * @phba: Pointer to HBA context object.
3293  *
3294  * This routine cleans up and resets the BSG handling of a multi-buffer mbox
3295  * command session.
3296  **/
3297 static void
3298 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3299 {
3300 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3301 		return;
3302 
3303 	/* free all memory, including dma buffers */
3304 	lpfc_bsg_dma_page_list_free(phba,
3305 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3306 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3307 	/* multi-buffer write mailbox command pass-through complete */
3308 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3309 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3310 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3311 
3312 	return;
3313 }
3314 
3315 /**
3316  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3317  * @phba: Pointer to HBA context object.
3318  * @pmboxq: Pointer to mailbox command.
3319  *
3320  * This routine handles the BSG job completion for mailbox commands with
3321  * multiple external buffers.
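 *
 * The returned job pointer (NULL if the job already timed out) lets the
 * read/write specific completion handlers release the mailbox and session
 * resources before finishing the job, e.g.:
 *
 *	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
 *	mempool_free(pmboxq, phba->mbox_mem_pool);
 *	if (job)
 *		job->job_done(job);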
3322 **/ 3323 static struct fc_bsg_job * 3324 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3325 { 3326 struct bsg_job_data *dd_data; 3327 struct fc_bsg_job *job; 3328 uint8_t *pmb, *pmb_buf; 3329 unsigned long flags; 3330 uint32_t size; 3331 int rc = 0; 3332 struct lpfc_dmabuf *dmabuf; 3333 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3334 uint8_t *pmbx; 3335 3336 spin_lock_irqsave(&phba->ct_ev_lock, flags); 3337 dd_data = pmboxq->context1; 3338 /* has the job already timed out? */ 3339 if (!dd_data) { 3340 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3341 job = NULL; 3342 goto job_done_out; 3343 } 3344 3345 /* 3346 * The outgoing buffer is readily referred from the dma buffer, 3347 * just need to get header part from mailboxq structure. 3348 */ 3349 pmb = (uint8_t *)&pmboxq->u.mb; 3350 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; 3351 /* Copy the byte swapped response mailbox back to the user */ 3352 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); 3353 /* if there is any non-embedded extended data copy that too */ 3354 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf; 3355 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3356 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 3357 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 3358 pmbx = (uint8_t *)dmabuf->virt; 3359 /* byte swap the extended data following the mailbox command */ 3360 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3361 &pmbx[sizeof(MAILBOX_t)], 3362 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); 3363 } 3364 3365 job = dd_data->context_un.mbox.set_job; 3366 if (job) { 3367 size = job->reply_payload.payload_len; 3368 job->reply->reply_payload_rcv_len = 3369 sg_copy_from_buffer(job->reply_payload.sg_list, 3370 job->reply_payload.sg_cnt, 3371 pmb_buf, size); 3372 /* result for successful */ 3373 job->reply->result = 0; 3374 job->dd_data = NULL; 3375 /* need to hold the lock util we set job->dd_data to NULL 3376 * to hold off the timeout handler from midlayer to take 3377 * any action. 3378 */ 3379 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3380 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3381 "2937 SLI_CONFIG ext-buffer maibox command " 3382 "(x%x/x%x) complete bsg job done, bsize:%d\n", 3383 phba->mbox_ext_buf_ctx.nembType, 3384 phba->mbox_ext_buf_ctx.mboxType, size); 3385 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, 3386 phba->mbox_ext_buf_ctx.nembType, 3387 phba->mbox_ext_buf_ctx.mboxType, 3388 dma_ebuf, sta_pos_addr, 3389 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); 3390 } else 3391 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 3392 3393 job_done_out: 3394 if (!job) 3395 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3396 "2938 SLI_CONFIG ext-buffer maibox " 3397 "command (x%x/x%x) failure, rc:x%x\n", 3398 phba->mbox_ext_buf_ctx.nembType, 3399 phba->mbox_ext_buf_ctx.mboxType, rc); 3400 /* state change */ 3401 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; 3402 kfree(dd_data); 3403 3404 return job; 3405 } 3406 3407 /** 3408 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox 3409 * @phba: Pointer to HBA context object. 3410 * @pmboxq: Pointer to mailbox command. 3411 * 3412 * This is completion handler function for mailbox read commands with multiple 3413 * external buffers. 
3414 **/ 3415 static void 3416 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3417 { 3418 struct fc_bsg_job *job; 3419 3420 /* handle the BSG job with mailbox command */ 3421 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) 3422 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3423 3424 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3425 "2939 SLI_CONFIG ext-buffer rd maibox command " 3426 "complete, ctxState:x%x, mbxStatus:x%x\n", 3427 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3428 3429 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3430 3431 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) 3432 lpfc_bsg_mbox_ext_session_reset(phba); 3433 3434 /* free base driver mailbox structure memory */ 3435 mempool_free(pmboxq, phba->mbox_mem_pool); 3436 3437 /* complete the bsg job if we have it */ 3438 if (job) 3439 job->job_done(job); 3440 3441 return; 3442 } 3443 3444 /** 3445 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox 3446 * @phba: Pointer to HBA context object. 3447 * @pmboxq: Pointer to mailbox command. 3448 * 3449 * This is completion handler function for mailbox write commands with multiple 3450 * external buffers. 3451 **/ 3452 static void 3453 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3454 { 3455 struct fc_bsg_job *job; 3456 3457 /* handle the BSG job with the mailbox command */ 3458 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) 3459 pmboxq->u.mb.mbxStatus = MBXERR_ERROR; 3460 3461 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3462 "2940 SLI_CONFIG ext-buffer wr maibox command " 3463 "complete, ctxState:x%x, mbxStatus:x%x\n", 3464 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); 3465 3466 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); 3467 3468 /* free all memory, including dma buffers */ 3469 mempool_free(pmboxq, phba->mbox_mem_pool); 3470 lpfc_bsg_mbox_ext_session_reset(phba); 3471 3472 /* complete the bsg job if we have it */ 3473 if (job) 3474 job->job_done(job); 3475 3476 return; 3477 } 3478 3479 static void 3480 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp, 3481 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf, 3482 struct lpfc_dmabuf *ext_dmabuf) 3483 { 3484 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3485 3486 /* pointer to the start of mailbox command */ 3487 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt; 3488 3489 if (nemb_tp == nemb_mse) { 3490 if (index == 0) { 3491 sli_cfg_mbx->un.sli_config_emb0_subsys. 3492 mse[index].pa_hi = 3493 putPaddrHigh(mbx_dmabuf->phys + 3494 sizeof(MAILBOX_t)); 3495 sli_cfg_mbx->un.sli_config_emb0_subsys. 3496 mse[index].pa_lo = 3497 putPaddrLow(mbx_dmabuf->phys + 3498 sizeof(MAILBOX_t)); 3499 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3500 "2943 SLI_CONFIG(mse)[%d], " 3501 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3502 index, 3503 sli_cfg_mbx->un.sli_config_emb0_subsys. 3504 mse[index].buf_len, 3505 sli_cfg_mbx->un.sli_config_emb0_subsys. 3506 mse[index].pa_hi, 3507 sli_cfg_mbx->un.sli_config_emb0_subsys. 3508 mse[index].pa_lo); 3509 } else { 3510 sli_cfg_mbx->un.sli_config_emb0_subsys. 3511 mse[index].pa_hi = 3512 putPaddrHigh(ext_dmabuf->phys); 3513 sli_cfg_mbx->un.sli_config_emb0_subsys. 3514 mse[index].pa_lo = 3515 putPaddrLow(ext_dmabuf->phys); 3516 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3517 "2944 SLI_CONFIG(mse)[%d], " 3518 "bufLen:%d, addrHi:x%x, addrLo:x%x\n", 3519 index, 3520 sli_cfg_mbx->un.sli_config_emb0_subsys. 
3521 					mse[index].buf_len,
3522 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3523 					mse[index].pa_hi,
3524 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3525 					mse[index].pa_lo);
3526 		}
3527 	} else {
3528 		if (index == 0) {
3529 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3530 				hbd[index].pa_hi =
3531 				putPaddrHigh(mbx_dmabuf->phys +
3532 					     sizeof(MAILBOX_t));
3533 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3534 				hbd[index].pa_lo =
3535 				putPaddrLow(mbx_dmabuf->phys +
3536 					    sizeof(MAILBOX_t));
3537 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3538 					"3007 SLI_CONFIG(hbd)[%d], "
3539 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3540 					index,
3541 					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3542 					&sli_cfg_mbx->un.
3543 					sli_config_emb1_subsys.hbd[index]),
3544 					sli_cfg_mbx->un.sli_config_emb1_subsys.
3545 					hbd[index].pa_hi,
3546 					sli_cfg_mbx->un.sli_config_emb1_subsys.
3547 					hbd[index].pa_lo);
3548 
3549 		} else {
3550 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3551 				hbd[index].pa_hi =
3552 				putPaddrHigh(ext_dmabuf->phys);
3553 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3554 				hbd[index].pa_lo =
3555 				putPaddrLow(ext_dmabuf->phys);
3556 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3557 					"3008 SLI_CONFIG(hbd)[%d], "
3558 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3559 					index,
3560 					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3561 					&sli_cfg_mbx->un.
3562 					sli_config_emb1_subsys.hbd[index]),
3563 					sli_cfg_mbx->un.sli_config_emb1_subsys.
3564 					hbd[index].pa_hi,
3565 					sli_cfg_mbx->un.sli_config_emb1_subsys.
3566 					hbd[index].pa_lo);
3567 		}
3568 	}
3569 	return;
3570 }
3571 
3572 /**
3573  * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3574  * @phba: Pointer to HBA context object.
3575  * @job: Pointer to the fc_bsg_job object.
3576  * @nemb_tp: Enumerate of non-embedded mailbox command type.
3577  * @dmabuf: Pointer to a DMA buffer descriptor.
3578  *
3579  * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3580  * non-embedded external buffers.
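 *
 * Descriptor layout, as set up by lpfc_bsg_sli_cfg_dma_desc_setup() above:
 * descriptor 0 points just past the MAILBOX_t header inside the mailbox
 * dma page itself, while descriptors 1..N-1 each point at their own
 * BSG_MBOX_SIZE page (pseudo-indexing for illustration; the hbd case is
 * analogous):
 *
 *	mse[0].pa_lo = putPaddrLow(mbx_dmabuf->phys + sizeof(MAILBOX_t));
 *	mse[i].pa_lo = putPaddrLow(ext_dmabuf_i->phys);	for i > 0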
3581 **/ 3582 static int 3583 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, 3584 enum nemb_type nemb_tp, 3585 struct lpfc_dmabuf *dmabuf) 3586 { 3587 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3588 struct dfc_mbox_req *mbox_req; 3589 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf; 3590 uint32_t ext_buf_cnt, ext_buf_index; 3591 struct lpfc_dmabuf *ext_dmabuf = NULL; 3592 struct bsg_job_data *dd_data = NULL; 3593 LPFC_MBOXQ_t *pmboxq = NULL; 3594 MAILBOX_t *pmb; 3595 uint8_t *pmbx; 3596 int rc, i; 3597 3598 mbox_req = 3599 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 3600 3601 /* pointer to the start of mailbox command */ 3602 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3603 3604 if (nemb_tp == nemb_mse) { 3605 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, 3606 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); 3607 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { 3608 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3609 "2945 Handled SLI_CONFIG(mse) rd, " 3610 "ext_buf_cnt(%d) out of range(%d)\n", 3611 ext_buf_cnt, 3612 LPFC_MBX_SLI_CONFIG_MAX_MSE); 3613 rc = -ERANGE; 3614 goto job_error; 3615 } 3616 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3617 "2941 Handled SLI_CONFIG(mse) rd, " 3618 "ext_buf_cnt:%d\n", ext_buf_cnt); 3619 } else { 3620 /* sanity check on interface type for support */ 3621 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3622 LPFC_SLI_INTF_IF_TYPE_2) { 3623 rc = -ENODEV; 3624 goto job_error; 3625 } 3626 /* nemb_tp == nemb_hbd */ 3627 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; 3628 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { 3629 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3630 "2946 Handled SLI_CONFIG(hbd) rd, " 3631 "ext_buf_cnt(%d) out of range(%d)\n", 3632 ext_buf_cnt, 3633 LPFC_MBX_SLI_CONFIG_MAX_HBD); 3634 rc = -ERANGE; 3635 goto job_error; 3636 } 3637 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3638 "2942 Handled SLI_CONFIG(hbd) rd, " 3639 "ext_buf_cnt:%d\n", ext_buf_cnt); 3640 } 3641 3642 /* before dma descriptor setup */ 3643 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, 3644 sta_pre_addr, dmabuf, ext_buf_cnt); 3645 3646 /* reject non-embedded mailbox command with none external buffer */ 3647 if (ext_buf_cnt == 0) { 3648 rc = -EPERM; 3649 goto job_error; 3650 } else if (ext_buf_cnt > 1) { 3651 /* additional external read buffers */ 3652 for (i = 1; i < ext_buf_cnt; i++) { 3653 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba); 3654 if (!ext_dmabuf) { 3655 rc = -ENOMEM; 3656 goto job_error; 3657 } 3658 list_add_tail(&ext_dmabuf->list, 3659 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3660 } 3661 } 3662 3663 /* bsg tracking structure */ 3664 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 3665 if (!dd_data) { 3666 rc = -ENOMEM; 3667 goto job_error; 3668 } 3669 3670 /* mailbox command structure for base driver */ 3671 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3672 if (!pmboxq) { 3673 rc = -ENOMEM; 3674 goto job_error; 3675 } 3676 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 3677 3678 /* for the first external buffer */ 3679 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 3680 3681 /* for the rest of external buffer descriptors if any */ 3682 if (ext_buf_cnt > 1) { 3683 ext_buf_index = 1; 3684 list_for_each_entry_safe(curr_dmabuf, next_dmabuf, 3685 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) { 3686 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 3687 ext_buf_index, dmabuf, 3688 curr_dmabuf); 3689 
ext_buf_index++; 3690 } 3691 } 3692 3693 /* after dma descriptor setup */ 3694 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox, 3695 sta_pos_addr, dmabuf, ext_buf_cnt); 3696 3697 /* construct base driver mbox command */ 3698 pmb = &pmboxq->u.mb; 3699 pmbx = (uint8_t *)dmabuf->virt; 3700 memcpy(pmb, pmbx, sizeof(*pmb)); 3701 pmb->mbxOwner = OWN_HOST; 3702 pmboxq->vport = phba->pport; 3703 3704 /* multi-buffer handling context */ 3705 phba->mbox_ext_buf_ctx.nembType = nemb_tp; 3706 phba->mbox_ext_buf_ctx.mboxType = mbox_rd; 3707 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt; 3708 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag; 3709 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum; 3710 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf; 3711 3712 /* callback for multi-buffer read mailbox command */ 3713 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl; 3714 3715 /* context fields to callback function */ 3716 pmboxq->context1 = dd_data; 3717 dd_data->type = TYPE_MBOX; 3718 dd_data->context_un.mbox.pmboxq = pmboxq; 3719 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 3720 dd_data->context_un.mbox.set_job = job; 3721 job->dd_data = dd_data; 3722 3723 /* state change */ 3724 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; 3725 3726 /* 3727 * Non-embedded mailbox subcommand data gets byte swapped here because 3728 * the lower level driver code only does the first 64 mailbox words. 3729 */ 3730 if ((!bsg_bf_get(lpfc_mbox_hdr_emb, 3731 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) && 3732 (nemb_tp == nemb_mse)) 3733 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)], 3734 &pmbx[sizeof(MAILBOX_t)], 3735 sli_cfg_mbx->un.sli_config_emb0_subsys. 3736 mse[0].buf_len); 3737 3738 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3739 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { 3740 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3741 "2947 Issued SLI_CONFIG ext-buffer " 3742 "maibox command, rc:x%x\n", rc); 3743 return SLI_CONFIG_HANDLED; 3744 } 3745 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3746 "2948 Failed to issue SLI_CONFIG ext-buffer " 3747 "maibox command, rc:x%x\n", rc); 3748 rc = -EPIPE; 3749 3750 job_error: 3751 if (pmboxq) 3752 mempool_free(pmboxq, phba->mbox_mem_pool); 3753 lpfc_bsg_dma_page_list_free(phba, 3754 &phba->mbox_ext_buf_ctx.ext_dmabuf_list); 3755 kfree(dd_data); 3756 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE; 3757 return rc; 3758 } 3759 3760 /** 3761 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write 3762 * @phba: Pointer to HBA context object. 3763 * @mb: Pointer to a BSG mailbox object. 3764 * @dmabuff: Pointer to a DMA buffer descriptor. 3765 * 3766 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with 3767 * non-embedded external bufffers. 
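 *
 * A single-buffer write is issued immediately with MBX_NOWAIT and
 * lpfc_bsg_issue_write_mbox_ext_cmpl() as the completion handler; a
 * multi-buffer write instead completes this job right away and collects
 * the remaining external buffers from follow-up bsg requests before
 * issuing the mailbox command.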
3768 **/ 3769 static int 3770 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, 3771 enum nemb_type nemb_tp, 3772 struct lpfc_dmabuf *dmabuf) 3773 { 3774 struct dfc_mbox_req *mbox_req; 3775 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3776 uint32_t ext_buf_cnt; 3777 struct bsg_job_data *dd_data = NULL; 3778 LPFC_MBOXQ_t *pmboxq = NULL; 3779 MAILBOX_t *pmb; 3780 uint8_t *mbx; 3781 int rc = SLI_CONFIG_NOT_HANDLED, i; 3782 3783 mbox_req = 3784 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 3785 3786 /* pointer to the start of mailbox command */ 3787 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3788 3789 if (nemb_tp == nemb_mse) { 3790 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt, 3791 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr); 3792 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) { 3793 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3794 "2953 Failed SLI_CONFIG(mse) wr, " 3795 "ext_buf_cnt(%d) out of range(%d)\n", 3796 ext_buf_cnt, 3797 LPFC_MBX_SLI_CONFIG_MAX_MSE); 3798 return -ERANGE; 3799 } 3800 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3801 "2949 Handled SLI_CONFIG(mse) wr, " 3802 "ext_buf_cnt:%d\n", ext_buf_cnt); 3803 } else { 3804 /* sanity check on interface type for support */ 3805 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3806 LPFC_SLI_INTF_IF_TYPE_2) 3807 return -ENODEV; 3808 /* nemb_tp == nemb_hbd */ 3809 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count; 3810 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) { 3811 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, 3812 "2954 Failed SLI_CONFIG(hbd) wr, " 3813 "ext_buf_cnt(%d) out of range(%d)\n", 3814 ext_buf_cnt, 3815 LPFC_MBX_SLI_CONFIG_MAX_HBD); 3816 return -ERANGE; 3817 } 3818 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3819 "2950 Handled SLI_CONFIG(hbd) wr, " 3820 "ext_buf_cnt:%d\n", ext_buf_cnt); 3821 } 3822 3823 /* before dma buffer descriptor setup */ 3824 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, 3825 sta_pre_addr, dmabuf, ext_buf_cnt); 3826 3827 if (ext_buf_cnt == 0) 3828 return -EPERM; 3829 3830 /* for the first external buffer */ 3831 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf); 3832 3833 /* after dma descriptor setup */ 3834 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox, 3835 sta_pos_addr, dmabuf, ext_buf_cnt); 3836 3837 /* log for looking forward */ 3838 for (i = 1; i < ext_buf_cnt; i++) { 3839 if (nemb_tp == nemb_mse) 3840 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3841 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n", 3842 i, sli_cfg_mbx->un.sli_config_emb0_subsys. 3843 mse[i].buf_len); 3844 else 3845 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3846 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n", 3847 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len, 3848 &sli_cfg_mbx->un.sli_config_emb1_subsys. 
							hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		dd_data->context_un.mbox.set_job = job;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	job->reply->result = 0;
	job->job_done(job);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
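 *
 * Dispatch summary, as implemented below (read vs. write is selected by
 * subsystem and opcode):
 *
 *   emb0 (MSE): FCOE READ_FCF                     -> read
 *               FCOE ADD_FCF                      -> write
 *               COMN GET_CNTL_ADDL_ATTRIBUTES     -> read
 *   emb1 (HBD): COMN READ_OBJECT/READ_OBJECT_LIST -> read
 *               COMN WRITE_OBJECT                 -> write
 *
 * Anything else is either rejected with -EPERM or returned as
 * SLI_CONFIG_NOT_HANDLED so the regular mailbox path can try it.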
3931 **/ 3932 static int 3933 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 3934 struct lpfc_dmabuf *dmabuf) 3935 { 3936 struct lpfc_sli_config_mbox *sli_cfg_mbx; 3937 uint32_t subsys; 3938 uint32_t opcode; 3939 int rc = SLI_CONFIG_NOT_HANDLED; 3940 3941 /* state change on new multi-buffer pass-through mailbox command */ 3942 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST; 3943 3944 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt; 3945 3946 if (!bsg_bf_get(lpfc_mbox_hdr_emb, 3947 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) { 3948 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys, 3949 &sli_cfg_mbx->un.sli_config_emb0_subsys); 3950 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode, 3951 &sli_cfg_mbx->un.sli_config_emb0_subsys); 3952 if (subsys == SLI_CONFIG_SUBSYS_FCOE) { 3953 switch (opcode) { 3954 case FCOE_OPCODE_READ_FCF: 3955 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3956 "2957 Handled SLI_CONFIG " 3957 "subsys_fcoe, opcode:x%x\n", 3958 opcode); 3959 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 3960 nemb_mse, dmabuf); 3961 break; 3962 case FCOE_OPCODE_ADD_FCF: 3963 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3964 "2958 Handled SLI_CONFIG " 3965 "subsys_fcoe, opcode:x%x\n", 3966 opcode); 3967 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 3968 nemb_mse, dmabuf); 3969 break; 3970 default: 3971 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3972 "2959 Reject SLI_CONFIG " 3973 "subsys_fcoe, opcode:x%x\n", 3974 opcode); 3975 rc = -EPERM; 3976 break; 3977 } 3978 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) { 3979 switch (opcode) { 3980 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: 3981 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3982 "3106 Handled SLI_CONFIG " 3983 "subsys_comn, opcode:x%x\n", 3984 opcode); 3985 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 3986 nemb_mse, dmabuf); 3987 break; 3988 default: 3989 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3990 "3107 Reject SLI_CONFIG " 3991 "subsys_comn, opcode:x%x\n", 3992 opcode); 3993 rc = -EPERM; 3994 break; 3995 } 3996 } else { 3997 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3998 "2977 Reject SLI_CONFIG " 3999 "subsys:x%d, opcode:x%x\n", 4000 subsys, opcode); 4001 rc = -EPERM; 4002 } 4003 } else { 4004 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys, 4005 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4006 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode, 4007 &sli_cfg_mbx->un.sli_config_emb1_subsys); 4008 if (subsys == SLI_CONFIG_SUBSYS_COMN) { 4009 switch (opcode) { 4010 case COMN_OPCODE_READ_OBJECT: 4011 case COMN_OPCODE_READ_OBJECT_LIST: 4012 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4013 "2960 Handled SLI_CONFIG " 4014 "subsys_comn, opcode:x%x\n", 4015 opcode); 4016 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 4017 nemb_hbd, dmabuf); 4018 break; 4019 case COMN_OPCODE_WRITE_OBJECT: 4020 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4021 "2961 Handled SLI_CONFIG " 4022 "subsys_comn, opcode:x%x\n", 4023 opcode); 4024 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job, 4025 nemb_hbd, dmabuf); 4026 break; 4027 default: 4028 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4029 "2962 Not handled SLI_CONFIG " 4030 "subsys_comn, opcode:x%x\n", 4031 opcode); 4032 rc = SLI_CONFIG_NOT_HANDLED; 4033 break; 4034 } 4035 } else { 4036 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 4037 "2978 Not handled SLI_CONFIG " 4038 "subsys:x%d, opcode:x%x\n", 4039 subsys, opcode); 4040 rc = SLI_CONFIG_NOT_HANDLED; 4041 } 4042 } 4043 4044 /* state reset on not handled new multi-buffer mailbox command */ 4045 if (rc != 
	    SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}

/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine requests the abort of a pass-through mailbox command with
 * multiple external buffers due to an error condition.
 **/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
	return;
}

/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job requesting the buffer.
 *
 * This routine returns the next mailbox read external buffer back to
 * user space through BSG.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	job->reply->result = 0;
	job->job_done(job);

	return SLI_CONFIG_HANDLED;
}

/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job carrying the buffer.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
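 *
 * The session bookkeeping relies on the caller having verified that the
 * incoming buffer is strictly in order, so the descriptor index used
 * below is simply the pre-increment sequence number. A rough sketch of
 * the per-call state transition:
 *
 *   index = seqNum; seqNum++;
 *   copy the payload; append the buffer descriptor;
 *   if (seqNum == numBuf)
 *           issue the staged mailbox command;  (last buffer has arrived)
 *   else
 *           complete the job and wait for more buffers;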
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);
		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		dd_data->context_un.mbox.set_job = job;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	job->reply->result = 0;
	job->job_done(job);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job carrying the external buffer.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles the external buffer for a SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}
	return rc;
}

/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
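 *
 * A follow-on external buffer is accepted only when it matches the open
 * session; the broken-pipe checks below amount to:
 *
 *   extMboxTag == ctx.mbxTag      - same pass-through command
 *   extSeqNum  <= ctx.numBuf      - within the advertised buffer count
 *   extSeqNum  == ctx.seqNum + 1  - strictly in-order delivery
 *
 * Any violation resets the session and returns -EPIPE to the caller.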
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, and copy the caller's mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
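 *
 * Return values, as consumed by lpfc_bsg_mbox_cmd(): 0 means the job
 * completed inline and the caller finishes it; 1 means the command was
 * issued asynchronously and the completion handler will finish the job;
 * a negative errno reports a setup or issue failure. A multi-buffer
 * SLI_CONFIG pass-through that was fully handled returns through the
 * job_cont label so the tracking state it staged is left intact.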
4390 **/ 4391 static uint32_t 4392 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 4393 struct lpfc_vport *vport) 4394 { 4395 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ 4396 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ 4397 /* a 4k buffer to hold the mb and extended data from/to the bsg */ 4398 uint8_t *pmbx = NULL; 4399 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ 4400 struct lpfc_dmabuf *dmabuf = NULL; 4401 struct dfc_mbox_req *mbox_req; 4402 struct READ_EVENT_LOG_VAR *rdEventLog; 4403 uint32_t transmit_length, receive_length, mode; 4404 struct lpfc_mbx_sli4_config *sli4_config; 4405 struct lpfc_mbx_nembed_cmd *nembed_sge; 4406 struct mbox_header *header; 4407 struct ulp_bde64 *bde; 4408 uint8_t *ext = NULL; 4409 int rc = 0; 4410 uint8_t *from; 4411 uint32_t size; 4412 4413 4414 /* in case no data is transferred */ 4415 job->reply->reply_payload_rcv_len = 0; 4416 4417 /* sanity check to protect driver */ 4418 if (job->reply_payload.payload_len > BSG_MBOX_SIZE || 4419 job->request_payload.payload_len > BSG_MBOX_SIZE) { 4420 rc = -ERANGE; 4421 goto job_done; 4422 } 4423 4424 /* 4425 * Don't allow mailbox commands to be sent when blocked or when in 4426 * the middle of discovery 4427 */ 4428 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { 4429 rc = -EAGAIN; 4430 goto job_done; 4431 } 4432 4433 mbox_req = 4434 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd; 4435 4436 /* check if requested extended data lengths are valid */ 4437 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) || 4438 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) { 4439 rc = -ERANGE; 4440 goto job_done; 4441 } 4442 4443 dmabuf = lpfc_bsg_dma_page_alloc(phba); 4444 if (!dmabuf || !dmabuf->virt) { 4445 rc = -ENOMEM; 4446 goto job_done; 4447 } 4448 4449 /* Get the mailbox command or external buffer from BSG */ 4450 pmbx = (uint8_t *)dmabuf->virt; 4451 size = job->request_payload.payload_len; 4452 sg_copy_to_buffer(job->request_payload.sg_list, 4453 job->request_payload.sg_cnt, pmbx, size); 4454 4455 /* Handle possible SLI_CONFIG with non-embedded payloads */ 4456 if (phba->sli_rev == LPFC_SLI_REV4) { 4457 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf); 4458 if (rc == SLI_CONFIG_HANDLED) 4459 goto job_cont; 4460 if (rc) 4461 goto job_done; 4462 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */ 4463 } 4464 4465 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport); 4466 if (rc != 0) 4467 goto job_done; /* must be negative */ 4468 4469 /* allocate our bsg tracking structure */ 4470 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4471 if (!dd_data) { 4472 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4473 "2727 Failed allocation of dd_data\n"); 4474 rc = -ENOMEM; 4475 goto job_done; 4476 } 4477 4478 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4479 if (!pmboxq) { 4480 rc = -ENOMEM; 4481 goto job_done; 4482 } 4483 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); 4484 4485 pmb = &pmboxq->u.mb; 4486 memcpy(pmb, pmbx, sizeof(*pmb)); 4487 pmb->mbxOwner = OWN_HOST; 4488 pmboxq->vport = vport; 4489 4490 /* If HBA encountered an error attention, allow only DUMP 4491 * or RESTART mailbox commands until the HBA is restarted. 
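	 *
	 * Note that this check only logs a warning (message 2797) and lets
	 * the command proceed; actual access enforcement was already done
	 * by lpfc_bsg_check_cmd_access() above.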
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data;
	 * allocate our own buffer and set up the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varwords[4-8]; otherwise check the receive
		 * length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* receive length must be non-zero */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
BSG_MBOX_SIZE - sizeof(MAILBOX_t)) { 4590 rc = -ERANGE; 4591 goto job_done; 4592 } 4593 bde->addrHigh = putPaddrHigh(dmabuf->phys 4594 + sizeof(MAILBOX_t)); 4595 bde->addrLow = putPaddrLow(dmabuf->phys 4596 + sizeof(MAILBOX_t)); 4597 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { 4598 /* Handling non-embedded SLI_CONFIG mailbox command */ 4599 sli4_config = &pmboxq->u.mqe.un.sli4_config; 4600 if (!bf_get(lpfc_mbox_hdr_emb, 4601 &sli4_config->header.cfg_mhdr)) { 4602 /* rebuild the command for sli4 using our 4603 * own buffers like we do for biu diags 4604 */ 4605 header = (struct mbox_header *) 4606 &pmb->un.varWords[0]; 4607 nembed_sge = (struct lpfc_mbx_nembed_cmd *) 4608 &pmb->un.varWords[0]; 4609 receive_length = nembed_sge->sge[0].length; 4610 4611 /* receive length cannot be greater than 4612 * mailbox extension size 4613 */ 4614 if ((receive_length == 0) || 4615 (receive_length > 4616 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) { 4617 rc = -ERANGE; 4618 goto job_done; 4619 } 4620 4621 nembed_sge->sge[0].pa_hi = 4622 putPaddrHigh(dmabuf->phys 4623 + sizeof(MAILBOX_t)); 4624 nembed_sge->sge[0].pa_lo = 4625 putPaddrLow(dmabuf->phys 4626 + sizeof(MAILBOX_t)); 4627 } 4628 } 4629 } 4630 4631 dd_data->context_un.mbox.dmabuffers = dmabuf; 4632 4633 /* setup wake call as IOCB callback */ 4634 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl; 4635 4636 /* setup context field to pass wait_queue pointer to wake function */ 4637 pmboxq->context1 = dd_data; 4638 dd_data->type = TYPE_MBOX; 4639 dd_data->context_un.mbox.pmboxq = pmboxq; 4640 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; 4641 dd_data->context_un.mbox.set_job = job; 4642 dd_data->context_un.mbox.ext = ext; 4643 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; 4644 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; 4645 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; 4646 job->dd_data = dd_data; 4647 4648 if ((vport->fc_flag & FC_OFFLINE_MODE) || 4649 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { 4650 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 4651 if (rc != MBX_SUCCESS) { 4652 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 4653 goto job_done; 4654 } 4655 4656 /* job finished, copy the data */ 4657 memcpy(pmbx, pmb, sizeof(*pmb)); 4658 job->reply->reply_payload_rcv_len = 4659 sg_copy_from_buffer(job->reply_payload.sg_list, 4660 job->reply_payload.sg_cnt, 4661 pmbx, size); 4662 /* not waiting mbox already done */ 4663 rc = 0; 4664 goto job_done; 4665 } 4666 4667 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4668 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) 4669 return 1; /* job started */ 4670 4671 job_done: 4672 /* common exit for error or job completed inline */ 4673 if (pmboxq) 4674 mempool_free(pmboxq, phba->mbox_mem_pool); 4675 lpfc_bsg_dma_page_free(phba, dmabuf); 4676 kfree(dd_data); 4677 4678 job_cont: 4679 return rc; 4680 } 4681 4682 /** 4683 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command 4684 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX. 
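 *
 * The vendor request buffer is laid out, roughly, as:
 *
 *   struct fc_bsg_request | struct dfc_mbox_req | ...
 *
 * with the mailbox image itself carried in the request payload sg list.
 * Requests from older applications may be shorter than the current
 * dfc_mbox_req; the compatibility code below zeroes extMboxTag and
 * extSeqNum in that case so the command is treated as single-buffer.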
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct dfc_mbox_req *mbox_req;
	int rc = 0;

	/* mix-and-match backward compatibility */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2737 Mix-and-match backward compatibility "
				"between MBOX_REQ old size:%d and "
				"new request size:%d\n",
				(int)(job->request_len -
				      sizeof(struct fc_bsg_request)),
				(int)sizeof(struct dfc_mbox_req));
		mbox_req = (struct dfc_mbox_req *)
				job->request->rqst_data.h_vendor.vendor_cmd;
		mbox_req->extMboxTag = 0;
		mbox_req->extSeqNum = 0;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}

/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_menlo_cmd function. It is called by the ring event handler
 * function without any lock held. It can be called from both worker
 * thread context and interrupt context, and also from another thread
 * which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb into the
 * response iocb memory object provided by lpfc_menlo_cmd and then
 * completes the BSG job back to user space.
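 *
 * Locking, as used below: phba->ct_ev_lock guards the job and dd_data
 * teardown against the timeout handler, while phba->hbalock guards the
 * LPFC_IO_WAKE flag and the copy of the response iocb. Local-reject
 * reasons are mapped to errnos as follows:
 *
 *   IOERR_SEQUENCE_TIMEOUT -> -ETIMEDOUT
 *   IOERR_INVALID_RPI      -> -EFAULT
 *   anything else          -> -EACCES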
4746 **/ 4747 static void 4748 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, 4749 struct lpfc_iocbq *cmdiocbq, 4750 struct lpfc_iocbq *rspiocbq) 4751 { 4752 struct bsg_job_data *dd_data; 4753 struct fc_bsg_job *job; 4754 IOCB_t *rsp; 4755 struct lpfc_dmabuf *bmp; 4756 struct lpfc_bsg_menlo *menlo; 4757 unsigned long flags; 4758 struct menlo_response *menlo_resp; 4759 int rc = 0; 4760 4761 spin_lock_irqsave(&phba->ct_ev_lock, flags); 4762 dd_data = cmdiocbq->context1; 4763 if (!dd_data) { 4764 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4765 return; 4766 } 4767 4768 menlo = &dd_data->context_un.menlo; 4769 job = menlo->set_job; 4770 job->dd_data = NULL; /* so timeout handler does not reply */ 4771 4772 spin_lock(&phba->hbalock); 4773 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 4774 if (cmdiocbq->context2 && rspiocbq) 4775 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 4776 &rspiocbq->iocb, sizeof(IOCB_t)); 4777 spin_unlock(&phba->hbalock); 4778 4779 bmp = menlo->bmp; 4780 rspiocbq = menlo->rspiocbq; 4781 rsp = &rspiocbq->iocb; 4782 4783 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 4784 job->request_payload.sg_cnt, DMA_TO_DEVICE); 4785 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 4786 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 4787 4788 /* always return the xri, this would be used in the case 4789 * of a menlo download to allow the data to be sent as a continuation 4790 * of the exchange. 4791 */ 4792 menlo_resp = (struct menlo_response *) 4793 job->reply->reply_data.vendor_reply.vendor_rsp; 4794 menlo_resp->xri = rsp->ulpContext; 4795 if (rsp->ulpStatus) { 4796 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 4797 switch (rsp->un.ulpWord[4] & 0xff) { 4798 case IOERR_SEQUENCE_TIMEOUT: 4799 rc = -ETIMEDOUT; 4800 break; 4801 case IOERR_INVALID_RPI: 4802 rc = -EFAULT; 4803 break; 4804 default: 4805 rc = -EACCES; 4806 break; 4807 } 4808 } else 4809 rc = -EACCES; 4810 } else 4811 job->reply->reply_payload_rcv_len = 4812 rsp->un.genreq64.bdl.bdeSize; 4813 4814 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 4815 lpfc_sli_release_iocbq(phba, rspiocbq); 4816 lpfc_sli_release_iocbq(phba, cmdiocbq); 4817 kfree(bmp); 4818 kfree(dd_data); 4819 /* make error code available to userspace */ 4820 job->reply->result = rc; 4821 /* complete the job back to userspace */ 4822 job->job_done(job); 4823 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 4824 return; 4825 } 4826 4827 /** 4828 * lpfc_menlo_cmd - send an ioctl for menlo hardware 4829 * @job: fc_bsg_job to handle 4830 * 4831 * This function issues a gen request 64 CR ioctl for all menlo cmd requests, 4832 * all the command completions will return the xri for the command. 4833 * For menlo data requests a gen request 64 CX is used to continue the exchange 4834 * supplied in the menlo request header xri field. 
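 *
 * Both payloads are described by a single buffer pointer list (BPL):
 * the request sg entries are added as BDE_64 descriptors, followed by
 * the reply sg entries as BDE_64I descriptors, and the BPL itself is
 * referenced from the GEN_REQUEST64 BDL. For LPFC_BSG_VENDOR_MENLO_CMD
 * the iocb is a CR addressed to the well-known MENLO_DID; for
 * LPFC_BSG_VENDOR_MENLO_DATA it is a CX that continues the exchange
 * identified by the caller-supplied xri.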
4835 **/ 4836 static int 4837 lpfc_menlo_cmd(struct fc_bsg_job *job) 4838 { 4839 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; 4840 struct lpfc_hba *phba = vport->phba; 4841 struct lpfc_iocbq *cmdiocbq, *rspiocbq; 4842 IOCB_t *cmd, *rsp; 4843 int rc = 0; 4844 struct menlo_command *menlo_cmd; 4845 struct menlo_response *menlo_resp; 4846 struct lpfc_dmabuf *bmp = NULL; 4847 int request_nseg; 4848 int reply_nseg; 4849 struct scatterlist *sgel = NULL; 4850 int numbde; 4851 dma_addr_t busaddr; 4852 struct bsg_job_data *dd_data; 4853 struct ulp_bde64 *bpl = NULL; 4854 4855 /* in case no data is returned return just the return code */ 4856 job->reply->reply_payload_rcv_len = 0; 4857 4858 if (job->request_len < 4859 sizeof(struct fc_bsg_request) + 4860 sizeof(struct menlo_command)) { 4861 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4862 "2784 Received MENLO_CMD request below " 4863 "minimum size\n"); 4864 rc = -ERANGE; 4865 goto no_dd_data; 4866 } 4867 4868 if (job->reply_len < 4869 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { 4870 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4871 "2785 Received MENLO_CMD reply below " 4872 "minimum size\n"); 4873 rc = -ERANGE; 4874 goto no_dd_data; 4875 } 4876 4877 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { 4878 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4879 "2786 Adapter does not support menlo " 4880 "commands\n"); 4881 rc = -EPERM; 4882 goto no_dd_data; 4883 } 4884 4885 menlo_cmd = (struct menlo_command *) 4886 job->request->rqst_data.h_vendor.vendor_cmd; 4887 4888 menlo_resp = (struct menlo_response *) 4889 job->reply->reply_data.vendor_reply.vendor_rsp; 4890 4891 /* allocate our bsg tracking structure */ 4892 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); 4893 if (!dd_data) { 4894 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, 4895 "2787 Failed allocation of dd_data\n"); 4896 rc = -ENOMEM; 4897 goto no_dd_data; 4898 } 4899 4900 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4901 if (!bmp) { 4902 rc = -ENOMEM; 4903 goto free_dd; 4904 } 4905 4906 cmdiocbq = lpfc_sli_get_iocbq(phba); 4907 if (!cmdiocbq) { 4908 rc = -ENOMEM; 4909 goto free_bmp; 4910 } 4911 4912 rspiocbq = lpfc_sli_get_iocbq(phba); 4913 if (!rspiocbq) { 4914 rc = -ENOMEM; 4915 goto free_cmdiocbq; 4916 } 4917 4918 rsp = &rspiocbq->iocb; 4919 4920 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); 4921 if (!bmp->virt) { 4922 rc = -ENOMEM; 4923 goto free_rspiocbq; 4924 } 4925 4926 INIT_LIST_HEAD(&bmp->list); 4927 bpl = (struct ulp_bde64 *) bmp->virt; 4928 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, 4929 job->request_payload.sg_cnt, DMA_TO_DEVICE); 4930 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { 4931 busaddr = sg_dma_address(sgel); 4932 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 4933 bpl->tus.f.bdeSize = sg_dma_len(sgel); 4934 bpl->tus.w = cpu_to_le32(bpl->tus.w); 4935 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 4936 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 4937 bpl++; 4938 } 4939 4940 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, 4941 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 4942 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { 4943 busaddr = sg_dma_address(sgel); 4944 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 4945 bpl->tus.f.bdeSize = sg_dma_len(sgel); 4946 bpl->tus.w = cpu_to_le32(bpl->tus.w); 4947 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); 4948 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); 4949 
bpl++; 4950 } 4951 4952 cmd = &cmdiocbq->iocb; 4953 cmd->un.genreq64.bdl.ulpIoTag32 = 0; 4954 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); 4955 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); 4956 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 4957 cmd->un.genreq64.bdl.bdeSize = 4958 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 4959 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); 4960 cmd->un.genreq64.w5.hcsw.Dfctl = 0; 4961 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; 4962 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ 4963 cmd->ulpBdeCount = 1; 4964 cmd->ulpClass = CLASS3; 4965 cmd->ulpOwner = OWN_CHIP; 4966 cmd->ulpLe = 1; /* Limited Edition */ 4967 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 4968 cmdiocbq->vport = phba->pport; 4969 /* We want the firmware to timeout before we do */ 4970 cmd->ulpTimeout = MENLO_TIMEOUT - 5; 4971 cmdiocbq->context3 = bmp; 4972 cmdiocbq->context2 = rspiocbq; 4973 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; 4974 cmdiocbq->context1 = dd_data; 4975 cmdiocbq->context2 = rspiocbq; 4976 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { 4977 cmd->ulpCommand = CMD_GEN_REQUEST64_CR; 4978 cmd->ulpPU = MENLO_PU; /* 3 */ 4979 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ 4980 cmd->ulpContext = MENLO_CONTEXT; /* 0 */ 4981 } else { 4982 cmd->ulpCommand = CMD_GEN_REQUEST64_CX; 4983 cmd->ulpPU = 1; 4984 cmd->un.ulpWord[4] = 0; 4985 cmd->ulpContext = menlo_cmd->xri; 4986 } 4987 4988 dd_data->type = TYPE_MENLO; 4989 dd_data->context_un.menlo.cmdiocbq = cmdiocbq; 4990 dd_data->context_un.menlo.rspiocbq = rspiocbq; 4991 dd_data->context_un.menlo.set_job = job; 4992 dd_data->context_un.menlo.bmp = bmp; 4993 4994 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 4995 MENLO_TIMEOUT - 5); 4996 if (rc == IOCB_SUCCESS) 4997 return 0; /* done for now */ 4998 4999 /* iocb failed so cleanup */ 5000 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, 5001 job->request_payload.sg_cnt, DMA_TO_DEVICE); 5002 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, 5003 job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 5004 5005 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 5006 5007 free_rspiocbq: 5008 lpfc_sli_release_iocbq(phba, rspiocbq); 5009 free_cmdiocbq: 5010 lpfc_sli_release_iocbq(phba, cmdiocbq); 5011 free_bmp: 5012 kfree(bmp); 5013 free_dd: 5014 kfree(dd_data); 5015 no_dd_data: 5016 /* make error code available to userspace */ 5017 job->reply->result = rc; 5018 job->dd_data = NULL; 5019 return rc; 5020 } 5021 5022 /** 5023 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job 5024 * @job: fc_bsg_job to handle 5025 **/ 5026 static int 5027 lpfc_bsg_hst_vendor(struct fc_bsg_job *job) 5028 { 5029 int command = job->request->rqst_data.h_vendor.vendor_cmd[0]; 5030 int rc; 5031 5032 switch (command) { 5033 case LPFC_BSG_VENDOR_SET_CT_EVENT: 5034 rc = lpfc_bsg_hba_set_event(job); 5035 break; 5036 case LPFC_BSG_VENDOR_GET_CT_EVENT: 5037 rc = lpfc_bsg_hba_get_event(job); 5038 break; 5039 case LPFC_BSG_VENDOR_SEND_MGMT_RESP: 5040 rc = lpfc_bsg_send_mgmt_rsp(job); 5041 break; 5042 case LPFC_BSG_VENDOR_DIAG_MODE: 5043 rc = lpfc_bsg_diag_loopback_mode(job); 5044 break; 5045 case LPFC_BSG_VENDOR_DIAG_MODE_END: 5046 rc = lpfc_sli4_bsg_diag_mode_end(job); 5047 break; 5048 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK: 5049 rc = lpfc_bsg_diag_loopback_run(job); 5050 break; 5051 case LPFC_BSG_VENDOR_LINK_DIAG_TEST: 5052 rc = lpfc_sli4_bsg_link_diag_test(job); 5053 break; 5054 case LPFC_BSG_VENDOR_GET_MGMT_REV: 5055 rc = 
	     lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
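		 *
		 * The mailbox case below uses the same convention: with no
		 * outstanding iocb to abort, the job is completed here with
		 * -EAGAIN instead of going through
		 * lpfc_sli_issue_abort_iotag() as the iocb and menlo cases
		 * do.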
5156 */ 5157 job->reply->result = -EAGAIN; 5158 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5159 job->job_done(job); 5160 break; 5161 case TYPE_MBOX: 5162 mbox = &dd_data->context_un.mbox; 5163 /* this mbox has no job anymore */ 5164 mbox->set_job = NULL; 5165 job->dd_data = NULL; 5166 job->reply->reply_payload_rcv_len = 0; 5167 job->reply->result = -EAGAIN; 5168 /* the mbox completion handler can now be run */ 5169 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5170 job->job_done(job); 5171 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) 5172 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; 5173 break; 5174 case TYPE_MENLO: 5175 menlo = &dd_data->context_un.menlo; 5176 cmdiocb = menlo->cmdiocbq; 5177 /* hint to completion handler that the job timed out */ 5178 job->reply->result = -EAGAIN; 5179 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5180 /* this will call our completion handler */ 5181 spin_lock_irq(&phba->hbalock); 5182 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); 5183 spin_unlock_irq(&phba->hbalock); 5184 break; 5185 default: 5186 spin_unlock_irqrestore(&phba->ct_ev_lock, flags); 5187 break; 5188 } 5189 5190 /* scsi transport fc fc_bsg_job_timeout expects a zero return code, 5191 * otherwise an error message will be displayed on the console 5192 * so always return success (zero) 5193 */ 5194 return 0; 5195 } 5196
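
/*
 * Overview of the BSG entry points above (a summary, not new code): the
 * FC transport calls lpfc_bsg_request() for each fc_bsg_job, which
 * dispatches ELS pass-through, CT pass-through, and the LPFC vendor
 * commands handled by lpfc_bsg_hst_vendor(). lpfc_bsg_timeout() always
 * returns zero, as the scsi transport expects, after either aborting the
 * outstanding iocb or detaching the job from its tracking state so a
 * late completion does not touch a freed job.
 */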